query_id: string (lengths 32-32)
query: string (lengths 9-4.01k)
positive_passages: list (lengths 1-1)
negative_passages: list (lengths 88-101)
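Each row therefore holds one retrieval example: a 32-character query id, a free-text query, exactly one positive passage, and 88 to 101 negative passages, where every passage is a dict with "docid", "score", "text", and "title" keys, as seen in the rows below. A minimal sketch of reading such a row with the Hugging Face datasets library follows; the dataset path "org/code-retrieval-corpus" and the split name are placeholders, since the real identifiers are not given in this dump.

```python
from datasets import load_dataset

# Hypothetical dataset path and split; the actual identifiers are not part of this dump.
ds = load_dataset("org/code-retrieval-corpus", split="train")

row = ds[0]
print(row["query_id"])                # 32-character hex string
print(row["query"])                   # free-text or signature-style query
print(len(row["positive_passages"]))  # always 1, per the schema above
print(len(row["negative_passages"]))  # between 88 and 101, per the schema above

# Each passage is a dict with "docid", "score", "text", and "title" keys.
pos = row["positive_passages"][0]
print(pos["docid"], pos["score"])
print(pos["text"][:80])
```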
00586226ffc77d02ebb50ba9e226c65f
Wrap the target function and start a thread to run it
[ { "docid": "a71cffd8c668547f2fa0fa27a88b87e5", "score": "0.62145805", "text": "def thread_start(target, args):\n run_thread = threading.Thread(target=target,\n args=args)\n run_thread.setDaemon(True)\n run_thread.start()\n return run_thread", "title": "" } ]
[ { "docid": "6e924478f945629b09fa4ba2dcac36b5", "score": "0.75560284", "text": "def run_thread(targetfunction, argsforfunction=[]):\n threads[targetfunction.__name__] = Thread(\n target=targetfunction, args=argsforfunction\n )\n threads[targetfunction.__name__].start()", "title": "" }, { "docid": "66ff4206042cf60c2c0525785c3b1989", "score": "0.74153066", "text": "def threaded(fn):\n\n def wrapper(*args, **kwargs):\n thread = threading.Thread(target=fn, args=args, kwargs=kwargs)\n thread.start()\n return thread\n\n return wrapper", "title": "" }, { "docid": "0974f3c6d0497279ba941826b26e6c7c", "score": "0.7374909", "text": "def run_as_thread(target, *args, **kwargs):\n return Thread(target, *args, **kwargs)", "title": "" }, { "docid": "8f17819154cde5f2a22fbe678b320005", "score": "0.7177706", "text": "def callInThread(callable, *args, **kwargs):", "title": "" }, { "docid": "23ab6db024890b2c091e7686c616b5d7", "score": "0.71678865", "text": "def callFromThread(callable, *args, **kw):", "title": "" }, { "docid": "934b48575b0b9ed12c3a08d18641bdc8", "score": "0.7157975", "text": "def decorator(*args, **kwargs):\n\n t = Thread(target=function, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()", "title": "" }, { "docid": "5261c94c39a77508c0d3f81be6cd0a64", "score": "0.71542627", "text": "def run_in_thread(self, fn, *args, **kwargs):\r\n thread = threading.Thread(target=fn, args=args, kwargs=kwargs)\r\n thread.start()\r\n \r\n return thread", "title": "" }, { "docid": "890c63da3d723d5ccd893273cd6ee786", "score": "0.6901263", "text": "def run_wrapper(self,fn):\r\n try:\r\n self.start()\r\n return fn()\r\n finally:\r\n self.stop()", "title": "" }, { "docid": "1036e631c5544d15ad67e3380cdeca9e", "score": "0.68981683", "text": "def run_wrapper(self,fn):\r\n \r\n try:\r\n self.start()\r\n return fn()\r\n finally:\r\n self.stop()", "title": "" }, { "docid": "87595f41dfd498a1181ec45e9ba57d2c", "score": "0.6834568", "text": "def run(self):\r\n try:\r\n if self.__target:\r\n self.__target(*self.__args, **self.__kwargs)\r\n finally:\r\n # Avoid a refcycle if the thread is running a function with\r\n # an argument that has a member that points to the thread.\r\n del self.__target, self.__args, self.__kwargs", "title": "" }, { "docid": "78cb4f3c1f36d60be265e69a377c35f2", "score": "0.6798825", "text": "def runProcessorThread(self, target):\n self.processorThread = threading.Thread(target=target)\n self.processorThread.start()", "title": "" }, { "docid": "2e80fca1e2322d445fe0296a0eb6b995", "score": "0.67890567", "text": "def threaded(function):\n @functools.wraps(function)\n def _threaded(*args, **kwargs):\n t = threading.Thread(target=function, args=args, kwargs=kwargs, name=time.time())\n t.daemon = True\n t.start()\n return t\n return _threaded", "title": "" }, { "docid": "69049c3c328238a6b08ccd4effbd7398", "score": "0.6760068", "text": "def thread(self) -> Callable[[F], F]:\n return self.lsp.thread()", "title": "" }, { "docid": "9f6904aec32b6de14bd1f22dbfbb568d", "score": "0.6707967", "text": "def wrap(*args, **kwargs):\n q = Queue.Queue()\n\n t = threading.Thread(target=wrapped_f, args=(q,)+args, kwargs=kwargs)\n t.start()\n t.result_queue = q\n return t", "title": "" }, { "docid": "c4c4e04d0451968e8590419b0534c11e", "score": "0.6653349", "text": "def _thread_wrapper(self, function, *args, **kwargs):\n try:\n function(*args, **kwargs)\n except:\n self.exception(\"Threaded execution failed\")", "title": "" }, { "docid": "f1b2d771bd3b389919f121c3016f367e", "score": "0.6650708", "text": "def 
_spawn_task_from_thread(\n self,\n func: Callable,\n args: tuple[Any, ...],\n kwargs: dict[str, Any],\n name: object,\n future: Future,\n ) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "d6073deea3f347a05f9949252da33052", "score": "0.6516551", "text": "def run_in_background_thread(f):\n @functools.wraps(f)\n def wrapper(*args):\n util.run_in_background(lambda: f(*args))\n\n return wrapper", "title": "" }, { "docid": "a87a2e30002efc09492e8e5381e7bf63", "score": "0.6480534", "text": "def run(self):\n print(f'{self.getName()} has started')\n try:\n if self._target:\n self._target(*self._args, **self._kwargs)\n finally:\n # Avoid a refcycle if the thread is running a function with\n # an argument that has a member that points to the thread.\n del self._target, self._args, self._kwargs\n\n print(f'{self.getName()} has finished')", "title": "" }, { "docid": "c9df44f633eef595fa014c01e48c502b", "score": "0.6425315", "text": "def spawn(self, func, *args, **kwargs):\n t = Thread(target=func, args=args, kwargs=kwargs)\n with self._lock:\n self._threads.append(t)\n t.daemon=True\n t.start()\n return t", "title": "" }, { "docid": "79126a9b46fc081312806e7f2252aaaa", "score": "0.64056367", "text": "def spawn(func, *args, **kwargs):\n thread = Thread(target=func, args=args, kwargs=kwargs)\n thread.daemon = True\n thread.start()\n return thread", "title": "" }, { "docid": "8f931fd87081acc56b86de89157acd16", "score": "0.6354354", "text": "def create_thread(func, args):\n thread = threading.Thread(target=func, args=args)\n thread.start()\n return thread", "title": "" }, { "docid": "9096dd5c41fa854be41fa00f3d7941db", "score": "0.6311642", "text": "def spawn_thread(func, *args, **kwds):\n thr = threading.Thread(target=func, args=args, kwargs=kwds)\n thr.setDaemon(True)\n thr.start()\n return thr", "title": "" }, { "docid": "c61a9df265ba868aa84c05e4e745f5c2", "score": "0.6270677", "text": "def run_thread(self, kwargs):\n for function in kwargs:\n self.thread = threading.Thread(target=function)\n self.thread_list.append(self.thread)\n self.thread.start()", "title": "" }, { "docid": "1f36544b0d9bcc976e27e78ec35228f9", "score": "0.6244159", "text": "def callWhenRunning(callable, *args, **kw):", "title": "" }, { "docid": "257f117698d1f973257b1a906caf5189", "score": "0.62382567", "text": "def run_in_thread(func, *args, **kwargs):\n from threading import Thread\n thread = Thread(target=func, args=args, kwargs=kwargs)\n thread.daemon = True\n thread.start()\n return thread", "title": "" }, { "docid": "62133e2b66473783dc96f7442e25151a", "score": "0.6232211", "text": "def run(self, offset = 0):\n t = threading.Thread(target=self.function, args = self.pargs, kwargs = self.kwargs)\n time.sleep(offset)\n t.start()", "title": "" }, { "docid": "fb8f147eee614e02fb24e9b055ae132f", "score": "0.6211389", "text": "def thread(name=None, daemon=False):\n def wrapper(function):\n def decorator_thread(*args):\n thread = threading.Thread(target=function, args=args, daemon=daemon)\n thread.setName(name) if name != None else\\\n thread.setName(function.__name__ + \"_thread\")\n thread.start()\n return thread\n return decorator_thread\n return wrapper", "title": "" }, { "docid": "a44ecd407925fdb17e91578c9a90cfdb", "score": "0.6188712", "text": "def run_in_thread(func: Callable[..., Any]) -> Callable[..., Any]:\n @wraps(func)\n async def wrapper(*args: Any, **kwargs: Any) -> Any:\n loop = asyncio.get_running_loop()\n return await loop.run_in_executor(_EXECUTOR, partial(func, *args, **kwargs))\n return wrapper", "title": 
"" }, { "docid": "2b76fd66eb0b804cb33df22898c25998", "score": "0.6184781", "text": "def _future_target_wrapper(stack, func, *args, **kwargs):\n\n # Keep this for looking up via get_local_in_call_stack .\n pre_start_stack = stack\n\n return func(*args, **kwargs)", "title": "" }, { "docid": "1b0675a119fb50a47a8a9f7f2ab2496d", "score": "0.6164509", "text": "def mutlithread_this(self, fn, *args, **kwargs):\n\t\t# Pass the function to execute\n\t\tworker = Worker(fn, *args, **kwargs) # Any other args, kwargs are passed to the run function\n\n # Execute\n\t\tself.threadpool.start(worker)", "title": "" }, { "docid": "fbc94b10654e4431b11ce218e318e272", "score": "0.61642635", "text": "def run_wrapper(self, fn, alternate_buffer=True):\r\n try:\r\n self.start(alternate_buffer)\r\n return fn()\r\n finally:\r\n self.stop()", "title": "" }, { "docid": "c9ded4c805939f5ab1420e835ee437c0", "score": "0.61521393", "text": "def wrapper(*args, **kwargs):\n callable_delegate_attr = partial(delegate_attr, *args, **kwargs)\n return self._loop.run_in_executor(self._executor, callable_delegate_attr)", "title": "" }, { "docid": "12417b8928e3cd07f25b11e72006e4a9", "score": "0.611669", "text": "def run(self):\n self._return = self._target(*self._args, **self._kwargs)", "title": "" }, { "docid": "2b1e7653702fc25ea9dedbdd3b0975a8", "score": "0.6108133", "text": "def _make_thread(self):\n pass", "title": "" }, { "docid": "1219a7c78997c977a4af05180bdd8c6a", "score": "0.60992956", "text": "def __call__(self, *args, **kwargs):\n self._future = self._executor.submit(self._function, *args, **kwargs)", "title": "" }, { "docid": "48e07fd67b107e801e255015e2c66829", "score": "0.60881513", "text": "def thread_spawn(*args, **kwargs):\n t = None\n if len(args) == 1 and not kwargs:\n t = Thread(target=args[0], args=())\n else:\n t = Thread(target=args[0], args=args[1:], kwargs=kwargs)\n t.start()", "title": "" }, { "docid": "72bad74e1bb9d9a3a5677875ab14eb22", "score": "0.6061111", "text": "async def defer_to_thread(\n self,\n f: Callable[P, T],\n *args: P.args,\n **kwargs: P.kwargs,\n ) -> T:\n return await defer_to_thread(self._hs.get_reactor(), f, *args, **kwargs)", "title": "" }, { "docid": "94e88296ef0e85d6a1bf78d7cd238d25", "score": "0.6060893", "text": "def run(self):\n if self._target is not None:\n self._return = self._target(*self._args, **self._kwargs)", "title": "" }, { "docid": "338dc5fe7867492407ebd70db14fcaff", "score": "0.6050126", "text": "def start_thread(self, args, logger):\n self.run(args, logger)", "title": "" }, { "docid": "7ab2395c9d7c50bab8e805f897f9aa72", "score": "0.60394675", "text": "def start_thread(method_name, arguments):\n thread = Thread(target=method_name, args=arguments)\n thread.daemon = True\n thread.start()", "title": "" }, { "docid": "2a7470380ca22ec93c67e7fc371d34cb", "score": "0.60258955", "text": "def mainthread(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n assert threading.current_thread() is threading.main_thread()\n return func(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "bc8f8ec24ea8cb6289cd49f17d4d5c7a", "score": "0.60100067", "text": "def run(self):\n target = getattr(self, '_target')\n if target is not None:\n self._return = target(*getattr(self, '_args'), **getattr(self,\n '_kwargs'))", "title": "" }, { "docid": "c458c861fe015ff4df12ba3599035223", "score": "0.5992365", "text": "def thread_handler(self, loop, *args, **kwargs):\n # Set the threadlocal for AsyncToSync\n self.threadlocal.main_event_loop = loop\n # Run the function\n return 
self.func(*args, **kwargs)", "title": "" }, { "docid": "c4c0fdb76d68b59f17901cd938c51a7a", "score": "0.5991445", "text": "def start_with_callback(func, kwargs=None):\n\tif kwargs is None:\n\t\tkwargs = {}\n\t\n\tdef _callback(func, **kw):\n\t\tfor i in range(5):\n\t\t\ttime.sleep(1)\n\t\t\tfunc(**kw)\n\t\n\tt = threading.Thread(target=_callback, args=(func,), kwargs=kwargs)\n\tt.start()\n\t\n\treturn t", "title": "" }, { "docid": "268204deabaa5312cd8d513f5e33057e", "score": "0.59714603", "text": "def __call__(self, function, *args, **kwargs):\n self.request.function = self._import_function(function)\n args = [function] + list(args)\n return self.run(*args, **kwargs)", "title": "" }, { "docid": "b22507e43cf15b68415c9e5f50620304", "score": "0.59137815", "text": "def run_threaded(job_func, ip):\n\n job_thread = threading.Thread(\n target=job_func,\n args=(ip,)\n )\n\n job_thread.start()", "title": "" }, { "docid": "649142ab595e228403f8dbff74129bd9", "score": "0.59120226", "text": "def runwrap(self, action=\"run\", *args, **kwargs):\n if action == \"run\":\n return self.run(*args, **kwargs)\n\n if action == \"delay\":\n return self.delay(*args, **kwargs)", "title": "" }, { "docid": "d92b28e7e00ae99eefd80bb199db9c4d", "score": "0.58911633", "text": "def _run_in_thread_loop(list_to_run, target, args_method):\n threads = []\n for item in list_to_run:\n if item:\n th = Thread(target=target, args=args_method(item, list_to_run))\n th.start()\n threads += [th]\n\n for t in threads:\n t.join()\n Logger().info(\"Still waiting to finish...\")", "title": "" }, { "docid": "f81b06e5ecb01766f59c8fb16a1da335", "score": "0.588973", "text": "def run(self):\n self._semaphores.acquire()\n self._return = self._target(*self._args, **self._kwargs)\n return self", "title": "" }, { "docid": "1416dabe03727f5734aa072ec7aa7960", "score": "0.58683145", "text": "def spawnf(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n return gevent.spawn(f, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "905900de93fc9c621359d94cd50b057c", "score": "0.5851752", "text": "def localCall(self, method, *args, **kwargs):\n func = getattr(self.server, method)\n if str(method) in self.oneway:\n threading.Thread(target=func, args=args, kwargs=kwargs).start()\n else:\n return func(*args, **kwargs)", "title": "" }, { "docid": "0f44c5f93b96523d3fd03ac59bdde2cc", "score": "0.5839301", "text": "def run(self):\n self.fn(*self.args, **self.kwargs)", "title": "" }, { "docid": "fa553003492116dc1d4add24b4c10713", "score": "0.5833388", "text": "def threaded_main(target, daemon=True):\n thread = threading.Thread(target=target,\n args=([sys.argv[0], '-c'],),\n daemon=daemon)\n thread.start()", "title": "" }, { "docid": "48d203143dc3ca56a7d1650441f946f2", "score": "0.5813439", "text": "def run(self, action : Actions, event: Event):\n self.stopEvent = event\n thread = Thread(target=self.thread_main, args=(self.methods[action],))\n thread.start()", "title": "" }, { "docid": "d232a535677e1936421448e84a30ef83", "score": "0.5812206", "text": "def rest_async(func):\n @wraps(func)\n def inner(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()\n return inner", "title": "" }, { "docid": "4cf82bea0b68e66070934587037577f5", "score": "0.5780823", "text": "def run_fn(self):\n pass", "title": "" }, { "docid": "8831f76f07c09eb9f27b320355b026c9", "score": "0.57808125", "text": "def run():\n return _actual_start()", "title": "" }, { "docid": "1ed078c2eb47187177685276c41c15f5", "score": "0.57686394", 
"text": "def fakeTargetFunction():\r\n pass", "title": "" }, { "docid": "58b79a57392ef776a0d1f8f27768a867", "score": "0.575941", "text": "def async_task(f):\n @wraps(f)\n def _decorated(*args, **kwargs):\n thr = Thread(target=f, args=args, kwargs=kwargs)\n thr.start()\n return _decorated", "title": "" }, { "docid": "b53b0f0e7a335631d787151e527f0dc5", "score": "0.57575685", "text": "def raise_by_thread(window):\n thread = threading.Thread(target=lift, daemon=True)\n thread.start()", "title": "" }, { "docid": "492b28f4b6b85c0581d25521c5f86c34", "score": "0.5750738", "text": "def start(self):\n self._lock.acquire()\n try:\n if not self._thread:\n self._thread = Thread(target=self._target)\n self._thread.setDaemon(True)\n self._thread.start()\n finally:\n self._lock.release()", "title": "" }, { "docid": "d4e93796dc5d4eff7f717b569c18a3db", "score": "0.5738223", "text": "def wrapFn(fn, *args, **kwargs):\n return FunctionWrappingJob(fn, *args, **kwargs)", "title": "" }, { "docid": "5b98bbd7eb6da0aad005a8cd41b05441", "score": "0.5737577", "text": "def test_runner_inside_thread():\n\n class GetResult:\n def __init__(self) -> None:\n self.r = None\n\n def run(self):\n self.r = run_runner()\n\n result = GetResult()\n thread = Thread(target=result.run)\n thread.start()\n thread.join()\n\n assert result.r == 0", "title": "" }, { "docid": "5fec9a0f41bc117d6989d37131e8653d", "score": "0.57365775", "text": "async def asyncify_thread(self, func, *args, **kwargs):\n with ThreadPoolExecutor() as pool:\n return await self.bot.loop.run_in_executor(pool, partial(func, *args, **kwargs))", "title": "" }, { "docid": "46b24b2fcb5fa1cc7a1f028477a02c74", "score": "0.5733965", "text": "def start(self, init=threading.Thread):\n thread = init(target=self.run)\n thread.start()\n return thread", "title": "" }, { "docid": "ad3b4bcad770f5c92e929394b003ce34", "score": "0.5728239", "text": "def main():\n thread = MyThread(\"Ken\")\n thread.start()", "title": "" }, { "docid": "6f0457d6909cb1803a7690e741e11389", "score": "0.5725839", "text": "def threaded_task(function, *args, **kwargs):\n def _task(task_return):\n def _thread():\n result = function(*args, **kwargs)\n gobject.idle_add(task_return, result)\n thread = threading.Thread(target=_thread, args=())\n thread.setDaemon(True)\n thread.start()\n return _task", "title": "" }, { "docid": "60c6e938f62417935256d6cb602b90cc", "score": "0.5719978", "text": "def wrapper(self: Any, *args: Any, **kwargs: Any) -> None:\n self.msg_box = MyMsgBox.create_msg_box()\n self.my_thread = MyThread(self, func, *args, **kwargs)\n self.my_thread.update_message.connect(self.msg_box.label.setText) # type: ignore\n self.my_thread.update_title.connect(self.msg_box.setWindowTitle) # type: ignore\n self.my_thread.finished.connect(self.handle_thread_finished) # type: ignore\n self.msg_box.pushButton.clicked.connect(self.cancel_thread)\n self.my_thread.start()\n self.msg_box.exec_()", "title": "" }, { "docid": "db48df96aa5c71991f7d3b9548bfa10d", "score": "0.5706068", "text": "def thread_wrapped_func(func):\r\n @wraps(func)\r\n def decorated_function(*args, **kwargs):\r\n queue = Queue()\r\n def _queue_result():\r\n exception, trace, res = None, None, None\r\n try:\r\n res = func(*args, **kwargs)\r\n except Exception as e:\r\n exception = e\r\n trace = traceback.format_exc()\r\n queue.put((res, exception, trace))\r\n\r\n start_new_thread(_queue_result, ())\r\n result, exception, trace = queue.get()\r\n if exception is None:\r\n return result\r\n else:\r\n assert isinstance(exception, Exception)\r\n 
raise exception.__class__(trace)\r\n return decorated_function", "title": "" }, { "docid": "468d13e14885d8d6806e25ca2aa21534", "score": "0.57015234", "text": "def start_thread(thread_func, name=None):\n thread = threading.Thread(None, thread_func)\n thread.daemon = True\n thread.start()\n if name:\n thread.name = name\n return thread", "title": "" }, { "docid": "061d7cc2556a91da14202c7646c936c9", "score": "0.5699931", "text": "def test_decorator_works_in_a_new_thread():\n transport = MockTransportHandler()\n thread = threading.Thread(target=run_inside_another_thread, args=(transport,))\n thread.start()\n thread.join()\n\n output = transport.get_payloads()\n assert len(output) == 1\n\n spans = json.loads(output[0])\n assert len(spans) == 2\n assert spans[0][\"name\"] == \"service1_do_stuff\"\n assert spans[1][\"name\"] == \"index\"", "title": "" }, { "docid": "5ab2b2d88e6b71fe9498d0a0a0e03157", "score": "0.56975317", "text": "def wrapper(function_, queue__):\n # print(\"wrapper({name})\".format(name=function.__name__))\n thread_local.thread_io = StringIO()\n\n try:\n exitcode_ = function_()\n except Exception as ex:\n error(str(ex))\n exitcode_ = 1\n\n queue__.put((exitcode_, thread_local.thread_io.getvalue()))", "title": "" }, { "docid": "9a9e74bfd40462b3ea1e539fcbf84071", "score": "0.56805515", "text": "def spawn(func, *args, **kwargs):\r\n fiber = Fiber(func, args, kwargs)\r\n fiber.start()\r\n return fiber", "title": "" }, { "docid": "20f31d87137448ddf4e3c6423844913d", "score": "0.5662809", "text": "def run(self, *args, **kwargs):\n return self.func(*args, **kwargs)", "title": "" }, { "docid": "2cc73019e6a81667e0760490514f7f31", "score": "0.56531113", "text": "def run(self) -> Callable:\n pass # pragma: no cover", "title": "" }, { "docid": "dae69a5ed52f7cd55ca86cfb5385b2f0", "score": "0.56529945", "text": "def __call__(self):\n self.run()", "title": "" }, { "docid": "9062091da8cb19507f0a3c7358015c34", "score": "0.5640493", "text": "def run(self):\n response = self._target(*self._args, **self._kwargs)\n self.queue.put(response)", "title": "" }, { "docid": "cb0d507b46039556547317b253020d93", "score": "0.5633745", "text": "def submit(self, func, *args):\n return SerialFuture(func(*args))", "title": "" }, { "docid": "cb0d507b46039556547317b253020d93", "score": "0.5633745", "text": "def submit(self, func, *args):\n return SerialFuture(func(*args))", "title": "" }, { "docid": "1e934e1902437d7cc265ec5d45e42bd4", "score": "0.5630161", "text": "def start_background_task(self, target, *args, **kwargs):\n return self.eio.start_background_task(target, *args, **kwargs)", "title": "" }, { "docid": "f1a7c41cf07998e8d7308a26294bd9d2", "score": "0.5612633", "text": "def run(self):\n\t\treturn self.function()", "title": "" }, { "docid": "51cb66ea71365f7085c175e4a5b8d858", "score": "0.5581159", "text": "def run(self, func, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "48c0abe08bea25029d3ae0696fa866dd", "score": "0.5581", "text": "def launch(self, target, start_fnc=None, who=None):\n target.sim = self\n launch = Launch(target, start_fnc)\n launch.bind(who if who is not None else self)\n self.__stack.push(launch, \"L\")\n target.start(start_fnc)\n self.__stack.pop()\n return target", "title": "" }, { "docid": "8fa2b4dda263a470e2ffc77c8a85fa81", "score": "0.5578043", "text": "def _do_request(self, callable):\n with self._lock:\n return callable()", "title": "" }, { "docid": "0009cf3576641f6ef2cace1f6bde3d54", "score": "0.55602056", "text": "def run():\n pass", "title": "" }, { 
"docid": "0009cf3576641f6ef2cace1f6bde3d54", "score": "0.55602056", "text": "def run():\n pass", "title": "" }, { "docid": "37c5537b4bed46f1e2c284d0bdfcd8f1", "score": "0.5559904", "text": "def nf(arg: Threadargs, **kwargs):\n def doita(): # the program to be run by _thread_template the return 0 says that no work need be done when stopping\n myprint(\n f'{mma.name} th={threading.current_thread().getName()}, t={monotonic()}')\n return 0\n\n # update the interval and function\n mma: Threadargs = arg._replace(\n interval=timesdic[arg.name], doit=doita)\n # run it\n return _thread_template(mma, printfun=myprint, **kwargs)", "title": "" }, { "docid": "2a20b5c9d9c36f149d780e658355e75d", "score": "0.55598927", "text": "def run_in_background(self, method):\n\n self.background_task = Thread(ThreadStart(method))\n self.background_task.Start()", "title": "" }, { "docid": "46ce473745affac7ba24b5b32c797a42", "score": "0.55568546", "text": "def start(self):\n from ubcs_auxiliary.multithreading import new_thread\n self.threads['running'] = new_thread(self.run)", "title": "" }, { "docid": "2fe667573df551b4398e7c6de2efe760", "score": "0.554656", "text": "def _callback(self, callback, *args):\n if callback:\n try:\n _callback = getattr(self, callback, None)\n if _callback is not None and callable(_callback):\n t = Thread(target=_callback, args=args)\n t.setDaemon(True)\n t.start()\n except Exception as e:\n self.log.error(\"error from callback {}: {}\".format(_callback, e))", "title": "" }, { "docid": "a0834ca6c32813120575745111dfce07", "score": "0.5546088", "text": "def own_threaded_callback(self, cmd, *args):\n func = getattr(self, \"own_tcb_%s\" % cmd)\n return func(*args)", "title": "" }, { "docid": "42e4fb2b1192109b93eedd46aeaffa15", "score": "0.5534494", "text": "def __init__(self, interval, function):\n self.interval = interval\n self.function = function\n self.thread = threading.Thread(target=self.run)\n # daemonize this thread, so that it allows the program to exit\n # even when a thread is running.\n self.thread.daemon = True\n self.thread.start()", "title": "" }, { "docid": "ac36545edf2ae75bc8da9f381bf08561", "score": "0.55340534", "text": "def timed(fn, args=(), kargs={}, timeout=TIMEOUT):\n if not timeout:\n timeout = TIMEOUT\n submission = __ReturningThread(fn, args, kargs)\n submission.start()\n submission.join(timeout)\n if submission.is_alive():\n raise Timeout(timeout)\n if submission.error is not None:\n raise submission.error\n return submission.result", "title": "" }, { "docid": "b13ea95e0d13d18693af372499860555", "score": "0.55311984", "text": "def threaded(finish=None, msg=\"Thread already running\"):\n def decorator(func):\n func.running = 0\n\n @functools.wraps(func)\n def threaded(*args, **kwargs):\n def run():\n try:\n result = func(*args, **kwargs)\n if result is None:\n result = ()\n elif not isinstance(result, tuple):\n result = (result,)\n\n if finish:\n sublime.set_timeout(\n functools.partial(finish, args[0], *result), 0\n )\n finally:\n func.running = 0\n if not func.running:\n func.running = 1\n t = threading.Thread(target=run)\n t.setDaemon(True)\n t.start()\n else:\n sublime.status_message(msg)\n\n threaded.func = func\n return threaded\n\n return decorator", "title": "" }, { "docid": "f3e2c6277257006220ebc490c974f343", "score": "0.5526807", "text": "def add_func(self, target, args=(), kwargs={}, inputs=(), outputs=(), settings=(), seperate_process=True, wd=None, stdin=None, stdout=None, stderr=None):\n if seperate_process:\n t = TaskUsingPythonProcess(self, target, 
args, kwargs, inputs, outputs, settings, wd if wd else self.workingdir)\n elif wd:\n raise ValueError('Working-directory cannot be changed except for seperate processes')\n else:\n t = TaskUsingPythonFunction(self, target, args, kwargs, inputs, outputs, settings)\n return self._add(t)", "title": "" }, { "docid": "031c5df34b7ba1781b1c2dabd3eb3feb", "score": "0.5524902", "text": "def run_in_executor(f):\n\n @functools.wraps(f)\n def inner(*args, **kwargs):\n loop = asyncio.get_event_loop()\n return loop.run_in_executor(None, functools.partial(f, *args, **kwargs))\n\n return inner", "title": "" }, { "docid": "f1248c2bc230c0f95b4c9cf3022a19ef", "score": "0.55218375", "text": "def run_async_from_worker_thread(\n __fn: Callable[..., Awaitable[T]], *args: Any, **kwargs: Any\n) -> T:\n call = partial(__fn, *args, **kwargs)\n return anyio.from_thread.run(call)", "title": "" }, { "docid": "4b23f8ee4ea0a4299c4658df2d681986", "score": "0.55199087", "text": "def submit(self, __fun, *args, **kwargs):\n\n future = Future().bind(__fun, *args, **kwargs)\n self.enqueue(future)\n return future", "title": "" }, { "docid": "78de8f561627fdf562f0bf1dd5ec5838", "score": "0.5504841", "text": "def start_long_running(text, long_running_function, *args, **kwargs):\n results = []\n args = list(args)\n long_running_thread = threading.Thread(target=run_long_running,\n args=tuple([long_running_function, results] + args),\n kwargs=kwargs)\n spin_thread = threading.Thread(target=dots, args=(text, long_running_thread))\n\n long_running_thread.start()\n spin_thread.start()\n long_running_thread.join()\n spin_thread.join()\n\n result = results[0]\n if isinstance(result, Exception):\n raise result\n else:\n return result", "title": "" }, { "docid": "0537b7d0c18425eaa3222585c6612a01", "score": "0.55033535", "text": "def run_server_thread(target, ip, port, exit_callback):\n thread = threading.Thread(\n target=target,\n args=(ip, port, exit_callback),\n )\n thread.daemon = True\n thread.start()", "title": "" } ]
1d03a6c432b1dfb23de19f78958d8c0c
GetInverseOrder(itkPermuteAxesImageFilterIUC3 self) -> itkFixedArrayUI3
[ { "docid": "7426202c5fb06698cee92a700e33aceb", "score": "0.8726155", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC3_GetInverseOrder(self)", "title": "" } ]
[ { "docid": "f869bf6280669dbb704d90b2fc2419df", "score": "0.86088556", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIF3_GetInverseOrder(self)", "title": "" }, { "docid": "f69d662c93f72881fce6f0fbc323a5f6", "score": "0.85740393", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterISS3_GetInverseOrder(self)", "title": "" }, { "docid": "f653119066d51e9147a6992816dfbe40", "score": "0.8566182", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUS3_GetInverseOrder(self)", "title": "" }, { "docid": "860be473672270ebf67c3bb04c3488d1", "score": "0.8370375", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI2 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC2_GetInverseOrder(self)", "title": "" }, { "docid": "5660c2cc9b8b6705ade539e020e8bd87", "score": "0.82017636", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI2 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIF2_GetInverseOrder(self)", "title": "" }, { "docid": "12a3f65d451f83f8e221933496ec1b6f", "score": "0.81503916", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI2 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUS2_GetInverseOrder(self)", "title": "" }, { "docid": "53c49f2fb81df32cd85060597b965dd4", "score": "0.8123055", "text": "def GetInverseOrder(self) -> \"itkFixedArrayUI2 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterISS2_GetInverseOrder(self)", "title": "" }, { "docid": "1e53a0c048e9edf1623e25c48f8c63b8", "score": "0.7566149", "text": "def GetOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC3_GetOrder(self)", "title": "" }, { "docid": "0625559a3c5f334c0072bc298e751ced", "score": "0.7447812", "text": "def GetOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterISS3_GetOrder(self)", "title": "" }, { "docid": "70fd222db600cd3753304d4a30ce2b04", "score": "0.7370732", "text": "def GetOrder(self) -> \"itkFixedArrayUI2 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC2_GetOrder(self)", "title": "" }, { "docid": "691db414fb94d483029f18d324dbcf18", "score": "0.7339359", "text": "def GetOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUS3_GetOrder(self)", "title": "" }, { "docid": "f665cb75eaf9265a873d3c364df09bdd", "score": "0.733608", "text": "def GetOrder(self) -> \"itkFixedArrayUI3 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIF3_GetOrder(self)", "title": "" }, { "docid": "166571fe18a4a600e059da77ad9113c3", "score": "0.71113044", "text": "def GetOrder(self) -> \"itkFixedArrayUI2 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterISS2_GetOrder(self)", "title": "" }, { "docid": "2bb6dc880c196f93b3f07327c5bd13b2", "score": "0.70789343", "text": "def GetOrder(self) -> \"itkFixedArrayUI2 const &\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUS2_GetOrder(self)", "title": "" }, { "docid": "7d262d6fd25a4af6c0578b1fcb63116b", "score": "0.7034006", "text": "def GetOrder(self) -> \"itkFixedArrayUI2 const &\":\n return 
_itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIF2_GetOrder(self)", "title": "" }, { "docid": "f84e27a3a6292338ccfca1e82101a1ec", "score": "0.6234679", "text": "def GetLayout(self) -> \"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterIUC2IUC3_GetLayout(self)", "title": "" }, { "docid": "815680b1f72789206a7e8bd45702d8cb", "score": "0.618092", "text": "def GetLayout(self) -> \"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterIUC3IUC3_GetLayout(self)", "title": "" }, { "docid": "bd068beee37acc5f9f9af174e35f3361", "score": "0.60055697", "text": "def test_unchanged(self):\r\n ds = Dataset()\r\n ds.PhotometricInterpretation = 'MONOCHROME1'\r\n ds.PixelRepresentation = 1\r\n ds.BitsStored = 8\r\n arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype='int8')\r\n out = apply_voi_lut(arr, ds)\r\n assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()", "title": "" }, { "docid": "64f0c35a67dd5fd61564cb8e32770894", "score": "0.5911617", "text": "def inverse(self):\n return self.transpose()", "title": "" }, { "docid": "51370f3c8d056676bdbce94518850251", "score": "0.5911541", "text": "def inverse(self) -> Array:\n canonical_identity = self._canonical(Identity())\n inverse = np.zeros(len(self.elems), dtype=int)\n\n for i, e1 in enumerate(self.elems):\n for j, e2 in enumerate(self.elems):\n prod = e1 @ e2\n if np.all(self._canonical(prod) == canonical_identity):\n inverse[i] = j\n\n return inverse", "title": "" }, { "docid": "f3c4bec6314cd3deac25848607b35833", "score": "0.5890066", "text": "def GetLayout(self) -> \"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterIF2IF3_GetLayout(self)", "title": "" }, { "docid": "679ba8aa20c5380456357f8e58ea7812", "score": "0.5844576", "text": "def inverse_indices(self):\n return self._raw_gwfns_data[4]", "title": "" }, { "docid": "15e09bcfd2100b277d295b64e69da5b1", "score": "0.58360857", "text": "def GetLayout(self) -> \"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterISS2ISS3_GetLayout(self)", "title": "" }, { "docid": "7779283f0e5b7c807e7a926c5cea9c96", "score": "0.5830684", "text": "def itkPermuteAxesImageFilterIUC3_cast(obj: 'itkLightObject') -> \"itkPermuteAxesImageFilterIUC3 *\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC3_cast(obj)", "title": "" }, { "docid": "84465a682df95938d1cc797ea45339da", "score": "0.58279383", "text": "def GetLayout(self) -> \"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterIF3IF3_GetLayout(self)", "title": "" }, { "docid": "d7ebd0c2a9b7c421a315af316bc12dc1", "score": "0.5824781", "text": "def GetLayout(self) -> \"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterISS3ISS3_GetLayout(self)", "title": "" }, { "docid": "2064dd26ff10f69c5e91491f426b2e0f", "score": "0.5797988", "text": "def GetLayout(self) -> \"itkFixedArrayUI2\":\n return _itkTileImageFilterPython.itkTileImageFilterIUC2IUC2_GetLayout(self)", "title": "" }, { "docid": "35fe67fba768b66830aed15875060b9a", "score": "0.5743767", "text": "def inverse_transform(self, data):\n return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1))))", "title": "" }, { "docid": "7c7f90b21ae1aa656c0875693428f621", "score": "0.5695651", "text": "def GetLayout(self) -> \"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterIUS2IUS3_GetLayout(self)", "title": "" }, { "docid": "bad3ca39250dc79cd6c58a6c5ee84f1a", "score": "0.563533", "text": "def GetLayout(self) -> 
\"itkFixedArrayUI3\":\n return _itkTileImageFilterPython.itkTileImageFilterIUS3IUS3_GetLayout(self)", "title": "" }, { "docid": "261413e67d00f19eb9e42c4e46819dd8", "score": "0.5626222", "text": "def inverse(self):\n if self.order()==2 and self.is_cubical():\n return HM(self.n(0), self.n(1), Matrix(SR, self.listHM()).inverse().transpose().list()) \n else:\n raise ValueError(\"not supported for order %d hypermatrices\" %self.order())", "title": "" }, { "docid": "de9de803fc1fd2aa34bef9289517146b", "score": "0.5603021", "text": "def image2array(image):\r\n arr = numpy.array(image)\r\n #im3 = numpy.rollaxis(arr, axis=-1).astype(numpy.float32)\r\n arr2 = arr.swapaxes(0, 2).swapaxes(1, 2)\r\n #result = im3 - arr2\r\n #print result.max(), result.min()\r\n #return arr.swapaxes(0, 2).swapaxes(1, 2).astype(numpy.float32)\r\n return arr2#im3\r", "title": "" }, { "docid": "9c60041791b58e30a68b95ee885e20ab", "score": "0.55957675", "text": "def SetOrder(self, order: 'itkFixedArrayUI3') -> \"void\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC3_SetOrder(self, order)", "title": "" }, { "docid": "0921c5272073fdda78a646d35e0364b0", "score": "0.5555656", "text": "def inverse(self):\n if self._user_inverse is not None:\n return self._user_inverse\n elif self._inverse is not None:\n result = self._inverse()\n if result is not NotImplemented:\n if not self._has_inverse_bounding_box:\n result.bounding_box = None\n return result\n\n raise NotImplementedError(\n \"No analytical or user-supplied inverse transform \"\n \"has been implemented for this model.\"\n )", "title": "" }, { "docid": "2b3f9ae9f21c74e10f93e93fbb4f97ed", "score": "0.554025", "text": "def __invert__(self):\n return _snap.TUInt___invert__(self)", "title": "" }, { "docid": "b0269950379a43138a1363d343a72bd1", "score": "0.5531085", "text": "def cast(obj: 'itkLightObject') -> \"itkPermuteAxesImageFilterIUC3 *\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC3_cast(obj)", "title": "" }, { "docid": "77738805730ccbbdd30985e06051669b", "score": "0.55306053", "text": "def itkPermuteAxesImageFilterIUC2_cast(obj: 'itkLightObject') -> \"itkPermuteAxesImageFilterIUC2 *\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "f0e055d30838f85a0640f4766a8d5e87", "score": "0.5503103", "text": "def inverse_transform(self, outputs: np.ndarray) -> np.ndarray:\n raise NotImplementedError(\"Needs to be overwritten by child class.\")", "title": "" }, { "docid": "c794df5c16d87b2c279834f3ebac7c73", "score": "0.5496121", "text": "def __imul__(self, *args):\n return _geom.indx3___imul__(self, *args)", "title": "" }, { "docid": "4c7e1dab83d0ffcfb77e1277ed943ff6", "score": "0.5484448", "text": "def inverse(array):\n return 1 / (array)", "title": "" }, { "docid": "ca4a58b90f492bb4c6720249aa05c730", "score": "0.5466282", "text": "def __invert__(self):\n return BinArray([~x for x in self], width=self._width)", "title": "" }, { "docid": "3f23fcf20c1aa4785ca6b9c685dada39", "score": "0.5462797", "text": "def GetInverse(self, inverse: 'itkMatrixOffsetTransformBaseF33') -> \"bool\":\n return _itkMatrixOffsetTransformBasePython.itkMatrixOffsetTransformBaseF33_GetInverse(self, inverse)", "title": "" }, { "docid": "7e501bf2defd8b2a39cd9a67a0154979", "score": "0.54378885", "text": "def GetFixedImage(self) -> \"itkImageUC3 const *\":\n return _itkPDEDeformableRegistrationFunctionPython.itkPDEDeformableRegistrationFunctionIUC3IUC3IVF33_GetFixedImage(self)", "title": "" }, { "docid": 
"56ac8deb1ceaee188055d25c552f3751", "score": "0.5413944", "text": "def test_apply_inverse(self):\n\n gtt = OCIO.GradingToneTransform(OCIO.GRADING_LOG)\n vals = OCIO.GradingTone(OCIO.GRADING_LOG)\n vals.midtones = OCIO.GradingRGBMSW(1.6, 0.5, 1.5, 0.7, 0.1, 1.2)\n vals.scontrast = 1.4\n gtt.setValue(vals)\n\n cfg = OCIO.Config().CreateRaw()\n proc = cfg.getProcessor(gtt)\n cpu = proc.getDefaultCPUProcessor()\n\n # Apply the transform and keep the result.\n pixel = [0.48, 0.18, 0.18]\n rgb1 = cpu.applyRGB(pixel)\n\n # The processing did something.\n self.assertAlmostEqual(0.645454, rgb1[0], delta=1e-5)\n self.assertAlmostEqual(0.076331, rgb1[1], delta=1e-5)\n self.assertAlmostEqual(0.130564, rgb1[2], delta=1e-5)\n\n # Invert.\n gtt.setDirection(OCIO.TRANSFORM_DIR_INVERSE)\n proc = cfg.getProcessor(gtt)\n cpu = proc.getDefaultCPUProcessor()\n pixel2 = cpu.applyRGB(rgb1)\n\n # Invert back to original value.\n self.assertAlmostEqual(pixel[0], pixel2[0], delta=1e-5)\n self.assertAlmostEqual(pixel[1], pixel2[1], delta=1e-5)\n self.assertAlmostEqual(pixel[2], pixel2[2], delta=1e-5)", "title": "" }, { "docid": "98db862289474c6c46f9eeecc5dfed1a", "score": "0.5409541", "text": "def SetOrder(self, order: 'itkFixedArrayUI2') -> \"void\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC2_SetOrder(self, order)", "title": "" }, { "docid": "84a59f24d31a2f19559dc8fa3a319749", "score": "0.5400393", "text": "def inverse_transforms(self):\n \n result = []\n for transform in self.transforms[::-1]:\n if transform.endswith(\"0GenericAffine.mat\"):\n result.append([transform, 1])\n else:\n result.append(transform.replace(\"Warp.nii\", \"InverseWarp.nii\"))\n return result", "title": "" }, { "docid": "b96283d6edca67a3fa4157baf56c9312", "score": "0.53988653", "text": "def voltear_flip_atras(self):\r\n\t\treturn self.voltear_flip(\"b\")", "title": "" }, { "docid": "6601c6156eae9acebe953dd6de34019a", "score": "0.5396041", "text": "def inverse(self):\n return self.__invert__()", "title": "" }, { "docid": "f76e8b9810ef8f2bd8bc59bd641b395c", "score": "0.5395834", "text": "def inverse(self):\n return self._inverse()", "title": "" }, { "docid": "517407bb68898871a20232c83a34be8e", "score": "0.5394294", "text": "def GetInverse(self, inverse: 'itkMatrixOffsetTransformBaseD33') -> \"bool\":\n return _itkMatrixOffsetTransformBasePython.itkMatrixOffsetTransformBaseD33_GetInverse(self, inverse)", "title": "" }, { "docid": "7ac2e8443ed7ede2b857b8baa8803b8d", "score": "0.53878033", "text": "def SetOrder(self, order: 'itkFixedArrayUI3') -> \"void\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterISS3_SetOrder(self, order)", "title": "" }, { "docid": "69369050f617e698e41893abc7baeaa1", "score": "0.5375612", "text": "def inverse(self, u, context):\n\n x_size = u.size()[:-1]\n perm = self.arn.permutation\n input_size = u.size(-1)\n x = [torch.zeros(x_size, device=u.device)] * input_size\n\n # Expensive\n for idx in perm:\n if self.iaf_parametrization:\n m, s = self.arn(torch.stack(x, dim=-1), context)\n sigma = torch.sigmoid(\n s + self.initial_bias * torch.ones_like(s))\n x[idx] = ((u[..., idx] - m[..., idx]) / sigma[..., idx] +\n m[..., idx])\n else:\n mu, alpha = self.arn(torch.stack(x, dim=-1), context)\n x[idx] = (u[..., idx] * torch.exp(alpha[..., idx]) +\n mu[..., idx])\n x = torch.stack(x, dim=-1)\n\n # log|det df/du|\n if self.iaf_parametrization:\n log_det_Jf = - (torch.log(sigma)).sum(-1)\n else:\n log_det_Jf = alpha.sum(-1)\n\n return x, log_det_Jf", "title": "" }, { "docid": 
"0a56da5ad5db619cf9d98e915b30870b", "score": "0.53721106", "text": "def inverted(self):\n return RRYScale.InvertedRRYTransform(self.thresh)", "title": "" }, { "docid": "5ee31c9fe16c043be1c6891637086ada", "score": "0.53697175", "text": "def itkPermuteAxesImageFilterIF3_cast(obj: 'itkLightObject') -> \"itkPermuteAxesImageFilterIF3 *\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIF3_cast(obj)", "title": "" }, { "docid": "4a32f955d7741aa8db752dd490d40070", "score": "0.53602374", "text": "def inverse_transform(\n self, y: npt.NDArray[np.float32]\n ) -> list[Performance]:\n return self.encoder.inverse_transform(y)", "title": "" }, { "docid": "080b6a04654c07b470c546c3a3207b23", "score": "0.53578997", "text": "def GetFixedImage(self) -> \"itkImageUC3 const *\":\n return _itkPDEDeformableRegistrationFunctionPython.itkPDEDeformableRegistrationFunctionIUC3IUC3IVF43_GetFixedImage(self)", "title": "" }, { "docid": "134761f62408db84cb68d5542f08b519", "score": "0.5344168", "text": "def GetFixedImage(self) -> \"itkImageUC3 const *\":\n return _itkPDEDeformableRegistrationFunctionPython.itkPDEDeformableRegistrationFunctionIUC3IUC3IVF23_GetFixedImage(self)", "title": "" }, { "docid": "5dab07df2a7646cc87d2d438cacddfbc", "score": "0.53358775", "text": "def Clone(self) -> \"itkPermuteAxesImageFilterIUC3_Pointer\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC3_Clone(self)", "title": "" }, { "docid": "8415abd290d922bd41950bd284a06a28", "score": "0.5317358", "text": "def test_voi_multi_view(self):\r\n ds = dcmread(VOI_08_1F)\r\n assert 8 == ds.BitsAllocated\r\n assert 8 == ds.BitsStored\r\n assert 0 == ds.PixelRepresentation\r\n item0 = ds.VOILUTSequence[0]\r\n # Add another view thats the inverse\r\n ds.VOILUTSequence.append(Dataset())\r\n item1 = ds.VOILUTSequence[1]\r\n item1.LUTDescriptor = [256, 0, 16]\r\n item1.LUTData = item0.LUTData[::-1]\r\n\r\n arr = ds.pixel_array\r\n assert 0 == arr[387, 448]\r\n assert 76 == arr[178, 126]\r\n assert 178 == arr[186, 389]\r\n assert 255 == arr[129, 79]\r\n\r\n out0 = apply_voi_lut(arr, ds)\r\n assert 0 == out0[387, 448]\r\n assert 19532 == out0[178, 126]\r\n assert 45746 == out0[186, 389]\r\n assert 65535 == out0[129, 79]\r\n\r\n out1 = apply_voi_lut(arr, ds, index=1)\r\n assert 65535 == out1[387, 448]\r\n assert 46003 == out1[178, 126]\r\n assert 19789 == out1[186, 389]\r\n assert 0 == out1[129, 79]", "title": "" }, { "docid": "2ad8dbe99c550ddc6633edbe18744218", "score": "0.53148115", "text": "def _inverse(self):\n return torch.inverse(self._compose_mat())", "title": "" }, { "docid": "9891f17c94a1987cf2f703c271528519", "score": "0.53043735", "text": "def itkPermuteAxesImageFilterISS3_cast(obj: 'itkLightObject') -> \"itkPermuteAxesImageFilterISS3 *\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterISS3_cast(obj)", "title": "" }, { "docid": "84da939db63db7dae6d3fe4139fbdfe9", "score": "0.53019625", "text": "def np3d(self) -> np.uint8:\n return np.uint8(np.array([[list(self.__dict__.values())]]))", "title": "" }, { "docid": "81afc46a18680403aaa30d2097658f7b", "score": "0.5299055", "text": "def inverse_transform(self, Y: np.ndarray) -> np.ndarray:\n raise NotImplemented", "title": "" }, { "docid": "0ae4d4cfae207d030f5500efcf7a8b8e", "score": "0.5298027", "text": "def __invert__(self):\r\n\t\t\r\n\t\t# invert\r\n\t\tv = self.invert()\r\n\t\t\r\n\t\treturn v", "title": "" }, { "docid": "9a3f759adb944c49202787c513f90f91", "score": "0.52965826", "text": "def i2up(self):\n array_ndim, array_type, 
array_shape, array_handle = \\\n _min3p.f90wrap_gen__array__i2up(f90wrap.runtime.empty_handle)\n if array_handle in self._arrays:\n i2up = self._arrays[array_handle]\n else:\n i2up = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t,\n f90wrap.runtime.empty_handle,\n _min3p.f90wrap_gen__array__i2up)\n self._arrays[array_handle] = i2up\n return i2up", "title": "" }, { "docid": "33c330d382874d89d5e530dc482abafe", "score": "0.52924186", "text": "def voltear_flip_izquierda(self):\r\n\t\treturn self.voltear_flip(\"l\")", "title": "" }, { "docid": "5f2c7fd05ab79211b781a6a09cee72ce", "score": "0.5291485", "text": "def inverse(self, x_bar):\n\t\treturn self.db_inverse(x_bar).numpy()", "title": "" }, { "docid": "74406b035b213a0d0a907104f1575525", "score": "0.52896374", "text": "def correct_orientation(im):\n return np.fliplr(im)", "title": "" }, { "docid": "fd6c40807acd66545d8bcaa235a98844", "score": "0.5287909", "text": "def itkBoundedReciprocalImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkBoundedReciprocalImageFilterIUC3IUC3 *\":\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "5d5ffd4767e17ea3b07028f4f262f1a8", "score": "0.527593", "text": "def invert(image):\r\n return 255 - image", "title": "" }, { "docid": "0dd99af296181bd99d7dc9193adcbf43", "score": "0.5261962", "text": "def cast(obj: 'itkLightObject') -> \"itkPermuteAxesImageFilterIUC2 *\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "2d8bb5c6e452218d9f174d499b117943", "score": "0.5259978", "text": "def apply_inverse(self, v):\n out = np.zeros_like(v)\n #\n for i in np.arange(self.nTi):\n begin = int(self.Tisizes_accum[i])\n end = int(self.Tisizes_accum[i+1])\n out[begin:end] = self.my_fft.ifft_w_fft(v[begin:end], self.invspectrum)\n #endfor\n return out", "title": "" }, { "docid": "0c7158db6c58b7c913f8835784f8c8e5", "score": "0.5258626", "text": "def prepro(self,I):\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 43] = 0 # erase background (background type 1)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "title": "" }, { "docid": "d7a5f039f5514ec76568bb60d1de7ad8", "score": "0.5256989", "text": "def itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(*args):\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(*args)", "title": "" }, { "docid": "6e8e0b376a475c7509c26c09e787fd65", "score": "0.5251735", "text": "def GetLayout(self) -> \"itkFixedArrayUI2\":\n return _itkTileImageFilterPython.itkTileImageFilterIF2IF2_GetLayout(self)", "title": "" }, { "docid": "9dcc343f5e18123462863c0d96359bc1", "score": "0.52452886", "text": "def SetOrder(self, order: 'itkFixedArrayUI3') -> \"void\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUS3_SetOrder(self, order)", "title": "" }, { "docid": "2fddaa6c9691544f8805720619eafd8f", "score": "0.52438325", "text": "def invordvs(self):\n array_ndim, array_type, array_shape, array_handle = \\\n _min3p.f90wrap_gen__array__invordvs(f90wrap.runtime.empty_handle)\n if array_handle in self._arrays:\n invordvs = self._arrays[array_handle]\n else:\n invordvs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t,\n f90wrap.runtime.empty_handle,\n _min3p.f90wrap_gen__array__invordvs)\n self._arrays[array_handle] = invordvs\n return invordvs", "title": "" }, { "docid": "9837564e7eede2dc4ffa22a5945484d5", 
"score": "0.5243141", "text": "def binary_invert(binary_image:\"napari.types.LabelsData\") -> \"napari.types.LabelsData\":\n return (np.asarray(binary_image) == 0) * 1", "title": "" }, { "docid": "a936b058876e9ec8dc66c07fd35c2bf7", "score": "0.5230745", "text": "def test_neg_ov_outputarray_a3(self):\n\t\t# Calculate the expected result.\n\t\texpected = [-(x) for x in self.datainf]\n\n\t\t# This is the actual test.\n\t\tarrayfunc.neg(self.datainf, self.dataout, matherrors=True)\n\n\t\tfor dataoutitem, expecteditem in zip(list(self.dataout), expected):\n\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\tself.assertEqual(dataoutitem, expecteditem)", "title": "" }, { "docid": "a936b058876e9ec8dc66c07fd35c2bf7", "score": "0.5230745", "text": "def test_neg_ov_outputarray_a3(self):\n\t\t# Calculate the expected result.\n\t\texpected = [-(x) for x in self.datainf]\n\n\t\t# This is the actual test.\n\t\tarrayfunc.neg(self.datainf, self.dataout, matherrors=True)\n\n\t\tfor dataoutitem, expecteditem in zip(list(self.dataout), expected):\n\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\tself.assertEqual(dataoutitem, expecteditem)", "title": "" }, { "docid": "a936b058876e9ec8dc66c07fd35c2bf7", "score": "0.5230745", "text": "def test_neg_ov_outputarray_a3(self):\n\t\t# Calculate the expected result.\n\t\texpected = [-(x) for x in self.datainf]\n\n\t\t# This is the actual test.\n\t\tarrayfunc.neg(self.datainf, self.dataout, matherrors=True)\n\n\t\tfor dataoutitem, expecteditem in zip(list(self.dataout), expected):\n\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\tself.assertEqual(dataoutitem, expecteditem)", "title": "" }, { "docid": "a936b058876e9ec8dc66c07fd35c2bf7", "score": "0.5230745", "text": "def test_neg_ov_outputarray_a3(self):\n\t\t# Calculate the expected result.\n\t\texpected = [-(x) for x in self.datainf]\n\n\t\t# This is the actual test.\n\t\tarrayfunc.neg(self.datainf, self.dataout, matherrors=True)\n\n\t\tfor dataoutitem, expecteditem in zip(list(self.dataout), expected):\n\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\tself.assertEqual(dataoutitem, expecteditem)", "title": "" }, { "docid": "2d75af8f37d3409e69fd85e946b25ec4", "score": "0.5224591", "text": "def GetDirection(self) -> \"itkMatrixD33 const &\":\n return _itkImportImageFilterPython.itkImportImageFilterUC3_GetDirection(self)", "title": "" }, { "docid": "28340f7f5ddd3c53af2fc8273d225b64", "score": "0.52239215", "text": "def invert3x3(m):\n return (m[0], m[3], m[6], m[1], m[4], m[7], m[2], m[5], m[8])", "title": "" }, { "docid": "28340f7f5ddd3c53af2fc8273d225b64", "score": "0.52239215", "text": "def invert3x3(m):\n return (m[0], m[3], m[6], m[1], m[4], m[7], m[2], m[5], m[8])", "title": "" }, { "docid": "1c65cc080e65cdd92154cc828462f1c2", "score": "0.52198243", "text": "def test_voi_uint8(self):\r\n ds = Dataset()\r\n ds.PixelRepresentation = 0\r\n ds.BitsStored = 8\r\n ds.VOILUTSequence = [Dataset()]\r\n item = ds.VOILUTSequence[0]\r\n item.LUTDescriptor = [4, 0, 8]\r\n item.LUTData = [0, 127, 128, 255]\r\n arr = np.asarray([0, 1, 128, 254, 255], dtype='uint8')\r\n out = apply_voi_lut(arr, ds)\r\n assert 'uint8' == out.dtype\r\n assert [0, 127, 255, 255, 255] == out.tolist()", "title": "" }, { "docid": "d4a9b587152496ad3da538790582e55d", "score": "0.52116096", "text": "def GetLayout(self) -> \"itkFixedArrayUI2\":\n return _itkTileImageFilterPython.itkTileImageFilterISS2ISS2_GetLayout(self)", "title": "" }, { 
"docid": "2506e014badd74872afcca10e4ff0ca1", "score": "0.52099496", "text": "def itkNarrowBandImageFilterBaseIF3IF3_cast(*args):\n return _itkNarrowBandImageFilterBasePython.itkNarrowBandImageFilterBaseIF3IF3_cast(*args)", "title": "" }, { "docid": "2d5df6dc4bd93428188c05db8cb4717a", "score": "0.520773", "text": "def inverse(self):\n try:\n if self._inverse is None:\n self._inverse = np.linalg.inv(self.cell.T)\n except AttributeError:\n self._inverse = np.linalg.inv(self.cell.T)\n return self._inverse", "title": "" }, { "docid": "150987f63c2645ce376a902bced8d423", "score": "0.52051055", "text": "def test_voi_int8(self):\r\n ds = Dataset()\r\n ds.PixelRepresentation = 1\r\n ds.BitsStored = 8\r\n ds.VOILUTSequence = [Dataset()]\r\n item = ds.VOILUTSequence[0]\r\n item.LUTDescriptor = [4, 0, 8]\r\n item.LUTData = [0, 127, 128, 255]\r\n arr = np.asarray([0, -1, 2, -128, 127], dtype='int8')\r\n out = apply_voi_lut(arr, ds)\r\n assert 'uint8' == out.dtype\r\n assert [0, 0, 128, 0, 255] == out.tolist()", "title": "" }, { "docid": "3b77c2cc576e17fc354389f272c1ac77", "score": "0.5201163", "text": "def invert(self, *args):\n return _pymaxwell5.Cvector_invert(self, *args)", "title": "" }, { "docid": "738f287f868389e01a85b19a0a27fef7", "score": "0.51998514", "text": "def GetOrientedIntensityImage(self, *args):\n return _itkLabelGeometryImageFilterPython.itkLabelGeometryImageFilterIUC3IUL3_GetOrientedIntensityImage(self, *args)", "title": "" }, { "docid": "b6e079a7736535ff4290181b587e8a7f", "score": "0.51989627", "text": "def GetOrientedIntensityImage(self, *args):\n return _itkLabelGeometryImageFilterPython.itkLabelGeometryImageFilterIUL3IUC3_GetOrientedIntensityImage(self, *args)", "title": "" }, { "docid": "2e9d777c760073e0a3d36df388ab1065", "score": "0.5190601", "text": "def itkPermuteAxesImageFilterIUS3_cast(obj: 'itkLightObject') -> \"itkPermuteAxesImageFilterIUS3 *\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIUS3_cast(obj)", "title": "" }, { "docid": "d9984912598bbabec185618a79b81f9d", "score": "0.5189627", "text": "def __inverse_flip(self, img_v, img_h):\n return img_v.flip(2), img_h.flip(3)", "title": "" }, { "docid": "d9984912598bbabec185618a79b81f9d", "score": "0.5189627", "text": "def __inverse_flip(self, img_v, img_h):\n return img_v.flip(2), img_h.flip(3)", "title": "" }, { "docid": "e0e0dc95937fda6917288ac888bb4fb1", "score": "0.5185044", "text": "def SetLayout(self, _arg: 'itkFixedArrayUI3') -> \"void\":\n return _itkTileImageFilterPython.itkTileImageFilterIUC2IUC3_SetLayout(self, _arg)", "title": "" }, { "docid": "80500c284ce5ef8015e0d3a4f417fa7f", "score": "0.51835406", "text": "def SetOrder(self, order: 'itkFixedArrayUI3') -> \"void\":\n return _itkPermuteAxesImageFilterPython.itkPermuteAxesImageFilterIF3_SetOrder(self, order)", "title": "" }, { "docid": "6304b0c1ad4ad1acac9eced91aa2c4ee", "score": "0.51821417", "text": "def __imul__(self, *args):\n return _geom.vctr3___imul__(self, *args)", "title": "" }, { "docid": "1f99a365ecd132668aca02280332bf0b", "score": "0.51778555", "text": "def GetOrientedIntensityImage(self, *args):\n return _itkLabelGeometryImageFilterPython.itkLabelGeometryImageFilterIUC3IUC3_GetOrientedIntensityImage(self, *args)", "title": "" } ]
c3bebcd033b560947ad52049832a17d9
Get domain numberings for a domain if it is present in target
[ { "docid": "2dc32ead8e14e19b59bd0851c38169d1", "score": "0.7273047", "text": "def get_domain_number(target, domain, database):\n query = \"SELECT num FROM component WHERE target='{}' AND domain = {} ORDER BY num\".format(target, domain)\n return [num for (num,) in database.execute(query)]", "title": "" } ]
[ { "docid": "0be2d57dcdfadd4f8ef78a3cb7b24073", "score": "0.5897098", "text": "def find_domain():\r\n task_list=[]\r\n for x in range(0, len(input_dict[1])):\r\n task_list.append(input_dict[1][x].split(\",\")[1].split())\r\n for task_num in range(0,len(task_list)):\r\n domain[task_list[task_num][0]]=[]\r\n task_duration=task_list[task_num][1]\r\n for time_s in tasks_domain:\r\n if int(time_s)+int(task_duration) in tasks_domain and floor(int(time_s) / 10)==floor((int(time_s) + int(task_duration)) / 10):\r\n domain[task_list[task_num][0]].append((int(time_s),int(time_s)+int(task_duration)))\r\n return domain", "title": "" }, { "docid": "52852dfb5405325696f11d2bdecfb6d5", "score": "0.58459824", "text": "def get_domain(target, method, database):\n\n # For every domain\n query = \"SELECT component.num, component.domain FROM component INNER JOIN domain ON component.domain = domain.id WHERE component.target = '{}' AND domain.method = {} ORDER BY component.num;\".format(target, method)\n print(query)\n components, domains = zip(*database.execute(query))\n return (list(components), list(domains))", "title": "" }, { "docid": "142076b1d9882b3925a39bfe79aa89b3", "score": "0.5734408", "text": "def detect_domains (nffg):\n return {infra.domain for infra in nffg.infras}", "title": "" }, { "docid": "30639bf439c9fe1b43beb407555d947d", "score": "0.55964726", "text": "def get_domains(targets, method, database):\n domains = {}\n\n for target in targets:\n domains[target] = get_domain(target, method, database)\n\n return domains", "title": "" }, { "docid": "0f4290e1e6d67c9447b57063f9ca8596", "score": "0.5587962", "text": "def _domain(self):\n if self.p < 1 and self.p != 0:\n return [self.args[0] >= 0]\n else:\n return []", "title": "" }, { "docid": "d314f0bdac99bac3c877fcd916332ca1", "score": "0.5532537", "text": "def get_episode_numbers_for_mentioned_domain(self, domain: str) -> list:\n for i in self.entries:\n if domain in i.summary_detail.value:\n yield i.itunes_episode", "title": "" }, { "docid": "3aa5b79f2011e4572df4e37d9a6b1670", "score": "0.54772455", "text": "def _find_dk_domain(self, assignment, trgt_attr):\n # cell_probabilities will hold domain values and their probabilities\n cell_probabilities = []\n # always have the initial value in the returned domain values unless\n # it is null\n if assignment[trgt_attr] is not None:\n cell_values = {(assignment[trgt_attr])}\n else:\n cell_values = {()}\n for attr in assignment:\n if attr == trgt_attr:\n continue\n attr_val = assignment[attr]\n\n if attr in self.coocurence_lookup:\n if attr_val in self.coocurence_lookup[attr]:\n if trgt_attr in self.coocurence_lookup[attr][attr_val]:\n if trgt_attr in self.coocurence_lookup[attr][attr_val]:\n cell_probabilities += \\\n [(k, v) for\n k, v in\n self.coocurence_lookup[attr][attr_val][\n trgt_attr].iteritems()]\n\n # Sort cell_values and chop after k and chop below threshold2\n cell_probabilities.sort(key=lambda t: t[1], reverse=True)\n\n for tuple in cell_probabilities:\n value = tuple[0]\n probability = tuple[1]\n if len(cell_values) == self.dk_breakoff or \\\n probability < self.threshold2:\n break\n cell_values.add(value)\n return cell_values", "title": "" }, { "docid": "9cf0539e45fcce499fc6b9cb482eb6e9", "score": "0.54594374", "text": "def _get_isns_get_discovery_domain_set(self):\n return self.__isns_get_discovery_domain_set", "title": "" }, { "docid": "fce10b547b0c98d4614257c6484f3340", "score": "0.543678", "text": "def get_domain_ids(self):\n domain_ids = set()\n for base in self.staple_bases:\n if 
base.domain != None:\n domain_ids.add(base.domain)\n for base in self.scaffold_bases: \n if base.domain != None:\n domain_ids.add(base.domain)\n return list(domain_ids)", "title": "" }, { "docid": "224e108796311e9c01891de56b001d70", "score": "0.5421687", "text": "def read_domains_from_topdom_output_bed(bedfile):\n with open(bedfile) as bed:\n line = bed.readline()\n domains = []\n counter = 0\n while line:\n lline = line.strip().split()\n if lline[-1] == \"domain\":\n counter += 1\n domains.append(lline)\n line = bed.readline()\n print(\"{} domains found\".format(str(counter)))\n return domains", "title": "" }, { "docid": "d5aac26a6aa7d977cbd46a2e2c3d5de2", "score": "0.5387222", "text": "def domain_to_ints(domain: str) -> List[int]:\n return [\n domain_name_dictionary.get(y, domain_name_dictionary.get(np.NaN))\n for y in domain.lower()\n ]", "title": "" }, { "docid": "dc22757a88d7f0985364707bb6f92508", "score": "0.5385777", "text": "def FindDomain(self):\n dom = [list(tpl) for tpl in self.Domain]\n for v_idx in range(self.num_vars):\n v = self.Vars[v_idx]\n for f in self.OrthSys:\n Orth = self.OrthSys[f]\n if v in Orth.Vars:\n idx = Orth.Vars.index(v)\n rng = Orth.Domain[idx]\n if (dom[v_idx][0] == None) or (rng[0] < dom[v_idx][0]):\n dom[v_idx][0] = rng[0]\n if (dom[v_idx][1] == None) or (rng[1] > dom[v_idx][1]):\n dom[v_idx][1] = rng[1]\n self.Domain = [tuple(lst) for lst in dom]\n # defines the default sampling measure object\n self.SampleMeasure = Measure(self.Domain, 1)", "title": "" }, { "docid": "0172ad7915f8c830ef08d32ec4b9f534", "score": "0.5357114", "text": "def _get_isns_get_discovery_domain(self):\n return self.__isns_get_discovery_domain", "title": "" }, { "docid": "fc130ed621c0f184a0c8e3ab61f2149a", "score": "0.5353564", "text": "def map_to_domain(self, idx, bound=False):\n if bound:\n idx = int(round(idx))\n idx = min(len(self.domain) - 1, max(0, idx))\n try:\n val = self.domain[idx]\n except IndexError:\n val = None\n return val", "title": "" }, { "docid": "fc130ed621c0f184a0c8e3ab61f2149a", "score": "0.5353564", "text": "def map_to_domain(self, idx, bound=False):\n if bound:\n idx = int(round(idx))\n idx = min(len(self.domain) - 1, max(0, idx))\n try:\n val = self.domain[idx]\n except IndexError:\n val = None\n return val", "title": "" }, { "docid": "caa7324e2424d992ff2993ac2bd2d474", "score": "0.53223825", "text": "def get_advertiser_domain_pagerrank(self) -> int:\n raise NotImplementedError", "title": "" }, { "docid": "477d88772d4a2dd33675e9e402f34698", "score": "0.53122586", "text": "def map_to_domain(self, idx, bound=False):\n return idx", "title": "" }, { "docid": "0b1e904487166fcd37d7dcd9cfad0b65", "score": "0.5306905", "text": "def GetDomains(self):\n if self.mTableNameDomains:\n statement = \"SELECT rep_nid, rep_from, rep_to, family FROM %s \" % self.mTableNameDomains +\\\n \" ORDER BY rep_nid, rep_from\" \n result = self.mDbhandle.Execute(statement).fetchall()\n else:\n file = open(self.mFileNameDomains, \"r\")\n result = []\n \n for line in file:\n d = map(string.atoi, string.split(string.split(line[:-1], \"\\t\")[0],\"_\"))\n\n ## add dummy family\n if len(d) == 3: d.append(0)\n \n result.append( d )\n\n file.close()\n\n self.mDomains = {}\n last_nid = None\n \n for domain_nid, domain_from, domain_to, family in result:\n if last_nid != domain_nid:\n if last_nid:\n self.mDomains[last_nid] = domains\n domains = []\n last_nid = domain_nid\n \n domains.append( (domain_from, domain_to, family) )\n\n self.mDomains[last_nid] = domains", "title": "" }, { "docid": 
"574aaa3168e6abeea4e45372b7ef4ede", "score": "0.52753675", "text": "def get_domain(key):", "title": "" }, { "docid": "92abacae45897b7a6945512b1cec2a86", "score": "0.5268759", "text": "def get_source2target_domain_mapping(source_domains: List[str], target_domains: List[str]) -> Dict[str, List[str]]:\n mapping = {}\n for dom in source_domains:\n mapping[dom] = [d for d in target_domains if d != dom]\n return mapping", "title": "" }, { "docid": "95b640fe82371a2a20402cb5d55a37be", "score": "0.52399826", "text": "def get_multi_doms(el_targets, pfam_d):\n act_lkp = {}\n arch_lkp = {}\n dom_lkp = {}\n for target in el_targets:\n try:\n doms = pfam_d[target[0]]['domains']\n except KeyError:\n print \"No entry in Pfam for: %s\"% target\n continue\n #inv_doms = [x for x in doms if x not in valid_doms]\n if len(doms) <= 1:\n\t print 'impossible!'#len(inv_doms) == len(doms):\n arch = ', '.join(sorted(doms))\n try:\n arch_lkp[arch] += 1\n act_lkp[arch] += target[2]\n except KeyError:\n arch_lkp[arch] = 1\n act_lkp[arch] = target[2]\n for dom in set(doms):\n try:\n dom_lkp[dom] += 1\n except KeyError:\n dom_lkp[dom] = 1\n return(arch_lkp, dom_lkp, act_lkp)", "title": "" }, { "docid": "f00e41cd474dacf1a07ca7ac1ba384da", "score": "0.52283293", "text": "def getdomains(self, flow):\n i = 1\n sr = 0\n while True:\n try:\n self.flow_parser.reset_anchor()\n startrow, endrow = self.flow_parser.find_range('DOMAIN:', sr)\n if startrow == -1 or endrow == -1: \n break\n #import pdb; pdb.set_trace()\n sr = endrow\n domain_parser = StringArrayParser(self.flow_parser.data[startrow:endrow])\n \n #get the domain name from the first line\n name = self.getname('DOMAIN:', domain_parser.data[0])\n dmn = Domain(name)\n \n #get the Domain Type\n #import pdb; pdb.set_trace()\n dmn.domaintype = domain_parser.transfer_keyvar('Domain Type', 1)\n dmn.location = domain_parser.transfer_keyvar('Location', 1)\n \n #print 'Domain ' + dmn.name + ' ' + dmn.domaintype + ' startrow ' + str(domain_startrow) + \\\n #' endrow ' + str(domain_endrow)\n \n # look for boundaries\n anchor = \"BOUNDARY:\"\n #n = len(anchor)\n while True:\n #import pdb; pdb.set_trace()\n startrow, endrow = domain_parser.find_range(anchor)\n if startrow == -1 or endrow == -1: \n break\n boundary_parser = CCLEntityParser(domain_parser.data[startrow:endrow])\n #get the boundary name from the first line\n #import pdb; pdb.set_trace()\n #line = boundary_parser.data[boundary_parser.current_row].strip()\n name = self.getname(anchor, boundary_parser.data[0])\n boundary_parser.parse()\n #import pdb; pdb.set_trace()\n b = Boundary(name, boundary_parser.dictionary)\n btype = b.type\n #print 'Boundary ' + name + ' ' + btype + ' startrow ' + str(boundary_startrow) + \\\n #' endrow ' + str(boundary_endrow)\n if btype == 'INLET':\n dmn.inlets.append(b)\n elif btype == 'OUTLET':\n dmn.outlets.append(b)\n elif btype == 'OPENING':\n dmn.openings.append(b)\n elif btype == 'WALL':\n dmn.walls.append(b)\n elif btype == 'INTERFACE':\n dmn.interfaces.append(b)\n elif btype == 'SYMMETRY':\n dmn.symmetries.append(b)\n else:\n do_logging(self.logger, 'Domain ' + dmn.name + ': Boundary ' + name + ': unknown type ' + btype, both = True)\n \n flow.domains.append(dmn)\n i = i + 1\n except RuntimeError: #anchor not found\n break", "title": "" }, { "docid": "2972a2fef420218438dcc3e2aa2830bb", "score": "0.5223736", "text": "def main(self, number):\n labels = [self.gen_label(i) for i in self.domain_length]\n domain_name = [i + \".\" + target_dist for i in labels]\n forth = [i + \".\" + 
self.gen_label(63) + \".\" + target_dist for i in labels]\n domain_name.extend(forth)\n fifth = [i + \".\" + self.gen_label(63) + \".\" + self.gen_label(63) + \".\" + target_dist for i in labels]\n domain_name.extend(fifth)\n sixth = [i + \".\" + self.gen_label(63) + \".\" + self.gen_label(63) + \".\" + self.gen_label(63) + \".\" + target_dist for i in labels]\n domain_name.extend(sixth)\n return domain_name", "title": "" }, { "docid": "842138f1d1d9bbdda4b26a26b9dd07d0", "score": "0.51943773", "text": "def extractHavingIpAdress():\n\n parts = elements.netloc.split('.')\n\n # Number of times a number appears in the domain\n countNum = 0\n # Numver of times a hexadecimal appears in the domain\n countHex = 0\n # Number of times a 'Normal' string appears in the domain\n countNormal = 0\n\n for part in parts:\n if part.isdigit():\n countNum = countNum + 1\n else:\n try:\n int(part, 16)\n countHex = countHex + 1\n except ValueError:\n countNormal = countNormal + 1\n \n if countNum + countHex > 0:\n features[\"having_IP_Address\"] = -1\n else:\n features[\"having_IP_Address\"] = 1", "title": "" }, { "docid": "7cd4dfed511ffcd42d14c57d22963839", "score": "0.5192208", "text": "def __get_domain(self, predicate):\n\t\tdomain = []\n\n\t\tfor a in self.__atoms:\n\t\t\tfor fact in self.__facts[predicate]:\n\t\t\t\tif a in fact and a not in domain:\n\t\t\t\t\tdomain.append(a)\n\n\t\treturn domain", "title": "" }, { "docid": "b0e8850f26fa88bcfb4cd5640194db24", "score": "0.5189625", "text": "def retrieve_domains():\n pools = Pool.read_pools('pools.csv', cached_only=False)\n return OrderedDict((x, True) for x in [p.domain for p in pools]).keys()", "title": "" }, { "docid": "6eaa6ef3d3ad073561b8ca47e6deb3a8", "score": "0.5168006", "text": "def get_episode_numbers_for_mentioned_domain(self, domain: str) -> list:\n episodes = []\n for entry in self.entries:\n summary = entry[\"summary\"]\n domains_raw = re.findall(\"https?://[^/\\\"]+\", summary)\n domains = [x.split(\"//\")[1] for x in domains_raw]\n if domain in domains:\n episodes.append(entry[\"itunes_episode\"])\n return episodes", "title": "" }, { "docid": "3c91f8cff401d946ee6dc3001c8668d0", "score": "0.5162845", "text": "def extractHavingSubDomain():\n\n list = elements.hostname.split(\".\")\n if len(list) > 3:\n features[\"having_Sub_Domain\"] = -1\n else:\n features[\"having_Sub_Domain\"] = 1", "title": "" }, { "docid": "ba3de747950a07e4f44f2f7d6f6b88cf", "score": "0.51399106", "text": "def get_common_domains(url=COMMON_DOMAINS):\r\n r = requests.get(url)\r\n r_data = r.content\r\n soup = BeautifulSoup(r_data, \"html.parser\")\r\n target = soup.find(\"div\", attrs=TARGET_DIV)\r\n target = str(target)\r\n domains = re.findall(r'height:24px;\"/></td><td>(.*..*)</td><td>', target)\r\n return domains", "title": "" }, { "docid": "5840704e6f0236f82c709bb4fdfb2c12", "score": "0.51103675", "text": "def get_domain(self, context, domain_id):", "title": "" }, { "docid": "a8ffe221ca35b886f8bc5f7cbfae14d5", "score": "0.5083156", "text": "def _list_domains(self, func):\n dom = {}\n dom.setdefault(0)\n\n for mail in self.m:\n for domain in getattr(mail,func)():\n dom[domain] = dom.get(domain,0) + 1\n\n del dom[0] # Remove default value.\n result = [ (count, domain) for domain, count in dom.items() ]\n result.sort()\n \n return result", "title": "" }, { "docid": "304722434813824066d165d7ac6e0913", "score": "0.5070358", "text": "def get_external_domain_ids (self, domain, topo_nffg):\n domain_mgr = self.domains.get_component_by_domain(domain_name=domain)\n new_ids = 
{infra.id for infra in topo_nffg.infras}\n if domain_mgr is None:\n log.error(\"No manager has been found for domain %s in %s\"\n % (domain, self.domains.domains))\n return new_ids\n try:\n if new_ids:\n # Remove oneself from domains\n new_ids.remove(domain_mgr.bgp_domain_id)\n except KeyError:\n log.warning(\"Detected domains does not include own BGP ID: %s\" %\n domain_mgr.bgp_domain_id)\n return new_ids", "title": "" }, { "docid": "fe4c5bfd6daf69cc3fde1bc9c47447b2", "score": "0.50664", "text": "def get_common_domains(url=COMMON_DOMAINS):\r\n response = requests.get(url)\r\n soup = bs4.BeautifulSoup(response.content, 'html.parser')\r\n div = soup.find_all('div', attrs=TARGET_DIV)[0]\r\n domains = []\r\n for row in div.find_all('tr'):\r\n cols = row.find_all('td')\r\n domain = cols[2].text.strip()\r\n percent = float(cols[3].text.strip(' %'))\r\n domains.append((percent, domain))\r\n domains = sorted(domains, key=lambda d: d[0], reverse=True)[0:100]\r\n return [d[1] for d in domains]", "title": "" }, { "docid": "77faab9eb1138e46cc4337aa34e4dd01", "score": "0.50499713", "text": "def domain(self):\n raise NotImplementedError()", "title": "" }, { "docid": "aa0c82252feade59c3b5110f62157172", "score": "0.50472194", "text": "def __compute_domains(self):\n domains = {}\n for type_domain in self.attrs[\"domain\"]:\n domains[type_domain] = self.types[type_domain]\n return domains", "title": "" }, { "docid": "f5abfca1574ac88ae5530c40e8d9acc2", "score": "0.50073206", "text": "def get_domain_names(MaxResults=None, NextToken=None):\n pass", "title": "" }, { "docid": "b088d1c8ca1383159e76fd94c1483611", "score": "0.49995908", "text": "def find_domain(problem):\n dir, name = os.path.split(problem)\n number_match = NUMBER.search(name)\n number = number_match.group(0)\n domain = os.path.join(dir, 'domain.pddl')\n for file in os.listdir(dir):\n if 'domain' in file and number in file:\n domain = os.path.join(dir, file)\n break\n if not os.path.isfile(domain):\n logging.error('Domain file \"{0}\" can not be found'.format(domain))\n sys.exit(1)\n logging.info('Found domain {0}'.format(domain))\n return domain", "title": "" }, { "docid": "f2c1b30026879f1405d6536b735085d9", "score": "0.49670932", "text": "def nis_domain(self):\n return self._nis_domain", "title": "" }, { "docid": "8ba9bbb1cc1a44e336b71882dd16b286", "score": "0.49620488", "text": "def tld_sub_domain_testing(self):\n domain = self.hostname\n psl = PublicSuffixList()\n psl.accept_unknown = False\n if domain is None:\n domain = \"\"\n else:\n try:\n domain = domain[:len(domain) - (len(psl.publicsuffix(domain)) + 1)]\n except TypeError:\n pass\n\n subdomains = domain.split(\".\")\n\n for subdomain in subdomains:\n if psl.publicsuffix(\"x.\" + subdomain) is not None:\n self.tldSubDomainWeight = 1\n return\n self.tldSubDomainWeight = 0", "title": "" }, { "docid": "0fb646552eb2d1b04d90b9e4d1572bf0", "score": "0.49544242", "text": "def discretize_domain(bounds_domain, number_points_each_dimension):\n if len(number_points_each_dimension) != len(bounds_domain):\n raise ValueError(\"Dimensions are wrong!\")\n\n points = []\n for bound, number_points in zip(bounds_domain, number_points_each_dimension):\n points.append(np.linspace(bound.lower_bound, bound.upper_bound, number_points))\n\n domain = []\n for point in itertools.product(*points):\n domain.append(list(point))\n\n return domain", "title": "" }, { "docid": "1b828fa0bf5a1ce2f8ff6706f25bfdf0", "score": "0.49535644", "text": "def get_domain(entity_id):\n return entity_id.split('.')[0]", "title": "" }, { 
"docid": "731622bdab175c3176b50f4bddd991f3", "score": "0.4942755", "text": "def dns_record_testing(self):\n\n if len(self.hostname.split(\"www.\")) == 2:\n domain = self.hostname.split(\"www.\")[1]\n else:\n domain = self.hostname\n\n try:\n empty = True\n resolver = dns.resolver.Resolver()\n answer = resolver.query(domain, \"NS\")\n i = 0\n while empty and i < len(answer):\n if answer[i].target != \"\":\n empty = False\n i += 1\n except:\n self.dnsWeight = 1\n return\n\n if not empty:\n self.dnsWeight = 0\n return\n\n self.dnsWeight = 1", "title": "" }, { "docid": "6461bdc0eb00c096261e3bb03bad825e", "score": "0.49390092", "text": "def get_common_domains(url=COMMON_DOMAINS):\n domains = []\n r = requests.get(url)\n r.raise_for_status()\n soup = Soup(r.text, \"html.parser\")\n for tr in soup.find(\"div\", TARGET_DIV).find_all(\"tr\"):\n domains.append(tr.find_all(\"td\")[2].text)\n\n return domains", "title": "" }, { "docid": "6efd6dc126e825a62bd4319a4088f050", "score": "0.4929199", "text": "def collect_domain_urls (self, mapping):\n for m in mapping:\n try:\n domain = m['bisbis']['domain']\n except KeyError:\n log.error(\"Missing domain from mapping:\\n%s\" % m)\n continue\n url = self.get_domain_url(domain=domain)\n if url:\n log.debug(\"Found URL: %s for domain: %s\" % (url, domain))\n else:\n log.error(\"URL is missing from domain: %s!\" % domain)\n url = \"N/A\"\n m['bisbis']['url'] = url\n return mapping", "title": "" }, { "docid": "1ce535e267af038d940be8cfd7052240", "score": "0.49242648", "text": "def get_domain():\n from ... import __domain__\n return __domain__", "title": "" }, { "docid": "653459af5c008cf98fd0e6038d424277", "score": "0.48897004", "text": "def ns(domain):\n\n result = []\n\n try:\n answers = dns.resolver.resolve(domain, 'NS')\n except Exception:\n answers = []\n\n for rdata in answers:\n result.append(str(rdata.target).lower())\n\n return result", "title": "" }, { "docid": "f4f13a5269e1f3515a0909cd24629bd5", "score": "0.48624626", "text": "def get_ids(self, origin):\n\n\t\tif origin == 'all':\n\t\t\traise NotImplementedError\n\t\telif origin == 'gen':\n\t\t\tids = self.btmsm.get_ids('gen')\n\t\t\tids += self.contcm.get_ids('gen')\n\t\t\tids += self.mfacm.get_ids('gen')\n\t\t\tids += self.nf19Cm.get_ids('gen')\n\t\t\tids += self.nf21Cm.get_ids('gen')\n\t\t\tids += self.nycm.get_ids('gen')\n\t\t\tids += self.pipcm.get_ids('gen')\n\t\t\tids += self.percm.get_ids('gen')\n\t\t\tids += self.stancm.get_ids('gen')\n\t\t\tids += self.wilkcm.get_ids('gen')\n\t\t\t\n\t\t\tif self.ttm.has_booknlp() and self.ttm.has_corenlp():\n\t\t\t\tids = [self.ttm.get_id()] + ids\n\n\t\t\tif self.ttlm.has_booknlp() and self.ttlm.has_corenlp():\n\t\t\t\tids = [self.ttlm.get_id()] + ids\n\n\t\t\tif self.pnpm.has_booknlp() and self.pnpm.has_corenlp():\n\t\t\t\tids = [self.pnpm.get_id()] + ids\n\n\t\t\tif self.ppm.has_booknlp() and self.ppm.has_corenlp():\n\t\t\t\tids = [self.ppm.get_id()] + ids\n\n\t\t\tif self.atotcm.has_booknlp() and self.atotcm.has_corenlp():\n\t\t\t\tids = [self.atotcm.get_id()] + ids\n\n\t\t\treturn ids\n\t\telif origin == 'novels':\n\t\t\traise NotImplementedError\n\t\telse:\n\t\t\traise ValueError(\"'origin' argument must be 'all', 'gen', or \"\n\t\t\t\t\"'novels'.\")", "title": "" }, { "docid": "912f6fa0102dd4305f6490542ec8c660", "score": "0.48496756", "text": "def testDomainParamInput(marklist,domlist,nodecounts): \n assert len(marklist) == len(domlist)\n for ind,doms in enumerate(domlist):\n boundlocs = [loc for dom in doms for loc in dom]\n assert len(boundlocs) == 
len(set(boundlocs))\n assert min(boundlocs) >= 1 and max(boundlocs) <= nodecounts[ind]\n return True", "title": "" }, { "docid": "170ad537578ec98663eb559fcfdfd593", "score": "0.48477918", "text": "def get_common_domains(url=COMMON_DOMAINS):\n response = requests.get(COMMON_DOMAINS)\n soup = BeautifulSoup(response.text, 'html.parser')\n domains = soup.find('div', class_=TARGET_DIV['class'])\n domains = domains.find_all('tr')\n return [domain.find_all('td')[2].text for domain in domains]", "title": "" }, { "docid": "ee1cefe54f3a00d1cae1a6ba5de11da1", "score": "0.48474875", "text": "def domains (self):\n return [mgr.domain_name for mgr in self.__repository.itervalues()]", "title": "" }, { "docid": "9305da98fa10ffa62589066a579587f0", "score": "0.48284027", "text": "def order_domain_values(csp, variable):\n return [value for value in variable.domain]", "title": "" }, { "docid": "a71931acd96a7b0310c8e1230bcfff2d", "score": "0.4827415", "text": "def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"domains\")", "title": "" }, { "docid": "621bef97f7f9badc6159530fb9ecbe51", "score": "0.48205632", "text": "def _find_cell_domain(self):\n for cell in self.assignments:\n # In this part we get all values for cell_index's\n # attribute_to_be_pruned\n\n # if the cell is dirty call find domain\n # else, call get negative examples (domain for clean cells)\n if cell.dirty == 1:\n self.cell_domain[cell.cellid] = self._find_dk_domain(\n self.assignments[cell],\n self.attribute_to_be_pruned[cell.cellid])\n else:\n self.cell_domain[cell.cellid] = self._find_clean_domain(\n self.assignments[cell],\n self.attribute_to_be_pruned[cell.cellid])\n return", "title": "" }, { "docid": "a3c31b76052e4eaf058766a8e0b36b40", "score": "0.48108482", "text": "def get_distribution(domain, record_type, servers, NUM_TRIALS):\n distribution = {\"control\": {}, \"target\": {}}\n # A new event loop will be spawned for every 50 requests\n # NUM_TRIALs must be divisible by 50, otherwise range(num_loops) will throw\n # an exception\n num_loops = int(NUM_TRIALS / BATCH_SIZE) or 1\n for server in servers:\n responses = []\n\n for i in range(num_loops):\n tasks = [\n dns_coroutine(domain, record_type, servers[server])\n for _ in range(BATCH_SIZE)\n ]\n loop = asyncio.get_event_loop()\n responses.append(loop.run_until_complete(asyncio.gather(*tasks)))\n for response in responses:\n for answer in response:\n for ans in answer[\"answers\"]:\n distribution[server][ans] = distribution[server].get(ans, 0) + 1\n return distribution", "title": "" }, { "docid": "19f24cd60dcd3e6b77fd75361847ae59", "score": "0.48071975", "text": "def get_domains_request(self, domain_id: Optional[int]) -> Dict:\n url_suffix = '/domain'\n if domain_id:\n url_suffix = f'{url_suffix}/{domain_id}'\n return self._http_request(method='GET', url_suffix=url_suffix)", "title": "" }, { "docid": "ad028bd2ede9f2b321f3e5c1b34ae832", "score": "0.4783835", "text": "def linked_downstream_reports_by_domain(master_domain, report_id):\n from corehq.apps.linked_domain.dbaccessors import get_linked_domains\n linked_domains = {}\n for domain_link in get_linked_domains(master_domain):\n linked_domains[domain_link.linked_domain] = any(\n r for r in get_linked_report_configs(domain_link.linked_domain, report_id)\n )\n return linked_domains", "title": "" }, { "docid": "b639cdf414c3e9a246b7a91bfdb22489", "score": "0.47825468", "text": "def get_domains(self):\n return Domain.filter(self.rd)", "title": "" }, { "docid": 
"a46c7b1f2a93a76c02e0574113bfb5b1", "score": "0.47659114", "text": "def domains(self):\n return [d.domain for d in self]", "title": "" }, { "docid": "bfb3e73e975cbe84aef56e1c60b8992a", "score": "0.47579932", "text": "def checkdomain(domain):\n try:\n for char in domain:\n if char !=\".\" and char!= \"-\" and char not in string.lowercase \\\n and int(char) not in range(0,10):\n return 0\n except:\n return 0\n parts=tldextract.extract(domain)\n if parts.suffix=='' or len(parts.domain)>63:\n return 0\n for subpart in parts.subdomain.split('.'):\n if len(subpart)>63:\n return 0\n\n #if the domain is in the hard coded whitelist, reject\n e2LD = \".\".join([parts.domain, parts.suffix])\n if e2LD in whitelist.websites:\n return 0\n if domain in whitelist.emailProviders or domain in whitelist.contentProviders:\n return 0\n #if domain is in Alexa top 100,000, reject", "title": "" }, { "docid": "b8f90eda73fa891ab34beb738f8aaec7", "score": "0.47552934", "text": "def get_random_domain():\n domain = random.choice(get_data('all_domain.txt', DOMAIN))\n if domain in BLACK_DOMAIN:\n self.get_random_domain()\n else:\n return domain", "title": "" }, { "docid": "5421f1268937946a5960f20c9c30d44b", "score": "0.47526902", "text": "def extend_sibling_domain(self, target_model, target_id):\n new_domain = super(wizard_create_timetracking, self).extend_sibling_domain(target_model, target_id)\n if target_model == self._get_target_model():\n new_domain = (self._get_target_field(), '=', int(target_id))\n\n return new_domain", "title": "" }, { "docid": "bb0432076ce7c24cd5bac1337de6a56a", "score": "0.4746811", "text": "def cut_domain(self):\n d = self._data\n domain_part = []\n up = 0\n i = self._offset\n while d[i] != self.DOMAIN_END:\n length = struct.unpack(\"!B\", d[i])[0]\n if length >= 0xc0:\n if i >= self._offset:\n self._offset += 2\n i = struct.unpack(\"!H\", d[i:i+2])[0] -0xc000\n continue\n up = i + length + 1\n domain_part.append(d[i+1:up])\n if up >= self._offset:\n self._offset += (length + 1)\n i = up\n if up >= self._offset:\n self._offset += 1\n return \".\".join(domain_part)", "title": "" }, { "docid": "91fdfd9b3b46e163a63be29c404ce150", "score": "0.47442424", "text": "def domain_names(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"domain_names\")", "title": "" }, { "docid": "258062eed9dedb732e7a560ca2338d44", "score": "0.47421396", "text": "def _get_dot11r_domainid(self):\n return self.__dot11r_domainid", "title": "" }, { "docid": "258062eed9dedb732e7a560ca2338d44", "score": "0.47421396", "text": "def _get_dot11r_domainid(self):\n return self.__dot11r_domainid", "title": "" }, { "docid": "258062eed9dedb732e7a560ca2338d44", "score": "0.47421396", "text": "def _get_dot11r_domainid(self):\n return self.__dot11r_domainid", "title": "" }, { "docid": "258062eed9dedb732e7a560ca2338d44", "score": "0.47421396", "text": "def _get_dot11r_domainid(self):\n return self.__dot11r_domainid", "title": "" }, { "docid": "97cc79c5f4cd58101fd8da3420093a0c", "score": "0.47377706", "text": "def get_domain_detail(DomainName=None):\n pass", "title": "" }, { "docid": "22c4661cd70b73f5e824f014df89647e", "score": "0.47361803", "text": "def dump_domain_ranges(self):\n\t\tpass", "title": "" }, { "docid": "0547af3859cf330d52d81f6be585e5ee", "score": "0.4729601", "text": "def get_delivered_domains(self):\n for addr in self._delivered:\n yield addr.split('@')[1]", "title": "" }, { "docid": "89c04eee5c8b9519fc7dc331e49261e3", "score": "0.4725693", "text": "def get_target_wwns_from_pg(self, portgroup_id):\n 
target_wwns = []\n port_ids = self.get_ports_from_pg(portgroup_id)\n for port in port_ids:\n dir_id = port.split(':')[0]\n port_no = port.split(':')[1]\n wwn = self.get_port_identifier(dir_id, port_no)\n target_wwns.append(wwn)\n return target_wwns", "title": "" }, { "docid": "b1609f5a734cd694cbbc7b0349a3d782", "score": "0.47238207", "text": "def get_domain_list(self):\n dat = self.make_message_structure(None, MsgType.REQUEST_GET_DOMAINLIST)\n return self.send_msg(dat)", "title": "" }, { "docid": "f7c87985f1af4e950016be240f370309", "score": "0.47121626", "text": "def _get_target_luns(self, target):\n port = target['portId']\n gid = target['hostGroupNumber']\n mapping_list = []\n luns_info = self.client.get_luns(port, gid)\n if luns_info:\n for lun_info in luns_info:\n mapping_list.append((port, gid, lun_info['lun'],\n lun_info['ldevId']))\n return mapping_list", "title": "" }, { "docid": "c86274b0ee370deb6f1d6eacf7684360", "score": "0.4710478", "text": "def domains(self) -> list[tuple[int, str, bool]] | None:\n return self.properties[DBUS_ATTR_DOMAINS]", "title": "" }, { "docid": "77f38468c69a66360dc181b3bb0b734d", "score": "0.47077808", "text": "def get_es_domain(region, env):\n try:\n\n es_client = boto3.client('es', region_name=region)\n\n custom_print('[INFO] Retrieving list of es domains for ' + str(env))\n response = es_client.list_domain_names()\n es_domain_list = []\n for key in response['DomainNames']:\n if env in key['DomainName']:\n es_domain_name = key['DomainName']\n es_domain_list.append(es_domain_name)\n custom_print('[INFO] Found ' + str(es_domain_list))\n\n return es_domain_list\n\n except Exception as error:\n custom_print('[ERROR] ' + str(error))\n return 2", "title": "" }, { "docid": "4e8f7559d5ed6ff26787d7c20e39d59c", "score": "0.46992344", "text": "def _get_domains(hass: core.HomeAssistant, config: dict[str, Any]) -> set[str]:\n # Filter out the repeating and common config section [homeassistant]\n domains = {key.partition(\" \")[0] for key in config if key != core.DOMAIN}\n\n # Add config entry domains\n if not hass.config.safe_mode:\n domains.update(hass.config_entries.async_domains())\n\n # Make sure the Hass.io component is loaded\n if \"SUPERVISOR\" in os.environ:\n domains.add(\"hassio\")\n\n return domains", "title": "" }, { "docid": "779ffa93a090ad4ac2cc34aca0ae0baa", "score": "0.4697948", "text": "def domain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain\")", "title": "" }, { "docid": "779ffa93a090ad4ac2cc34aca0ae0baa", "score": "0.4697948", "text": "def domain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain\")", "title": "" }, { "docid": "193b990b6937694c0423a53931811108", "score": "0.46947294", "text": "def domains_count(self):\n return self._domains_count", "title": "" }, { "docid": "3bdbfc17a359fc219c8629edfe44e876", "score": "0.46895248", "text": "def get_domains_command(client: Client, args: Dict) -> CommandResults:\n domain_id = arg_to_number(args.get('domain_id', None))\n limit = arg_to_number(args.get('limit', DEFAULT_LIMIT)) or DEFAULT_LIMIT\n page = arg_to_number(args.get('page'))\n page_size = arg_to_number(args.get('page_size'))\n if (page and not page_size) or (not page and page_size):\n raise Exception('Please provide both page and page_size arguments.')\n\n response = client.get_domains_request(domain_id)\n results = response.get('DomainDescriptor', {})\n contents = []\n if domain_id is not None:\n title = f'Domain no.{domain_id}'\n results = {\n 'ID': results.get('id'),\n 'Name': results.get('name'),\n 
'childdomains': results.get('childdomains')\n }\n contents = [{\n 'ID': results.get('ID'),\n 'Name': results.get('Name')\n }]\n else:\n title = 'List of Domains'\n children = [results]\n h_r_get_domains(children, contents)\n contents = pagination(contents, limit, page, page_size)\n readable_outputs = tableToMarkdown(\n name=title,\n t=contents,\n removeNull=True\n )\n return CommandResults(\n readable_output=readable_outputs,\n outputs_prefix='NSM.Domains',\n outputs=results,\n raw_response=results,\n outputs_key_field='ID'\n )", "title": "" }, { "docid": "fbe76d1800eb9eedfeca21eba28e2ce8", "score": "0.468819", "text": "def domain_count_dict(domains):\n domain_count = {} \n for domain in domains: \n if not domain in domain_count:\n domain_count[domain] = 1\n else:\n domain_count[domain] = domain_count[domain] + 1\n return domain_count", "title": "" }, { "docid": "b7f33d2376a678d3af84b36e8debcead", "score": "0.46867645", "text": "def has_periodic_targets(self):\n pc_indicator = self.program.indicator_set.filter(admin_type=Indicator.ADMIN_PARTICIPANT_COUNT).first()\n pts = pc_indicator.periodictargets.all()\n pts_count = pts.count()\n values = [pts_count]\n periods = []\n for pt in pts:\n periods.append(pt.period)\n values.append(periods)\n return values", "title": "" }, { "docid": "ecefe43a942fb5532e31e98f344089ed", "score": "0.46854612", "text": "def list_domains(self):\n r = self.make_call('execute/DomainInfo/list_domains')\n if r is None:\n return None\n return r['data']", "title": "" }, { "docid": "4f537810872ef7b00b18befa2d9e2bdc", "score": "0.4684296", "text": "def get_domain():\n from .. import __domain__\n return __domain__", "title": "" }, { "docid": "01c869fda99b7095a21a139c8b6d7617", "score": "0.4674541", "text": "def _parse_domain(self, domain):\n pieces = domain.split('.')\n return '.'.join(pieces[-2:]), '.'.join(pieces[:-2])", "title": "" }, { "docid": "07185b539421f8efa17c0726914fef96", "score": "0.4672695", "text": "def _get_domain(hyper):\n\n hyperparameters = []\n for hps in SELECTED_HPS:\n hyperparameters_ = [\n hyper.fixed('l2', None, length=1), # disable global l2\n hyper.fixed('train_proportion', 1.0, length=1),\n hyper.fixed('dataset', 'cifar100', length=1),\n hyper.fixed('train_epochs', 250, length=1),\n ]\n for name, value in hps.items():\n hyperparameters_.append(hyper.fixed(name, value, length=1))\n hyperparameters.append(hyper.product(hyperparameters_))\n\n return hyper.chainit(hyperparameters)", "title": "" }, { "docid": "6db95733489c9de6cc5a7fc382f7cd0e", "score": "0.46726337", "text": "def menu_dn_analysis(self):\n DNP = namedtuple('DNP', ['dn', 'partition'])\n dnps = [DNP._make((line1.directory_number, line1.partition))\n for phone in self.proxy.phones.list\n if (line1 := phone.lines.get(1))]\n\n def do_analysis(dnps: List[DNP]):\n \"\"\"\n Analysis of a set of DNs\n :param dnps:\n :return:\n \"\"\"\n # group DNs by len\n dn_by_len: Dict[int, List[str]] = defaultdict(list)\n for dnp in dnps:\n dn_by_len[len(dnp.dn)].append(dnp.dn)\n\n DNCluster = namedtuple('DNCluster', ['prefix', 'dns'])\n\n def find_clusters(prefix: str, digit_strings: List[str], total_count=None) -> List[Tuple[str, List[str]]]:\n if not prefix:\n total_count = len(digit_strings)\n if len(digit_strings[0]) <= 1:\n return []\n\n # determine DNs per next level digit\n first_digits = set()\n next_level_dns: Dict[str, List[str]] = defaultdict(set)\n for ds in digit_strings:\n first_digit = ds[0]\n first_digits.add(first_digit)\n next_level_dns[first_digit].add(ds[1:])\n first_digits = 
sorted(first_digits)\n total_count /= len(first_digits)\n for fd in first_digits:\n nld = sorted(next_level_dns[fd])[:10]\n output = [f'{prefix}{fd}-{ds}' for ds in nld]\n if len(next_level_dns[fd]) > 10:\n output.append('...')\n remaining_length = len(next(dn for dn in next_level_dns[fd]))\n density = 9 ** remaining_length\n\n print(\n f'prefix {prefix}-{fd}: {int(total_count)} {len(next_level_dns[fd])}/{density} digit strings: '\n f'{\", \".join(output)}')\n for fd in first_digits:\n find_clusters(prefix=f'{prefix}{fd}', digit_strings=list(next_level_dns[fd]),\n total_count=total_count)\n\n return []\n\n for dn_len in dn_by_len:\n print(f' len({dn_len}):')\n find_clusters('', dn_by_len[dn_len])\n return []\n\n # analysis of all DNS\n print('All DNs')\n do_analysis(dnps)\n\n dn_by_partition: Dict[str, List[DNP]] = defaultdict(list)\n for dnp in dnps:\n dn_by_partition[dnp.partition].append(dnp)\n\n # analysis by partition\n for partition in dn_by_partition:\n print(f'Partition \\'{partition}\\'')\n do_analysis(dn_by_partition[partition])", "title": "" }, { "docid": "978095cbfa21f4caf5a563d8026b88c7", "score": "0.4668879", "text": "def domain(self) -> str:\n ...", "title": "" }, { "docid": "8d73ce50dacaa85a4867166f4bcdd287", "score": "0.4664889", "text": "def getDomains(self, dom_path) -> dict:\n domains = dict()\n with open(dom_path) as file:\n lines = file.readlines()\n for line in lines[1:]:\n elements = list(map(int, line.split()))\n domain_id = elements[0]\n domain_values = elements[2:]\n domains[domain_id] = domain_values\n\n return domains", "title": "" }, { "docid": "fbc04ab55e22031f3c2a485cc96eaa5a", "score": "0.4664311", "text": "def get_role_domains(role, tab):", "title": "" }, { "docid": "15cbe20edf17237dfa290b5b93583524", "score": "0.46621257", "text": "def union(self, domain):", "title": "" }, { "docid": "543ea26e71f4e1b6f3d308bd046dd24f", "score": "0.4661884", "text": "def get_ips(self,domain,targets,messages):\n message_type = \"RESOLVE_IP\"\n message_status = \"\"\n message_text = \"\"\n try:\n dnsinfo = socket.gethostbyname_ex(str(domain['domain']))\n message_status = \"INFO\"\n message_text = { 'ips': dnsinfo[2] }\n messages['messages'].append({'message': { 'type': message_type, 'status': message_status, 'domain': str(domain['domain']), 'data': message_text }})\n except IOError, e:\n message_status = \"ERROR\"\n message_text = str(e)\n messages['messages'].append({'message': { 'type': message_type, 'status': message_status, 'domain': str(domain['domain']), 'data': message_text }})\n return False\n for ip in dnsinfo[2]:\n targets['targets'].append({'ip': ip, 'port': domain['port']})\n return True", "title": "" }, { "docid": "67c382a2cbdc5993584cc00903bc5fbe", "score": "0.4655445", "text": "def get_domain_suggestions(DomainName=None, SuggestionCount=None, OnlyAvailable=None):\n pass", "title": "" }, { "docid": "bf7df547f061bd41737bbf095e1b5340", "score": "0.46520448", "text": "def domain_reduction_singleton_domains(csp, queue=None) :\n if queue == None:\n queue = csp.get_all_variables()\n dequeue = []\n while len(queue) > 0:\n var = queue.pop(0)\n dequeue.append(var)\n eliminate = eliminate_from_neighbors(csp, var)\n if eliminate == None:\n return None\n else:\n for i in eliminate:\n if len(csp.get_domain(i)) == 1:\n queue.append(i)\n return dequeue", "title": "" }, { "docid": "53e660d0e7d865725f11b2bca8d847bb", "score": "0.46507874", "text": "def domain(self) :\n\t\ttry :\n\t\t\treturn self._domain\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": 
"e9b4d0b1abac7d838a8f42bbfd46d739", "score": "0.46489978", "text": "def get_domains(self):\n params = {\"orgSlug\": slugify(self.name), \"after\": \"\"}\n has_next = True\n domain_list = []\n\n # The maximum number of domains that can be requested at once is 100\n # This loop gets 100 domains, checks if there are more, and if there are\n # it gets another 100 starting after the last domain it got\n while has_next:\n result = self.client.execute_query(queries.GET_ORG_DOMAINS, params)\n\n if \"error\" in result:\n print(\"Server error: \", result)\n raise ValueError(\"Unable to get domains for \" + self.name)\n\n for edge in result[\"findOrganizationBySlug\"][\"domains\"][\"edges\"]:\n domain_list.append(dom.Domain(self.client, **edge[\"node\"]))\n\n has_next = result[\"findOrganizationBySlug\"][\"domains\"][\"pageInfo\"][\n \"hasNextPage\"\n ]\n\n params[\"after\"] = result[\"findOrganizationBySlug\"][\"domains\"][\"pageInfo\"][\n \"endCursor\"\n ]\n\n return domain_list", "title": "" }, { "docid": "34a6a535479f13d3beb0a81319acd6d1", "score": "0.46350187", "text": "def get_domain_output(name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDomainResult]:\n ...", "title": "" }, { "docid": "47b53b37a766bffdcc0b9ad147e45f21", "score": "0.4634146", "text": "def _find_clean_domain(self, assignment, trgt_attr):\n cell_probabilities = []\n\n # Always have the initial value in the returned domain values unless\n # it is Null\n if assignment[trgt_attr] is not None:\n cell_values = {(assignment[trgt_attr])}\n else:\n cell_values = {()}\n for attr in assignment:\n if attr == trgt_attr:\n continue\n attr_val = assignment[attr]\n\n if attr in self.coocurence_lookup:\n if attr_val in self.coocurence_lookup[attr]:\n if trgt_attr in self.coocurence_lookup[attr][attr_val]:\n if trgt_attr in self.coocurence_lookup[attr][attr_val]:\n cell_probabilities += \\\n [(k, v) for\n k, v\n in\n self.coocurence_lookup[attr][attr_val][\n trgt_attr].iteritems()]\n\n # get l values from the lookup exactly like in dirty where l < k\n # get k-l random once from the domain\n cell_probabilities.sort(key=lambda t: t[1])\n while len(cell_probabilities) > 0: # for now l = k/2\n if len(cell_values) == self.clean_breakoff/2:\n break\n tuple = cell_probabilities.pop()\n value = tuple[0]\n cell_values.add(value)\n\n random.shuffle(cell_probabilities)\n\n while len(cell_probabilities) > 0:\n if len(cell_values) == self.clean_breakoff:\n break\n tuple = cell_probabilities.pop()\n value = tuple[0]\n cell_values.add(value)\n return cell_values", "title": "" }, { "docid": "bdeeff1f6dd5507eac45779efb9e5d3f", "score": "0.4630026", "text": "def get_trixie_domain_data():\n dict = {\n \"HitID\": \"gnl|CDD|334841\",\n \"DomainID\": \"pfam02195\",\n \"Name\": \"ParBc\",\n \"Description\": \"ParB-like nuclease domain\"\n }\n return dict", "title": "" }, { "docid": "dc2d841cfe1a3720d0f05aebfbc19815", "score": "0.46287408", "text": "def domain_size(domain):\n fixed_domain_sizes = {\n \"current collector\": 3,\n \"negative particle\": 5,\n \"positive particle\": 7,\n \"negative electrode\": 11,\n \"separator\": 13,\n \"positive electrode\": 17,\n \"negative particle size\": 19,\n \"positive particle size\": 23,\n }\n if domain in [[], None]:\n size = 1\n elif all(dom in fixed_domain_sizes for dom in domain):\n size = sum(fixed_domain_sizes[dom] for dom in domain)\n else:\n size = sum(hash(dom) % 100 for dom in domain)\n return size", "title": "" } ]
2f8cd6721dc274ac9d37686b45269349
Get the number of matches between two sequences
[ { "docid": "fe06a3652b1ead85337a756c0cf74983", "score": "0.80829686", "text": "def GetSeqMatch(seq1, seq2):\r\n matchCnt = 0\r\n for idx, curChar1 in enumerate(seq1):\r\n curChar2 = seq2[idx]\r\n if curChar1 == curChar2: matchCnt += 1\r\n return matchCnt", "title": "" } ]
[ { "docid": "24ebcce5635738a22d8a171f3d367e30", "score": "0.68315715", "text": "def num_matches(list1, list2):\n list1.sort()\n list2.sort()\n if list1 == [] or list2 == []:\n return 0\n if list1[0] == list2[0]:\n return 1 + num_matches(list1[1:], list2[1:])\n if list1[0] < list2[0]:\n return num_matches(list1[1:], list2)\n if list1[0] > list2[0]:\n return num_matches(list1, list2[1:])", "title": "" }, { "docid": "9ff5ef61d560d996b9b77aec7f37621f", "score": "0.6821103", "text": "def numMatchingSubseq(S, words):\n res = 0\n n1 = len(S)\n for w in words:\n n2 = len(w)\n pt1 = 0\n pt2 = 0\n while pt1 < n1 and n1 - pt1 >= n2 - pt2:\n if pt2 == n2 - 1 and S[pt1] == w[pt2]:\n res += 1\n break\n elif S[pt1] == w[pt2]:\n pt2 += 1\n pt1 += 1\n else:\n pt1 += 1\n return res", "title": "" }, { "docid": "4ee2a68b0f6ec72327eedc7af39f1800", "score": "0.6764013", "text": "def sim(w1,w2):\n\n\tcnt = 0\n\tfor i, c in enumerate(w1):\n\t\tif c == w2[i]:\n\t\t\tcnt += 1\n\n\treturn cnt", "title": "" }, { "docid": "3639868cd3edf8203c70b1eb132296ae", "score": "0.6695735", "text": "def repeatedStringMatch(self, a: str, b: str) -> int:\n minCnt = ceil(len(b) / len(a))\n for cnt in [minCnt, minCnt + 1]:\n if b in a * cnt:\n return cnt\n\n return -1", "title": "" }, { "docid": "bced5e2ddac5821b07c52ea1e7f13626", "score": "0.66847074", "text": "def match(s1, s2):\n pos = -1\n for i, (c1, c2) in enumerate(zip(s1, s2)):\n if c1 != c2:\n if pos != -1:\n return -1\n else:\n pos = i\n return pos", "title": "" }, { "docid": "95467ab7a7a7f88085f1819a4b1f7add", "score": "0.66544527", "text": "def get_indices_from_alignment(align1, align2, debug=False):\n matchLocations = ''\n \n \n basesCounted = 0\n threeprimeMatches = 0\n mostThreeprimeMatches = 0\n \n align1Matches = [match.start() for match in (re.finditer(r'\\w', str(align1)))] # Use regexes to find the indices of all characters that are not dashes.\n align2Matches = [match.start() for match in (re.finditer(r'\\w', str(align2)))]\n\n matchLocations = [val for val in align1Matches if val in align2Matches] # The intersection of the above two lists (indices of all matches).\n \n if debug:\n print('\\n' + align1)\n print(align2 + '\\n\\n')\n \n return matchLocations, align1Matches", "title": "" }, { "docid": "8f3f4e8b9dd5fdde0510c233372b6f08", "score": "0.66161394", "text": "def _pairwise_distance(self, sequence1, sequence2):\n sum = 0\n pairs = zip(sequence1, sequence2)\n length = 0\n for pair in pairs:\n if (pair[0] != pair[1]):\n sum += 1\n length += 1\n return sum", "title": "" }, { "docid": "1bd00f830ba49b86ca4be8d4ab2000bd", "score": "0.65855867", "text": "def numMatchingSubseq(self, S: str, words: List[str]) -> int:\n char_indexes = collections.defaultdict(list)\n for idx, char in enumerate(S):\n char_indexes[char].append(idx)\n ans = 0\n N = len(S)\n for word in words:\n flag = 1\n idx_prev = -1\n for char in word:\n i = bisect.bisect_right(char_indexes[char], idx_prev) \n if i == len(char_indexes[char]):\n flag = 0\n break\n idx_prev = char_indexes[char][i]\n ans += flag\n return ans", "title": "" }, { "docid": "c9684aea0915cdc8617f166456571373", "score": "0.6544276", "text": "def slidingmatch(self, seq, match, maxmis):\n # pairwise2.align does bad when aligning 2 sequences of different size without gap penalty. 
\n # But don't want gap penalty in the match score, so do alignment first with gap penalty to get\n # the region of the match, then do second alignment without gap penalty of the subseq to get\n # the score\n # only used for LTR circle, which are not many. 1st pass screen with scores only to speed things up\n if len(match) == 0:\n return 0\n elif pairwise2.align.localxs(seq, match,-1,-1, score_only=True) >= len(match) - maxmis:\n self.s, self.m, self.score, self.start, self.end = pairwise2.align.localxs(seq, match,-1,-1)[0]\n self.subseq = seq[self.start:self.end]\n if pairwise2.align.localxd(self.subseq, match, -1,-1,0,0, score_only=True):\n return self.start+1 if pairwise2.align.localxd(self.subseq, match, -1,-1,0,0, score_only=True) \\\n >= self.end - self.start - maxmis else 0\n else:\n return 0\n else:\n return 0", "title": "" }, { "docid": "44b15886f586f9250a5e34660baeefb2", "score": "0.6543291", "text": "def part_1(init_a: int, init_b: int, tests_count: int = 40_000_000) -> int:\n\n matches = count_matches(*gen_pair(init_a, init_b), tests_count=tests_count)\n print(f\"part 1: {matches} matches\")\n return matches", "title": "" }, { "docid": "962bc8dc89824c91538ea70700c7b321", "score": "0.64985466", "text": "def test_sequence_matcher2(self):\n a = [\"a\", \"b\"]\n b = [\"a\", \"b\", \"d\", \"c\"]\n sm = SequenceMatcher()\n sm.set_seq1(a)\n sm.set_seq2(b)\n self.assertEqual(sm.distance(), 2)\n sm.set_seqs(b, a)\n self.assertEqual(sm.distance(), 2)", "title": "" }, { "docid": "d20c04e81cc9aaa8b2fd5fe29c61ea37", "score": "0.64480567", "text": "def get_matches(l1, l2):\n if len(l1) > len(l2):\n l1, l2 = l2, l1\n\n l2 = set(l2)\n return sum(1 for x in l1 if x in l2)", "title": "" }, { "docid": "a18ace68ca52eb316d6c8349bb6565e1", "score": "0.6444652", "text": "def exactmatch(self, seq, match, start):\n if len(seq) < start + len(match):\n score = 0\n elif len(match) == 0:\n score = 0\n else:\n score = 1 if seq[(start-1):start-1+len(match)] == match else 0\n return score", "title": "" }, { "docid": "1601c9bb6803c3bf7dba8acd89a1f85a", "score": "0.6402631", "text": "def get_pid_two_seqs(self, seq1, seq2):\n matches = 0.0; total = 0.0\n for ind, seq1char in enumerate(seq1):\n seq2char = seq2[ind]\n if seq1char == \"_\" and seq2char == \"_\":\n continue\n total += 1.0\n if seq1char == seq2char:\n matches += 1.0\n return (matches/total * 100)", "title": "" }, { "docid": "215b5874acc079fccb083c84cae254bf", "score": "0.63866293", "text": "def jscore(s1,s2):\r\n\r\n if len(s1) == 0 or len(s2) == 0:\r\n return 0\r\n else:\r\n if s1[0] in s2:\r\n next_char = rem_first(s1[0], s2)\r\n #print(next_char)\r\n return jscore(s1[1:], next_char) + 1\r\n else:\r\n next_char = rem_first(s1[0], s2)\r\n return jscore(s1[1:], next_char)", "title": "" }, { "docid": "13fc6345f47ac27a1eda78350ae0397a", "score": "0.63681173", "text": "def countMutations( self, seq1, seq2 ):\n\t\tassert len( seq1 ) == len( seq2 )\n\t\tassert len( seq1 ) % 3 == 0\n\t\tassert len( seq2 ) % 3 == 0\n\t\t\n\t\tprint str(seq1)\n\t\tprint str(seq2)\n\t\t\n\t\tns_counts = []\n\t\ts_counts = []\n\t\tfor i in range( len( seq1 )/ 3 ):\n\t\t\tc1 = seq1[3*i:3*i+3]\n\t\t\tc2 = seq2[3*i:3*i+3]\n\t\t\tprint \"Codon\", c1, \"to\", c2\n\t\t\t( ns, s ) = MutationCounter.countMutationsInCodons( self, c1, c2 )\n\t\t\tprint \"ns:\", ns, \"s:\", s\n\t\t\tns_counts += ns\n\t\t\ts_counts += s\n\t\treturn ( ns_counts, s_counts )", "title": "" }, { "docid": "27397a463186c5fcf6c1ce69fde26202", "score": "0.6359819", "text": "def matching_number(G): \n return 
len(maximal_matching(G,M))", "title": "" }, { "docid": "c471787f31b034012fd9fa76761f5193", "score": "0.631771", "text": "def compute(strand_one, strand_two):\n if len(strand_one) != len(strand_two):\n raise ValueError\n else:\n return sum(strand_one[i] != strand_two[i] for i in range(len(strand_one)))", "title": "" }, { "docid": "83f20a1c93fc500cc9302460e1f191a0", "score": "0.6317327", "text": "def similiarity(self, name1, name2):\n\n n = min(len(name1), len(name2))\n for i in range(n):\n if (name1[i] != name2[i]):\n return i + 1\n return n", "title": "" }, { "docid": "51852f89eb8a2e61357974c216e71432", "score": "0.6310881", "text": "def seq_helper(s1,s2, i, j):\n if (i == len(s1)):\n return (len(s2) - j) * -2\n if (j == len(s2)):\n return (len(s1) - i)*-2\n\n if (s1[i] == s2[j] ):\n return seq_helper(s1,s2,i+1, j+1)\n else:\n return 1 + min(\n seq_helper(s1,s2,i+1,j+1),\n seq_helper(s1,s2,i+1,j),\n seq_helper(s1,s2,i, j+1)\n )", "title": "" }, { "docid": "90be2eec233ad756aeb34b7010dd884e", "score": "0.63070947", "text": "def overlap_match(a, b):\n return np.sum(a*b)", "title": "" }, { "docid": "a5c91bc4ec339ce83d4879027a9ed6c5", "score": "0.625421", "text": "def get_num_pairs(seq):\n n = len(seq)\n return int(n * (n-1)/2) # sum of arphmetic progression (n-1)...1", "title": "" }, { "docid": "aec189bba72a415f0d0cb2a0a403c6c8", "score": "0.6245744", "text": "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "title": "" }, { "docid": "c1c148a89c8ed0f2fa5eda6ce55e3c2a", "score": "0.6243692", "text": "def get_symbols_count(src_seq_lens, tgt_seq_lens):\n return tf.reduce_sum(src_seq_lens) + tf.reduce_sum(tgt_seq_lens)", "title": "" }, { "docid": "a896e6e805e93115d71185d819e6ab13", "score": "0.62370336", "text": "def count_reps(seq):\n ...", "title": "" }, { "docid": "f4b75b2b897004bfb349ff1fde138009", "score": "0.6216449", "text": "def match_rank(self, other):\n if self.components[-1] != other.components[-1]:\n return 0\n rank = 0\n for comp1, comp2 in zip(reversed(self.components),\n reversed(other.components)):\n if comp1 != comp2:\n break\n rank += 1\n return rank", "title": "" }, { "docid": "789fb3e34e63da319991455aac1f8477", "score": "0.6216066", "text": "def moveCount(arr1: list, arr2: list) -> int:\n set1 = set(arr1)\n set2 = set(arr2)\n\n result = set1 ^ set2\n return len(result)", "title": "" }, { "docid": "ae7fd09ce6d20965e85324d18c74bac7", "score": "0.6211473", "text": "def get_match_score(d1, d2):\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(d1, d2, k=2)\n\n sim = 0\n for m, n in matches:\n if m.distance < 0.70 * n.distance:\n sim += 1\n\n return sim", "title": "" }, { "docid": "bada94934e688d45470addf0900731bf", "score": "0.62112707", "text": "def compare(self, w1, w2):\n d = 0\n for i in range(min(len(w1), len(w2))):\n if w1[i] != w2[i]:\n break\n else:\n d += 1\n return d, len(w1)-d", "title": "" }, { "docid": "27711f08611a7176ab73862e7055d9b7", "score": "0.6160463", "text": "def part_2(init_a: int, init_b: int, tests_count: int = 5_000_000) -> int:\n\n matches = count_matches(*gen_pair(init_a, init_b, only_divisible=True), tests_count=tests_count)\n print(f\"part 2: {matches} matches using divisibility\")\n return matches", "title": "" }, { "docid": "0ad217f7b808a7c87c8e4d842ad24f64", "score": "0.61582595", "text": "def calculate_score(s1, s2, l1, l2, startpoint):\n matched = \"\" # to hold string displaying alignements\n score = 0\n for i in range(l2): # Runs the loop as many times as the l2\n if (i + startpoint) < l1: # Checks to see if l1 has been exceeded\n 
if s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\"\n score = score + 1\n else:\n matched = matched + \"-\"\n\n # some formatted output\n print(\".\" * startpoint + matched) # Shows where the base matches are \n print(\".\" * startpoint + s2) # Shows where s1 is being compared to s2\n print(s1)\n print(score) \n print(\" \")\n\n return score", "title": "" }, { "docid": "28b57dd9732f9b9929e4c4f82693554e", "score": "0.6147481", "text": "def count_ctdc(seq1, seq2):\n sum_var = 0\n for aa in seq1:\n sum_var = sum_var + seq2.count(aa)\n return sum_var", "title": "" }, { "docid": "8cefc7277a9ab87266dd4d669b4cf387", "score": "0.6138126", "text": "def distance(seq1, seq2):\n return levenshtein(seq1, seq2)", "title": "" }, { "docid": "ddf6531e3a75779239511aa36992839f", "score": "0.61369675", "text": "def score_align (seq1, seq2, sm, g):\n res = 0;\n for i in range(len(seq1)):\n res = res + score_pos(seq1[i],seq2[i],sm,g)\n \n return res", "title": "" }, { "docid": "ec915dc3eb9f5118f5626ffd4f84e5b0", "score": "0.6131369", "text": "def hamming_distance(str1,str2):\n\tbit_str1 = BitArray(str1)\n\tbit_str2 = BitArray(str2)\n\treturn (Bits(\"0b\"+bit_str2.bin)^Bits(\"0b\"+bit_str1.bin)).count(True)", "title": "" }, { "docid": "0325311a451a9058389a0910ae0b69a4", "score": "0.61293304", "text": "def common_prefix_length(str1, str2):\n if len(str1) > len(str2):\n return common_prefix_length(str2, str1)\n else:\n m = 0\n for i in range(len(str1)):\n if str1[i] == str2[i]:\n m += 1\n else:\n break\n return m", "title": "" }, { "docid": "bcbe60122ede06ccf7460e9d1c52b1f5", "score": "0.61158437", "text": "def match(long_sequence, short_sequence):\n best_match = ''\n match_num = 0\n for i in range(len(long_sequence)-len(short_sequence)+1):\n s = long_sequence[i:i+len(short_sequence)]\n a = 0\n for j in range(len(short_sequence)):\n if s[j] == short_sequence[j]:\n a += 1\n if match_num <= a:\n match_num = a\n best_match = s\n match_rate = match_num / len(short_sequence) * 100\n print('The best match is: '+str(best_match))\n print('The match rate is: '+str(match_rate)+'%')", "title": "" }, { "docid": "70ddeca09de03c35e8f851047eb95891", "score": "0.6113973", "text": "def getMergeCnt(patternStr):\r\n strLis = parsePatternStr(patternStr)\r\n lengthLis = map(lambda x:len(x), strLis)\r\n return sum(lengthLis) - len(lengthLis)", "title": "" }, { "docid": "62d25713fa1b856cb8fb268dc7bd796d", "score": "0.61123496", "text": "def get_match_count(self):\n return len(self.matches)", "title": "" }, { "docid": "b796a7c3b119770469b61c7c2fc769d9", "score": "0.6107027", "text": "def score_char_overlap(term1: str, term2: str) -> int:\n num_char_matches = 0\n for char in term2:\n if char in term1:\n term1 = term1.replace(char, \"\", 1)\n num_char_matches += 1\n return num_char_matches", "title": "" }, { "docid": "20d1838982b984fcb800b51dc5e95df0", "score": "0.60933656", "text": "def numMatchingSubseq(self, S: str, words: List[str]) -> int:\n\n heads = [[] for _ in range(26)]\n for i, word in enumerate(words):\n heads[ord(word[0]) - ord(\"a\")].append((i, 1))\n\n ans = 0\n for c in S:\n old_bucket = heads[ord(c) - ord(\"a\")][:]\n heads[ord(c) - ord(\"a\")] = []\n for i, j in old_bucket:\n if j == len(words[i]):\n ans += 1\n else:\n heads[ord(words[i][j]) - ord(\"a\")].append((i, j + 1))\n return ans", "title": "" }, { "docid": "5a3750abeae78cdd5018eed39b5cab0e", "score": "0.6083809", "text": "def matching_subsequences_ratio(\n seq1: Sequence[str], seq2: Sequence[str], **kwargs\n) -> float:\n return 
difflib.SequenceMatcher(a=seq1, b=seq2, **kwargs).ratio()", "title": "" }, { "docid": "9349037f2ea68e4db1c23c3d8c1d5cae", "score": "0.6074299", "text": "def get_score_of_two_chars(substitution_matrix: SubstitutionMatrix, char_a: str, char_b: str) -> int:\n return int(substitution_matrix.get_distance(char_a, char_b))", "title": "" }, { "docid": "2c223c03d4df36d8e73ae19dd672e122", "score": "0.6072878", "text": "def first_match(cls, a, b):\n ## find match\n\n start = b.find(a[:cls.min_size])\n if start == -1:\n msg = 'No match of size >= {} found'.format(cls.min_size)\n raise ValueError(msg)\n\n ## try to expand match\n\n n = cls.min_size\n while a[:n] == b[start:start+n] and n <= len(a):\n n += 1\n\n return start, n-1", "title": "" }, { "docid": "b3bc7c2b8b603bbec25312b322766677", "score": "0.6072145", "text": "def test_sequence_matcher(self):\n a = [\"a\", \"b\"]\n b = [\"a\", \"b\", \"d\", \"c\"]\n sm = SequenceMatcher(a=a, b=b)\n opcodes = [\n [\"equal\", 0, 1, 0, 1],\n [\"equal\", 1, 2, 1, 2],\n [\"insert\", 2, 2, 2, 3],\n [\"insert\", 2, 2, 3, 4],\n ]\n self.assertEqual(sm.distance(), 2)\n self.assertEqual(sm.ratio(), 2 / 3)\n self.assertEqual(sm.quick_ratio(), 2 / 3)\n self.assertEqual(sm.real_quick_ratio(), 2 / 3)\n self.assertEqual(sm.distance(), 2)\n # This doesn't return anything, saves the value in the sm cache.\n self.assertTrue(not sm._compute_distance_fast())\n self.assertEqual(sm.get_opcodes(), opcodes)\n self.assertEqual(list(sm.get_matching_blocks()), [[0, 0, 1], [1, 1, 1]])", "title": "" }, { "docid": "91e411efbe43b24a11424a30d0ccbdfd", "score": "0.6064722", "text": "def match_ends(words):\n\n count = 0\n\n for word in words:\n if len(word) > 1 and word[0] == word[-1]:\n count += 1\n\n return count", "title": "" }, { "docid": "e5895ded851cde37690de7af36eea42d", "score": "0.60448664", "text": "def naive_with_counts(p, t):\n occurrences = []\n num_alignments = 0\n num_character_comparisons = 0\n for i in range(len(t) - len(p) + 1): # loop over alignments\n match = True\n for j in range(len(p)): # loop over characters\n num_character_comparisons += 1\n if t[i+j] != p[j]: # compare characters\n match = False\n break\n if match:\n occurrences.append(i) # all chars matched; record\n num_alignments += 1\n return occurrences, num_alignments, num_character_comparisons", "title": "" }, { "docid": "56034329b529a061cb760db63784b0fb", "score": "0.60416627", "text": "def part2():\n sections = read_input()\n res = 0\n for start1, end1, start2, end2 in sections:\n if start1 <= end2 and end1 >= start2:\n res += 1\n return res", "title": "" }, { "docid": "286c0a6f2f03e5f7502a38c5cb2710af", "score": "0.60328776", "text": "def match_count(target, pred):\n target_len = len(target)\n pred_len = len(pred)\n count = 0\n target_i = 0\n pred_i = 0\n target_end = 0\n pred_end = 0\n next_target = True\n next_pred = True\n while target_i < target_len and pred_i < pred_len:\n target_word = target[target_i]\n pred_word = pred[pred_i]\n\n # choose from target and prediction list to iterate according to the former iter\n if next_target:\n target_end += len(target_word)\n next_target = False\n if next_pred:\n pred_end += len(pred_word)\n next_pred = False\n\n # Exact match of the location and string\n if target_word == pred_word and target_end == pred_end:\n count += 1\n target_i += 1\n pred_i += 1\n next_pred = True\n next_target = True\n # Choose the list of shorter length to iterate\n else:\n if target_end > pred_end:\n pred_i += 1\n next_pred = True\n elif target_end < pred_end:\n target_i += 1\n 
next_target = True\n else:\n pred_i += 1\n target_i += 1\n next_pred = True\n next_target = True\n return count", "title": "" }, { "docid": "47bcfbe1a035672d33bf4fe287e3ca67", "score": "0.6031924", "text": "def get_best_match(self, seq1, seq2):\r\n\t\r\n\t\trseqlen = len(seq2)\r\n\t\tmm = ()\r\n\t\tif 'F' in self.cfg['TSD_ORIENTATION']:\r\n\t\t\toffset = 0\r\n\t\t\twhile 1:\r\n\t\t\t\tm = self.get_seed(seq1, seq2, offset)\r\n\t\t\t\tif not m:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tem = self.extend_match(seq1, seq2, m)\r\n\t\t\t\tif not mm or em[3] > mm[3]:\r\n\t\t\t\t\tmm = em\r\n\t\t\t\toffset = em[0][1]\r\n\t\tif 'R' in self.cfg['TSD_ORIENTATION']:\r\n\t\t\tseq3 = self._revcomp(seq2)\r\n\t\t\toffset = 0\r\n\t\t\twhile 1:\r\n\t\t\t\tm = self.get_seed(seq1, seq3, offset)\r\n\t\t\t\tif not m:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tem = self.extend_match(seq1, seq3, m, 'R')\r\n\t\t\t\tif not mm or em[3] > mm[3]:\r\n\t\t\t\t\ts = rseqlen - em[1][1];\r\n\t\t\t\t\tem[1][1] = rseqlen - em[1][0];\r\n\t\t\t\t\tem[1][0] = s;\r\n\t\t\t\t\tmm = em\r\n\t\t\t\toffset = em[0][1]\r\n\t\treturn mm", "title": "" }, { "docid": "0e1718dc1ce08e88353306e3c67e5961", "score": "0.60215116", "text": "def getPatternDist(patternStr1, patternStr2):\r\n dist = 0\r\n strLis1 = parsePatternStr(patternStr1)\r\n strLis2 = parsePatternStr(patternStr2)\r\n for strIdx, curStr1 in enumerate(strLis1):\r\n curStr2 = strLis2[strIdx]\r\n if not isSinglePatternSimi(curStr1, curStr2): dist += 1\r\n return dist", "title": "" }, { "docid": "f582b329caee76a5602a3daef7edb6f0", "score": "0.60167724", "text": "def match(p_entities, r_entities, type):\n p_entities = [tuple(entity) for entity in p_entities if entity[-1] == type]\n r_entities = [tuple(entity) for entity in r_entities if entity[-1] == type]\n pcount = len(p_entities)\n rcount = len(r_entities)\n correct = len(list(set(p_entities) & set(r_entities)))\n return [pcount, rcount, correct]", "title": "" }, { "docid": "910b90bd8606e1961b8d6e1d8f83aefa", "score": "0.6003306", "text": "def count_mathcing_tuples(plagiarized, source, synonyms):\n tuple_match_count = 0\n for tup in plagiarized:\n synonyms_tuples = build_synonyms_tuples(tup, synonyms)\n for synonyms_tuple in synonyms_tuples:\n if synonyms_tuple in source:\n tuple_match_count += 1\n return tuple_match_count", "title": "" }, { "docid": "c8df941c65698b0466c81470190f16fd", "score": "0.599935", "text": "def _diff_count(string1, string2):\n assert isinstance(string1, str)\n assert isinstance(string2, str)\n if string1 == string2:\n return 0\n minlen = min(len(string1), len(string2))\n diffcount = abs(len(string1) - len(string2))\n for ii in range(0,minlen):\n if string1[ii] != string2[ii]:\n diffcount += 1\n return diffcount", "title": "" }, { "docid": "662bb37a943a4e26d7fca2c35337dc69", "score": "0.59865487", "text": "def find_overlap(read1,read2):\n # Returns a list with one element. The element is the no. of bp with which two read overlap\n candidates_overlaps = [l for l in range(min(len(read1),len(read2))/2, min(len(read1),len(read2))) \n if read1[-l:] == read2[:l]]\n # If two reads overlap, return the no. 
of overlapping bps, else return 0\n return max(candidates_overlaps) if len(candidates_overlaps)>0 else 0", "title": "" }, { "docid": "a57682c9b59d3d64de84b497e26ecfc3", "score": "0.59842837", "text": "def intersection(phrase_a: str, phrase_b: str) -> int:\n return len(tokenized(phrase_a) & tokenized(phrase_b))", "title": "" }, { "docid": "16d77c7d08e0b38dcecc9c3af38eac55", "score": "0.59769773", "text": "def sets_levenshtein(a,b):\n c = a.intersection(b)\n return len(a-c)+len(b-c)", "title": "" }, { "docid": "ad076cab461547ec37e14fb25ed8daf8", "score": "0.597429", "text": "def get_matches_count(self):\n distances = np.array([dists for dists in self.gt_distances.values()])\n matches_mask = distances != -1\n return distances[matches_mask].size", "title": "" }, { "docid": "1e3152fe5909cd18a84f733948893def", "score": "0.59677494", "text": "def mcsdr_edge_count(arr1, arr2):\n return len(find_mcsdr(arr1, arr2))", "title": "" }, { "docid": "2e678a8b2c0732b51e22ef59ae916d64", "score": "0.5962293", "text": "def similarity(A,B):\n return len( A & B ) / len( A | B )", "title": "" }, { "docid": "7de9234b246d3c60e683d258387c97b3", "score": "0.5959693", "text": "def match(string1, string2):\r\n\r\n best_length = 0\r\n max_length1 = len(string1) - 1\r\n max_length2 = len(string2) - 1\r\n # for all possible string1 start points\r\n for idx1 in range(max_length1):\r\n # for all possible string2 start points\r\n for idx2 in range(max_length2):\r\n # check if these characters match\r\n if string1[idx1] == string2[idx2]:\r\n this_match_count = 1\r\n # see how long the match continues\r\n while idx1 + this_match_count <= max_length1 and idx2 + this_match_count <= max_length2:\r\n current_index1 = idx1 + this_match_count\r\n current_index2 = idx2 + this_match_count\r\n if string1[current_index1] == string2[current_index2]:\r\n this_match_count += 1\r\n else:\r\n break\r\n\r\n # compare to best so far\r\n if this_match_count > best_length:\r\n best_length = this_match_count\r\n\r\n # now return the result\r\n return best_length", "title": "" }, { "docid": "4560ad05e2caf2a81a9d6123a637059c", "score": "0.59555465", "text": "def match_twosided(desc1,desc2):\n \n matches_12 = match(desc1,desc2)\n matches_21 = match(desc2,desc1)\n \n ndx_12 = matches_12.nonzero()[0]\n \n #remove matches that are not symmetric\n for n in ndx_12:\n if matches_21[int(matches_12[n])] != n:\n matches_12[n] = 0\n \n return matches_12", "title": "" }, { "docid": "2ebeb61a091a658501653ba73ba5cb2f", "score": "0.595043", "text": "def match_ends(words):\n \n cnt = 0\n for w in words:\n if len(w) > 1:\n if w[0] == w[-1]:\n cnt += 1\n\n return cnt", "title": "" }, { "docid": "2987f858ee48d74c6beb5793a0338a67", "score": "0.59372854", "text": "def hamming_distance(s1, s2) -> int:\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length.\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "title": "" }, { "docid": "59ae9d4c2c61fce3945d84cc5407100b", "score": "0.59336126", "text": "def count_winning_pairs(sample_1, sample_2):\n sample_1, sample_2 = np.array(sample_1), np.array(sample_2)\n n_total_wins = 0\n for x in sample_1:\n n_wins = np.sum(x > sample_2) + 0.5*np.sum(x == sample_2)\n n_total_wins += n_wins\n return n_total_wins", "title": "" }, { "docid": "58ad0897fc828ecff591c82f6c8c78ff", "score": "0.5931003", "text": "def SN_fuzzymatch(self, seq, match, start, maxmis, sn_position, sn_length):\n self.subseq1 = seq[start-1:sn_position-1]\n self.subseq2 = seq[sn_position-1+sn_length:(start-1+len(match))]\n 
self.match1 = match[0:sn_position-start]\n self.match2 = match[sn_position-start+sn_length:]\n if len(self.subseq1)+len(self.subseq2)+sn_length < len(match):\n score = 0\n elif len(match) == 0:\n score = 0\n else:\n self.score1 = pairwise2.align.localxd(self.subseq1, self.match1, -1,-1,0,0, score_only=True)\n self.score2 = pairwise2.align.localxd(self.subseq2, self.match2, -1,-1,0,0, score_only=True)\n score = 1 if self.score1 + self.score2 >= len(match) - maxmis else 0\n return score", "title": "" }, { "docid": "bbb109c54bc81d422e760ff0d0d532f1", "score": "0.5927392", "text": "def match_sequence(needle, haystack, maxdiff):\n nlen = len(needle)\n found = False\n for i in range(len(haystack)-nlen):\n if np.abs(haystack[i:i+nlen] - needle).max() < maxdiff:\n found = True\n break\n if not found:\n i = None\n return i", "title": "" }, { "docid": "a7f6f5714dc497a5a6aafe4b78e5743d", "score": "0.59269017", "text": "def nmatches_rec(txt, pat, t, p):\n if p==0:\n return 1\n\n if t==0:\n return 0\n\n matches = 0\n for i in range(t, 0, -1):\n if txt[t-i] == pat[p-1]:\n matches += nmatches_rec(txt, pat, t-i, p-1)\n\n return matches", "title": "" }, { "docid": "af5dfb3ff6c6766c690622704642bef2", "score": "0.59242564", "text": "def common_suffix_length(str1, str2):\n if len(str1) > len(str2):\n return common_suffix_length(str2, str1)\n else:\n m = 0\n for i in range(len(str1)):\n if str1[len(str1) - 1 - i] == str2[len(str2) - 1 - i]:\n m += 1\n else:\n break\n return m", "title": "" }, { "docid": "7835d79cd3ddfebc7994e8306cda413e", "score": "0.5904704", "text": "def barcode_distance(str_a, str_b):\n assert len(str_a) == len(str_b)\n return sum([1 for i in xrange(len(str_a)) if str_a[i] != str_b[i]])", "title": "" }, { "docid": "50a219c71a3a7025dea175144397ed44", "score": "0.58798605", "text": "def get_reference_score(s):\r\n counter = 0\r\n s = [c for c in s]\r\n for c in s:\r\n if c == '(': # each open parenthese corresponds to a pairing\r\n counter += 1\r\n \r\n return counter", "title": "" }, { "docid": "1aba6e8c78b5db2097fa0012c2b195a2", "score": "0.586764", "text": "def matching(H,E,F,seqs,d,go, ge ,max_number_of_matching):\n seq1 = seqs[0]\n seq2 = seqs[1]\n H_arg_maxs = np.argwhere(H == np.max(H))\n array=[([\"\" for seq in seqs] ,tuple(arg_max) ) for arg_max in H_arg_maxs]\n ended_matching_array=[]\n while(array):\n next_array=[]\n for record in array:\n index = record[1]\n matching = record[0]\n if(H[index] == 0):\n ended_matching_array.append( ([matching[i] for i in range(len(seqs))],index ) )\n else:\n x = index[0]\n y = index[1]\n if(H[x,y] == H[x-1,y-1] + d[(seq1[x-1],seq2[y-1])] ):\n symbols = (seq1[x-1] , seq2[y-1])\n next_matching = [symbols[i] + matching[i] for i in range(len(seqs))]\n next_array.append((next_matching,(x-1,y-1)))\n if(H[x,y] == E[x,y]):\n codition = True\n symbols = (\"\",\"\")\n while(codition):\n symbols = (seq1[x-1] + symbols[0], \"_\" + symbols[1])\n if(H[x-1][y] + go + ge == E[x,y]):\n next_matching = [symbols[i] + matching[i] for i in range(len(seqs))]\n next_array.append((next_matching,(x - 1,y)))\n codition = (E[x,y] == E[x-1][y] + ge)\n x = x - 1 \n pass\n if(H[x,y] == F[x,y]):\n codition = True\n symbols = (\"\",\"\")\n while(codition):\n symbols = (\"_\" + symbols[0], seq2[y - 1] + symbols[1])\n if(H[x][y - 1] + go + ge == F[x ,y]):\n next_matching = [symbols[i] + matching[i] for i in range(len(seqs))]\n next_array.append((next_matching,(x ,y - 1)))\n codition = (F[x,y] == F[x][y - 1] + ge)\n y = y - 1\n array = next_array[0:max_number_of_matching - 
len(ended_matching_array)]\n return ended_matching_array", "title": "" }, { "docid": "4a85f031d229657ff8390899ae4f85f6", "score": "0.5862201", "text": "def match_twosided(desc1,desc2):\n matches_12 = match(desc1,desc2)\n matches_21 = match(desc2,desc1)\n ndx_12 = matches_12.nonzero()[0]\n\n # remove matches that are not symmetric\n for n in ndx_12:\n if matches_21[int(matches_12[n])] != n:\n matches_12[n] = 0\n\n return matches_12", "title": "" }, { "docid": "0f323f4e0320d62910ff5c8bdd05df65", "score": "0.586128", "text": "def protien_pass_count(seq):\r\n plus_minus_count = seq.count(\"+-\")\r\n return plus_minus_count", "title": "" }, { "docid": "2bb7c3db58ece533387776316f3be755", "score": "0.58499205", "text": "def sim4gram(text1, text2):\n list1 = word2ngrams(text1)\n list2 = word2ngrams(text2)\n return len(list(set(list1) & set(list2)))/len(list(set(list1) | set(list2)))", "title": "" }, { "docid": "68c0f279400f43135d2e51eed643daff", "score": "0.58368987", "text": "def distance(strand1, strand2):\n if len(strand1) != len(strand2):\n return False\n return sum([1 for base1, base2 in zip(strand1, strand2) if base1 != base2])", "title": "" }, { "docid": "e71a99ae4903b244ccab0ca72b16d7f7", "score": "0.583631", "text": "def compute_seq(x, y):\n sm = difflib.SequenceMatcher(None, x, y)\n seq = ''\n for match in sm.get_matching_blocks():\n seq += x[match.a:match.a+match.size]\n return seq", "title": "" }, { "docid": "533f1e960b95013b693895450494c0ef", "score": "0.5835949", "text": "def _needleman_wunsch(\n cls, seq1, seq2, match=1, mismatch=-3, gap_open=-11, gap_extend=-4\n ):\n alignments = pairwise2.align.globalms(\n seq1, seq2, match, mismatch, gap_open, gap_extend\n )\n assert len(alignments[0][0]) == len(alignments[0][1])\n return alignments[0][0], alignments[0][1]", "title": "" }, { "docid": "fc4527228ce91fcee669def6c663d657", "score": "0.5833368", "text": "def identify(names1, names2):\n import numpy as np\n n = len(names1);\n match = -np.ones(n, dtype = int);\n for i in range(n):\n pos = np.nonzero(names2 == names1[i])[0];\n if len(pos) > 0:\n match[i] = pos[0];\n return match;", "title": "" }, { "docid": "61dada72884a8d6c4616597aa492e2ee", "score": "0.58325744", "text": "def count_same_in_index(list,list2):\n count=0\n for i in range(len(list)):\n if list[i] != 0 and list2[i] != 0:\n count+=1\n return count", "title": "" }, { "docid": "5353da2a8c17c60213317e15f64191a8", "score": "0.5826752", "text": "def score(str_1_list,str_2_list):\n \n score_1_list = str_1_list[:] #here we make a copy of the string lists.\n score_2_list = str_2_list[:]\n \n match = 0 #setting our match variable to zero\n#The following lines check the length of the sequences and append indels to the\n#end of the shorter sequence.\n \n if range(len(score_1_list))== range(len(score_2_list)):\n pass \n elif len(score_1_list) < len(score_2_list):\n while len(score_1_list) < len(score_2_list):\n score_1_list.append('-')\n elif len(score_2_list) < len(score_1_list):\n while len(score_2_list) < len(score_1_list):\n score_2_list.append('-')\n else:\n pass\n \n#from using stackoverflow we found that zip joins two lists by grouping the\n#specific indices. 
Enumerate then assigns the indices to the pairs of the lists\n \n for i, (a,b) in enumerate(zip(score_1_list,score_2_list)):\n if a == b: #match counter increases when the two are equal\n match +=1\n else:\n score_1_list[i] = a.upper()\n score_2_list[i] = b.upper()\n \n#min() looks at the len() of the two lists and takes the smallest value.\n#mismatch is calculated from the difference of min() and match\n \n mismatch = min(len(score_1_list),len(score_2_list))-match\n \n#matches and mismatches are displayed\n \n print 'There are',match,'matches, and',mismatch,'mismatches.'\n print 'String 1: ',\"\".join(score_1_list)\n print 'String 2: ',\"\".join(score_2_list)\n print", "title": "" }, { "docid": "56aeea5fddffcf4a8fff07f223cf7b77", "score": "0.58203185", "text": "def estimate_match_states(sequences, threshold = 0.5):\n if not common_sequence_length(sequences):\n raise ValueError(\"Not all sequences of common length.\")\n\n # Initialise the number of match states\n num_match_states = 0\n\n # Cycle through index of sequences checking for a match state at each \n # point\n for i in range(len(sequences[0])):\n\n # Check if a match state in the current position\n if check_match_state(sequences, i, threshold = threshold):\n num_match_states += 1\n\n return num_match_states", "title": "" }, { "docid": "caa5662a0b563e13c4f193c0048bba09", "score": "0.58149344", "text": "def similar_str(str1, str2):\n max_len = tmp = pos1 = pos2 = 0\n len1, len2 = len(str1), len(str2)\n\n for p in range(len1):\n for q in range(len2):\n tmp = 0\n while p + tmp < len1 and q + tmp < len2 \\\n and str1[p + tmp] == str2[q + tmp]:\n tmp += 1\n\n if tmp > max_len:\n max_len, pos1, pos2 = tmp, p, q\n\n return max_len, pos1, pos2", "title": "" }, { "docid": "e97d000707023c34b3ab45e6ca1952bf", "score": "0.58134043", "text": "def match_rate(t1, t2):\n if t1.shape == t2.shape:\n diff = np.sum(np.abs(t1 - t2)) # trial is binary\n return 1 - (diff / float(t1.shape[0] * t1.shape[1]))\n else:\n print(\"Matched shape? 
No!\")\n return 0", "title": "" }, { "docid": "4ebcb5efc1decde16c66eb0d62840b2c", "score": "0.580616", "text": "def similarity(self, other):\n # Even quick_ratio is fairly slow on big inputs, capture just the start.\n max_size = 1024\n return difflib.SequenceMatcher(None, self.output[:max_size],\n other.output[:max_size]).quick_ratio()", "title": "" }, { "docid": "5f0af8b7b212d4e6cd8a5f8408f3409d", "score": "0.58030057", "text": "def hamming_distance(a, b):\n res = 0\n if len(a) != len(b):\n return \"Error: No son iguales\"\n else:\n for i, j in zip(a, b):\n if i != j:\n res += 1\n return res", "title": "" }, { "docid": "094109f985b020bd705e63d2fe590a3e", "score": "0.5802971", "text": "def count_qs_part_2(group):\n # Always beware of a trailing newline in python ( •̀ᴗ•́ )و ̑̑\n replies = group.strip('\\n').split('\\n')\n int_set = set(replies[0])\n for rep in replies[1:]:\n int_set = int_set.intersection(rep)\n\n return len(int_set)", "title": "" }, { "docid": "b16441b86a67af53e76bd783af161264", "score": "0.57986325", "text": "def levenshtein(seq1, seq2):\n size_x = len(seq1) + 1\n size_y = len(seq2) + 1\n mat = np.zeros((size_x, size_y))\n for x in range(size_x):\n mat[x, 0] = x\n for y in range(size_y):\n mat[0, y] = y\n\n for x in range(1, size_x):\n for y in range(1, size_y):\n if seq1[x-1] == seq2[y-1]:\n mat[x, y] = min(\n mat[x-1, y] + 1,\n mat[x-1, y-1],\n mat[x, y-1] + 1\n )\n else:\n mat[x, y] = min(\n mat[x-1, y] + 1,\n mat[x-1, y-1] + 1,\n mat[x, y-1] + 1\n )\n\n return mat[size_x - 1, size_y - 1]", "title": "" }, { "docid": "831479c11cb7eb4392572e5b75c9c433", "score": "0.5795004", "text": "def get_excess_disjoint(self, genome1: Genome, genome2: Genome) -> float:\n matching = 0.0\n for gene1 in genome1.genes:\n for gene2 in genome2.genes:\n if gene1.innovation_number == gene2.innovation_number:\n matching += 1\n\n return len(genome1.genes) + len(genome2.genes) - 2 * matching", "title": "" }, { "docid": "77e77f215f05344962f8ec0e11459dfb", "score": "0.57832927", "text": "def rank(self, w1, w2):\n return len(self.words_closer_than(w1, w2)) + 1", "title": "" }, { "docid": "f993beb2a9b0a228fc8ef2cff34045e1", "score": "0.5777695", "text": "def similar(string1, string2):\n return SequenceMatcher(None, string1, string2).ratio()", "title": "" }, { "docid": "f598ae87a5c2e6acd10b90b70c7f8b94", "score": "0.5769206", "text": "def calc_ident(r, ref):\n\n print r\n print ref\n print r.aligned_pairs\n\n if r.is_unmapped:\n return -1.0\n op_counts = {'indel': 0, 'match': 0, 'mismatch': 0}\n for (qpos, rpos) in r.aligned_pairs:\n print ref[rpos].upper()\n print r.seq[qpos].upper()\n print \"----\"\n if qpos is None or rpos is None:\n op = 'indel'\n else:\n assert rpos < len(ref)\n if ref[rpos].upper() == r.seq[qpos].upper():\n op = 'match'\n else:\n op = 'mismatch'\n op_counts[op] += + 1\n ident = op_counts['match']/float(sum(op_counts.values()))\n\n print ident\n print op_counts['match']\n print op_counts['mismatch']\n \n\n return ident", "title": "" }, { "docid": "87d3704d741b3a25a120814e5c0e71bd", "score": "0.576589", "text": "def overlap(a, b, overlap_n):\n start = 0 \n while True:\n start = a.find(b[:overlap_n], start) \n if start == -1: \n return 0\n if b.startswith(a[start:]):\n return len(a)-start\n start += 1", "title": "" }, { "docid": "2146fedba36c5a6c56fd5a6bf0cf6d9f", "score": "0.5765256", "text": "def ncs_recur(x, y, i, j):\n # if we reach the end of the two strings, we found a common subsequence\n if i == len(x) and j == len(y):\n return 1\n\n # else if we reach the end of the 
first string only, no common subsequence\n # was found\n if i == len(x):\n return 0\n\n count = 0\n\n # if we have matching characters, recurse on substrings\n if j < len(y) and x[i] == y[j]:\n count += ncs_recur(x, y, i + 1, j + 1)\n\n # regardless of characters matches, we recurse on a substring of x because\n # a character match has two outcomes: being counted or not (later\n # characters of may match the same character of y)\n count += ncs_recur(x, y, i + 1, j)\n\n return count", "title": "" }, { "docid": "5d5602b6688d9f89f4d349284585d480", "score": "0.57629323", "text": "def enumerate_matches(tnums_a,tnums_b,tags_a,tags_b,\n max_shift=20.0,max_drift=0.005,max_delta=0.500,\n verbose=False,max_matches=1):\n visited={}\n full_matches=[]\n next_a=next_b=0\n matches=[]\n \n # When trying to find a bad ping, keep track of how far the match\n # got. This might be a little generous, in the sense that these\n # are tracked separately, and there could be one set of matches\n # that gets further in a, and a separate set that gets further\n # in b. maybe that's ok, depending on how these values get used.\n # 2019-12-03: HERE so far this has tracked the last matched ping, and\n # on failure started axing successive pings.\n # This is problematic because we may consider many ping after the\n # last match, and much later consider a pair that fails.\n # tempting to track the greatest next_a/next_b while building the\n # longest match. but the search can go quite far (up to max shift)\n # on either side without having to advance the other side, such\n # that we may see very far beyond the real culprit.\n # Solution probably to make test smarter, and allow it to return\n # potential bad apples.\n max_match_a=-1\n max_match_b=-1\n\n # New approach to finding bad apples.\n # Keep the test result from the longest set of matches\n longest_fail=None\n max_matched=0\n longest_match=None\n \n stack=[] # tuples of next_a,next_b,matches\n \n def do_test(next_a,next_b,matches):\n return test_matches(next_a,next_b,matches,tnums_a,tnums_b,tags_a,tags_b,\n max_shift=max_shift,max_drift=max_drift,max_delta=max_delta)\n\n stack.append( (next_a,next_b,matches) )\n\n while stack:\n next_a,next_b,matches = stack.pop()\n\n # Check to see if this has already been tried\n key=(next_a,next_b,tuple(matches))\n if key in visited: continue\n visited[key]=1\n\n if verbose:\n print(f\"match next_a={next_a} next_b={next_b} matches={matches}\")\n\n test_result=do_test(next_a,next_b,matches)\n if test_result is not True:\n if len(matches)>max_matched:\n max_matched=len(matches)\n longest_fail=test_result\n longest_match=matches\n elif len(matches)==max_matched:\n longest_fail=( list(set( longest_fail[0]+test_result[0])), \n list(set( longest_fail[1]+test_result[1])) )\n continue\n \n if len(matches):\n max_match_a=max(max_match_a,matches[-1][0])\n max_match_b=max(max_match_b,matches[-1][1])\n \n if (next_a==len(tnums_a)) and (next_b==len(tnums_b)):\n # termination condition\n if len(matches)>0:\n full_matches.append(matches)\n print(\"MATCH\")\n if len(full_matches)>=max_matches:\n break\n continue\n elif (next_a==len(tnums_a)):\n # finish out b\n stack.append( (next_a,len(tnums_b),matches) )\n elif (next_b==len(tnums_b)):\n # finish out a\n stack.append( (len(tnums_a),next_b,matches) )\n else:\n # This ordering will prefer chasing down matches before\n # trying to drop things. 
Hopefully that means that we'll\n # find the non-empty match first, and could potentially stop\n # early if that is sufficient.\n if tnums_a[next_a]<tnums_b[next_b] + max_shift:\n # drop which ever one is earlier. doesn't work in general, but I'm not\n # explicitly including shifts yet\n stack.append( (next_a+1,next_b,matches) )\n if tnums_b[next_b]<tnums_a[next_a] + max_shift:\n stack.append( (next_a,next_b+1,matches) )\n\n # is it possible to declare the next two a match?\n if (tags_a[next_a]==tags_b[next_b]):\n time_diff=tnums_a[next_a]-tnums_b[next_b]\n if (np.abs(time_diff)<max_shift):\n # sure, try a match\n stack.append( (next_a+1,next_b+1,matches + [(next_a,next_b)]) )\n else:\n # Tags match, but time diff is too big for max_shift\n pass\n\n # if len(full_matches)==0:\n # print(\"Failed to complete. Longest match is \")\n # print(longest_match)\n \n return full_matches,max_match_a,max_match_b,longest_fail", "title": "" }, { "docid": "749f1fe2d175a4045eea8af6e43237d3", "score": "0.5761397", "text": "def sortandcount(seq):\r\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n #If the list has one element:\r\n if len(seq) == 1:\r\n #there are no inversions (0, seq)\r\n return 0, seq\r\n\r\n #Else\r\n else:\r\n #Divide the list into two halves\r\n mid = len(seq)/2\r\n mid = int(mid)\r\n #A containts the first n/2 elements\r\n a = seq[:mid]\r\n #B contains the remaining n/2 elements\r\n b = seq[mid:]\r\n\r\n print(\"A: \", a)\r\n print(\"B: \", b)\r\n\r\n #(ra, A) = sortandcount(a)\r\n (ra, a) = sortandcount(a)\r\n\r\n #(rb, B) = sortandcount(b)\r\n (rb, b) = sortandcount(b)\r\n\r\n #(r, seq) = meergeandcount(a, b)\r\n (r, seq) = mergeandcount(a, b)\r\n #endif\r\n\r\n #Return r = ra + rb + r, and the sorted list seq\r\n r = ra + rb + r\r\n\r\n #print(\"R: \", r)\r\n\r\n return r, seq", "title": "" }, { "docid": "6558ba3555fa1a0e2663914f056d341a", "score": "0.5758837", "text": "def compute_symmetric_gestalt_pattern_matching(val1, val2):\n return 0.5 * (\n SequenceMatcher(None, val1, val2).ratio()\n + SequenceMatcher(None, val2, val1).ratio()\n )", "title": "" }, { "docid": "74334ce800c53d1d790661f8e96734a6", "score": "0.5755867", "text": "def _count_diff_NG86(codon1, codon2, codon_table):\n if not isinstance(codon1, str) or not isinstance(codon2, str):\n raise TypeError(\n \"_count_diff_NG86 accepts string object to represent codon\"\n f\" ({type(codon1)}, {type(codon2)} detected)\"\n )\n if len(codon1) != 3 or len(codon2) != 3:\n raise RuntimeError(\n \"codon should be three letter string\"\n f\" ({len(codon1)}, {len(codon2)} detected)\"\n )\n SN = [0, 0] # synonymous and nonsynonymous counts\n if codon1 == \"---\" or codon2 == \"---\":\n return SN\n base_tuple = (\"A\", \"C\", \"G\", \"T\")\n if not all(i in base_tuple for i in codon1):\n raise RuntimeError(\n f\"Unrecognized character detected in codon1 {codon1}\"\n \" (Codons consist of A, T, C or G)\"\n )\n if not all(i in base_tuple for i in codon2):\n raise RuntimeError(\n f\"Unrecognized character detected in codon2 {codon2}\"\n \" (Codons consist of A, T, C or G)\"\n )\n if codon1 == codon2:\n return SN\n else:\n diff_pos = []\n for i, k in enumerate(zip(codon1, codon2)):\n if k[0] != k[1]:\n diff_pos.append(i)\n\n def compare_codon(codon1, codon2, codon_table, weight=1):\n \"\"\"Compare two codon accounting for different pathways.\"\"\"\n sd = nd = 0\n if len(set(map(codon_table.forward_table.get, [codon1, codon2]))) == 1:\n sd += weight\n else:\n nd += weight\n return (sd, nd)\n\n if len(diff_pos) == 1:\n SN = [\n i + j\n for i, 
j in zip(\n SN, compare_codon(codon1, codon2, codon_table=codon_table)\n )\n ]\n elif len(diff_pos) == 2:\n for i in diff_pos:\n temp_codon = codon1[:i] + codon2[i] + codon1[i + 1 :]\n SN = [\n i + j\n for i, j in zip(\n SN,\n compare_codon(\n codon1, temp_codon, codon_table=codon_table, weight=0.5\n ),\n )\n ]\n SN = [\n i + j\n for i, j in zip(\n SN,\n compare_codon(\n temp_codon, codon2, codon_table=codon_table, weight=0.5\n ),\n )\n ]\n elif len(diff_pos) == 3:\n paths = list(permutations([0, 1, 2], 3))\n tmp_codon = []\n for p in paths:\n tmp1 = codon1[: p[0]] + codon2[p[0]] + codon1[p[0] + 1 :]\n tmp2 = tmp1[: p[1]] + codon2[p[1]] + tmp1[p[1] + 1 :]\n tmp_codon.append((tmp1, tmp2))\n SN = [\n i + j\n for i, j in zip(\n SN, compare_codon(codon1, tmp1, codon_table, weight=0.5 / 3)\n )\n ]\n SN = [\n i + j\n for i, j in zip(\n SN, compare_codon(tmp1, tmp2, codon_table, weight=0.5 / 3)\n )\n ]\n SN = [\n i + j\n for i, j in zip(\n SN, compare_codon(tmp2, codon2, codon_table, weight=0.5 / 3)\n )\n ]\n return SN", "title": "" }, { "docid": "b7381e19019768dc8b1a6111f688ec51", "score": "0.57547957", "text": "def match(text:str, pattern:str) -> int :\n \n lps = prefixtable(pattern)\n\n textidx, patternidx = 0, 0\n\n while textidx < len(text):\n if text[textidx] == pattern[patternidx]:\n textidx += 1\n patternidx += 1\n\n else:\n if patternidx > 0:\n patternidx = lps[patternidx - 1]\n else:\n textidx += 1\n\n\n if patternidx == len(pattern):\n return textidx - patternidx\n\n\n return -1", "title": "" }, { "docid": "555ce6f8c3b5783fb7b7937e85ec8af0", "score": "0.57532156", "text": "def advantageCount(self, nums1: List[int], nums2: List[int]) -> List[int]:\n s1, s2 = sorted(nums1, reverse = True), sorted(nums2, reverse = True)\n \n out = []\n \n left, right = 0, len(s1) - 1\n for i in range(len(s2)):\n if s1[left] > s2[i]:\n out.append(s1[left])\n left += 1\n else:\n out.append(s1[right])\n right -= 1\n \n match = defaultdict(list)\n for i in range(len(out)):\n match[s2[i]].append(out[i])\n \n out = []\n \n for num2 in nums2:\n out.append(match[num2].pop())\n \n return out", "title": "" } ]
5796bc034f99d5527a461c1ea52e0712
Divide two functions >>> f1 = ... >>> f2 = ... >>> ff = f1 / f2
[ { "docid": "4ed6aca71ab02122c6ea963f6b92d76a", "score": "0.6818164", "text": "def __div__ ( self , other ) :\n if self is other : return self.__constant ( 1 )\n elif isinstance ( other , constant_types ) and isequal ( other , 1 ) : return self\n elif isinstance ( other , constant_types ) and iszero ( other ) : return self.__constant ( 0 )\n return self.__f2_op__ ( other , Ostap.MoreRooFit.Division , pattern = \"(%s/%s)\" )", "title": "" } ]
[ { "docid": "dadd470302d0200c7346cf74ed6fd6a2", "score": "0.79255766", "text": "def division(first, second):\n return first / second", "title": "" }, { "docid": "cea96b88efb4ec0b45112fa00abc625a", "score": "0.7900606", "text": "def div(a,b):\n return a/b", "title": "" }, { "docid": "e6728d5119c9f7d47e6ce5606c77c776", "score": "0.7686335", "text": "def divide(num1, num2):\n \n return num1/num2", "title": "" }, { "docid": "a13e93237f9c4d06a6e3a96e6733ce51", "score": "0.76167375", "text": "def divide(num1, num2):\n return num1/num2", "title": "" }, { "docid": "9638c4faa1a81bd0248005a26c18676d", "score": "0.76135135", "text": "def div(a, b):\n def divide(a, b):\n \"\"\"Division\"\"\"\n return a / b\n return op_with_scalar_cast(a, b, divide)", "title": "" }, { "docid": "4ee694a19b8a9a9294834ef2475ce65a", "score": "0.7587659", "text": "def division(num1, num2):\n return float(num1)/float(num2)", "title": "" }, { "docid": "8b2020a4002bf49a732e2ad2535fb33b", "score": "0.7562746", "text": "def divide(n1,n2):\n return n1 / n2", "title": "" }, { "docid": "4938d99da566017672fe64a0ea7551c5", "score": "0.75423765", "text": "def division (num1: float, num2: float) -> float:\n # logic here\n num3 = num1 / num2\n return num3", "title": "" }, { "docid": "b31a4c6a513d2a202e068429581f1e3f", "score": "0.7541362", "text": "def div(num1, num2):\r\n return num1 / num2", "title": "" }, { "docid": "12f8aad64341cd2f89d0e9ddbe571217", "score": "0.7522578", "text": "def divide(n1, n2):\n return n1 / n2", "title": "" }, { "docid": "a81b2f2f3d7752c7400183f7d881c405", "score": "0.74803615", "text": "def div(num1, num2):\n return num1 / num2", "title": "" }, { "docid": "a81b2f2f3d7752c7400183f7d881c405", "score": "0.74803615", "text": "def div(num1, num2):\n return num1 / num2", "title": "" }, { "docid": "cd69e8723e239867ffa8598720b129d4", "score": "0.745357", "text": "def divide(x, y):\n return x / y", "title": "" }, { "docid": "782f841b47b656c95acc377ec7632759", "score": "0.7403915", "text": "def __div__(f1,f2):\n assert isNumeric(f1) and isNumeric(f2)\n return f1 * f2.copy().invert()", "title": "" }, { "docid": "b51a76ef5eee40bbb7fd3fb890a08ea7", "score": "0.7369315", "text": "def division(var_a, var_b):\n return var_a / var_b", "title": "" }, { "docid": "3249b43870fe8693c34c84a4ec4c768e", "score": "0.7365133", "text": "def divide (x, y):\n return x / y", "title": "" }, { "docid": "04e61f4d29d3fb204a1d7681a37caf55", "score": "0.73507315", "text": "def divides(a, b):\n\treturn ________", "title": "" }, { "docid": "b715c61bead73be078c2f9fb335485b3", "score": "0.7282187", "text": "def divide(x1, x2, out=None):\n return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,\n _npi.rtrue_divide_scalar, out)", "title": "" }, { "docid": "999aaa91b5604a9e4ae57c195ceb6c45", "score": "0.72317255", "text": "def div(x, y):\n return divide(x, y)", "title": "" }, { "docid": "4ded699f7e11340325b75e6eb650532c", "score": "0.7175921", "text": "def divide(numerator: float, denominator:float) -> float:\n return numerator / denominator", "title": "" }, { "docid": "cb3a70f4dfa7a486f7b4a7ff0eef0de8", "score": "0.7148423", "text": "def quotient(x, y):\n return x // y", "title": "" }, { "docid": "0e41703357edd4c755f137f3785881c5", "score": "0.7138539", "text": "def divide_(a: T.Tensor, b: T.Tensor) -> None:\n ne.evaluate(\"b / a\", out=b)", "title": "" }, { "docid": "b9e83af6bb3e81cea2c7378a3e09cdf2", "score": "0.7135799", "text": "def Div(a, b):\r\n\ttt = Require(b > 0)\r\n\tc = a / b\r\n\treturn c", "title": "" }, { 
"docid": "a9e947094b5cb892d3546b19b8f19c3c", "score": "0.7123777", "text": "def Div(a, b):\n\tRequire(b > 0)\n\tc = a / b\n\treturn c", "title": "" }, { "docid": "4e5923b77ed96c579746b42d86773bdc", "score": "0.7119507", "text": "def divide(self, a, b):\n return a.__div__(b, context=self)", "title": "" }, { "docid": "24202b3e6d04e1feb8b6cb6e32198d22", "score": "0.7109099", "text": "def __div__(self, other):\n return _binary_func(self, other, backend.get().af_div)", "title": "" }, { "docid": "0108ccd4cca94838def3d38ccf5e55b7", "score": "0.70914584", "text": "def divide(num):\n #define function for divide\n #add quotient variable that divides num1 and num2\n #return quotient\n return num[0]/num[1]", "title": "" }, { "docid": "02b908ffc8ffd0768c66530fe9186922", "score": "0.7025466", "text": "def divide(x, y):\n return float(x) / y # Use float() for compatibility prior to 3.0", "title": "" }, { "docid": "b7701108842f81ee43940aa2c2ede2b2", "score": "0.7005267", "text": "def divide(a: T.Tensor, b: T.Tensor) -> T.Tensor:\n return b / a", "title": "" }, { "docid": "c7007c85fd5924f3796310c06fc83f42", "score": "0.6978763", "text": "def careful_divide(a: float, b: float) -> float:\n try:\n return a / b\n except ZeroDivisionError as e:\n raise ValueError('Invalid inputs')", "title": "" }, { "docid": "c7007c85fd5924f3796310c06fc83f42", "score": "0.6978763", "text": "def careful_divide(a: float, b: float) -> float:\n try:\n return a / b\n except ZeroDivisionError as e:\n raise ValueError('Invalid inputs')", "title": "" }, { "docid": "5b54396b4b398a927519ee8782d2efc6", "score": "0.6973006", "text": "def Div(a, b):\n _noUseYetNeedReturn = Require(b > 0)\n c = a / b\n return c", "title": "" }, { "docid": "7bb0122e6e26fd49cd4a6230172dfa34", "score": "0.6929195", "text": "def calc(operand_1, operand_2):\n return operand_1/operand_2", "title": "" }, { "docid": "7bb0122e6e26fd49cd4a6230172dfa34", "score": "0.6929195", "text": "def calc(operand_1, operand_2):\n return operand_1/operand_2", "title": "" }, { "docid": "d518bcf347c4d0ca105ec21dd7138ceb", "score": "0.6877735", "text": "def __div__(self, other):\n return self._op_common(other, _ud.divide)", "title": "" }, { "docid": "276c4e375ef5bf1c0c15deebac353666", "score": "0.6848819", "text": "def __rdiv__(self, other):\n return _binary_funcr(other, self, backend.get().af_div)", "title": "" }, { "docid": "d14759086c27649be270227924f06ded", "score": "0.68434733", "text": "def __div__ ( self , other ) :\n if self is other : return self.__constant ( 1 ) \n elif isinstance ( other , constant_types ) and isequal ( other , 1 ) : return self\n elif isinstance ( other , constant_types ) and iszero ( other ) : return self.__constant ( 0 ) \n return self.__f3_op__ ( other , Ostap.MoreRooFit.Division , pattern = \"(%s/%s)\" )", "title": "" }, { "docid": "8bbf1fc2b1dcfc1d32bc88bbd102b35b", "score": "0.6783169", "text": "def __div__ ( self , other ) :\n if self is other : return self.__constant ( 1 )\n elif isinstance ( other , constant_types ) and isequal ( other , 1 ) : return self\n elif isinstance ( other , constant_types ) and iszero ( other ) : return self.__constant ( 0 )\n return self.__f1_op__ ( other , Ostap.MoreRooFit.Division , pattern = \"(%s/%s)\" )", "title": "" }, { "docid": "001cdcbaf43edd306c1ebbac41c64373", "score": "0.6766377", "text": "def DivideBy(val): # pylint: disable=invalid-name\n return tl.Fn('DivideBy', lambda x: x / val)", "title": "" }, { "docid": "a595b9d171220055653c49e1f1bd3907", "score": "0.67596966", "text": "def divide(a, b):\n try:\n 
return float(a) / float(b)\n except ValueError:\n return \"You can't divide that data type!\"\n except ZeroDivisionError:\n return \"You can't divide by zero!\"", "title": "" }, { "docid": "3e620aa00febb5152fd028f6a82d9cff", "score": "0.6746849", "text": "def divide(a,b):\n if b == 0:\n raise(ValueError)\n \n return a / b", "title": "" }, { "docid": "3fcb09a1942d0734f1e810f792de6612", "score": "0.6700537", "text": "def vars_divide ( self , var1 , var2 , name = '' , title = '' ) :\n \n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n res = float ( var1 ) / float ( var2 )\n return ROOT.RooFit.RooConst ( res ) \n elif f1 :\n var1 = ROOT.RooFit.RooConst ( var1 ) \n return self.vars_divide ( var1 , var2 , name , title )\n elif f2 :\n return self.vars_multiply ( var1 , 1.0/var2 , name , title )\n \n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n \n name = name if name else self.roo_name ( \"divide_%s_%s\" % ( var1.name , var2.name ) )\n title = title if title else \"(%s)/(%s)\" % ( var1.name , var2.name )\n \n result = Ostap.MoreRooFit.Division ( var1 , var2 , name , title )\n self.aux_keep.append ( result )\n \n return result", "title": "" }, { "docid": "cce031f1be8140db5c25247c2ade6164", "score": "0.6693114", "text": "def dividir(num1, num2):\n if num2 == 0:\n return \"Error/0\"\n return num1 / num2", "title": "" }, { "docid": "0f32737baf9fd10e1505db4049a4a3e1", "score": "0.66758806", "text": "def _div(left, right):\n\n if _is_number(left) or _is_number(right):\n return _div_certain(left, right)\n return _div_uncertain(left, right)", "title": "" }, { "docid": "81b52e91f520215135082cfa0b54ddd2", "score": "0.66694623", "text": "def f(x, y):\n return x/y + y/x", "title": "" }, { "docid": "b6f9937028a948dcf8a04b3ae0a55d91", "score": "0.66692555", "text": "def div(x, y):\n\n # Not yet implemented!", "title": "" }, { "docid": "e43a2b9144040b64eadc8e5e4cbb31d5", "score": "0.66593677", "text": "def div(num1, num2):\r\n try:\r\n return num1 / num2\r\n except ZeroDivisionError:\r\n print(\"You can't divide by Zero!\")\r\n return 0", "title": "" }, { "docid": "2a47840bf5431d57e3c2ac54690756d8", "score": "0.66362923", "text": "def _div(mul_f, c1, c2):\n # c1, c2 are trimmed copies\n [c1, c2] = as_series([c1, c2])\n if c2[-1] == 0:\n raise ZeroDivisionError()\n\n lc1 = len(c1)\n lc2 = len(c2)\n if lc1 < lc2:\n return c1[:1]*0, c1\n elif lc2 == 1:\n return c1/c2[-1], c1[:1]*0\n else:\n quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)\n rem = c1\n for i in range(lc1 - lc2, - 1, -1):\n p = mul_f([0]*i + [1], c2)\n q = rem[-1]/p[-1]\n rem = rem[:-1] - q*p[:-1]\n quo[i] = q\n return quo, trimseq(rem)", "title": "" }, { "docid": "f45c9405abdc5c8aee00e63219ccc45d", "score": "0.6594917", "text": "def divide(x, y, name=None):\n return math_ops.div([x, y], name=name)", "title": "" }, { "docid": "ead09ce2673db91e4c739e804cbcfb78", "score": "0.656766", "text": "def divide(numerator, denominator):\n ensure_divisibility(numerator, denominator)\n return numerator // denominator", "title": "" }, { "docid": "ead09ce2673db91e4c739e804cbcfb78", "score": "0.656766", "text": "def divide(numerator, denominator):\n ensure_divisibility(numerator, denominator)\n return numerator // denominator", "title": "" }, { "docid": "247137ccdd401655e2c2e41f6fae2c1f", "score": "0.65449524", "text": "def div():\r\n a = int(input(\"Enter Your First Number:- \"))\r\n b = int(input(\"Enter Your Second Number:- \"))\r\n div = a/b\r\n print(\"Answer:-\", div)\r\n return div", "title": "" 
}, { "docid": "b375d7e62b1f32b67aaca141e51098e0", "score": "0.65331644", "text": "def _divide(cls, x, y):\n try:\n return float(x) / y\n except ValueError:\n return x\n except TypeError:\n return x\n except ZeroDivisionError:\n return x", "title": "" }, { "docid": "98ed448409a9f07dcb8e1180b6362f6c", "score": "0.65161026", "text": "def __div__(self, other):\n\n if not (is_p_vector(other) or is_p_scalar(other)\n or isinstance(other, int) or isinstance(other, float)):\n raise Exception(\"Second argument type {} is not supported\".format(type(other)))\n # call wrapper function\n return self.call_lua_op(q_consts.DIV, self, other)", "title": "" }, { "docid": "82aa5a4f438aea866492d7438d47051a", "score": "0.6513437", "text": "def test_division(self):\n prime = 5\n f1 = FFInt(1, prime)\n f2 = FFInt(2, prime)\n f3 = FFInt(3, prime)\n f4 = FFInt(4, prime)\n self.assertTrue((f4 / f3) == f3)\n self.assertTrue((f2 / f2) == f1)\n self.assertTrue((f1 / f4) == f4)", "title": "" }, { "docid": "29c536a4fe633e572e2eb530e7f33217", "score": "0.6501837", "text": "def division_algo_coeff(a,b):\n if(b == 0):\n raise TypeError(\n \"Cannot divide by zero\"\n )\n quotient = int(a/b)\n remainder = a%b\n return quotient , remainder", "title": "" }, { "docid": "a904212c4f5b402037cc08730d0b0660", "score": "0.6496014", "text": "def divide(ds1, ds2):", "title": "" }, { "docid": "6e3ed0aab6ba49234f1f428f91342a60", "score": "0.64672565", "text": "def divide_by(y):\n return lambda x : x / y", "title": "" }, { "docid": "4faa7f8a870c8a9ade9d248bd26981df", "score": "0.64628774", "text": "def _safe_divide(x: int, y: int) -> float:\n if y == 0:\n return 0.\n else:\n return x / y", "title": "" }, { "docid": "5466c776a77bd610509f6130f18c9064", "score": "0.6455634", "text": "def divide(numerator, denominator):\n return np.divide(numerator, denominator)", "title": "" }, { "docid": "ecce78f4656710edfd80f5131064085e", "score": "0.64448303", "text": "def __div__(self, other):\n return binary_op(self, other, ' / ')", "title": "" }, { "docid": "ec73494dd1955f8a82a35cbac12cc3e8", "score": "0.6437988", "text": "def division(self, num1, num2):\n if num2 == 0:\n raise CalculatorException(\"Divisor can't be 0\")\n\n return num1 / num2", "title": "" }, { "docid": "18bd00a0e6237639380c83dd34729a9a", "score": "0.6426933", "text": "def float_div(self, size, a, b, flags=None):\n\t\treturn self.expr(LowLevelILOperation.LLIL_FDIV, a.index, b.index, size=size, flags=flags)", "title": "" }, { "docid": "152c30962fd862bb3d0b87ffba9687fd", "score": "0.6409481", "text": "def __div__(p1, p2):\n raise NotImplemented()", "title": "" }, { "docid": "482fddb243b0661faf7e3056c08a8c8b", "score": "0.6406764", "text": "def div(a,b):\n res, fail = divmod(a,b)\n if fail:\n raise ValueError(\"%r does not divide %r\" % (b,a))\n else:\n return res", "title": "" }, { "docid": "abdd86fe22056dc079f0b177ca327975", "score": "0.6403546", "text": "def divide2(num):\n return num / 2", "title": "" }, { "docid": "40e3ca838566b97fa748f43ceddca50f", "score": "0.64034075", "text": "def div(self):\n return self.a / self.b", "title": "" }, { "docid": "e4125ab5f2a00ca46ee13a6efc7b6d2b", "score": "0.6385709", "text": "def div( value, arg ):\n try:\n return float(value) / arg\n except:\n print(\"v\", value)\n print(\"a\", arg)\n return \"\"", "title": "" }, { "docid": "684734a9709c216f733eee349dd3de69", "score": "0.6382699", "text": "def f(a, b, c, d=10, f=100):\n x = a + b + c\n y = x * d\n z = x * f\n print(z / y)", "title": "" }, { "docid": "7ecc0d25bc66928cd78dfcd5171c7afb", "score": 
"0.6382692", "text": "def divide_tt(self, a, b, out):", "title": "" }, { "docid": "7788813f1dbd4fd3e6f30c67d04f3166", "score": "0.6365791", "text": "def safe_divide(a, b=None):\n if callable(a):\n @wraps(a)\n def wrapper(*args, **kwargs):\n try:\n return a(*args, **kwargs)\n except ZeroDivisionError:\n get_logger().warning(\"ZeroDivisionError; returning 0 instead\")\n return 0\n return wrapper\n else:\n try:\n result = a / b\n except ZeroDivisionError:\n get_logger().warning(\"ZeroDivisionError; returning 0 instead\")\n result = 0\n\n return result", "title": "" }, { "docid": "35524121ae969df2e312dea1b0d56427", "score": "0.6328596", "text": "def __div__(self, other):\n return self._op(other, operator.div)", "title": "" }, { "docid": "de026e32ad598d7f4f316a65d155ab8c", "score": "0.62856543", "text": "def test_divide():\n a = lin.Vector2([ 1.0, 2.0])\n b = lin.Vector2([-1.0, 4.0])\n\n c = a / b\n assert type(c) is lin.Vector2\n assert (c[0] == -1.0) and (c[1] == 0.5)\n\n c = a / 2.0\n assert type(c) is lin.Vector2\n assert (c[0] == 0.5) and (c[1] == 1.0)\n\n c = 2.0 / a\n assert type(c) is lin.Vector2\n assert (c[0] == 2.0) and (c[1] == 1.0)\n\n c = lin.divide(a, 2.0)\n assert type(c) is lin.Vector2\n assert (c[0] == 0.5) and (c[1] == 1.0)", "title": "" }, { "docid": "caa1fdc3dbd1a4f73ebced0e758262ef", "score": "0.6271475", "text": "def fdiv(self,numRegDestinoHex,numRegOp1,numRegOp2):\n\t\top1 = self.bancoRegistros.get(int(numRegOp1,16))\n\t\top2 = self.bancoRegistros.get(int(numRegOp2,16))\n\t\tif util.hex_to_float(op2) == 0.0:\n\t\t\tprint \"Division entre cero \\nCódigo de error: 1\"\n\t\t\tsys.exit(0)\n\t\tdivision = util.hex_to_float(op1) / util.hex_to_float(op2)\n\t\tself.bancoRegistros.actualizaRegistro(int(numRegDestinoHex,16),util.float_to_hex(division))", "title": "" }, { "docid": "5e7ba87e0fcd5d3f8f38dbbb4932da37", "score": "0.626276", "text": "def __div__ (a, b):\n if isinstance (b, Hist):\n a.assert_match (b)\n with warnings.catch_warnings ():\n warnings.filterwarnings ('ignore')\n values = a.values / b.values\n if a.errors is not None and b.errors is not None:\n errors = values * np.sqrt (\n (a.errors / a.values)**2 + (b.errors / b.values)**2)\n else:\n errors = None\n return Hist (a.bins, values, errors)\n else:\n b = 1.0 * b\n return Hist (a.bins, a.values / b, a.errors / b)", "title": "" }, { "docid": "0629de556a03ff77b21327bac13144cb", "score": "0.6262063", "text": "def __div__(self, other, context=None):\n return self._divide(other, context=context)", "title": "" }, { "docid": "769ecb6b11b6bd8a1aa1231be79a507a", "score": "0.62587506", "text": "def dividir():\n\ta = int(input(\"Ingrese un numero entero: \"))\n\tb = int(input(\"Ingrese un numero entero: \"))\n\tprint(a / b)", "title": "" }, { "docid": "1922b36e303a5b62d850ca1f699e21bd", "score": "0.62546736", "text": "def __rdiv__ ( self , other ) :\n if self is other : return self.__constant ( 1 ) \n elif isinstance ( other , constant_types ) and iszero ( other ) : return self.__constant ( 0 ) \n return self.__f3_rop__ ( other , Ostap.MoreRooFit.Division , pattern = \"(%s/%s)\" )", "title": "" }, { "docid": "431965755f7fcb7988a08801770f9903", "score": "0.62518287", "text": "def divide(x, y):\r\n\r\n dividendo = max(x, y)\r\n divisor = min(x, y)\r\n\r\n try:\r\n return float(dividendo) / divisor\r\n except ZeroDivisionError:\r\n print (\"Error; el dividendo es igual a cero\")\r\n return None", "title": "" }, { "docid": "5fc74f82d73346bef17fdff89fccd961", "score": "0.6249606", "text": "def test_divide(self):\n 
self.assertEqual(2, divide(6, 3))\n self.assertEqual(2.5, divide(5, 2))", "title": "" }, { "docid": "5fc74f82d73346bef17fdff89fccd961", "score": "0.6249606", "text": "def test_divide(self):\n self.assertEqual(2, divide(6, 3))\n self.assertEqual(2.5, divide(5, 2))", "title": "" }, { "docid": "f9cb403214f28e91bc16e5d1992f2cbd", "score": "0.62359285", "text": "def div( a, b ):\n\twith np.errstate(divide='ignore', invalid='ignore'):\n\t\tc = np.true_divide( a, b )\n\t\tc[ ~ np.isfinite( c )] = 0 # -inf inf NaN\n\treturn c", "title": "" }, { "docid": "c16fea7ec8e8364bb384981a2de4488a", "score": "0.62323946", "text": "def div(x, y):\n return \"<h1>The result is: \\n\" + str(x / y)", "title": "" }, { "docid": "fe65a3cfd99eaff5a4dbfc932241bca7", "score": "0.6232312", "text": "def safe_print_division(a, b):\n try:\n result = a / b\n except:\n result = None\n finally:\n print(\"Inside result: {}\".format(result))\n return result", "title": "" }, { "docid": "6bb5900bdd7aa87c82d8505c7a807255", "score": "0.6231361", "text": "def __rdiv__ ( self , other ) :\n if self is other : return self.__constant ( 1 )\n elif isinstance ( other , constant_types ) and iszero ( other ) : return self.__constant ( 0 )\n return self.__f2_rop__ ( other , Ostap.MoreRooFit.Division , pattern = \"(%s/%s)\" )", "title": "" }, { "docid": "b1b030d16ddf759060fb2124b323e3d1", "score": "0.62297505", "text": "def __rdiv__ ( self , other ) :\n if self is other : return self.__constant ( 1 )\n elif isinstance ( other , constant_types ) and iszero ( other ) : return self.__constant ( 0 )\n return self.__f1_rop__ ( other , Ostap.MoreRooFit.Division , pattern = \"(%s/%s)\" )", "title": "" }, { "docid": "8881f2a49247bf9ee5f6760aacfc4cf8", "score": "0.62273103", "text": "def divide(self,f):\n return self.multiply(f.inverse())", "title": "" }, { "docid": "35fdc017ef79f56e1d9cb6be9ef5e177", "score": "0.62027335", "text": "def divide_images(image1: \"napari.types.ImageData\", image2: \"napari.types.ImageData\") -> \"napari.types.ImageData\":\n return image1 / image2", "title": "" }, { "docid": "6d536b78dedb06927064b4151d403e09", "score": "0.62025076", "text": "def divide(value, arg):\n try:\n return float(value) / float(arg)\n except ZeroDivisionError:\n return float(value)\n except (ValueError, TypeError):\n return ''", "title": "" }, { "docid": "f6a8f787193c6e7981fdd43eba5c80a6", "score": "0.6193764", "text": "def divide(numbers):\n return reduce(lambda x, y: x / y, numbers)", "title": "" }, { "docid": "c049cd4c6f83335bdbd62224d4095260", "score": "0.6176464", "text": "def div(self, a, b):\n b_inverse = self.mul_inverse(b)\n result = self.mul(a, b_inverse)\n return result", "title": "" }, { "docid": "e92fab78048a8555a092949ffb51412d", "score": "0.61700034", "text": "def my_func(x,y):\n assert(y!=0), \"No division by 0\"\n return int(x/y)", "title": "" }, { "docid": "2202e3b96fed833c4efdd1d3a2e33c80", "score": "0.6161609", "text": "def test_3operands_indiv(self):\n f12: Fraction = Fraction(1, 2)\n f48: Fraction = Fraction(4, 8)\n f44: Fraction = Fraction(4, 4)\n \"\"\"test that fuction which gives you result\"\"\"\n self.assertTrue((f12 / f48 / f44).simplify() == Fraction(1,1))", "title": "" }, { "docid": "2f8db1f54595bddf18224c784af58fa2", "score": "0.61497545", "text": "def test_div_basic():\n\n def test_f(a, b):\n c = a.div(b)\n return c.div(c)\n\n x = torch.randn(4)\n y = torch.randn(4)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::div\"})", "title": "" }, { "docid": "731b434db72389099734c2372547764e", "score": "0.6144553", 
"text": "def divide(self):\n pass", "title": "" }, { "docid": "7d7a76e455f84629bdbf9bdee27bdfb7", "score": "0.61393356", "text": "def divide(a,b):\n if b != 0:\n quotient = round(a/b,4)\n print(\"The quotient of \" + str(a) + \" and \" + str(b) + \" is \" + str(quotient))\n return str(a) + \" / \" + str(b) + \" = \" + str(quotient)\n else:\n print(\"You can not divide by zero.\")\n return \"DIV ERROR\"", "title": "" }, { "docid": "3bfaabac8fa4f9506505627d5a40269d", "score": "0.61362296", "text": "def welch_df(a, b): # calculating this allows you to work with other variables to get a p-value!\n s1 = a.var(ddof=1) \n s2 = b.var(ddof=1)\n n1 = a.size\n n2 = b.size\n \n numerator = (s1/n1 + s2/n2)**2\n denominator = (s1/ n1)**2/(n1 - 1) + (s2/ n2)**2/(n2 - 1)\n \n return numerator/denominator", "title": "" }, { "docid": "cfcabcc5407d9d881394dbae104161ab", "score": "0.6107077", "text": "def try_divide(x, y, val=0.0):\n if y != 0.0:\n val = float(x) / y\n return val", "title": "" }, { "docid": "cfcabcc5407d9d881394dbae104161ab", "score": "0.6107077", "text": "def try_divide(x, y, val=0.0):\n if y != 0.0:\n val = float(x) / y\n return val", "title": "" }, { "docid": "13aa115e2dc91bad64519f61e2ab7a3e", "score": "0.6097081", "text": "def try_divide(x, y, val=0.0):\n if y != 0.0:\n val = float(x) / y\n return val", "title": "" } ]
4ab5c8df5747eb88cda571a3127f936a
Here we are displaying index page.
[ { "docid": "669f62e23491941533b3b0a5faad4be9", "score": "0.0", "text": "def payment(request):\n user = User.objects.get(username=request.user)\n settings = Billing.objects.filter(user=user).first()\n\n # What you want the button to do.\n paypal_dict = {\n \"business\": settings.paypal_email,\n \"amount\": settings.price,\n \"item_name\": settings.item_name,\n \"invoice\": \"{}\".format(str(uuid.uuid4())),\n \"notify_url\": \"https://cookstart.io\" + reverse('paypal-ipn'),\n \"return\": \"https://cookstart.io\" + reverse('announceusio:index'),\n \"cancel_return\": \"https://cookstart.io\" + reverse('announceusio:index'),\n \"custom\": \"premium_plan\", # Custom command to correlate to some function later (optional)\n }\n\n # Create the instance.\n form = PayPalPaymentsForm(initial=paypal_dict)\n context = {\"form\": form}\n return render(request, \"announceusio/payment.html\", context)", "title": "" } ]
[ { "docid": "6ed18a3b9c8828349f7b90206b8317e0", "score": "0.85786694", "text": "def index():\n # Render the main page-index\n return render_template(\"index.html\")", "title": "" }, { "docid": "d331dfd34f01bb05ab7a084f36c479b7", "score": "0.83920103", "text": "def index():\n return 'MAKE an Index page'", "title": "" }, { "docid": "e20a9d5925b584e53a59db9cc08a92c7", "score": "0.8367186", "text": "def index():\n\n\t\treturn render_template('index.html', title='RedPanda', current_page='RedPanda')", "title": "" }, { "docid": "67b12216da544454c022fd5f6e130e1e", "score": "0.82968634", "text": "def index():\r\n\treturn render_template('index.html')", "title": "" }, { "docid": "1db97e11620340b8c2ad1d834b01bb90", "score": "0.82765484", "text": "def index():\n return render_template(\"index.html\")", "title": "" }, { "docid": "27a808cdc5360b5599ce15ec986e00af", "score": "0.82687587", "text": "def index():\n\treturn render_template('index.html')", "title": "" }, { "docid": "27a808cdc5360b5599ce15ec986e00af", "score": "0.82687587", "text": "def index():\n\treturn render_template('index.html')", "title": "" }, { "docid": "73b4ee07b8b32b20c4eab46a90048cb0", "score": "0.82660097", "text": "def index():\n\treturn render_template(\"index.html\")", "title": "" }, { "docid": "c32f8d671aca7889ce669955da0535c6", "score": "0.8155998", "text": "def index():\n return render_template('index.html', title='Home')", "title": "" }, { "docid": "b2ae8ac5452aa4982e4dbd45f3a187d5", "score": "0.81463563", "text": "def index(request):\n\n # Get all index data\n\n return render(request, 'index.html')", "title": "" }, { "docid": "cd6f3badc16c562130daa97a707b114c", "score": "0.8108506", "text": "def IndexPage():\n return render_template('Index.html')", "title": "" }, { "docid": "a5f2a6d58956013894b26549a0056ae0", "score": "0.80666846", "text": "def index():\n return render_template('Index.html')", "title": "" }, { "docid": "99120ae809831d1f9c816d3829618829", "score": "0.8063992", "text": "def index():\n return render_template('index11.html')", "title": "" }, { "docid": "9a0e957deef34d50d85f5bbea8131fa2", "score": "0.8034214", "text": "def index():\n return render_template(\"index.html\",\n title=\"Ohio Family Survival Store\",)", "title": "" }, { "docid": "0140d0dfc4f6c0e55de3846be1bf2ba0", "score": "0.80313456", "text": "def index(self):\n pass", "title": "" }, { "docid": "0140d0dfc4f6c0e55de3846be1bf2ba0", "score": "0.80313456", "text": "def index(self):\n pass", "title": "" }, { "docid": "105db6bf1f8a712594ca6389b2dd2d68", "score": "0.80293274", "text": "def indexing(request):\n return helpers.render_page('indexing.html')", "title": "" }, { "docid": "d9cbbe8d6d2b8b0d468a510014e48acf", "score": "0.80199826", "text": "def index():\n\n return render_template('index.html')", "title": "" }, { "docid": "61fb800c0f558ed554fca5508cf8be30", "score": "0.7996915", "text": "def index():\n\n return render_template('index.html', title='CONP | Home', user=current_user)", "title": "" }, { "docid": "7db7e1ab11cf924ad621c1a44824b2b4", "score": "0.79844695", "text": "def index():\n\n return render_template(\"index.html\")", "title": "" }, { "docid": "d8da823bb7b75f6a78ab15c3b4870867", "score": "0.7981584", "text": "def index_page():\n return render_template(\"index.html\")", "title": "" }, { "docid": "72a28853b575782eaa6c8e16aed03893", "score": "0.7975805", "text": "def index():\n\n return render_template('index.html')", "title": "" }, { "docid": "72a28853b575782eaa6c8e16aed03893", "score": "0.7975805", "text": "def index():\n\n return 
render_template('index.html')", "title": "" }, { "docid": "fc96dc8f4f3ae0d4e727011b4d2ec272", "score": "0.79652923", "text": "def index():\n return render_template(\"index.html\")", "title": "" }, { "docid": "a905fb886ac06831242b1eb30e3c08d3", "score": "0.7953786", "text": "def index():\n return render_template('index.html')", "title": "" }, { "docid": "1b951a5884fcd8b9d19f441da4a6cc2a", "score":
"0.77201056", "text": "def index():\n return render_template('layouts/index.html')", "title": "" }, { "docid": "71f16a56b2918438b0658301f11ba7f3", "score": "0.77153236", "text": "def index(): \n \n return render_template(\n \"index.html\",\n data=new)", "title": "" }, { "docid": "933f4dadc021955d50c070e86151a843", "score": "0.7707255", "text": "def index():\n title = 'newsbites'\n topheadlines = get_top_headlines()\n return render_template('index.html', context = topheadlines, title=title)", "title": "" }, { "docid": "cc9c863d58a65ef6c82d51c47055cfa0", "score": "0.7696918", "text": "def index():\n entries = models.Entry.select().limit(100)\n return render_template('index.html', entries=entries)", "title": "" }, { "docid": "5df7ba801ed26afd72f94b72d730fba9", "score": "0.7692318", "text": "def index():\n user = g.user\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "17c9922296e6da9c8645b2e258be9bdc", "score": "0.7689297", "text": "def index(request):\n # Render the HTML template index.html\n return render(\n request,\n 'index.html',\n )", "title": "" }, { "docid": "f800729320a9119e3278da8e294af2b3", "score": "0.7668006", "text": "def show_index(request):\n return render(request,'index.html')", "title": "" }, { "docid": "4a494a38621637df21a6abf9af4b3377", "score": "0.7661594", "text": "def show_index():\n order = Post.pubdate.desc()\n posts = get_posts(None, order)\n\n # add paginator just in case someone would like a post list as homepage\n paginator = Paginator(posts, app.config['ENTRIES_PER_PAGE'],\n 1, 'show_postlist')\n return viewer.render('index.html', posts=posts, paginator=paginator)", "title": "" } ]
b4955fe99573a0588e1f42e9a7e5df02
Ensure marking a test is backing up old results if asked to.
[ { "docid": "4bc7f7bfe7076f8dca7e042f5371926a", "score": "0.6014928", "text": "def test_fixture_is_backing_up_old_results_to_default_path_if_no_path_provided(testdir):\n results_path = testdir.tmpdir.join(\"results.json\")\n results_path.ensure(file=True) # 'touch' the file\n\n # Run a dummy test that performs queries\n # and triggers a counting of the query number\n testdir.makepyfile(test_file=DUMMY_TEST_QUERY)\n\n with mock.patch(\"pytest_django_queries.plugin.create_backup\") as mocked_backup:\n from pytest_django_queries.plugin import DEFAULT_OLD_RESULT_FILENAME\n\n results = testdir.runpytest(\n \"--django-db-bench\", results_path, \"--django-backup-queries\"\n )\n mocked_backup.assert_called_with(str(results_path), DEFAULT_OLD_RESULT_FILENAME)\n\n # Ensure the tests have passed\n results.assert_outcomes(1, 0, 0)\n assert results_path.check()", "title": "" } ]
[ { "docid": "2e1730d8980b0b51499f0f539e52bcfb", "score": "0.7026157", "text": "def test_fixture_is_backing_up_old_results(testdir):\n results_path = testdir.tmpdir.join(\"results.json\")\n old_results_path = testdir.tmpdir.join(\"results.old.json\")\n\n # Run a dummy test that performs queries\n # and triggers a counting of the query number\n testdir.makepyfile(test_file=DUMMY_TEST_QUERY)\n\n results = testdir.runpytest(\n \"--django-db-bench\", results_path, \"--django-backup-queries\", old_results_path\n )\n\n # Ensure the tests have passed\n results.assert_outcomes(1, 0, 0)\n\n # Ensure the results file was created\n assert results_path.check()\n assert (\n not old_results_path.check()\n ), \"Nothing should have been backed up--there was nothing to back up\"\n\n # Create another test to generate more results,\n # to ensure the backup results were actually the previous ones\n testdir.makepyfile(test_otherfile=DUMMY_TEST_QUERY)\n\n # Run again the tests\n results = testdir.runpytest(\n \"--django-db-bench\", results_path, \"--django-backup-queries\", old_results_path\n )\n\n # Ensure the tests have passed\n results.assert_outcomes(2, 0, 0)\n\n # Ensure the results file was created\n assert results_path.check()\n assert old_results_path.check(), \"The backup file should have been created\"\n\n # Check contents\n assert json.load(results_path) == {\n \"test_file\": {\n \"test_count_db_query_number\": {\"query-count\": 2, \"duplicates\": 0}\n },\n \"test_otherfile\": {\n \"test_count_db_query_number\": {\"query-count\": 2, \"duplicates\": 0}\n },\n }\n assert json.load(old_results_path) == {\n \"test_file\": {\"test_count_db_query_number\": {\"query-count\": 2, \"duplicates\": 0}}\n }", "title": "" }, { "docid": "ed09fb57e7e12d7d6e048ad305cff5c0", "score": "0.6418493", "text": "def test_fixture_is_not_backing_up_if_not_asked_to(testdir):\n results_path = testdir.tmpdir.join(\"results.json\")\n results_path.ensure(file=True) # 'touch' the file\n\n # Run a dummy test that performs queries\n # and triggers a counting of the query number\n testdir.makepyfile(test_file=DUMMY_TEST_QUERY)\n\n with mock.patch(\"pytest_django_queries.plugin.create_backup\") as mocked_backup:\n results = testdir.runpytest(\"--django-db-bench\", results_path)\n assert mocked_backup.call_count == 0\n\n # Ensure the tests have passed\n results.assert_outcomes(1, 0, 0)\n assert results_path.check()", "title": "" }, { "docid": "801bd77f2e93c79e6e535a9a06179abe", "score": "0.622513", "text": "def RedoTest(self):\n return False", "title": "" }, { "docid": "d7300c99bb592ecdefc4dd018a27f9d6", "score": "0.6013557", "text": "def test_no_bump_unreviewed(self, mock_sign_file):\n for status in amo.UNREVIEWED_FILE_STATUSES:\n self.file_.update(status=status)\n file_hash = self.file_.generate_hash()\n assert self.version.version == '0.0.1'\n tasks.sign_addons([self.addon.pk])\n assert not mock_sign_file.called\n self.version.reload()\n assert self.version.version == '0.0.1'\n self.file_.reload() # Otherwise self.file_.file doesn't get re-opened\n assert file_hash == self.file_.generate_hash()\n self.assert_no_backup()", "title": "" }, { "docid": "4acf1a1350c346d1ce7b8c9338d59adf", "score": "0.59043825", "text": "def mark_results_done(self):\n self.get_results_done_flag_path().touch(exist_ok=True)", "title": "" }, { "docid": "65f4e57781e03809866f33ac5c3f3ee8", "score": "0.5897425", "text": "def test_save_old_result_doesnt_become_latest(self):\n envs = self.F.EnvironmentFactory.create_full_set(\n {\"OS\": [\"OS X\"], \"Language\": 
[\"English\"]})\n run = self.F.RunFactory.create(environments=envs)\n rcv = self.F.RunCaseVersionFactory.create(run=run)\n u = self.F.UserFactory.create()\n\n rcv.result_pass(envs[0], user=u)\n r1 = rcv.results.get(is_latest=True)\n\n rcv.result_fail(envs[0], user=u)\n r2 = rcv.results.get(is_latest=True)\n\n r1 = self.refresh(r1)\n r1.comment=\"this is it\"\n r1.save()\n\n r1 = self.refresh(r1)\n r2 = self.refresh(r2)\n\n self.assertEqual(r2.status, \"failed\")\n self.assertEqual(r2.is_latest, True)\n self.assertEqual(r1.is_latest, False)", "title": "" }, { "docid": "6e4deac8ba7888951ad1129ce11bdc58", "score": "0.5878163", "text": "def keep_failed(self):\n self._keep_failed = True", "title": "" }, { "docid": "08b2ce4778d17588b2f68502a322c4a0", "score": "0.586139", "text": "def rollback_checkpoints(self, rollback=1):", "title": "" }, { "docid": "1ad3f496a89e06e0893a27a19096f206", "score": "0.5812223", "text": "def test_database_inaccessible_alarm_rollback_previous():", "title": "" }, { "docid": "673b503a446aa88833ff9be1ef6611c9", "score": "0.58040094", "text": "def test_update_verification(self):\n assert False", "title": "" }, { "docid": "bebb680faaae472b931633f8db6f8aad", "score": "0.5759236", "text": "def before_backup_impl(self):", "title": "" }, { "docid": "751165ba5494efb5d05c2c9d1768f79c", "score": "0.57482654", "text": "def test_update_sequencing_run_status(self):\n pass", "title": "" }, { "docid": "6163a1c5dc3b37fe9d75c1cda06b7b3e", "score": "0.57427824", "text": "def test_trigger_savepoint_disposal(self):\n pass", "title": "" }, { "docid": "961b5a7fdbf41a555ed6b35e40fb8712", "score": "0.5729632", "text": "def test_cover_update_if_delete(self):\n raise AssertionError", "title": "" }, { "docid": "667249321da76a0d3781cd0cd951d321", "score": "0.57203734", "text": "def test_leave_version_document_history(self):\n pass", "title": "" }, { "docid": "4d579431ded98e383502fde15e8a104c", "score": "0.56797904", "text": "def update_non_existing_test(self):\n self.cache.update(self.prediction)", "title": "" }, { "docid": "02213ab50af307cc8a27c8a150177741", "score": "0.567402", "text": "def test_finalcheck_nochange(self):\n response = self.client.get(self.final_check_url)\n self.assertCache(response, changed=False)", "title": "" }, { "docid": "2e3991ff3f9ec8a7619b1faa4d962e1c", "score": "0.56593734", "text": "def test_fixture_is_invoked_when_marked(testdir):\n results_path = testdir.tmpdir.join(\"results.json\")\n\n # Run a dummy test that performs queries\n # and triggers a counting of the query number\n testdir.makepyfile(test_file=DUMMY_TEST_QUERY)\n results = testdir.runpytest(\"--django-db-bench\", results_path)\n\n # Ensure the tests have passed\n results.assert_outcomes(1, 0, 0)\n\n # Ensure the results file was created\n assert results_path.check()\n assert json.load(results_path) == {\n \"test_file\": {\"test_count_db_query_number\": {\"query-count\": 2, \"duplicates\": 0}}\n }", "title": "" }, { "docid": "9085a31a7512e3384060ca3aa562edde", "score": "0.5629782", "text": "def test_rollback_fs_metastores(tmpdir):\n factory = FileSystemMetadataStoreFactory(basedir=str(tmpdir))\n for version in range(3):\n factory.get_store(version).set_annotation(key='A', value=version)\n factory.rollback(1)\n for version in range(2):\n factory.get_store(version).get_annotation(key='A') == version\n factory.get_store(2).get_annotation(key='A') is None", "title": "" }, { "docid": "4855a2f56b9c7f2c2a9a79ad5b5223f1", "score": "0.56252545", "text": "def test_override_historical_check(golden_list_importer, 
logger, mocked_statsd, db_conn, metadata_db_conn,\n mocked_config, tmpdir):\n expect_success(golden_list_importer, 20, db_conn, logger)\n with get_importer(GoldenListImporter,\n db_conn,\n metadata_db_conn,\n mocked_config.db_config,\n tmpdir,\n logger,\n mocked_statsd,\n GoldenListParams(filename='sample_golden_list_historicalcheck.csv',\n perform_historic_check=False)) as imp:\n expect_success(imp, 9, db_conn, logger)", "title": "" }, { "docid": "7293606c3e3995b7bd1a69f2a0a88d3c", "score": "0.5622995", "text": "def _postcheck(self):\n pass", "title": "" }, { "docid": "12141c152387d5f60c72b70ad04ca7a3", "score": "0.56210923", "text": "def post_check(self):\n\t\treturn True", "title": "" }, { "docid": "82133c74ae94c534ecc7f67880c7ae64", "score": "0.5618323", "text": "def test_live_migration_overcommitted(self):\n pass", "title": "" }, { "docid": "f6ef2fd2c6dc8c74ed86846a3837132c", "score": "0.5606392", "text": "def test_unregister_update(self):\n # Get the object to work with\n obj = self.obj\n\n # Change something\n obj.boolean = True\n obj.save()\n\n # Check for log entries\n self.assertEqual(obj.history.count(), 0, msg=\"There are no log entries\")", "title": "" }, { "docid": "1ea065136876f085dc757c5d15dda595", "score": "0.5580077", "text": "def test_is_obsolete_true(self):\n self.stamp1.passport = self.stamp2.passport\n self.stamp1.save()\n self.assertTrue(self.stamp1.is_obsolete())", "title": "" }, { "docid": "c619208a846e1e6f533fbccf5fb262c0", "score": "0.5578395", "text": "def store_after_invalidate_test(self):\n self.cache.store(self.prediction)\n self.cache.invalidate()\n self.cache.store(self.prediction)", "title": "" }, { "docid": "d5fc176eed839b6c6196eca08adac839", "score": "0.5574767", "text": "def test_update_sequencing_run(self):\n pass", "title": "" }, { "docid": "2d7366e1c374ee909b4ea84c820517d0", "score": "0.55661064", "text": "def test_only_obsoletes(self):\n args = [self.command]\n\n # -- Check the update summary ------------------------------\n expected = [{\"type\": \"progress\", \"current\": 0, \"total\": 1, \"hint\": \"Downloading the package metadata...\"},\n {\"type\": \"progress\", \"current\": 0, \"total\": 1, \"hint\": \"Processing the package metadata...\"},\n {\"type\": \"recap\",\n \"install\": [{\"new\": \"2-1.nb5.0\", \"name\": \"baz\"}],\n \"remove\": [{\"name\": \"bar\", \"old\": \"1-1.nb5.0\", \"reason\": \"Replaced by baz-2-1.nb5.0\"}]}]\n self._run_nbyum_test(args, expected)", "title": "" }, { "docid": "3c2c3f7e221d2d2b0428026340fc68ca", "score": "0.55548567", "text": "def test_teardown(self):\n assert self.search_behaviour.teardown() is None\n self.assert_quantity_in_outbox(0)", "title": "" }, { "docid": "5b8e3e776a33fb901fc24b88b4d53743", "score": "0.5553339", "text": "def test_verify_reset(self):\n pass", "title": "" }, { "docid": "b67b504caacd59ab638e0f8184ccdd76", "score": "0.5548889", "text": "def test_duplicate_check_override(golden_list_importer, db_conn, logger):\n expect_success(golden_list_importer, 20, db_conn, logger)", "title": "" }, { "docid": "785a8af6effe8e8527f56cc1e0ea21f5", "score": "0.5546981", "text": "def test_cover_update(self):\n raise AssertionError", "title": "" }, { "docid": "0f734f34de6740cf121d228858e9b63f", "score": "0.55456513", "text": "def test_finalcheck_data_changed(self):\n\n objs = [\n self.event,\n self.user,\n self.address,\n self.medical_details,\n *self.emergency_contacts,\n self.reg,\n self.reg.options.first(),\n self.reg.options.first().option,\n ]\n\n for obj in objs:\n with self.subTest(data=repr(obj)):\n 
response = self.client.get(self.final_check_url)\n obj.save()\n self.assertCache(response, changed=True)", "title": "" }, { "docid": "1c4c8b013b8edc57790499db30c2d418", "score": "0.5544601", "text": "def test_900_mark_deletion(self):\n self.assertIsNone(self.s.mark_for_delete())\n self.assertTrue(self.s.MarkedForDelOn)", "title": "" }, { "docid": "0eb411fb42ce6d793c7f5a5de38cc3a8", "score": "0.5537532", "text": "def purge_old_tests(mock_subs=None, dry_run=False):\n logger.info(\"Initializing startup tasks\")\n index = mdf_toolbox.translate_index(CONFIG[\"INGEST_TEST_INDEX\"])\n # Init clients\n clients = mdf_toolbox.confidential_login(services=[\"search_ingest\", \"transfer\"],\n **CONFIG[\"GLOBUS_CREDS\"])\n transfer_client = clients[\"transfer\"]\n search_client = clients[\"search_ingest\"]\n # Get datetime with definition of \"old\"\n old_cutoff = datetime.utcnow() - timedelta(days=CONFIG[\"TEST_TTL\"])\n # Find all test submissions older than the limit (or specified in mock_subs)\n logger.info(\"Scanning status database for old test submissions\")\n test_subs = scan_table(\"status\", filters=[(\"source_id\", \"^\", \"_test_\")])[\"results\"]\n if mock_subs:\n logger.info(\"Using mock_subs list instead of age filter\")\n old_subs = [sub for sub in test_subs if sub[\"source_id\"] in mock_subs]\n else:\n old_subs = [sub for sub in test_subs\n if datetime.fromisoformat(sub[\"submission_time\"][:-1]) < old_cutoff]\n logger.info(\"Found {} submissions to purge: {}\"\n .format(len(old_subs), [s[\"source_id\"] for s in old_subs]))\n\n # Delete all submission information for each old submission\n logger.debug(\"Scan complete. Purge initiated.\")\n for sub in old_subs:\n logger.info(\"\\n\\nPurging submission {}\\n{}\".format(sub[\"source_id\"], \"=\"*80))\n\n # Delete from curation DB (if present)\n try:\n curation_read = read_table(\"curation\", sub[\"source_id\"])\n if curation_read[\"success\"] and not dry_run:\n curation_delete = delete_from_table(\"curation\", sub[\"source_id\"])\n if curation_delete[\"success\"]:\n logger.info(\"Deleted task for {} from curation database\"\n .format(sub[\"source_id\"]))\n else:\n logger.error(\"Unable to delete task {} from curation database: {}\"\n .format(sub[\"source_id\"], curation_delete[\"error\"]))\n elif curation_read[\"success\"] and dry_run:\n logger.info(\"Dry run: Skipping curation task deletion for {}\"\n .format(sub[\"source_id\"]))\n elif \"not found\" in curation_read[\"error\"]:\n logger.info(\"No active curation task for {}\".format(sub[\"source_id\"]))\n else:\n logger.error(\"Unable to read curation database for {}: {}\"\n .format(sub[\"source_id\"], curation_read[\"error\"]))\n except Exception as e:\n logger.error(\"Error with curation database for {}: {}\"\n .format(sub[\"source_id\"], repr(e)))\n\n # Locate files to purge\n # Fetch current version dataset entry, try to locate files based on that\n sub_source_name = split_source_id(sub[\"source_id\"])[\"source_name\"]\n current_q = {\n \"q\": \"mdf.source_name:{} AND mdf.resource_type:dataset\".format(sub_source_name),\n \"advanced\": True\n }\n try:\n current_ds = mdf_toolbox.gmeta_pop(search_client.post_search(index, current_q))\n except Exception as e:\n logger.error(\"Exception fetching current submission version {}: {}\"\n .format(sub_source_name, repr(e)))\n current_ds = None\n # Only process results if results exist\n if current_ds:\n ds_md = current_ds[0]\n logger.debug(\"Found current dataset entry {}\".format(ds_md[\"mdf\"][\"source_id\"]))\n # If the current version is 
the old version, files to delete are current files\n if ds_md[\"mdf\"][\"source_id\"] == sub[\"source_id\"]:\n old_data = ds_md[\"data\"][\"endpoint_path\"]\n # Otherwise, make reasonable guess about file location - replace\n # current source_id in path with old source_id\n else:\n current_path = ds_md[\"data\"][\"endpoint_path\"]\n old_path = current_path.replace(ds_md[\"mdf\"][\"source_id\"], sub[\"source_id\"])\n # Sanity-check - ensure path changed to not delete current data\n # If current sub is old, was caught earlier\n if old_path != current_path:\n old_data = old_path\n else:\n logger.info(\"Current data path '{}' nonstandard (no source_id)\"\n .format(current_path))\n old_data = None\n else:\n logger.info(\"No current dataset entry for {}\".format(sub_source_name))\n old_data = None\n # Delete data location found\n if old_data:\n logger.debug(\"\\nStarting data deletion for '{}'\".format(old_data))\n old_data_info = urllib.parse.urlparse(old_data)\n # Check that location exists and is directory\n # All submissions should always be in a directory\n dir_res = mdf_toolbox.globus_check_directory(transfer_client, old_data_info.netloc,\n old_data_info.path)\n if not dir_res[\"exists\"]:\n logger.info(\"Data location '{}' not found\".format(old_data))\n elif not dir_res[\"is_dir\"]:\n logger.error(\"Data location '{}' is not a directory, skipping\"\n .format(old_data))\n else:\n logger.info(\"Deleting all files at data location '{}'\".format(old_data))\n try:\n if not dry_run:\n tdelete = globus_sdk.DeleteData(transfer_client, old_data_info.netloc,\n recursive=True)\n tdelete.add_item(old_data_info.path)\n tdelete_res = transfer_client.submit_delete(tdelete)\n if tdelete_res[\"code\"] != \"Accepted\":\n logger.error(\"Transfer Delete not accepted: {}\"\n .format(tdelete_res[\"code\"]))\n else:\n error_timestamps = set()\n while not transfer_client.task_wait(tdelete_res[\"task_id\"]):\n for event in transfer_client.task_event_list(\n tdelete_res[\"task_id\"]):\n if event[\"is_error\"] and event[\"time\"] not in error_timestamps:\n error_timestamps.add(event[\"time\"])\n logger.error(\"Ongoing Transfer Delete error: {}\"\n .format(event))\n task = transfer_client.get_task(tdelete_res[\"task_id\"]).data\n if task[\"status\"] == \"SUCCEEDED\":\n logger.info(\"Data location '{}' deleted\".format(old_data))\n else:\n logger.error(\"Delete task for '{}' failed: {}\"\n .format(old_data, task))\n else:\n logger.info(\"Dry run: Skipping data deletion for {}\"\n .format(sub[\"source_id\"]))\n except Exception as e:\n logger.error(\"Error deleting location '{}' for {}: {}\"\n .format(old_data, sub[\"source_id\"], repr(e)))\n else:\n logger.info(\"No old data location found. 
No files deleted.\")\n\n # Delete from Search\n logger.info(\"\\nDeleting Search entries for {}\".format(sub[\"source_id\"]))\n # Delete by source_id - this ensures only the expired version is purged\n del_q = {\n \"q\": \"mdf.source_id:{}\".format(sub[\"source_id\"]),\n \"advanced\": True\n }\n if not dry_run:\n delete_payload = [index, del_q]\n delete_res = perform_search_task(search_client.delete_by_query, delete_payload,\n get_task=search_client.get_task,\n ping_time=CONFIG[\"SEARCH_RETRIES\"],\n retries=CONFIG[\"SEARCH_PING_TIME\"], quiet=True)\n if delete_res[\"success\"]:\n logger.debug(\"Search entries cleared from {}\".format(sub[\"source_id\"]))\n elif delete_res[\"error\"]:\n logger.error(\"Search deletion error on {}: {}\"\n .format(sub[\"source_id\"], delete_res[\"error\"]))\n else:\n logger.critical(\"Unknown Search deletion error on {}: {}\"\n .format(sub[\"source_id\"], delete_res))\n else:\n logger.info(\"Dry run: Skipping Search entry deletion for {}\".format(sub[\"source_id\"]))\n\n # Delete from status DB\n logger.info(\"\\nDeleting status database entry for {}\".format(sub[\"source_id\"]))\n if not dry_run:\n try:\n status_delete = delete_from_table(\"status\", sub[\"source_id\"])\n if status_delete[\"success\"]:\n logger.info(\"Deleted {} from status database\".format(sub[\"source_id\"]))\n else:\n logger.error(\"Unable to delete {} from status database: {}\"\n .format(sub[\"source_id\"], status_delete[\"error\"]))\n except Exception as e:\n logger.error(\"Error with status database for {}: {}\"\n .format(sub[\"source_id\"], repr(e)))\n else:\n logger.info(\"Dry run: Skipping status entry deletion for {}\".format(sub[\"source_id\"]))\n\n # Finished with this submission\n logger.debug(\"Terminating purge of {}\".format(sub[\"source_id\"]))\n\n # Finished will all identified submissions\n logger.info(\"\\nAll {} expired submissions purged as possible.\".format(len(old_subs)))\n return", "title": "" }, { "docid": "169d55adc97f97f34494de8f5cc257f7", "score": "0.5533265", "text": "def test_overwrite_potential(self):\n pass", "title": "" }, { "docid": "a95795f3f14eca44131c1bca6006e109", "score": "0.5505122", "text": "def test_delete_results(self):\n pass", "title": "" }, { "docid": "48453b72ca3bc1d22d53795ebd18d217", "score": "0.54897946", "text": "def test_flaky_thing_that_succeeds_then_fails_then_succeeds(self):\n # pylint:disable=no-self-use\n TestExample._threshold += 1\n assert TestExample._threshold != 1", "title": "" }, { "docid": "ca4a40a532be9e59f30c2e2366888390", "score": "0.5489424", "text": "def test_reset(self):\n pass", "title": "" }, { "docid": "53365f1bcb4c68197395c5329417ac2d", "score": "0.5484903", "text": "def check_out(self):\n return True", "title": "" }, { "docid": "ff0e5195d8b3c59801472a0160579f3a", "score": "0.548313", "text": "def test_flaky_thing_that_fails_then_succeeds(self):\n # pylint:disable=no-self-use\n TestExample._threshold += 1\n assert TestExample._threshold >= 1", "title": "" }, { "docid": "93159b4755bed2631b50a66ab9a7e815", "score": "0.5481183", "text": "def test_double_sync(self):\n self.sync_and_check()\n self.sync_and_check(mx=0)", "title": "" }, { "docid": "8be397e92a9d4a4a273f2bf23d798e34", "score": "0.54748935", "text": "def tearDown(self):\n LimitTestBase.tearDown(self)\n self.assertEquals(0, len(self.expected_incr))\n self.assertEquals(0, len(self.expected_add))\n dos.memcache.incr = self.old_incr\n dos.memcache.add = self.old_add", "title": "" }, { "docid": "1307481c8595ec32bc96d7bac1994b53", "score": "0.5472922", "text": "def 
test_backfill(self) -> None:\n\n alert = MockAlert.from_random()\n alert.previous = [MockAlert.from_random() for _ in range(10)]\n for a in alert.previous:\n a.data = {**a.data,\n 'source_name': alert.source_name,\n 'object_aliases': alert.object_aliases,\n 'object_type_name': alert.object_type_name, }\n\n alert.previous = sorted(alert.previous, key=(lambda _a: _a.observation_time))\n\n obs_count = Observation.count()\n alert_count = Alert.count()\n\n records = alert.backfill_database()\n assert Observation.count() == obs_count + len(alert.previous) + 1\n assert Alert.count() == alert_count + len(alert.previous) + 1\n\n obs_count = Observation.count()\n alert_count = Alert.count()\n\n for a in records[-2:]:\n Alert.delete(a.id)\n Observation.delete(a.observation_id)\n\n assert Observation.count() == obs_count - 2\n assert Alert.count() == alert_count - 2\n\n obs_count = Observation.count()\n alert_count = Alert.count()\n\n alert.backfill_database()\n assert Observation.count() == obs_count + 2\n assert Alert.count() == alert_count + 2", "title": "" }, { "docid": "35e3b930f8e25cf581a83b279ad691ac", "score": "0.5467795", "text": "def finish_test(self, test):\n pass", "title": "" }, { "docid": "47598502f2fc8def53520fd0273274ac", "score": "0.54633784", "text": "def sweep(self):\n for s in self.dirty.copy():\n rpki.log.debug(\"Sweeping %r\" % s)\n if s.sql_deleted:\n s.sql_delete()\n else:\n s.sql_store()\n self.assert_pristine()", "title": "" }, { "docid": "012bb57977477fe045710daa87fe7ff1", "score": "0.5452215", "text": "def post_test(self, test_num):\n super(FuzzerTarget, self).post_test(test_num)\n if self.report.get('status') != Report.PASSED:\n self.save_report_to_disc()", "title": "" }, { "docid": "98814d93b965e703ddde53997941a50a", "score": "0.54323465", "text": "def test_partial_update_verification(self):\n assert False", "title": "" }, { "docid": "45623035b0aa066492e29d8ca93b68d1", "score": "0.54237896", "text": "def test_gateway_lb_target_unavailable_rollback_previous():", "title": "" }, { "docid": "e5d650ac3ced3c4814c840ef741aceed", "score": "0.5423415", "text": "def test_repeated_fits(self):\n with self.set_func('repitition_changes'):\n with pytest.raises(AssertionError):\n super().test_repeated_fits()", "title": "" }, { "docid": "f34d8356c678ee368d226d5c5af80dbd", "score": "0.5419543", "text": "def test_update_learner(self):\n pass", "title": "" }, { "docid": "f2ec9f86c322e50f90fc6998478078ce", "score": "0.5394984", "text": "def test_update12(self):\n pass", "title": "" }, { "docid": "2d0aecaee04c6432a8aaf901f23f601d", "score": "0.5374228", "text": "def test_past_bugs(self):\n pass", "title": "" }, { "docid": "268166dcf42d9c37226904fba796f1eb", "score": "0.5370469", "text": "def mark_final(self, testName, result, resultMessage):\n self.set_result(result, resultMessage)\n if \"FAILED\" in self.result_list:\n self.log.error(testName + \"###test FAILED \" + resultMessage)\n self.result_list.clear()\n self.assertFalse(True, 'There is failed tests')\n else:\n self.log.info(testName + \"### test Successful\")\n self.result_list.clear()\n assert True == True", "title": "" }, { "docid": "237e2e2122cbf5eb114a9c176e36e405", "score": "0.5360129", "text": "def test_ok_to_update_fails(self):\n assert not utils.ok_to_update()", "title": "" }, { "docid": "eb651e9e84ebcb7b2fbf042014bd976a", "score": "0.53600925", "text": "def test_reused_ids_with_rollback(self):\n sid = transaction.savepoint()\n\n model = M2MRefModel.objects.create()\n added_model = ReffedModel.objects.create()\n 
model.m2m.add(added_model)\n self.assertEqual(model.pk, 1)\n self.assertEqual(added_model.pk, 1)\n self.assertEqual(model.counter, 1)\n self.assertEqual(model.counter_2, 1)\n\n # Roll back, and set up the test again.\n transaction.savepoint_rollback(sid)\n\n model = M2MRefModel.objects.create()\n added_model = ReffedModel.objects.create()\n model.m2m.add(added_model)\n self.assertEqual(model.pk, 1)\n self.assertEqual(added_model.pk, 1)\n self.assertEqual(model.counter, 1)\n self.assertEqual(model.counter_2, 1)", "title": "" }, { "docid": "20322380d2b531d5b77e0ba42b8999c7", "score": "0.5357896", "text": "def _test_finished(self):\n if self._test:\n self._tests.append(self._test)\n self._test = None", "title": "" }, { "docid": "d452fe003560700072b6cbfd2e54b0c7", "score": "0.5355407", "text": "def test_outdated_done_indef(self):\n\n # boiler plate to create baseline and indef with retired RP\n yesterday = now - timedelta(days=1)\n weekago = now - timedelta(weeks=1)\n org, rp2, rp2_id = self.setup_org_n_rp(\n org_name='testorg', rp_name='v2', retired_as_of=yesterday)\n org, rp3, rp3_id = self.setup_org_n_rp(org=org, rp_name='v3')\n org_id = org.id\n self.test_user.organizations.append(org)\n audit = Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID)\n uc = UserConsent(\n user_id=TEST_USER_ID, organization_id=org_id,\n audit=audit, agreement_url='http://no.com',\n acceptance_date=weekago)\n with SessionScope(db):\n db.session.add(audit)\n db.session.add(uc)\n db.session.commit()\n\n self.setup_qb(\n questionnaire_name='epic23', qb_name='baseline v2',\n classification='baseline', rp_id=rp2_id)\n self.setup_qb(\n questionnaire_name='epic26', qb_name='baseline v3',\n classification='baseline', rp_id=rp3_id)\n qb2_indef = self.setup_qb(\n questionnaire_name='irondemog', qb_name='indef v2',\n classification='indefinite', rp_id=rp2_id)\n self.setup_qb(\n questionnaire_name='irondemog_v3', qb_name='indef v3',\n classification='indefinite', rp_id=rp3_id)\n\n # for today, should get the v3 baseline\n user = db.session.merge(self.test_user)\n a_s = AssessmentStatus(user=user, as_of_date=now)\n self.assertEqual(\n ['epic26', 'irondemog_v3'],\n a_s.instruments_needing_full_assessment(classification='all'))\n\n # create done QNR for indefinite dated prior to rp transition\n # belonging to older qb - confirm that clears indef work as of then\n mock_qr('irondemog', timestamp=weekago, qb=qb2_indef)\n user = db.session.merge(self.test_user)\n a_s = AssessmentStatus(user=user, as_of_date=weekago)\n self.assertEqual([], a_s.instruments_needing_full_assessment(\n classification='indefinite'))\n\n # move forward in time; user should no longer need indefinite, even\n # tho RP changed\n qb2_indef = db.session.merge(qb2_indef)\n self.assertEqual([qb2_indef], QuestionnaireBank.qbs_for_user(\n user, classification='indefinite', as_of_date=now))\n a_s = AssessmentStatus(user=user, as_of_date=now)\n self.assertEqual([], a_s.instruments_needing_full_assessment(\n classification='indefinite'))\n self.assertEqual(['epic26'], a_s.instruments_needing_full_assessment(\n classification='all'))", "title": "" }, { "docid": "0ea1ed13a450af065602189257f6170d", "score": "0.5355133", "text": "def store_existing_test(self):\n self.cache.store(self.prediction)\n self.cache.store(self.prediction)", "title": "" }, { "docid": "c9fd42c423435e514e34d26aa221aec2", "score": "0.53515273", "text": "def dont_reset_worker_backlogs_if_they_already_have_them():", "title": "" }, { "docid": "7db4d8e5d130c56beb0dcc296bf1c4a8", "score": "0.5348404", 
"text": "def test_is_new_false_for_old_titles(self):\n self.title.updated_at = timezone.now() - timezone.timedelta(30)\n self.title.save()\n self.assertFalse(self.title.is_new)\n\n self.title.updated_at = timezone.now() - timezone.timedelta(6)\n self.title.save()\n self.assertFalse(self.title.is_new)", "title": "" }, { "docid": "58a5802016350e49fd6a73aef56b4aca", "score": "0.53482896", "text": "def afterTest(self, test):\n # Restore transaction support on tests\n from django.conf import settings\n from django.db import connections\n from django.core import mail\n from django.core.urlresolvers import clear_url_caches\n\n mail.outbox = []\n\n if hasattr(test.context, '_nosedjango_root_urlconf'):\n settings.ROOT_URLCONF = test.context._nosedjango_root_urlconf\n clear_url_caches()\n\n use_transaction_isolation = self.should_use_transaction_isolation(\n test, settings)\n\n if use_transaction_isolation:\n self.restore_transaction_support()\n logger.debug(\"Rolling back\")\n self.exit_atomics()\n self.restore_autocommit()\n self.rollback()\n if self.transaction_is_managed():\n self.transaction.leave_transaction_management()\n # If connection is not closed Postgres can go wild with\n # character encodings.\n for connection in connections.all():\n connection.close()\n else:\n # Have to clear the db even if we're using django because django\n # doesn't properly flush the database after a test. It relies on\n # flushing before a test, so we want to avoid the case where a\n # django test doesn't flush and then a normal test runs, because it\n # will expect the db to already be flushed\n self._flush_db()\n self._loaded_test_fixtures = []\n\n self.call_plugins_method('afterRollback', settings)", "title": "" }, { "docid": "0325f69d4626c0d953c27e7de01b382a", "score": "0.53455347", "text": "async def test_not_saving_while_stopping(\n hass: HomeAssistant, hass_storage: dict[str, Any]\n) -> None:\n store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)\n hass.state = CoreState.stopping\n await store.async_save(MOCK_DATA)\n assert store.key not in hass_storage", "title": "" }, { "docid": "c98a80357b749178619489a7d865b58d", "score": "0.5339342", "text": "def test_act_not_is_searching(self):\n # setup\n self.strategy.is_searching = False\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "title": "" }, { "docid": "5e0a53d71b298d63df1c57d099c01bc2", "score": "0.5336282", "text": "def test_flaky_thing_that_fails_then_succeeds():\n TestExampleFlakyTestCase._threshold += 1\n assert TestExampleFlakyTestCase._threshold >= 1", "title": "" }, { "docid": "ca2ab71e32c661683ca81fabf2a8ab50", "score": "0.5333833", "text": "def test_unforceGc(self):\n self.config.postOptions()\n runner = self.makeRunner()\n runner.run(self.test)\n self.assertEqual(self.log, ['test', 'test'])", "title": "" }, { "docid": "98d5588a59c7712d8964a75a33e5a862", "score": "0.53263634", "text": "def test_flaky_thing_that_fails_then_succeeds():\n TestExampleFlakyTests._threshold += 1\n assert TestExampleFlakyTests._threshold >= 1", "title": "" }, { "docid": "49c61f472295d8a74338eefd8d69b779", "score": "0.5323825", "text": "def test_award_update_from_latest_transaction():\n\n agency1 = baker.make(\"references.Agency\")\n agency2 = baker.make(\"references.Agency\")\n\n award = baker.make(\n \"search.AwardSearch\",\n award_id=1,\n awarding_agency_id=agency1.id,\n period_of_performance_current_end_date=datetime.date(2016, 1, 1),\n description=\"original award\",\n generated_unique_award_id=\"AWD_1\",\n )\n\n # 
adding transaction with same info should not change award values\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=1,\n award=award,\n awarding_agency_id=award.awarding_agency_id,\n period_of_performance_current_end_date=award.period_of_performance_current_end_date,\n transaction_description=award.description,\n action_date=datetime.date(2016, 2, 1),\n generated_unique_award_id=\"AWD_1\",\n )\n transaction = TransactionNormalized.objects.filter(id=1).first()\n\n update_awards()\n award.refresh_from_db()\n\n assert award.awarding_agency_id == agency1.id\n assert award.period_of_performance_current_end_date == datetime.date(2016, 1, 1)\n assert award.description == \"original award\"\n assert award.latest_transaction == transaction\n\n # adding an older transaction with different info updates award's total obligation amt and the description\n # (which is sourced from the earliest txn), but other info remains unchanged\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=2,\n award=award,\n awarding_agency_id=agency2.id,\n period_of_performance_current_end_date=datetime.date(2017, 1, 1),\n transaction_description=\"older description\",\n action_date=datetime.date(2016, 1, 1),\n generated_unique_award_id=\"AWD_1\",\n )\n update_awards()\n award.refresh_from_db()\n\n assert award.awarding_agency_id == agency1.id\n assert award.period_of_performance_current_end_date == datetime.date(2016, 1, 1)\n assert award.description == \"older description\"\n\n # adding an newer transaction with different info updates award's total obligation amt and also overrides\n # other values\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=999,\n award=award,\n awarding_agency_id=agency2.id,\n period_of_performance_current_end_date=datetime.date(2010, 1, 1),\n transaction_description=\"new description\",\n action_date=datetime.date(2017, 1, 1),\n generated_unique_award_id=\"AWD_1\",\n )\n\n update_awards()\n award.refresh_from_db()\n\n assert award.awarding_agency_id == agency2.id\n assert award.period_of_performance_current_end_date == datetime.date(2010, 1, 1)\n # award desc should still reflect the earliest txn\n assert award.description == \"older description\"", "title": "" }, { "docid": "fb2ab10fd844f411e0611dd4631fcb7d", "score": "0.530847", "text": "def test_new_stick():\n if not start_test('new_stick'):\n return", "title": "" }, { "docid": "b89840df684a81fe3f3d2a7cdb532c41", "score": "0.53050184", "text": "def test_saved_breach(all_breaches, saved_breach):\r\n assert saved_breach in all_breaches", "title": "" }, { "docid": "2d32f5143fb4a02da4561a33fafcae03", "score": "0.530498", "text": "def test_is_last_not_set(tmpdir) -> None:\n\n class InvalidModel(AsyncBoringModel):\n def training_step(self, dataloader_iter: Iterator) -> STEP_OUTPUT:\n output = super().training_step(dataloader_iter)\n del output[\"is_last\"]\n return output\n\n trainer = Trainer(max_epochs=1, accumulate_grad_batches=2, default_root_dir=tmpdir)\n m = InvalidModel()\n with pytest.raises(MisconfigurationException):\n trainer.fit(m)", "title": "" }, { "docid": "6759f714974a7c40aa970a16056aa2d2", "score": "0.5300785", "text": "def test_versioncheck_for_update(self):\n\n Foo = self.classes.Foo\n\n s1 = self._fixture()\n f1s1 = Foo(value=\"f1 value\")\n s1.add(f1s1)\n s1.commit()\n\n s2 = fixture_session()\n f1s2 = s2.get(Foo, f1s1.id)\n # not sure if I like this API\n s2.refresh(f1s2, with_for_update=True)\n f1s2.value = \"f1 new value\"\n\n assert_raises(\n exc.DBAPIError, s1.refresh, f1s1, 
with_for_update={\"nowait\": True}\n )\n s1.rollback()\n\n with conditional_sane_rowcount_warnings(update=True):\n s2.commit()\n s1.refresh(f1s1, with_for_update={\"nowait\": True})\n assert f1s1.version_id == f1s2.version_id", "title": "" }, { "docid": "030071e749f9a5ba2f7f2d8527a27f3f", "score": "0.529697", "text": "def test_update_dataset_workload(self):\n pass", "title": "" }, { "docid": "32a20f2120b1fa00a8806868dc7f7fe3", "score": "0.5294405", "text": "def test_delete_fabs_success():\n\n # Award/Transaction deleted based on 1-1 transaction\n baker.make(Award, id=1, generated_unique_award_id=\"TEST_AWARD_1\")\n baker.make(TransactionNormalized, id=1, award_id=1, unique_award_key=\"TEST_AWARD_1\")\n baker.make(TransactionFABS, transaction_id=1, published_fabs_id=301, unique_award_key=\"TEST_AWARD_1\")\n\n # Award kept despite having one of their associated transactions removed\n baker.make(Award, id=2, generated_unique_award_id=\"TEST_AWARD_2\")\n baker.make(TransactionNormalized, id=2, award_id=2, action_date=\"2019-01-01\", unique_award_key=\"TEST_AWARD_2\")\n baker.make(TransactionNormalized, id=3, award_id=2, action_date=\"2019-01-02\", unique_award_key=\"TEST_AWARD_2\")\n baker.make(TransactionFABS, transaction_id=2, published_fabs_id=302, unique_award_key=\"TEST_AWARD_2\")\n baker.make(TransactionFABS, transaction_id=3, published_fabs_id=303, unique_award_key=\"TEST_AWARD_2\")\n\n # Award/Transaction untouched at all as control\n baker.make(Award, id=3, generated_unique_award_id=\"TEST_AWARD_3\")\n baker.make(TransactionNormalized, id=4, award_id=3, unique_award_key=\"TEST_AWARD_3\")\n baker.make(TransactionFABS, transaction_id=4, published_fabs_id=304, unique_award_key=\"TEST_AWARD_3\")\n\n # Award is not deleted; old transaction deleted; new transaction uses old award\n baker.make(Award, id=4, generated_unique_award_id=\"TEST_AWARD_4\")\n baker.make(TransactionNormalized, id=5, award_id=4, unique_award_key=\"TEST_AWARD_4\")\n baker.make(TransactionFABS, transaction_id=5, published_fabs_id=305, unique_award_key=\"TEST_AWARD_4\")\n baker.make(\n SourceAssistanceTransaction,\n published_fabs_id=306,\n afa_generated_unique=\"TEST_TRANSACTION_6\",\n unique_award_key=\"TEST_AWARD_4\",\n is_active=True,\n modified_at=\"2022-02-18 18:27:50.813471\",\n created_at=\"2022-02-18 18:27:50.813471\",\n updated_at=\"2022-02-18 18:27:50.813471\",\n action_date=\"2022-02-18 18:27:50.813471\",\n )\n\n update_awards()\n\n # Main call\n updated_and_delete_awards = delete_stale_fabs([301, 302, 305])\n expected_updated_and_delete_awards = [1, 2, 4]\n assert sorted(updated_and_delete_awards) == expected_updated_and_delete_awards\n\n # Update and Delete Awards\n upsert_fabs_transactions(ids_to_upsert=[306], update_and_delete_award_ids=updated_and_delete_awards)\n\n # Awards\n awards_left = Award.objects.all()\n award_ids_left = set([award.id for award in awards_left])\n expected_awards_ids_left = [2, 3, 4]\n assert sorted(award_ids_left) == expected_awards_ids_left\n assert len(award_ids_left) == len(expected_awards_ids_left)\n\n latest_transaction_ids = set([award.latest_transaction_id for award in awards_left])\n new_award_transaction_id = TransactionNormalized.objects.filter(award_id=4).values_list(\"id\", flat=True).first()\n expected_latest_transaction_ids = sorted([3, 4, new_award_transaction_id])\n assert sorted(latest_transaction_ids) == expected_latest_transaction_ids\n\n # Transaction Normalized\n transactions_left = TransactionNormalized.objects.all()\n\n transaction_norm_ids_left = 
set([transaction.id for transaction in transactions_left])\n expected_transaction_norm_ids_left = sorted([3, 4, new_award_transaction_id])\n assert sorted(transaction_norm_ids_left) == expected_transaction_norm_ids_left\n\n # Transaction FABS\n transactions_fabs_left = TransactionFABS.objects.all()\n\n transaction_fabs_left = set([transaction_fabs.published_fabs_id for transaction_fabs in transactions_fabs_left])\n expected_transaction_fabs_left = [303, 304, 306]\n assert sorted(transaction_fabs_left) == expected_transaction_fabs_left", "title": "" }, { "docid": "9838673d03bae0a34fd995f7dfe01b85", "score": "0.52763283", "text": "def testSkipped(self, test):\n pass", "title": "" }, { "docid": "b5696af1cd76402cc6b484816b96d6dc", "score": "0.5274394", "text": "def test_noop(self):\n # Verbosely...\n res1 = self.bm_run_redo()\n self.assertEqual(res1[1], \"Checking 0 files.\")\n\n # Non-verbosely...\n res1q = self.bm_run_redo(VERBOSE=\"0\")\n self.assertEqual(res1q[1], \"\")\n\n # Add a 3 day old sample sheet and a new one which does not relate\n # to any run, and a new one in another directory.\n self.sheets.make(self.thismonth + '/sheet_XXXX.csv', hours_age=72)\n self.sheets.make(self.thismonth + '/sheet_YYYY.csv', hours_age=2)\n self.sheets.make('2000/2/sheet_YYYY.csv', hours_age=2)\n\n res2 = self.bm_run_redo()\n self.assertEqual(res2[1], \"Checking 1 files.\")\n\n # See no pipelines were touched at any time\n for pd in glob(self.seqdata.sandbox + '/*_*_*_*/pipeline'):\n self.assertTrue( os.lstat(pd).st_mtime < self.unixtime )\n\n # Finally set HTLB to not pick up anything\n self.environment['REDO_HOURS_TO_LOOK_BACK'] = '1'\n res3 = self.bm_run_redo()\n self.assertEqual(res3[1], \"Checking 0 files.\")", "title": "" }, { "docid": "888c7f8f3917871fa80e4105e3ee6849", "score": "0.5269013", "text": "def test_is_obsolete_false(self):\n self.assertFalse(self.stamp1.is_obsolete())\n self.assertFalse(self.stamp2.is_obsolete())", "title": "" }, { "docid": "36951fad94c0ef5e0d82ec7ef8a38241", "score": "0.5268156", "text": "def needs_reset(self):\n return True", "title": "" }, { "docid": "44ffc862369197560303880325fc4bc4", "score": "0.5264641", "text": "def test_steady_state_basic_backup_restore(self):\n indexer_stats_before_backup = self.rest.get_indexer_metadata()\n index_backup_clients = [\n IndexBackupClient(self.master, bucket.name)\n for bucket in self.buckets]\n for backup_client in index_backup_clients:\n self.assertTrue(\n backup_client.backup()[0],\n msg=f\"Backup failed for {backup_client.bucket}\")\n self.rest.delete_all_buckets()\n self._recreate_bucket_structure()\n for backup_client in index_backup_clients:\n self.assertTrue(\n backup_client.restore()[0],\n msg=f\"Restore failed for {backup_client.bucket}\")\n self.wait_until_indexes_online(defer_build=self.defer_build)\n indexer_stats_after_restore = self.rest.get_indexer_metadata()\n self._verify_indexes(indexer_stats_before_backup['status'],\n indexer_stats_after_restore['status'])", "title": "" }, { "docid": "6928761e079457a6515bbfaec9893c76", "score": "0.5258709", "text": "def test_update_workflow(self):\n pass", "title": "" }, { "docid": "b90739cb301a21b242d3575368a7a2f4", "score": "0.5256164", "text": "def test_run_patch(self, *args, **kwargs):\n\n global counter\n counter += 1\n self.extra_bash_arg_1 = str(counter).zfill(10)\n\n og_test_run(self, *args, **kwargs)\n\n sql = \"\"\"SELECT s1.attacker_asn, s1.victim, s1.trace_hijacked_adopting AS v1_hj, s2.trace_hijacked_adopting AS v2_hj, s1.extra_bash_arg_1 FROM simulation_results 
s1\n INNER JOIN simulation_results s2\n ON s1.attacker_asn = s2.attacker_asn AND s2.victim = s1.victim\n AND s1.trace_total_adopting = s2.trace_total_adopting\n AND s1.trace_total_collateral = s2.trace_total_collateral\n WHERE s1.adopt_pol = 'ROVPP_V1' AND s2.adopt_pol = 'ROVPP_V2'\n ORDER BY s1.extra_bash_arg_1 DESC\n \"\"\"\n with Database() as db:\n db.execute(\"CREATE INDEX IF NOT EXISTS simp_index ON sim_test_ases(asn)\")\n results = db.execute(sql)\n for result in results:\n # IF v1 has less hijacks than v2\n if result[\"v1_hj\"] < result[\"v2_hj\"]:\n ases_left = set([x[\"asn\"] for x in db.execute(\"SELECT asn FROM sim_test_ases\")])\n\n dont_delete = set()\n for row in db.execute(f\"SELECT peer_as_1 FROM peers WHERE peer_as_2 = {self.attack.attacker} OR peer_as_2 = {self.attack.victim}\"):\n dont_delete.add(row[\"peer_as_1\"])\n for row in db.execute(f\"SELECT peer_as_2 FROM peers WHERE peer_as_1 = {self.attack.attacker} OR peer_as_1 = {self.attack.victim}\"):\n dont_delete.add(row[\"peer_as_2\"])\n for row in db.execute(f\"SELECT provider_as FROM provider_customers WHERE customer_as = {self.attack.attacker} OR customer_as = {self.attack.victim}\"):\n dont_delete.add(row[\"provider_as\"])\n dont_delete.add(self.attack.attacker)\n dont_delete.add(self.attack.victim)\n ases_left = ases_left.difference(dont_delete)\n \n # Save a reference\n for table_name in [\"peers\", \"provider_customers\", \"sim_test_ases\"]:\n db.execute(f\"DROP TABLE IF EXISTS saved_{table_name}\")\n db.execute(f\"CREATE UNLOGGED TABLE saved_{table_name} AS (SELECT * FROM {table_name});\")\n ases_left_saved = deepcopy(ases_left)\n\n for num_to_remove in [10000, 5000, 2000, 1000, 500, 100, 50, 10, 5, 1]:\n count_failures = 0\n while True:\n\n if count_failures > 40 and num_to_remove >= 500:\n break\n if count_failures > 100:\n break\n # this is necessary to make sure that we don't try to remove more than what's left\n if len(ases_left) < num_to_remove:\n # Note that this only breaks out of the inner while loop, which is what we want\n break\n if len(ases_left) > 1000 and num_to_remove < 100:\n break\n removal_ases = list(random.sample(ases_left, num_to_remove))\n csv_path = \"/tmp/shrinktest.csv\"\n utils.rows_to_db([[x] for x in removal_ases], csv_path, RemovalASesTable)\n with RemovalASesTable() as db2:\n print(\"Removing\")\n db2.execute(f\"DELETE FROM peers USING {db2.name} WHERE peer_as_1 = {db2.name}.asn OR peer_as_2 = {db2.name}.asn\")\n print(\"Removed from peers\")\n db2.execute(f\"DELETE FROM provider_customers pc USING {db2.name} r WHERE provider_as = asn OR customer_as = asn\")\n print(\"Removed from provider customers\")\n db2.execute(f\"DELETE FROM sim_test_ases s USING {db2.name} WHERE {db2.name}.asn = s.asn\")\n print(\"Removal complete\")\n ases_left = list(sorted(set(ases_left).difference(set(removal_ases))))\n for adopt_pol in [Non_Default_Policies.ROVPP_V1, Non_Default_Policies.ROVPP_V2]:\n self.adopt_pol = adopt_pol\n max_count = 10\n counter += 1\n self.extra_bash_arg_1 = str(counter).zfill(max_count)\n og_test_run(self, *args, **kwargs)\n assert counter < max_count ** 10, \"zfill will fail\"\n result = db.execute(sql)[0]\n print(f\"\\n\\n\\n\\n\\n\\n\\n\\n{len(ases_left_saved)} left\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n # If alter is successful (v1 still better than v2), save new peers, customer providers, sim test ases, ases left\n if result[\"v1_hj\"] < result[\"v2_hj\"]:\n print(f\"\\n\\n\\n\\n\\n\\n\\n\\nSUCCESSFULLY REMOVED {len(removal_ases)}\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n count_failures 
= 0\n for table in [\"peers\", \"provider_customers\", \"sim_test_ases\"]:\n db.execute(f\"DROP TABLE IF EXISTS saved_{table}\")\n db.execute(f\"CREATE TABLE saved_{table} AS (SELECT * FROM {table})\")\n ases_left_saved = deepcopy(ases_left)\n else:\n count_failures += 1\n print(f\"\\n\\n\\n\\n\\n\\n\\n\\nFAILED TO REMOVE {len(removal_ases)} {count_failures} times\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n # reset to the old savings\n for table in [\"peers\", \"provider_customers\", \"sim_test_ases\"]:\n db.execute(f\"DROP TABLE IF EXISTS {table}\")\n db.execute(f\"CREATE TABLE {table} AS( SELECT * FROM saved_{table})\")\n ases_left = deepcopy(ases_left_saved)\n import sys\n sys.stdout.flush()\n raise Exception(\"break out of sim\")", "title": "" }, { "docid": "1c5a15835e524b1462c483bf8e88525b", "score": "0.5255492", "text": "def test_update_sequencing_run_contents(self):\n pass", "title": "" }, { "docid": "c2c0fe7965c8e437664d8f59e8d36ced", "score": "0.5246125", "text": "def report_retired(self):\n self.retired = True", "title": "" }, { "docid": "3a42ba994ea66cdf90e1ff683142c1be", "score": "0.5238844", "text": "def post_compare_failed(self, other):\n\n pass", "title": "" }, { "docid": "b391a8085c863a76cc230a55e794b87c", "score": "0.52385443", "text": "def test_rollback_disk_move(self):\n self.run_scenario()", "title": "" }, { "docid": "bad159715ec04bb865c252da69ffd4ef", "score": "0.5238068", "text": "def test_mark_incomplete(self):\n self.solution.mark_complete(5)\n self.solution.mark_incomplete()\n loaded = Solution.objects.get(id=self.solution.id)\n self.assertEqual(loaded.impact, None)\n self.assertFalse(loaded.is_completed)\n self.assertIsNone(loaded.time_completed)", "title": "" }, { "docid": "93925f37cd27e321c334ca24c830b342", "score": "0.5237572", "text": "def pytest_tavern_beta_after_every_response(expected, response) -> None:", "title": "" }, { "docid": "c02550bd8e1514d6016ff113ad601d9e", "score": "0.5230403", "text": "def test_update_status(self):\n self._e._update_status(1)\n\n # Cleanup.\n self._e.reset()\n sql = \"\"\"UPDATE job_item\nSET extract_ts = null\nWHERE id = 1\"\"\"\n self._e.db(sql)", "title": "" }, { "docid": "0dc09bb70a9eec3cf5e368ab7c345916", "score": "0.52292544", "text": "def save_test(self, test):", "title": "" }, { "docid": "a2f7421d154e074cf7ad6f486c854530", "score": "0.52280253", "text": "def test_superset(self):\n installed_eggs = [\"dummy-1.0.0-1.egg\", \"another_dummy-1.0.0-1.egg\"]\n\n with mock_history_get_state_context(installed_eggs):\n enpkg = unconnected_enpkg_factory()\n ret = enpkg.revert_actions(set(installed_eggs[:1]))\n\n self.assertEqual(ret, [(\"remove\", \"another_dummy-1.0.0-1.egg\")])", "title": "" }, { "docid": "9538f9125ba1e357e3fe1fa89f51de96", "score": "0.5204299", "text": "def test_unbookmark_article_that_has_already_been_unbookmarked(self):\n self.add_article()\n self.register_and_login_new_user()\n article = Article.objects.all().first()\n self.client.post(reverse(\"articles:bookmark_article\", kwargs={'slug': article.slug}))\n self.client.delete(reverse(\"articles:bookmark_article\", kwargs={'slug': article.slug}))\n url = reverse(\"articles:bookmark_article\", kwargs={'slug': article.slug})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, 400)\n self.assertIn(response.data['error'], 'Article does not exist in your bookmarks list')", "title": "" }, { "docid": "6f09e330844a90ccf6525814626557bd", "score": "0.5202093", "text": "def callback_populate_bookmarked_tests(self, bm_name):\r\n test_list = 
self.bookmark_dd[bm_name]\r\n self.createbatch_view.insert_tests_to_batch2(test_list)", "title": "" }, { "docid": "711f061b66d931a8361b57b9fec49142", "score": "0.5202073", "text": "def test_duplicate_check_fails(golden_list_importer, logger, db_conn):\n expect_success(golden_list_importer, 20, db_conn, logger)", "title": "" }, { "docid": "2fcb3d7477f390df28aa799b04473ee6", "score": "0.52003413", "text": "def test_persist_beliefs(setup_beliefs):\n sensor = Sensor.query.filter_by(name=\"epex_da\").one_or_none()\n bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor)\n\n # Form new beliefs\n df = bdf.reset_index()\n df[\"belief_time\"] = df[\"belief_time\"] + timedelta(hours=1)\n df[\"event_value\"] = df[\"event_value\"] * 10\n bdf = df.set_index(\n [\"event_start\", \"belief_time\", \"source\", \"cumulative_probability\"]\n )\n\n TimedBelief.add(bdf)\n bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor)\n assert len(bdf) == setup_beliefs * 2", "title": "" }, { "docid": "cad2300f52c64eaeb7d31588becaac13", "score": "0.51993525", "text": "def assert_pristine(self):\n assert not self.dirty, \"Dirty objects in SQL cache: %s\" % self.dirty", "title": "" }, { "docid": "491447cf5aca236fe1d59290fbfc5a16", "score": "0.5197196", "text": "def test_count_only_flag(submissions, award_data1):\n\n Award.objects.filter(pk=award_data1).update(update_date=OLD_DATE)\n original_datetime = Award.objects.get(id=award_data1)\n\n call_command(SCRIPT_NAME, \"--dry-run\")\n call_command(SCRIPT_NAME, \"--all\", \"--dry-run\")\n\n after = Award.objects.get(id=award_data1)\n\n assert original_datetime.update_date == after.update_date, \"Award was incorrectly updated\"", "title": "" }, { "docid": "3779f60a5775f7a7ea1c57f78fbdb5d6", "score": "0.5191985", "text": "def test_is_expired_after_other_process_updates(self) -> None:\n manager = IntegrationManager(IntegrationConfig)\n self.assertFalse(manager.is_expired())\n\n gen_sync = GenerationSynchronizer(manager._gen_sync.cache_key,\n normalize_cache_key=False)\n gen_sync.mark_updated()\n\n self.assertTrue(manager.is_expired())", "title": "" }, { "docid": "383cd369fc70d3b9cf0c683e1447e170", "score": "0.5191644", "text": "def test_restore_specific_backup(self):\n # Given\n self.job.start()\n\n with open(self.test_file, 'a') as f:\n f.write('append')\n self.job.start()\n\n out = destroy_filesystem(self.job.filesystem)\n self.assertEqual(0, out.returncode, msg=out.stderr)\n\n # When\n backups = self.job._backup_db.get_backup_times('inc')\n self.job.restore(backups[0])\n\n # Then\n with open(self.test_file, 'r') as f:\n out = f.read()\n self.assertEqual(self.test_data + 'append', out)", "title": "" }, { "docid": "0040431cfe8734c5d9f73169e7a57472", "score": "0.5190215", "text": "def test_reset_validation(self):\n pass", "title": "" }, { "docid": "5a5cc484c3a9f755bc81d31f7d11cc02", "score": "0.5189242", "text": "def backup(self):", "title": "" } ]
acda800e0f29b50094bc5190cd90f6cc
private method of searching for the target node in the input subtree (backtracking version)
[ { "docid": "ce043f14c226d3dcfba097277b2b", "score": "0.0", "text": "def __treeSearch(self, root: BinaryTreeNode, key: int) -> BinaryTreeNode:\n if root is None or root.val == key: return root\n if root.val < key: return self.__treeSearch(root.right, key)\n else: return self.__treeSearch(root.left, key)", "title": "" } ]
[ { "docid": "09e7622b90f87cdc4caafc95b3d23ebb", "score": "0.6933266", "text": "def breadth_first_search(self, source_node, target_node):\n return False", "title": "" }, { "docid": "99a99728f1faac3dd8cf8e1e6093c62f", "score": "0.68992054", "text": "def recursive_search(node, indents = 0):\n #print node first before recursive call\n print('|' + indents * '--' + works[node])\n\n #base case\n if not links[node]:\n return\n\n #recursively call all source nodes that follow target\n for tail_node in links[node]:\n recursive_search(tail_node, indents + 1)", "title": "" }, { "docid": "7f6b1aed82fdf9bb8e4a778ebec98d18", "score": "0.6748383", "text": "def dfs(start, target):\n print(\"Visiting Node \" + str(start[\"value\"]))\n if start[\"value\"] == target:\n # We have found the goal node we we're searching for\n print(\"Found the node we're looking for!\")\n return start\n\n # Recurse with all children\n for i in range(len(start[\"children\"])):\n result = dfs(start[\"children\"][i], target)\n if result is not None:\n # We've found the goal node while going down that child\n return result\n\n # We've gone through all children and not found the goal node\n print(\"Went through all children of \" + str(start[\"value\"]) + \", returning to it's parent.\")\n return None", "title": "" }, { "docid": "da38b0f4ea782bc01d523039a0dff591", "score": "0.656872", "text": "def depth_first_search(self, source_node, target_node):\n if source_node not in self.nodes:\n _log_print(\"ERROR: Graph.depth_first_search -- invalid source (%s)\" % source_node)\n return\n elif target_node not in self.nodes:\n _log_print(\"ERROR: Graph.depth_first_search -- invalid target (%s)\" % target_node)\n return\n\n # starting max capacity (should be restricted as graph is traveresed)\n capacity = sys.maxint\n # list of found nodes (which we should attempt to visit latter)\n node_list = []\n node_list.append(source_node)\n # list of capacities for the found nodes\n capacity_list = []\n capacity_list.append(capacity)\n # used to get the search path\n parent_list = []\n # list of already visited nodes\n visited = {}\n\n target_found = False\n while len(node_list):\n # TODO: consider combining these 3 lists into one (list of named tuples)\n current_node = node_list.pop()\n current_capacity = capacity_list.pop()\n capacity = min(capacity, current_capacity)\n\n # we have not previously seen this node\n if current_node not in visited:\n visited[current_node] = 1\n # get the outbound edges from the current node\n # edges = self.nodes[current_node].edges\n residuals = self.nodes[current_node].residuals\n for i, edge in enumerate(residuals):\n if edge == target_node:\n # add final items to the search path\n parent_list.append(current_node)\n parent_list.append(target_node)\n # prepare to exit the outer while loop\n node_list = []\n # prepare to enter post processing step\n target_found = True\n # exit the inner for loop\n break\n # check that the edge has capacity\n if residuals[edge] > 0:\n # add to the list the while loop is pulling from\n node_list.append(edge)\n capacity_list.append(residuals[edge])\n parent_list.append(current_node)\n\n search_path = []\n if target_found:\n node_last = 0\n for i in range((len(parent_list) - 1), -1, -1):\n if parent_list[i] != node_last:\n search_path.append(parent_list[i])\n node_last = parent_list[i]\n search_path.reverse()\n\n for sp in search_path:\n _debug_print(\"%s ==> \" % sp, end=\"\")\n _debug_print(\" (%s)\" % capacity)\n else:\n capacity = 0\n\n return capacity, search_path", "title": "" }, { 
"docid": "74aa6bd9af7f19e3b636b681e29cf12f", "score": "0.65590024", "text": "def search(self, query):\n \n # When we reach a leaf it's either equal to the target or\n # equal to the place the target would go.\n if self.is_leaf():\n return self\n\n # Otherwise the current node defines a splitting line\n # and it's got to have at least one child\n if query[self.axis] <= self.value:\n \n if self.left_child:\n return self.left_child.search(query)\n else:\n # If no left sub-tree, try the right sub-tree\n return self.right_child.search(query)\n \n else:\n \n if self.right_child:\n return self.right_child.search(query)\n else:\n # If no right sub-tree, try the left sub-tree\n return self.left_child.search(query)", "title": "" }, { "docid": "098edbfc0d6a4c9f72dab132e556258f", "score": "0.65012383", "text": "def tree_search(problem, frontier):\n assert isinstance(problem, Problem)\n\n node = Node(problem.initial)\n\n # Debug\n print node\n print dir(node)\n print node.state\n print type(node.state)\n # end Debug\n\n if problem.goal_test(node.state):\n return node\n frontier.append(node)\n\n while frontier:\n # print frontier\n currentNode = frontier.pop()\n # print \"popped: \", currentNode\n if problem.goal_test(currentNode.state):\n return currentNode\n for childNode in currentNode.expand(problem):\n if childNode not in frontier:\n # print \"push: \", childNode\n frontier.append(childNode)\n return None", "title": "" }, { "docid": "01b9675416f9909d11581e2a8fce67fb", "score": "0.64627814", "text": "def search(self, elem):\n curr = self.start\n search_stack = []\n node = Node(elem)\n while curr is not None:\n search_stack.append(curr)\n right = curr.right\n if right is None:\n curr = curr.down\n continue\n if node < right:\n curr = curr.down\n continue\n if node > right:\n curr = right\n continue\n if node == right:\n # We found the top-most node with the correct value!\n # Goodrich's implementation of a skip list requires\n # we continue moving down and return the base node, though.\n curr = right\n while curr.down is not None:\n search_stack.append(curr)\n curr = curr.down\n # We exited the loop before we captured the bottom-most element\n search_stack.append(curr)\n return search_stack\n return search_stack", "title": "" }, { "docid": "380f36d1cbea6401cc5a1ce4eedb892f", "score": "0.63747", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # nodes to be processed, format: (location, action, action cost)\n process_stack = util.Stack()\n \n # nodes visited, store location -(x,y)\n node_visited = []\n # store action - 'South'/'West'/'North'/'East'\n action_taken = []\n # nodes have been visited but not on the return path, store location -(x,y)\n node_processed = []\n # nodes still have 2 or more successors that haven't been visited\n last_branching_node_arr = []\n\n\n #get start node and push it onto the stack\n start_node = problem.getStartState()\n process_stack.push(start_node)\n \n #print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n end_node = False\n #if the successor fringe node is not the goal state, keep searching\n while not process_stack.isEmpty():\n \n current_node = process_stack.pop()\n # add direction to start node\n if len(current_node) == 2:\n current_node = (current_node,'')\n if current_node[0] in node_visited or current_node in node_processed:\n continue\n if problem.isGoalState(current_node[0]):\n print(\"success\")\n #print len(node_visited)\n #print len(action_taken)\n action_taken.append(current_node[1])\n # exclude 
action_taken[0] for the start node\n action_taken = action_taken[1:]\n return action_taken\n\n\n ##This part will take care of the end note situation\n if end_node: \n #print(current_node[0])\n \n last_branch_node = last_branching_node_arr.pop()\n distance = abs(last_branch_node[0]-current_node[0][0])+abs(last_branch_node[1]-current_node[0][1])\n #search\n while distance >1:\n last_branch_node = last_branching_node_arr.pop()\n distance = abs(last_branch_node[0]-current_node[0][0])+abs(last_branch_node[1]-current_node[0][1])\n #print last_branch_node\n\n node_index = node_visited.index(last_branch_node)\n node_processed.extend(node_visited[node_index+1:])\n node_visited = node_visited[:node_index+1]\n action_taken = action_taken[:node_index+1]\n \n end_node = False\n\n \n node_visited.append(current_node[0])\n action_taken.append(current_node[1])\n\n \n successors = problem.getSuccessors(current_node[0])\n target = len(successors)\n for side_node in successors:\n if side_node[0] in node_visited or side_node[0] in node_processed:\n target -=1\n\n if target == 0 :\n #print \"This is end node*******************************\"\n end_node = True\n # add last_branching_node for (successors haven't been visited -1) to retreat\n elif target ==2:\n last_branching_node_arr.append(current_node[0])\n elif target == 3:\n last_branching_node_arr.append(current_node[0])\n last_branching_node_arr.append(current_node[0])\n\n \n for fringe_node in successors:\n #push the node to the to-visit stack\n process_stack.push(fringe_node)\n \n #print \"fringe node is:\" ,fringe_node\n \n #print(\"here wait a sec\")\n \n print(\"Fail\")\n return action_taken[1:]\n util.raiseNotDefined()", "title": "" }, { "docid": "09b7dd7942821d9e2801fa2b1cd6ae20", "score": "0.6314214", "text": "def bfs_find_target_iterative_all_paths(graph, cur, target):\n\tfrontier = deque()\n\tfrontier.append((cur, [cur]))\n\tresult = []\n\twhile frontier:\n\t\texpand, path = frontier.popleft()\n\n\t\t# if found the target:\n\t\tif expand == target:\n\t\t\tresult.append(path)\n\n\t\t# search in frontier\n\t\tfor neighbor in graph[expand]:\n\n\t\t\t# If not exist in the current path, other path can still use it though.\n\t\t\tif neighbor not in path:\n\t\t\t\tfrontier.append((neighbor, path + [neighbor]))\n\n\treturn result", "title": "" }, { "docid": "4e796e7d7cb71c1299e8fc1e5dd1a384", "score": "0.6300135", "text": "def find(self, val):\n if self._root is None:\n return False\n target = val\n found = False\n def _check(number):\n nonlocal target, found\n if number == target:\n found = True\n self.bfs(_check)\n if found:\n print('YES')\n return True\n print(\"NO\")\n return False", "title": "" }, { "docid": "440855a45072568752e1c66fe214d5ba", "score": "0.62243164", "text": "def __contains__(self, target):\n node = self.root\n while node:\n if target == node.value:\n return True\n if target < node.value:\n node = node.left\n else:\n node = node.right\n\n return False", "title": "" }, { "docid": "7a5eb5303d528e116c0a351eaffc8fd6", "score": "0.62022084", "text": "def contains(self, target):\n print(f'00LOOKING for value {target}')\n if self.value == target:\n # Found the target value\n print('found the target')\n return True\n elif self.value > target:\n # Go left if the target is less\n print('left node traversal...')\n if self.left is None:\n # First see if the left node is empty; if not, the target doesn't exist\n print('target does NOT exist')\n return False\n else:\n # Verified the left node exists; now can call this method on it\n return 
self.left.contains(target)\n else:\n # Go right when the target is greater than or equal to the value of the current node\n print('right node traversal...')\n if self.right is None:\n # First see if the right node is empty; if not, the target doesn't exist\n print('target does NOT exist')\n return False\n else:\n # Verified the right node exists; now can call this method on it\n return self.right.contains(target)", "title": "" }, { "docid": "792ec1615f657002351fbadebc7960e2", "score": "0.6189636", "text": "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # nodes to be processed, format: (location, action, action cost)\n process_queue = util.Queue()\n\n # nodes visited, store location -(x,y)\n node_visited = []\n\n # get start node and push it onto the stack\n start_node = problem.getStartState()\n # create a list[] for each path\n process_queue.push((start_node,[]))\n\n # if the successor fringe node is not the goal state, keep searching\n while not process_queue.isEmpty():\n\n current_node = process_queue.pop()\n if current_node[0] in node_visited:\n continue\n if problem.isGoalState(current_node[0]):\n print(\"success\")\n # action is the path saved in current node\n return current_node[1]\n\n node_visited.append(current_node[0])\n successors = problem.getSuccessors(current_node[0])\n # restore the path from the start to current node\n parent_path = current_node[1]\n for fringe_node in successors:\n location = fringe_node[0]\n # connect the path from current node to each fringe node\n fringe_path = parent_path + [fringe_node[1]]\n # push the node to the to-visit stack\n process_queue.push((location, fringe_path))\n\n print(\"Fail\")\n util.raiseNotDefined()", "title": "" }, { "docid": "9292ac63b855ea2e37f150b8f81f2dd2", "score": "0.61696595", "text": "def recursive_binary_search(lyst, target):\n if all(isinstance(x, int) for x in lyst) and isinstance(target, int):\n low_index = 0\n high_index = len(lyst) - 1\n return recursive_binary_search_helper(lyst, low_index, high_index, target)\n raise ValueError(\"List and target must only be integers\")", "title": "" }, { "docid": "13e0552facdb17fadf44f9d75d9662f9", "score": "0.6169128", "text": "def search(self, initial_state):\n\n\n \tnode = (0, None, initial_state, None) # node = (depth, parent, state, action)\n\n \tself.open_list.append(node) # add node onto the list\n\n \theapq.heapify(self.open_list) # heapify the list in order to have the smallest depth first in the list\n\n \tnode = heapq.heappop(self.open_list) # assign node to be the top node at the top of the heap\n\n \twhile(not self.problem.isgoal(node[2])): # while we haven't found the solution\n \t\ttotal_actions = self.problem.actions(node[2]) # get all possible actions from node\n \t\tfor i in total_actions: # for each action\n \t\t\tchild_i = self.child_node(node, i) # get the child node for this action\n \t\t\theapq.heappush(self.open_list, child_i) # push the child onto the heap\n \t\tnode = heapq.heappop(self.open_list) # node is reassigned to be the node at the top of the heap\n\n \treturn node # return node that is the goal found in while loop", "title": "" }, { "docid": "cef3e231a78ca8f861d4b007851c5b54", "score": "0.6165643", "text": "def depthFirstSearch_test(self):\n\n \"\"\"start=[0,250]\n end=[250,400]\"\"\"\n ans = []\n\n stack = []\n visited = []\n #print(self.start)\n startNode = Node(self.start,self.street,self.direction)\n stack.append(startNode.positon)\n #start.set_visited()\n\n\n #print(startNode.positon)\n print(\"A\")\n print(visited)\n 
print(stack)\n pos = stack[len(stack)-1]\n node = Node(pos,self.street,self.direction)\n # *********NODE A**********\n if node.positon == self.end:\n # return stack\n ans.append(stack)\n e = stack.pop()\n visited.append(e)\n else:\n child = self.get_unvisited_child(node.positon, visited) # list_remove_list(node.get_lower_node(),visited)\n if child == []:\n e = stack.pop()\n visited.append(e)\n last_node = Node(e, self.street, self.direction)\n lower_nodes = last_node.get_lower_node()\n visited = list_remove_list(visited, lower_nodes)\n else:\n child_node = Node(child, self.street, self.direction)\n print(child)\n #print(child_node.positon)\n stack.append(child_node.positon)\n\n print(\"B\")\n print(visited)\n print(stack)\n pos = stack[len(stack) - 1]\n node = Node(pos, self.street, self.direction)\n # *********NODE B**********\n if node.positon == self.end:\n # return stack\n ans.append(stack)\n e = stack.pop()\n visited.append(e)\n else:\n child = self.get_unvisited_child(node.positon, visited) # list_remove_list(node.get_lower_node(),visited)\n if child == []:\n #print(stack)\n e = stack.pop()\n #print(e)\n visited.append(e)\n last_node = Node(e, self.street, self.direction)\n lower_nodes = last_node.get_lower_node()\n visited = list_remove_list(visited, lower_nodes)\n else:\n child_node = Node(child, self.street, self.direction)\n stack.append(child_node.positon)\n\n print(\"C\")\n print(visited)\n print(stack)\n pos = stack[len(stack) - 1]\n node = Node(pos, self.street, self.direction)\n # *********NODE C**********\n if node.positon == self.end:\n # return stack\n print(\"got an answer:\")\n print(stack)\n ans.append(stack)\n e = stack.pop()\n visited.append(e)\n else:\n child = self.get_unvisited_child(node.positon, visited) # list_remove_list(node.get_lower_node(),visited)\n if child == []:\n e = stack.pop()\n visited.append(e)\n last_node = Node(e, self.street, self.direction)\n lower_nodes = last_node.get_lower_node()\n visited = list_remove_list(visited, lower_nodes)\n else:\n child_node = Node(child, self.street, self.direction)\n stack.append(child_node.positon)\n\n print(\"E\")\n print(stack)\n print(visited)\n pos = stack[len(stack) - 1]\n node = Node(pos, self.street, self.direction)\n # *********NODE E**********\n if node.positon == self.end:\n # return stack\n ans.append(stack)\n e = stack.pop()\n visited.append(e)\n else:\n child = self.get_unvisited_child(node.positon, visited) # list_remove_list(node.get_lower_node(),visited)\n if child == []:\n e = stack.pop()\n visited.append(e)\n last_node = Node(e, self.street, self.direction)\n lower_nodes = last_node.get_lower_node()\n print(\"lower_nodes:\")\n print(lower_nodes)\n visited = list_remove_list(visited, lower_nodes)\n else:\n child_node = Node(child, self.street, self.direction)\n stack.append(child_node.positon)\n\n print(stack)\n print(visited)\n print(\"After finished all nodes:\")\n\n pos = stack[len(stack)-1] #get the latest node in STACK\n node = Node(pos,self.street,self.direction)\n print(\"current Node:\")\n print(node.positon)\n if node.positon == self.end:\n #return stack\n ans.append(stack)\n e = stack.pop()\n visited.append(e)\n else:\n print(visited)\n print(node.positon)\n #TODO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #Problem here: \"visited\" includes Node C, but the children of E is empty\n #Solution: changed the get_unvisited_child, when children is empty, just return directly []\n child = self.get_unvisited_child(node.positon,visited) #list_remove_list(node.get_lower_node(),visited)\n if 
child == []:\n e = stack.pop()\n visited.append(e)\n last_node = Node(e,self.street,self.direction)\n lower_nodes = last_node.get_lower_node()\n visited = list_remove_list(visited,lower_nodes)\n else:\n child_node = Node(child,self.street,self.direction)\n stack.append(child_node.positon)\n #node.set_visited()\n\n return ans", "title": "" }, { "docid": "41258cd34c8393083bdd992eaa817920", "score": "0.6161125", "text": "def search(self, initial_state):\n\n\t\th = self.heuristic(initial_state) # get heuristic cost to get to the solution from the initial states\n\t\tnode = (h, h, 0, None, initial_state, None) # node = (f, (h)euristic, depth, parent, state, action)\n\t\tself.open_list.append(node) # add node onto a list\n\t\theapq.heapify(self.open_list) # heapify the list of nodes so least heuristic cost is at the top\n\n\t\twhile (len(self.open_list) > 0): # while there is something in the open list\n\t\t\tnode = heapq.heappop(self.open_list) # assign node to be the top node in the list\n\t\t\tif self.problem.isgoal(node[4]): # if goal/solution is found\n\t\t\t\treturn node # return that solution\n\n\t\t\ttotal_actions = self.problem.actions(node[4]) # get all possible actions from node\n\n\t\t\tfor action in total_actions: # for every action\n\t\t\t\tchild_i = self.child_node(node, action) # get child of that action\n\n\t\t\t\tif (not self.is_in_list(self.closed_list, child_i)): # if the child has already been visited/looked at\n\t\t\t\t\tself.closed_list.append(node) # add node that has already been checked onto the closed list\n\t\t\t\t\theapq.heappush(self.open_list, child_i) # push the child onto the heap\n\n\t\treturn None", "title": "" }, { "docid": "dec1e8686c17304bd9df4509ceac2bdf", "score": "0.6139453", "text": "def __contains__(self, target):\n node = self.root\n while node:\n if target < node.value :\n node = node.left\n elif target > node.value:\n node = node.right\n else:\n return True\n \n return False", "title": "" }, { "docid": "6eff6646c938caa1516721c630eab65f", "score": "0.61380714", "text": "def tree_search(t, pred):\n L = Link.empty\n def subtree_search(subtr):\n nonlocal L # Hint: nonlocal may be helpful in this line!\n print(\"Debug: \", subtr.label)\n if pred(subtr.label):\n L = Link(subtr.label, L)\n for branch in subtr.branches:\n subtree_search(branch)\n subtree_search(t)\n return L", "title": "" }, { "docid": "5de72ca7c69bff4d108f9cb9c2b1236a", "score": "0.6101697", "text": "def breadth_first_tree_search(problem):\r\n\r\n frontier = deque([Node(problem.initial)]) # FIFO queue\r\n\r\n while frontier:\r\n #print(frontier)\r\n node = frontier.popleft()\r\n if problem.goal_test(node.state):\r\n return node\r\n frontier.extend(node.expand(problem))\r\n return None", "title": "" }, { "docid": "558d4ac107f4ec30abdb607c5ee19672", "score": "0.6098119", "text": "def scan_vote_history(self, target_votes):\n # Basically the idea here is that we are going to keep\n # spinning backwards over the path until we find a node \n # that seems good. 
When we find it, we set selected, the \n # vote, set selected to true and break\n\n found = False\n # self.vote_stack = (node, votes, [(child, votes), (child2, votes2), ...]\n\n # This is the \"ephemeral\" blacklist, so we don't pick something we already have\n # right here\n last_choice = self.vote_stack[-1][0] #current_node\n last_choice_list = [last_choice]\n\n for index, choice in enumerate(reversed(self.vote_stack)):\n # Check each child\n for nb in choice[2]:\n if nb[0] in last_choice_list:\n continue\n # Does it have the number of votes we want?\n if nb[1] == target_votes:\n # Check to see if its in the list already\n if nb[0] in self.current_path:\n continue\n\n if nb[0] in self.old_branches:\n if target_votes >= self.old_branches[nb[0]]:\n #print \"Skipping old branch (target:\", target_votes, \")\"\n continue\n\n found = True\n selected = nb[0]\n prev_votes = nb[1]\n break\n if found == True:\n break\n last_choice_list.append(choice[0]) # Ie the one we took to get to the deeper branch\n \n if found == False:\n # We didn't find anything\n return None, None, None\n\n return selected, prev_votes, index", "title": "" }, { "docid": "e22d671bc602268c4a6df8d71774f46a", "score": "0.6093638", "text": "def _search(self, node, vector):\n while True:\n #x = round(vector.get(node.feature, 0.0), 1)\n x = node.feature in vector\n b = False\n for n in node.children:\n if n.value == x: \n b = True\n break\n if b is False:\n return node.type\n node = n", "title": "" }, { "docid": "49dc7fb59262fbdbdf13d4efffc8dd4a", "score": "0.60898584", "text": "def search_bfs(self, start, to_find):\n if len(self.graph) == 0 or start not in self.graph:\n return False\n\n visited = {}\n adjacent_node_queue = [start]\n\n while len(adjacent_node_queue) > 0:\n current = adjacent_node_queue.pop()\n if current == to_find:\n return True\n else:\n visited[current] = True\n for sibling in self.graph.get(current):\n if sibling not in visited:\n adjacent_node_queue.append(sibling)\n\n return False", "title": "" }, { "docid": "ecbbcae30062f96cde6a14a062b08008", "score": "0.60672563", "text": "def graphSearch(problem, frontier):\n explored = set()\n frontier.push(Node(problem.startingState()))\n while frontier.isEmpty() is False:\n leaf = frontier.pop()\n if problem.isGoal(leaf.state):\n return getPath(leaf, problem)\n if leaf.state not in explored:\n explored.add(leaf.state)\n for child in expand(leaf, problem):\n frontier.push(child)\n return None", "title": "" }, { "docid": "f174c3af6446d7ee40546eb30e02b509", "score": "0.6052888", "text": "def depthFirstSearch(problem):\n fringe = util.Stack()\n def add_to_fringe_fn(fringe, node, cost):\n fringe.push(node)\n \n return genericSearch(problem, fringe, add_to_fringe_fn)", "title": "" }, { "docid": "7a2d19050d8ae8e9e4afaffd096f130f", "score": "0.6049934", "text": "def search(grid, node, goal, heuristic, unexplored, visited, greedy, path):\n visited.append(node)\n\n if node.value == goal:\n return set_path(node, path), len(visited)\n else:\n # Add valid neighboring nodes to unexplored queue\n expand_node(grid, node, goal, heuristic, visited, unexplored, greedy)\n\n if not unexplored:\n return None, len(visited)\n else:\n # Search through next node in queue\n return search(grid, heapq.heappop(unexplored), goal, heuristic, unexplored, visited, greedy, path)", "title": "" }, { "docid": "6f4a7b0a526f83ce0d8834fd0981f93f", "score": "0.6046138", "text": "def graph_search(problem, frontier):\n assert isinstance(problem, Problem)\n frontier.append(Node(problem.initial))\n explored = 
set() # initial empty set of explored states\n while frontier:\n leafNode = frontier.pop()\n if problem.goal_test(leafNode.state):\n return leafNode\n explored.add(leafNode.state)\n # Python note: next line uses of a generator\n frontier.extend(child for child in leafNode.expand(problem)\n if child.state not in explored\n and child not in frontier)\n return None", "title": "" }, { "docid": "6efdd7c81be79d4184ecc5eecabece06", "score": "0.60271233", "text": "def depth_first_tree_search(problem):\r\n\r\n frontier = [Node(problem.initial)] # Stack\r\n\r\n while frontier:\r\n #print(frontier)\r\n node = frontier.pop()\r\n if problem.goal_test(node.state):\r\n return node\r\n frontier.extend(node.expand(problem))\r\n return None", "title": "" }, { "docid": "d0c05c018cce586a84e517c805103205", "score": "0.6009196", "text": "def search(self, nums, target):\r\n l,r=0,len(nums)-1\r\n while l<=r:\r\n mid=(l+r)/2\r\n if nums[mid]==target:\r\n return mid\r\n\r\n # if left is sorted, then target might be there if inside, or target in other side \r\n if nums[l]<=nums[mid]:\r\n if target>=nums[l] and target<=nums[mid]:\r\n r=mid-1\r\n else:\r\n l=mid+1\r\n\r\n else:\r\n # if right is sorted, then target might be there if inside or target in other side\r\n if target>nums[mid] and target<=nums[r]:\r\n l=mid+1\r\n else:\r\n r=mid-1\r\n return -1", "title": "" }, { "docid": "7f60a4297b36075cddb9c75c3cea0bc1", "score": "0.5978234", "text": "def search_range(nums: List[int], target: int) -> List[int]:\n if not nums:\n return [-1, -1]\n #\n size = len(nums)\n if size == 1:\n return [0, 0] if nums[0] == target else [-1, -1]\n #\n res = bisearch(nums, target, 0, size - 1)\n return [-1, -1] if res is None else res", "title": "" }, { "docid": "9859845215887303570197a0336acc41", "score": "0.59645736", "text": "def search(self, e) -> Union[(BSTNode, None)]:\n if self.root:\n if e:\n pass\n return self.root.search(e)\n else:\n return", "title": "" }, { "docid": "751495152cdf72e85421b7c8e00bf7ff", "score": "0.5955491", "text": "def bfs(self, queue, target, targetx,\n targety): # finds BFS path to the finish. if there is no path, will return nothing\n\n '''\n 1. So we have a parent matrix\n 2. This records the parent\n 3. 
We have a dictionary of cell: parents'''\n\n thisset = {(0, 0)}\n traceSet = {(0, 0): None}\n\n flag = False # variable to see if it is possible to reach the goal\n while queue:\n fringe = queue.pop(0) # gets 0, 0 first\n adjs = self.getAdj(fringe[0], fringe[1])\n\n if self.map1[fringe[0]][fringe[1]] == 2:\n print(\"Our attempt has started\")\n\n if self.map1[fringe[0]][fringe[1]] == target:\n print(\"Goal reached\")\n print(\"This is how you go about it\")\n # print(traceSet)\n ans = self.trace(traceSet, targetx, targety)\n path = self.savePath(ans)\n flag = True\n # print(ans.pop())\n break\n\n if self.map1[fringe[0]][fringe[1]] == 0 or self.map1[fringe[0]][fringe[1]] == 3:\n continue\n\n for i in range(len(adjs)):\n if self.legal(adjs[i][0], adjs[i][1]):\n if adjs[i] in thisset:\n continue\n \n thisset.add(adjs[i])\n traceSet[adjs[i]] = fringe\n queue.append(adjs[i])\n if flag is False:\n print(\"No way to goal\")\n return []\n return path", "title": "" }, { "docid": "d8eadf7d32d1f133d1a27ba97183602b", "score": "0.59412175", "text": "def breadth_first_tree_search(problem):\n\n frontier = FIFOQueue()\n frontier.append(Node(problem.initial))\n\n while frontier:\n currentNode = frontier.pop()\n if problem.goal_test(currentNode.state):\n return currentNode\n\n frontier.extend(child for child in currentNode.expand(problem) if child not in frontier)\n\n return None", "title": "" }, { "docid": "f2f0ec4f41d288c9fc81ef67f82a8b4f", "score": "0.5937387", "text": "def find_path_to(self, target):\n if not isinstance(target, re._pattern_type):\n target = re.compile(r\".*?{}.*?\".format(re.escape(str(target))))\n\n def _find_in_tree(t, p=None):\n if p is None:\n p = []\n for item in t:\n if isinstance(item, tuple):\n if target.match(item[0]) is None:\n subtree = _find_in_tree(item[1], p + [item[0]])\n if subtree is not None:\n return subtree\n else:\n return p + [item[0]]\n else:\n if target.match(item) is not None:\n return p + [item]\n else:\n return None\n\n result = _find_in_tree(self.read_contents())\n if result is None:\n raise NameError(\"{} not found in tree\".format(target.pattern))\n else:\n return result", "title": "" }, { "docid": "be7ffdb501f6c9f66e1b480fdd9853fb", "score": "0.59361", "text": "def search(self):\n #get the initial state\n initialState = State()\n \n #create root node\n rootNode = Node(initialState)\n \n #perform search from root node\n self.DFS(rootNode)\n \n rootNode.printTree()", "title": "" }, { "docid": "5455b141d0764fac259cde7cfbf85d50", "score": "0.5928797", "text": "def FindPath(self):\n observed = set()\n queue = [[self._map[self._source]]]\n while queue:\n # Get the current path. 
This pops from the beginning, which gives priority\n # to the 'closest' matches.\n path = queue.pop(0)\n\n # Get the last node from the current path.\n node = path[-1]\n if node in observed:\n continue\n observed.add(node)\n\n # Return if we found a match.\n if node.word == self._target:\n return path\n\n # Otherwise, add all of the potential candidates for this node.\n for matches in node.IterMatches():\n for match in matches:\n new_path = list(path)\n new_path.append(self._map[match])\n queue.append(new_path)", "title": "" }, { "docid": "22036938fda308134f9e5d8953cd6423", "score": "0.59251606", "text": "def unexplored_search(self, starting_vertex):\n # Create an empty queue and enqueue A PATH TO the starting vertex ID\n # Create a Set to store visited vertices\n # While the queue is not empty...\n # Dequeue the first PATH\n # Grab the last vertex from the PATH\n # If that vertex has not been visited...\n # CHECK IF IT'S THE TARGET\n # IF SO, RETURN PATH\n # Mark it as visited...\n # Then add A PATH TO its neighbors to the back of the queue\n # COPY THE PATH\n # APPEND THE NEIGHOR TO THE BACK\n \n # Create an empty queue and enqueue the starting vertex ID\n q = Queue()\n q.enqueue([starting_vertex])\n # Create an empty Set to store visited vertices\n visited = set()\n # While the queue is not empty...\n while q.size() > 0:\n # Dequeue the first vertex\n v = q.dequeue()\n # If that vertex has not been visited...\n if v is not None:\n if v[-1] not in visited:\n # Mark it as visited\n if '?' in self.vertices[v[-1]][\"exits\"].values():\n return v\n visited.add(v[-1])\n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v[-1]][\"exits\"].values():\n z = v[:]\n z.append(neighbor)\n q.enqueue(z)\n return None", "title": "" }, { "docid": "19ff57659f9b05bc5df04f0e7b5b06be", "score": "0.5912208", "text": "def search(problem, fringe, calc_heuristic=None, heuristic=None):\n start_state = problem.get_start_state()\n if isinstance(fringe, util.Stack) or isinstance(fringe, util.Queue):\n fringe.push(Node(start_state))\n else:\n fringe.push(Node(start_state), 0)\n visited = []\n step = 0\n while not fringe.empty():\n \"*** YOUR CODE HERE ***\"\n # TODO search\n node = fringe.pop()\n if problem.is_goal_state(node.state):\n return node.path, step\n visited.append(node.state)\n for item in problem.get_successor(node.state):\n curr = Node(item[0], [])\n curr.path = node.path.copy()\n curr.path.append(item[1])\n step = step + 1\n if curr.state not in visited:\n visited.append(curr.state)\n fringe.update(curr, ucs_compute_node_cost(problem, None, curr, None))\n \"*** END YOUR CODE HERE ***\"\n return [] # no paproblem.is_goal_state(problem.get_start_state()):th is found", "title": "" }, { "docid": "f6a3e06099045b0ac2457c393fa07ad8", "score": "0.5903413", "text": "def search(self, find_val):\n # Your code goes here\n return self.preorder_search(self.root,find_val)", "title": "" }, { "docid": "c585f51b261f4f774ff57d3de2a0720a", "score": "0.59009194", "text": "def find(queue, targetNode):\n x = targetNode.x\n y = targetNode.y\n for i in range(0,len(queue)):\n x2 = queue[i].x\n y2 = queue[i].y\n if(x == x2 and y == y2):\n return i\n return -1", "title": "" }, { "docid": "13bec792bc6ff150603d05ce9d7a5dc0", "score": "0.5896497", "text": "def depth_first_search(problem):\n \"\"\"\n start is the start state from problem, exploredState is an array of the states being explored\n fringe is stack in depth first search, fringe adds the tuple of the start state and an array\n \"\"\"\n 
start = problem.get_start_state()\n exploredState = []\n exploredState.append(start)\n fringe = util.Stack()\n fringeTuple = (start, [])\n fringe.push(fringeTuple)\n \"\"\"\n Main Loop checks if fringe is not empty then checks if goal state, if goal state then actions\n are returned otherwise get the successors of the current state then check if the coordinates are\n within the exploredState and then add the coordinates to the exploredState array and add the actions\n to get to that coordinate state\n \"\"\"\n while not fringe.is_empty():\n state, actions = fringe.pop()\n if problem.is_goal_state(state):\n return actions\n exploredState.append(state)\n successor = problem.get_successors(state)\n for i in successor:\n coordinates = i[0]\n if not coordinates in exploredState:\n direction = i[1]\n fringe.push((coordinates, actions + [direction]))\n return actions", "title": "" }, { "docid": "acdc7f493cca1b29e5cc091aee9b63db", "score": "0.58857334", "text": "def depthFirstSearch(problem): \n return genericSearch(problem, util.Stack())\n \n util.raiseNotDefined()", "title": "" }, { "docid": "bf86af0bc6c5092211d6f6a7a501edd4", "score": "0.58846474", "text": "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Variables used in the code:\n visited -> It stores a set of nodes which have been visited\n queue -> A queue is used to get the nodes which are to be explored\n node -> Every node popped from the stack is stored here to be access anywhere in the code.\n successor -> Each node's successor is stored in this\n \n FUNCTION:\n Every node is popped out from a queue and checked if its a goal state. If not, its successors are found and pushed over queue. While doing \t this we add the action to reach the parent to that of the child. Thus, wherever we find a goal state we will have a complete path to reach \t that goal node. 
\n \"\"\"\n visited = [];\n queue = util.Queue();\n queue.push((problem.getStartState(),[],0));\n \n while not queue.isEmpty():\n\tnode = queue.pop();\n if(problem.isGoalState(node[0])): \n\t return node[1];\n\tif(node[0] not in visited):\n\t visited.append(node[0]);\n\t successors = problem.getSuccessors(node[0]);\n\t for successor in successors:\n\t\tqueue.push((successor[0],node[1]+[successor[1]],successor[2]));\n return None\n util.raiseNotDefined()", "title": "" }, { "docid": "95306b8573665ac54dbe3af4f7335262", "score": "0.5882678", "text": "def depth_first_tree_search(problem):\n\n frontier = [Node(problem.initial)] # Stack\n\n while frontier:\n node = frontier.pop()\n if problem.goal_test(node.state):\n return node\n frontier.extend(node.expand(problem))\n return None", "title": "" }, { "docid": "7104f2cbff3885c0576ee2d41bccf4fe", "score": "0.5868123", "text": "def dfs_recursive(self, start, target=None, visited=[]):\n visited.append(start)\n if start.label == target:\n return visited\n for neighbor in self.vertices[start]:\n if neighbor not in visited:\n visited = self.dfs_recursive(neighbor, target=target, visited=visited)\n\n return visited", "title": "" }, { "docid": "bcf79d0fd973f0a023c3f2e56dc27333", "score": "0.5867981", "text": "def BFS(self, start, target):\n start = self.adj_list[start]\n frontier_queue = []\n bfs_path = []\n frontier_queue.append(start)\n #bfs_path.append(start)\n the_path = self.Path([])\n discovered = {}\n #create dictionary\n while frontier_queue:\n currentv = frontier_queue.pop()\n if currentv.ID == target:\n #walkpath now\n if target in discovered:\n bfs_path.append(target)\n walk = target\n while walk != start.ID:\n edge = discovered[walk]\n bfs_path.append(edge)\n walk = edge\n bfs_path.reverse()\n for item in bfs_path:\n the_path.add_vertex(item)\n return the_path\n #each vertex adjV adjacent to currentV\n edge_list = currentv.get_edges()\n for item in edge_list:\n adj_ID = int(item.destination)\n adj_v = self.adj_list[adj_ID]\n if adj_v.ID not in discovered:\n frontier_queue.append(adj_v)\n #bfs_path.append(adj_v)\n discovered[adj_v.ID] = currentv.ID\n no_find = self.Path([])\n return no_find", "title": "" }, { "docid": "4e5bfaf8a90e4822d2dbe3e27c69e5c2", "score": "0.58576506", "text": "def search(self, time_budget):\n startTime = clock()\n num_rollouts = 0\n\n # do until we exceed our time budget\n while (clock() - startTime < time_budget):\n node, state = self.select_node()\n turn = state.turn()\n threads = []\n for i in range(self.threads):\n threads.append(LeafThread(state))\n threads[i].start()\n for t in threads:\n t.join()\n\n outcome = [t.get_results() for t in threads]\n self.backup(node, turn, outcome)\n num_rollouts += 1\n run_time = clock() - startTime\n node_count = self.tree_size()\n self.run_time = run_time\n self.node_count = node_count\n self.num_rollouts = num_rollouts", "title": "" }, { "docid": "59146ef8e823a5fadf443d21e7511671", "score": "0.58553916", "text": "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"", "title": "" }, { "docid": "7efa4f680b3d676dfc678d8a38714040", "score": "0.58543795", "text": "def depth_first_search(problem):\n return search(problem, util.Stack())", "title": "" }, { "docid": "4c2e32884d8bb5f0eb46b39f52eb6176", "score": "0.5844373", "text": "def tree_search(self):\n current_node = self.root\n child_nodes = current_node.get_child_nodes()\n while not self.state_manager.is_game_over(current_node.get_state()) and len(child_nodes) > 0:\n child_values = {}\n for child in child_nodes: # Update 
value of each child node before using tree policy to choose the next root. Values are stored in child_values to avoid updating the child values outside backpropagation\n if current_node.get_player() == 1:\n u = self.c * sqrt(log(child.get_parent_counter())/(1 + child.get_counter()))\n child_values[child] = child.get_value() + u\n elif current_node.get_player() == 2:\n u = self.c * sqrt(log(child.get_parent_counter())/(1 + child.get_counter()))\n child_values[child] = child.get_value() - u\n # Tree policy: choose action (and hence next root) that maximize value for P1 or minimize value for P2\n if current_node.get_player() == 1:\n current_node = max(child_values, key=child_values.get) # P1 chooses action argmax(Q + u)\n elif current_node.get_player() == 2:\n current_node = min(child_values, key=child_values.get) # P2 chooses action argmin(Q - u)\n child_nodes = current_node.get_child_nodes()\n return current_node # The chosen leaf node", "title": "" }, { "docid": "6a3c6661a14eb1e74197b692be307e88", "score": "0.5835063", "text": "def depth_first_search(graph, source_nodes, val):\n\n \n ## This is the actual DFS algorithm\n stack = []\n #source_nodes = [graph.nodes[0]] #get_nodes_without_predecessors(graph)\n while len(source_nodes) != 0:\n stack.append(source_nodes.pop())\n while len(stack) > 0:\n node = stack.pop()\n if node.previsit == -1:\n stack.append(node)\n node.previsit = val\n val += 1\n for n in node.successors:\n if n.postvisit == -1 and n.previsit == -1:\n stack.append(n)\n elif node.postvisit == -1:\n node.postvisit = val\n val += 1\n\n\n ## This checks for any nodes left over that has not been traversed bc it's dumb af\n no_val = []\n for n in graph.nodes:\n if n.postvisit == -1:\n no_val.append(n)\n if len(no_val) > 0:\n depth_first_search(graph, no_val, val)", "title": "" }, { "docid": "2dc9d2bd7c2ee28ba9dcdae6a30704b7", "score": "0.58282864", "text": "def search(self, nums: List[int], target: int) -> int:\n\n if not nums:\n return -1\n\n low, high = 0, len(nums) - 1\n\n while low <= high:\n mid = (low + high) // 2\n if target == nums[mid]:\n return mid\n\n if nums[low] <= nums[mid]:\n if nums[low] <= target <= nums[mid]:\n high = mid - 1\n else:\n low = mid + 1\n else:\n if nums[mid] <= target <= nums[high]:\n low = mid + 1\n else:\n high = mid - 1\n\n return -1", "title": "" }, { "docid": "54126156e3bf953cbd071a3c106f6949", "score": "0.5824986", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Variables used in the code:\n visited -> It stores a set of nodes which have been visited\n stack -> A stack is used to get the nodes which are to be explored\n node -> Every node popped from the stack is stored here to be access anywhere in the code.\n successor -> Each node's successor is stored in this\n \n FUNCTION:\n Every node is popped out from a stack and checked if its a goal state. If not, its successors are found and pushed over stack. While doing \t this we add the action to reach the parent to that of the child. Thus, wherever we find a goal state we will have a complete path to reach \t that goal node. 
\n \"\"\"\n visited = [];\n stack = util.Stack();\n stack.push((problem.getStartState(),[],0));\n \n while not stack.isEmpty():\n\tnode = stack.pop();\n if(problem.isGoalState(node[0])): \n\t return node[1];\n\tif(node[0] not in visited):\n\t visited.append(node[0]);\n\t successors = problem.getSuccessors(node[0]);\n\t for successor in successors:\n\t\tstack.push((successor[0],node[1]+[successor[1]],successor[2]));\n return None\n\t\t\n\n util.raiseNotDefined()", "title": "" }, { "docid": "d3db79e5d1fae9ab6c1f4cd2d1815946", "score": "0.5823075", "text": "def breadthFirstSearch(problem):\n\n closed = set()\n fringe = util.Queue()\n\n startNode = Node(problem.getStartState(), [], 0, 0, problem)\n fringe.push(startNode)\n\n while True:\n if fringe.isEmpty():\n return False\n node = fringe.pop()\n if problem.isGoalState(node.state):\n return node.path\n if node.state not in closed:\n closed.add(node.state)\n for childNode in node.getSuccessors():\n fringe.push(childNode)", "title": "" }, { "docid": "469d765b347910880ac238bd8b4e86e8", "score": "0.5821945", "text": "def search(self, env):\n node = self.root\n while node.successors:\n action = self.tree_policy(node, c=self.uct_coeff)\n env.move(action, node.player)\n node = node.successors[action]\n return node", "title": "" }, { "docid": "29bb0f377f313abd3adb08b63c5a50ea", "score": "0.5816502", "text": "def nut_finder(t):\n \"*** YOUR CODE HERE ***\"\n # if label(tree) == 'nut':\n # return True\n # elif is_leaf(t):\n # return False\n # else:\n # nut_finder(branches(t))\n \"answer1\"\n if label(t) == 'nut':\n return True\n for b in branches(t):\n if nut_finder(b):\n return True\n return False\n \"answer2\"\n if label(t) == 'nut':\n return True\n return True in [nut_finder(b) for b in branches(t)]", "title": "" }, { "docid": "ffdf4b240466f0f362b3cd24394b731f", "score": "0.5813277", "text": "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n explored=set()#the explored position or dot\n fringes=util.Queue()# store the latest pushed route\n action_=util.Queue()\n parent={}#key is the child node and value is the parent value\n init_state=problem.getSuccessors(problem.getStartState())\n start_pos=problem.getStartState()\n if(problem.isGoalState(start_pos)):\n return start_pos\n else:\n explored.add(start_pos)#have explored the dot\n for i in range(0,len(init_state)):\n parent[init_state[i][0]]=start_pos\n route=[start_pos,init_state[i][0]]\n fringes.push(route)#add the route to the fringes\n action_init=[init_state[i][1]]\n action_.push(action_init)\n #print the child-parent relation\n #for key in parent:\n # print(parent[key])\n flag=False\n while(not fringes.isEmpty()):\n route=fringes.pop()\n action_direction=action_.pop()\n #print(type(action_direction))\n next_to_expand=route[len(route)-1]\n #print(\"next to expand is: \",next_to_expand)\n if(problem.isGoalState(next_to_expand)):\n flag=True\n #print(\"we found the route\")\n #print(route)\n #print(type(route))\n return action_direction\n elif(next_to_expand not in explored):\n explored.add(next_to_expand)\n curr_child=problem.getSuccessors(next_to_expand)\n #print(\"the next succssor to expand is:\",next_to_expand)\n if(len(curr_child)!=0):#the node does have the child node to expand\n for i in range(0,len(curr_child)):\n #print(\"before added, the route: \",route)\n if(curr_child[i][0] not in explored):\n #print(\"the current node's child is \",curr_child[i][0])\n #print(\"the current direction is: \",curr_child[i][1])\n route_=route.copy()\n route_.append(curr_child[i][0])\n 
action_cp=action_direction.copy()\n action_cp.append(curr_child[i][1])\n #print(\"the new route is: \",route_)\n #print(\"the new direction is: \",action_cp)\n fringes.push(route_)\n action_.push(action_cp)\n else:\n if(fringes.isEmpty()):\n #print(\"zero!\")\n if(problem.isGoalState(curr_child[i][0])):\n return action_direction\n else:#the next_to_expand_node doesn't have a child node, and meanwhile it is not the goal then just go to the next route to explored\n continue\n else:\n # print(\"oops we seems to reach the point that previously explored \")\n # print(next_to_expand)\n continue#if the next node to expand has already been expanded, do nothing just\n #return\n #util.raiseNotDefined()", "title": "" }, { "docid": "f7c021b2f1eb27d1521d82100fa438ee", "score": "0.58011895", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # This implementation expands more nodes than the second one but it is accepted by the\n # autograder\n #print \"Using DFS\"\n\n # Set of candidate nodes to be explored\n # It will also hold the actions that were taken to get from the starting state to that state\n fringe = util.Stack()\n\n # List to keep track of what nodes (states) have been visited so as to not visit them again\n visited_nodes = []\n\n # Push in the first node along with a list of no actions\n # Since it is the starting node and no actions were taken to get to it\n # It will be popped first\n fringe.push((problem.getStartState(),[]))\n\n # Do this as long as the fringe is not empty\n while(fringe.isEmpty() == 0):\n # Pop the latest node and the actions taken to get to it\n working_state, actions_taken = fringe.pop()\n\n # As long as the state has not been visited ... \n if(working_state not in visited_nodes):\n \n # Mark the state as visited\n visited_nodes.append(working_state)\n\n # If goal was found, return the actions taken to get to it\n if(problem.isGoalState(working_state)):\n return actions_taken\n \n # Add successors to the fringe\n for next_state in problem.getSuccessors(working_state):\n state, current_action, cost = next_state\n fringe.push((state,actions_taken + [current_action]))\n\n util.raiseNotDefined()", "title": "" }, { "docid": "820041091c68457b63c78e1c37fc3a25", "score": "0.5790913", "text": "def recurse_traverse(node):\n neighbors = self.get_neighbors(node)\n print('neighbors', neighbors)\n for neighbor in neighbors:\n if neighbor[0] not in visited_lst:\n visited_lst.append(neighbor[0])\n print('passed-true. 
neighbor[0]', neighbor[0])\n recurse_traverse(neighbor[0])\n \n return", "title": "" }, { "docid": "59cd2394092cef7b3efb1e29b4b26b4a", "score": "0.57858616", "text": "def binary_search_recursive(data, target, low, high):\n if low > high:\n return False\n else:\n mid = (low + high) // 2 # // is an integer division (5 // 4 = 1)\n if target == data[mid]:\n return True\n elif target < data[mid]:\n return binary_search_recursive(data, target, low, mid - 1)\n else: \n return binary_search_recursive(data, target, mid + 1, high)", "title": "" }, { "docid": "78a5bdd012d91d92c21e11a8feeccf71", "score": "0.578569", "text": "def breadthFirstSearch(problem):\n fringe = util.Queue()\n visited = []\n solution = []\n startNode = SearchNode(problem.getStartState(), None, None)\n fringe.push(startNode)\n while not fringe.isEmpty():\n curr = fringe.pop()\n if problem.isGoalState(curr.value):\n solution.append(curr.action)\n if curr.parent == startNode and problem.isGoalState(curr.value):\n return solution\n while curr.parent != None:\n solution.append(curr.parent.action)\n curr = curr.parent\n solution.reverse()\n return solution[1:]\n elif curr.value not in visited:\n visited.append(curr.value)\n for x in problem.getSuccessors(curr.value):\n fringe.push(SearchNode(x[0], x[1], curr))", "title": "" }, { "docid": "ea0867f388452c289f5d23b0c7595099", "score": "0.57661784", "text": "def depthFirstSearch(problem):\n\n fringe = util.Stack()\n visited = []\n solution = []\n startNode = SearchNode(problem.getStartState(), None, None)\n fringe.push(startNode)\n while not fringe.isEmpty():\n\n curr = fringe.pop()\n if problem.isGoalState(curr.value):\n solution.append(curr.action)\n while curr.parent != startNode:\n solution.append(curr.parent.action)\n curr = curr.parent\n solution.reverse()\n return solution\n\n elif curr.value not in visited:\n visited.append(curr.value)\n for x in problem.getSuccessors(curr.value):\n fringe.push(SearchNode(x[0], x[1], curr))", "title": "" }, { "docid": "bc50f24d7cf8e79c967f4127e22fbe03", "score": "0.57644653", "text": "def dfs(start_node, goal_node, max_nodes):\n stack = []\n visited = []\n if goal_node<=0 or start_node <=0 or max_nodes<start_node:\n print(\"Error\")\n return\n else:\n nodes_gen = 1\n stack.append(start_node)\n while(stack):\n top = stack.pop()\n nodes_gen +=1\n visited.append(top)\n if goal_node == top:\n global found\n found = True\n print(\"\\n Found\")\n return (visited,stack)\n else:\n if gen_left_child(top,max_nodes) >-1:\n stack.append(gen_left_child(top,max_nodes))\n if gen_right_child(top,max_nodes) >-1:\n stack.append(gen_right_child(top,max_nodes))\n print(\"\\n Not Found\")\n return (visited,stack)", "title": "" }, { "docid": "e08222a7c068d3d179b5f69b65711a49", "score": "0.5763942", "text": "def depthFirstSearch(problem):\n return generic_search(util.Stack(), problem, add_successors, nullHeuristic)", "title": "" }, { "docid": "442dc16d7649338522205a14169c6c4b", "score": "0.576214", "text": "def bfs(graph,source = None,target = None):\n \n try:\n assert graph.ndim == 2\n except:\n err = 'Adjacency matrix must have 2 dimensions.'\n raise ValueError(err)\n \n try:\n assert graph.shape[1] > 0\n except:\n err = 'Dim-2 size must be greater than 0.'\n raise ValueError(err)\n\n visited = np.zeros((graph.shape[0],)).astype(np.int32)\n parent = -1 * np.ones((graph.shape[0],)).astype(np.int32)\n \n Q = Queue()\n Q.put(source)\n\n while not Q.empty():\n \n vertex = Q.get()\n\n for n in np.arange(graph.shape[1]):\n if not visited[n] and graph[vertex][n]:\n \n 
parent[n] = vertex\n \n if n == target:\n \n return parent,visited\n\n Q.put(n)\n\n visited[vertex] = 1\n \n return parent,visited", "title": "" }, { "docid": "1750d204e02b6f87225ebc97d51b3308", "score": "0.57567775", "text": "def search(problem, fringe):\n closed_list = []\n\n # Add start state to fringe\n # type checks for PriorityQueue\n if type(fringe) is util.PriorityQueue:\n fringe.push((problem.get_start_state(), [], 0), 0)\n else:\n fringe.push((problem.get_start_state(), []))\n\n # Loop while there is nodes in fringe to expand\n while not fringe.is_empty():\n # Pop node off fringe\n node = fringe.pop()\n # Check if the current node is the goal state\n if problem.is_goal_state(node[0]):\n # Return path to goal state\n return node[1]\n # Check if node is in closed list\n if node[0] not in closed_list:\n # Add node to closed list\n closed_list.append(node[0])\n # Expand node adding its children to the fringe\n for nextNode in problem.get_successors(node[0]):\n # Copy path from past node\n path = node[1][:]\n # Add next nodes path to list\n path.append(nextNode[1])\n # Add node to fringe\n # Note: does a type check for priority queue's to handle UCS\n if type(fringe) is util.PriorityQueue:\n # Add next state to fringe with a cost for UCS\n fringe.push((nextNode[0], path, node[2] + nextNode[2]),\n node[2] + nextNode[2])\n else:\n # Add next state to fringe\n fringe.push((nextNode[0], path))", "title": "" }, { "docid": "a0b066a9eb84434ea37ca9552fafef4c", "score": "0.57518137", "text": "def search(ints, target):\n return binary_search(ints, target, 0, len(ints) - 1)", "title": "" }, { "docid": "8b44e29221e55ab00c8f732466d0b8e2", "score": "0.5751362", "text": "def breadthFirstSearch(problem):\n q = util.Queue()\n closed = set()\n start = (problem.getStartState(), 0, [])\n q.push(start)\n while not q.isEmpty():\n (node, cost, path) = q.pop()\n if problem.isGoalState(node): \n return path\n if not node in closed:\n closed.add(node)\n for child_node, child_action, child_cost in problem.getSuccessors(node):\n new_cost = cost + child_cost\n new_path = path + [child_action]\n new_state = (child_node, new_cost, new_path)\n q.push(new_state)\n util.raiseNotDefined()", "title": "" }, { "docid": "c1dacf0738dcf01dac4152916807f262", "score": "0.5744569", "text": "def bruteForceSearch(digraph, start, end, maxTotalDist, maxDistOutdoors, path = [], pathes_list = [], total_distance = 0, outdoor_distance=0):\n \n path = path + [start]\n if start == end:\n #print \"path is found\", path\n return path\n\n current_node = Node(start)\n\n \n\n #print \"path is\", path\n #print \"start node is\", current_node.getName() \n #if digraph.childrenOf(current_node):\n\n for (node, w1, w2) in digraph.childrenOf(current_node):\n if node.getName() not in path:\n total_distance= total_distance+w1\n outdoor_distance = outdoor_distance+ w2\n new_path = bruteForceSearch(digraph, node.getName(), end, maxTotalDist, maxDistOutdoors, path, pathes_list,\n total_distance, outdoor_distance)\n if new_path:\n #print \"new path is\", new_path\n pathes_list.append((new_path, total_distance, outdoor_distance))\n \n #return new_path\n if len(path) != 1:\n return None\n\n print \"***************this is the end of the BFS **************\"\n shortest_path = []\n minTotalDist = maxTotalDist\n minDistOutdoors = maxDistOutdoors\n for (path, dist, out_dist) in pathes_list:\n if dist < maxTotalDist:\n minTotalDist = dist\n shortest_path = path\n minDistOutdoors = out_dist\n\n\n #return (shortest_path, minTotalDist, minTotalDist)\n return 
shortest_path", "title": "" }, { "docid": "58c97a66e566e9ed1462e9cb9041a630", "score": "0.5740708", "text": "def __searchForTarget(self):\n \n self.__targetSearchDistance += self.__loopDisplacement\n \n logPrint('Searching for target, search distance: '\n + str(self.__targetSearchDistance) + ', search pattern: '\n + self.SEARCH_PATTERN_NAMES[self.__currentSearchPattern]\n , LOG_LEVEL_DEBUG)\n \n if self.__targetSearchDistance < self.TARGET_SEARCH_DISTANCE:\n self.__setSearchPatternNavigate()\n elif self.__targetSearchDistance < self.TARGET_SEARCH_DISTANCE * 2:\n self.__setSearchPatternLeft()\n elif self.__targetSearchDistance < self.TARGET_SEARCH_DISTANCE * 3:\n self.__setSearchPatternRight()\n else:\n self.__setSearchPatternNone()\n \n return", "title": "" }, { "docid": "49c3e40c8eff8c7c328545931563f9b3", "score": "0.5739923", "text": "def breadth_first_search(graph, source, discovered):\n\n\tbfs_tree = Node(source)\n\tdiscovered[source] = 1\n\tlevel_count = 0\n\tlevel = [[bfs_tree]]\n\tflag = True\n\n\twhile flag:\n\t\tnext_level = []\n\n\t\tfor node in level[level_count]:\n\t\t\tfor neighbor in np.where(graph[node.data] == 1)[0]:\n\t\t\t\tif (discovered[neighbor] == 0) or (len([1 for item in next_level if item.data == neighbor]) != 0):\n\n\t\t\t\t\tchild = Node(neighbor)\n\t\t\t\t\tchild.parents.append(node)\n\t\t\t\t\tnode.children.append(child)\n\t\t\t\t\tnext_level.append(child)\n\n\t\t\t\t\tdiscovered[neighbor] = 1\n\n\t\tif not next_level:\n\t\t\tflag = False\n\t\telse:\t\n\t\t\tlevel.append(next_level)\n\t\t\tlevel_count += 1\n\n\treturn bfs_tree", "title": "" }, { "docid": "13ef9ad7469af20309486fa6b44300c7", "score": "0.57276165", "text": "def dfs(self, start, target=None):\n stack = []\n stack.append(start)\n visited = set(stack)\n\n while stack:\n current = stack.pop()\n if current == target:\n break\n # need to add the current one to the stack\n visited.add(current)\n # need to subtract the visted one from the vertices and extend the remaining to the stack\n stack.extend(self.vertices[current] - visited)\n\n return visited", "title": "" }, { "docid": "8915d6e5d7a1b3524faa7447c3bb49c8", "score": "0.57256544", "text": "def binantySearch1(value,target):\n if len(value) == 0 or value[0] > target or value[-1] < target:\n return -1\n\n half = len(value)/2\n if target > value[half]:\n temp = binantySearch1(value[half:len(value)],target)\n if temp == -1:\n return -1\n else:\n half += temp\n elif target < value[half]:\n half = binantySearch1(value[0:half], target)\n elif target == value[half]:\n return half\n\n return half", "title": "" }, { "docid": "67f8edc8e484360965da61b6e6eb972a", "score": "0.57251084", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #print(\"Start:\", problem.getStartState())\n #print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n# print(\"call dfs\")\n explored=set()#the explored position or dot\n fringes=util.Stack()# store the latest pushed route\n action_=util.Stack()\n parent={}#key is the child node and value is the parent value\n init_state=problem.getSuccessors(problem.getStartState())\n start_pos=problem.getStartState()\n #print(start_pos)\n #if(problem.isGoalState(start_pos)):\n # return start_pos\n #else:\n explored.add(start_pos)#have explored the dot\n for i in range(0,len(init_state)):\n parent[init_state[i][0]]=start_pos\n route=[start_pos,init_state[i][0]]\n fringes.push(route)#add the route to the fringes\n action_init=[init_state[i][1]]\n action_.push(action_init)\n #print the child-parent 
relation\n #for key in parent:\n # print(parent[key])\n flag=False\n while(not fringes.isEmpty()):\n route=fringes.pop()\n action_direction=action_.pop()\n #print(type(action_direction))\n next_to_expand=route[len(route)-1]\n # print(\"next to expand is: \",next_to_expand)\n if(problem.isGoalState(next_to_expand)):\n flag=True\n #print(\"we found the route\")\n #print(route)\n #print(\"the action_direction\",type(action_direction))\n return action_direction\n elif(next_to_expand not in explored):\n explored.add(next_to_expand)\n curr_child=problem.getSuccessors(next_to_expand)\n #print(\"the next succssor to expand is:\",next_to_expand)\n #print(type(problem.getSuccessors(next_to_expand)))\n if(len(curr_child)!=0):#the node does have the child node to expand\n for i in range(0,len(curr_child)):\n # print(\"before added, the route: \",route)\n if(curr_child[i][0] not in explored):\n #print(\"the current node's child is \",curr_child[i][0])\n #print(\"the current direction is: \",curr_child[i][1])\n #parent[curr_child[i][0]]=next_to_expand\n route_=route.copy()\n route_.append(curr_child[i][0])\n action_cp=action_direction.copy()\n action_cp.append(curr_child[i][1])\n #print(\"the new route is: \",route_)\n #print(\"the new direction is: \",action_cp)\n fringes.push(route_)\n action_.push(action_cp)\n else:#the next_to_expand_node doesn't have a child node, and meanwhile it is not the goal then just go to the next route to explored\n continue\n else:\n print(\"oops we seems to reach the point that previously explored \")\n #print(next_to_expand)\n continue#if the next node to expand has already been expanded, do nothing just\n #print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n #util.raiseNotDefined()", "title": "" }, { "docid": "08bcd4e76943f496ea3abd0be31b5612", "score": "0.5713114", "text": "def findNodeJustBefore(target, nodes):\n result = None\n for node in nodes:\n if comparePosition(target, node) < 0:\n return result\n result = node\n return result", "title": "" }, { "docid": "9d3b599860f82a24d0857da139b9f37d", "score": "0.57054645", "text": "def search(self, target):\n index = self.hash(target)\n for i in self.array[index]:\n if i.word == target:\n return i\n return None", "title": "" }, { "docid": "7d2b540bb007a7b66c938e9dd5002663", "score": "0.57049227", "text": "def helperSearch(frontier, exploredSet, problem, isUCS): \n\n thisState = frontier.pop()\n thisNode, result, _ = thisState\n\n ## check if node has been explored \n if thisNode in exploredSet: \n return \"continue\"\n\n # did we reach the goal state? 
\n if problem.isGoalState(thisNode): \n return result\n \n # expand the node \n nodeInfo = problem, thisNode, frontier, exploredSet, result\n expandThisNode(nodeInfo, isUCS)", "title": "" }, { "docid": "314a94f963c97c3bb65595375006296e", "score": "0.57013196", "text": "def dfs(tag, source_x, source_y, matched, parent_left, parent_top):\n v_left, v_top, v_right, v_bottom = int(tag['left']), int(tag['top']), int(tag['right']), int(tag['bottom'])\n width, height = v_right - v_left, v_bottom - v_top\n abs_left, abs_top = parent_left + v_left, parent_top + v_top\n abs_right, abs_bottom = width + abs_left, abs_top + height\n\n is_visible = tag['visible'] == 'true'\n is_enable = tag['enabled'] == 'true'\n if is_visible and abs_left <= source_x <= abs_right and abs_top <= source_y <= abs_bottom:\n children_feedback = list()\n for child in tag.children:\n if isinstance(child, bs4.element.Tag):\n fb = dfs(child, source_x, source_y, matched, abs_left, abs_top)\n children_feedback.append(fb)\n if tag['address'] == '4704841':\n print(abs_left, abs_top)\n print(fb, child)\n print('===')\n # children_feedback.append(dfs(child, source_x, source_y, matched, abs_left, abs_top))\n if list(children_feedback) == 0:\n # Leaf node\n matched.append((tag, abs_left, abs_top,))\n return True\n # Inner node\n result = False\n for fb in children_feedback:\n result |= fb\n if result:\n # True, i.e., some child match, and add child itself\n return True\n else:\n # No child match, then add tag itself\n matched.append((tag, abs_left, abs_top,))\n return True\n else:\n return False", "title": "" }, { "docid": "52b736aaf302637634301f04212ba25d", "score": "0.56939447", "text": "def get_target_nodes(self, array, target=None):\r\n \r\n if self.root in target:\r\n # Check if the current node matches the target.\r\n \r\n # Add the current node to the array.\r\n array.append(self)\r\n \r\n # Find all non-terminal children of the current node.\r\n NT_kids = [kid for kid in self.children if kid.root in\r\n params['BNF_GRAMMAR'].non_terminals]\r\n \r\n for child in NT_kids:\r\n if NT_kids:\r\n # Recursively call function on any non-terminal children.\r\n array = child.get_target_nodes(array, target=target)\r\n \r\n return array", "title": "" }, { "docid": "a977d873b2da18495e7e6f4ee0bfb21e", "score": "0.56920975", "text": "def search(self, e) -> Union[(BSTNode, None)]:\n current = self\n found = False\n while not found and current:\n if current.element < e:\n current = current.right\n elif current.element > e:\n current = current.left\n else:\n found = True\n\n if found:\n return current\n else:\n return", "title": "" }, { "docid": "35e15db2ad5820e6d29759fc53df748b", "score": "0.56891155", "text": "def breadthFirstSearch(problem):\n\n visited = [problem.getStartState()]\n parentPath = Stack()\n parentPath.push([[], problem.getStartState()])\n\n while not parentPath.isEmpty():\n paths = parentPath\n parentPath = Stack()\n\n while not paths.isEmpty():\n\n item = paths.pop()\n\n if problem.isGoalState(item[1]):\n return item[0]\n\n for state in problem.getSuccessors(item[1]):\n if not state[0] in visited:\n visited.append(state[0])\n\n parentPath.push([item[0]+[state[1]],state[0]])", "title": "" }, { "docid": "29f05b2d033526085f6ed046c3acee77", "score": "0.56837434", "text": "def __iterativeTreeSearch(self, root: BinaryTreeNode, key: int) -> BinaryTreeNode:\n while root != None:\n if root.val == key: return root\n elif root.val < key: root = root.right\n else: root = root.left\n return None", "title": "" }, { "docid": 
"cf1343858af4973e113dfae3a55d530d", "score": "0.56835485", "text": "def exhausted_search(tree, Xi):\n\n dist_best = float('inf')\n nd_best = None\n que = [tree.root]\n while que:\n nd = que.pop(0)\n dist = get_euclidean_distance(Xi, nd.split[0])\n if dist < dist_best:\n dist_best = dist\n nd_best = nd\n if nd.left is not None:\n que.append(nd.left)\n if nd.right is not None:\n que.append(nd.right)\n return nd_best", "title": "" }, { "docid": "42e8f3df10f9a6419db5ef7785b0da04", "score": "0.56833106", "text": "def depthFirstSearch(problem):\r\n\r\n # *** Your Code Here ***\r\n # initialize structures\r\n stack = Stack()\r\n visited = []\r\n actionList = []\r\n\r\n # initialize stack and visited list with start node,\r\n # parent node, and previous action\r\n stack.push((problem.startingState(), None, None))\r\n visited.append(problem.startingState())\r\n\r\n while (not stack.isEmpty()):\r\n currentState, sourceState, prevAction = stack.pop()\r\n # trace back ancestors to form path if goal reached\r\n if problem.isGoal(currentState):\r\n actionList.append(prevAction)\r\n while sourceState:\r\n s, ss, p = sourceState\r\n actionList.append(p)\r\n sourceState = ss\r\n break\r\n # push succesor nodes to stack\r\n for (state, action, cost) in problem.successorStates(currentState):\r\n if state not in visited:\r\n stack.push((state, (currentState, sourceState, prevAction), action))\r\n visited.append(state)\r\n\r\n # reverse list to get start->goal\r\n # remove first filler action (None)\r\n list.reverse(actionList)\r\n actionList.pop(0)\r\n\r\n return actionList", "title": "" }, { "docid": "d785c1aa623144d841e3b00bf86322ce", "score": "0.56819004", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n\n\n # Get Starting Node & return empty list if its the goal state.\n node = problem.getStartState()\n if problem.isGoalState(node): return []\n\n actions = []\n cost = 0", "title": "" }, { "docid": "93d3621b5ee189c5c79d42e4c802ac9f", "score": "0.5678816", "text": "def search(self, x: int, path: List[SkipListNode]) \\\n -> Tuple[Optional[SkipListNode], List[SkipListNode]]:\n if self.value == x:\n # item found, go to level 0 to find the position\n if self.down is None:\n # item found\n return self, path\n else:\n # go down\n path.append(self)\n return self.down.search(x, path)\n\n if self.down is None:\n # level 0, linear search\n if self.next is None or self.next.value > x:\n # hit list end or larger value at level 0, return None\n return None, path\n else:\n return self.next.search(x, path)\n\n if self.next is None or self.next.value > x:\n # hit list end or larger value, go down level\n path.append(self)\n return self.down.search(x, path)\n else:\n return self.next.search(x, path)", "title": "" }, { "docid": "4297bbd45aa4244a4f503696b48fb40d", "score": "0.56761956", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # print \"Start:\", problem.getStartState()\n # print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n # print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n fringe = util.Stack()\n start_Node = Node(problem.getStartState())\n # avoid cycle in DFS\n visited = set()\n fringe.push(start_Node)\n\n while not fringe.isEmpty():\n current_Node = fringe.pop()\n\n # Check if we make it\n if problem.isGoalState(current_Node.state):\n # print(\"Success!\")\n # print(current_Node.path)\n return list(current_Node.path)\n\n # Check if visited\n if current_Node.state in visited:\n continue\n\n # Mark as visited\n 
visited.add(current_Node.state)\n\n successors = problem.getSuccessors(current_Node.state)\n for successor in successors:\n s_state = successor[0]\n s_action = successor[1]\n s_cost = successor[2] + current_Node.cost\n if s_state == current_Node.parent or s_state in visited:\n continue\n new_Node = current_Node.next_Node(s_action, s_state, s_cost)\n fringe.push(new_Node)\n\n print('fail')\n return[]", "title": "" }, { "docid": "eb6678baa29bb0d8f26af4025d9d1ffe", "score": "0.567522", "text": "def search_iterative(self, needle):\n if self.root is None:\n return False\n else:\n current_node = self.root\n while current_node is not None:\n if current_node.data == needle:\n return True\n elif current_node.data < needle:\n current_node = current_node.right\n else:\n current_node = current_node.left\n return False", "title": "" }, { "docid": "1b8a8df5473bab979d00dc8cadcc70cf", "score": "0.5665442", "text": "def breadthFirstSearch(problem):\n fringe = util.Queue()\n def add_to_fringe_fn(fringe, node, cost):\n fringe.push(node)\n\n return genericSearch(problem, fringe, add_to_fringe_fn)", "title": "" }, { "docid": "4e314d4efd4bbcf583973ad120645594", "score": "0.5662355", "text": "def test_search_tree(self):\n tobj = putil.tree.Tree('/')\n tobj.add_nodes(\n [\n {'name':'root', 'data':[]},\n {'name':'root/anode', 'data':[]},\n {'name':'root/bnode', 'data':[]},\n {'name':'root/cnode', 'data':[]},\n {'name':'root/bnode/anode', 'data':[]},\n {'name':'root/cnode/anode/leaf', 'data':[]},\n {'name':'root/cnode/anode/leaf1', 'data':[]}\n ]\n )\n assert tobj.search_tree('anode') == sorted(\n [\n 'root/anode',\n 'root/bnode/anode',\n 'root/cnode/anode',\n 'root/cnode/anode/leaf',\n 'root/cnode/anode/leaf1'\n ]\n )\n assert tobj.search_tree('leaf') == sorted(['root/cnode/anode/leaf'])\n tobj = putil.tree.Tree('/')\n tobj.add_nodes(\n [\n {'name':'anode', 'data':[]},\n {'name':'anode/some_node', 'data':[]}\n ]\n )\n assert (\n tobj.search_tree('anode') == sorted(['anode', 'anode/some_node'])\n )\n tobj = putil.tree.Tree('/')\n tobj.add_nodes({'name':'anode', 'data':[]})\n assert tobj.search_tree('anode') == sorted(['anode'])", "title": "" }, { "docid": "4fc735af4cc5ce1658631099adfb6379", "score": "0.5659971", "text": "def search_recursive(self, node, needle):\n if node is None:\n return False\n else:\n if node.data == needle:\n return True\n elif node.data < needle:\n return self.search_recursive(node.right, needle)\n else:\n return self.search_recursive(node.left, needle)", "title": "" }, { "docid": "f86bb039e1b1e941acab61d6050ef65a", "score": "0.56564045", "text": "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n queue = util.Queue()\n node = Node(problem.getStartState(),None)\n queue.push(node)\n movements = []\n explored = []\n start = Node(None,None)\n while not queue.isEmpty():\n parent = queue.pop()\n if parent.state not in explored:\n if problem.isGoalState(parent.state):\n start = parent\n break\n explored.append(parent.state)\n for s in problem.getSuccessors(parent.state):\n node = Node(s[0],s[1],parent)\n if (node not in queue.list) and (s[0] not in explored):\n queue.push(node)\n moves = util.Stack()\n while not start.parent == None:\n moves.push(start.direction)\n start = start.parent\n while not moves.isEmpty():\n movements.append(moves.pop())\n return movements", "title": "" }, { "docid": "8bb7949a726b603afec4f91d1083d129", "score": "0.5655292", "text": "def search_matrix(matrix, target):\n if not matrix or not matrix[0] or not target:\n return False\n\n visited = set()\n 
center_vertex = SubMatrix(0, len(matrix[0]) - 1, 0, len(matrix) - 1)\n vertex_queue = [center_vertex]\n\n while vertex_queue:\n # get a new vertex from the queue.\n current_vertex = vertex_queue.pop()\n current_index_x, current_index_y = current_vertex.vertex\n current_value = matrix[current_index_y][current_index_x]\n if current_vertex.vertex not in visited:\n visited.add(current_vertex.vertex)\n print(current_vertex)\n print(current_value)\n if contains_target(matrix, current_vertex, target):\n if current_value < target:\n southeast_quadrant = current_vertex.get_southeast_quadrant()\n if southeast_quadrant and southeast_quadrant.vertex not in visited:\n print(\"South East Quadrant: %s\" % (southeast_quadrant))\n vertex_queue.append(southeast_quadrant)\n elif current_value > target:\n northwest_quadrant = current_vertex.get_northwest_quadrant()\n if northwest_quadrant and northwest_quadrant.vertex not in visited:\n print(\"North West Quadrant: %s\" % (northwest_quadrant))\n vertex_queue.append(northwest_quadrant)\n else:\n # We found the target value\n return True\n\n # get the northeast quadrant, we always add this one.\n northeast_quadrant = current_vertex.get_northeast_quadrant()\n if northeast_quadrant and northeast_quadrant.vertex not in visited:\n print(\"North East Quadrant: %s\" % (northeast_quadrant))\n vertex_queue.append(northeast_quadrant)\n\n # get the southwest quadrant, we always add this one.\n southwest_quadrant = current_vertex.get_southwest_quadrant()\n if southwest_quadrant and southwest_quadrant.vertex not in visited:\n print(\"South West Quadrant: %s\" % (southwest_quadrant))\n vertex_queue.append(southwest_quadrant)\n\n return False", "title": "" }, { "docid": "01107a63b24b792f8289cc7e9587bc1b", "score": "0.56540364", "text": "def find(self, count_nodes=False):\n # Arrays for bookkeeping.\n node_id = np.full([self.n], fill_value=-1, dtype=int)\n lowpoint = np.full([self.n], fill_value=np.iinfo(np.int32).max)\n parent_arr = np.full([self.n], fill_value=-1, dtype=int)\n # Initialize with starting node.\n node_stack = [self.start]\n backtrack_list = [self.start]\n parent_arr[self.start] = self.start\n current_id = 0\n while node_stack:\n flat_index = node_stack.pop()\n if not self.grid[flat_index] == 1 and flat_index != self.start:\n continue\n self.grid[flat_index] = 2\n node_id[flat_index] = current_id\n lowpoint[flat_index] = current_id\n # lowpoint_for_inspection[np.unravel_index(flat_index, grid.shape)] = lowpoint[flat_index]\n # ids_for_inspection[np.unravel_index(flat_index, grid.shape)] = node_id[flat_index]\n current_id += 1\n for index_change in self.allowed_moves:\n new_flat_index = flat_index + index_change\n if new_flat_index < 0 or new_flat_index >= self.n:\n continue\n elif self.grid[new_flat_index] == 0:\n continue\n elif index_change == 1 and new_flat_index % self.n_cols == 0:\n continue\n elif index_change == -1 and flat_index % self.n_cols == 0:\n continue\n elif new_flat_index == parent_arr[flat_index]:\n continue\n elif self.grid[new_flat_index] == 1:\n parent_arr[new_flat_index] = flat_index\n node_stack.append(new_flat_index)\n backtrack_list.append(new_flat_index)\n elif self.grid[new_flat_index] == 2:\n lowpoint[flat_index] = min(lowpoint[flat_index], node_id[new_flat_index])\n # lowpoint_for_inspection[np.unravel_index(flat_index, grid.shape)] = lowpoint[flat_index]\n\n unique, counts = np.unique(parent_arr, return_counts=True)\n child_count = dict(zip(unique, counts))\n child_count[self.start] -= 1\n if count_nodes:\n self.nodes_count = 
len(set(backtrack_list)) - 1\n if self.end:\n self.end_reachable = self.end in backtrack_list\n while backtrack_list:\n child = backtrack_list.pop()\n parent = parent_arr[child]\n lowpoint[parent] = min(lowpoint[parent], lowpoint[child])\n # lowpoint_for_inspection[np.unravel_index(parent, grid.shape)] = lowpoint[parent]\n if node_id[parent] <= lowpoint[child]:\n self.articulation_points.append(parent)\n if parent == self.start and child_count[self.start] < 2:\n self.articulation_points.pop()\n self.articulation_points = set(self.articulation_points)\n\n return", "title": "" }, { "docid": "a68c1c4569067216baa648173fee5ef0", "score": "0.5653397", "text": "def _find(x):\n if components[x][0] == x:\n return x\n else:\n root = x\n stack = [ ]\n while components[root][0] != root:\n stack.append(root)\n root = components[root][0]\n for y in stack:\n components[y][0] = root\n return root", "title": "" }, { "docid": "442e3dcd2f30b9b68e2c2148acb3a7b9", "score": "0.5651712", "text": "def search(self, start, end):\n queue = []\n self.marked[start[0], start[1]] = 1\n queue.append([start])\n while queue:\n path = queue.pop(0)\n cell = path[-1]\n if cell == end:\n return path\n for neighbor in self.adjacent(cell):\n new_path = list(path)\n self.marked[neighbor[0], neighbor[1]] = 1\n new_path.append(neighbor)\n queue.append(new_path)", "title": "" }, { "docid": "1f50b1f6462226f81ecfd390ed9bd738", "score": "0.56510055", "text": "def search(self, find_val):\n return self.bst_search(self.root, find_val)", "title": "" }, { "docid": "b7f8942788987a9df7c5e3bf8071c2ee", "score": "0.5650434", "text": "def binary_search_iterative(data, target):\n low = 0\n high = len(data) - 1\n while low <= high:\n mid = (low + high) // 2\n if target == data[mid]:\n return True\n elif target < data[mid]:\n high = mid - 1\n else:\n low = mid + 1\n return False", "title": "" }, { "docid": "bcd94aedffcf33b31e27b258ed5760d1", "score": "0.5646922", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n visted=[]\n move=[]\n stack = util.Stack()\n## print\"Start Position:\", problem.getStartState()\n stack.push((problem.getStartState(), []))\n while stack:\n currentPostion = stack.pop()\n move=currentPostion[1]\n sList = problem.getSuccessors(currentPostion[0])\n if problem.isGoalState(currentPostion[0]):\n break\n if not currentPostion[0] in visted:\n sList = problem.getSuccessors(currentPostion[0])\n for s in sList:\n stack.push((s[0], move + [s[1]]))\n visted=visted+[currentPostion[0]]\n## print \"s[0]\", move+[s[1]]\n## print \"visted nodes:\", visted\n## print \"currentPosition:\", currentPostion[0]\n## print \"Steps:\", move\n## print \"Start:\", problem.getStartState()\n## print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n## print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n return move", "title": "" }, { "docid": "ad7d9a83b25e9c97b0589f46a7052645", "score": "0.5644591", "text": "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Queue()\n start_Node = Node(problem.getStartState())\n # avoid cycle in BFS\n visited = set()\n fringe.push(start_Node)\n\n while not fringe.isEmpty():\n current_Node = fringe.pop()\n\n # Check if we make it\n if problem.isGoalState(current_Node.state):\n # print(\"Success!\")\n # print(current_Node.path)\n return list(current_Node.path)\n\n # Check if visited\n if current_Node.state in visited:\n continue\n\n # Mark as visited\n visited.add(current_Node.state)\n\n successors = 
problem.getSuccessors(current_Node.state)\n for successor in successors:\n s_state = successor[0]\n s_action = successor[1]\n s_cost = successor[2] + current_Node.cost\n if s_state == current_Node.parent or s_state in visited:\n continue\n new_Node = current_Node.next_Node(s_action, s_state, s_cost)\n fringe.push(new_Node)\n\n print('fail')\n return []", "title": "" }, { "docid": "f353369db5b655d7da8d912a19e16dbc", "score": "0.56435466", "text": "def genericSearch(problem, fringe, add_to_fringe_fn):\n closed = set()\n start = (problem.getStartState(), 0, []) # (state, cost, actions) \n add_to_fringe_fn(fringe, start, 0)\n\n while not fringe.isEmpty():\n state, cost, actions = fringe.pop()\n\n if problem.isGoalState(state):\n return actions\n\n if not state in closed:\n closed.add(state)\n\n for newState, newAction, newCost in problem.getSuccessors(state):\n nextNode = (newState, cost+newCost, actions+[newAction])\n add_to_fringe_fn(fringe, nextNode, cost+newCost)", "title": "" } ]
9c56f06c5aedb2c724ba0b2b16b1ec9b
Create a file containing blow count vs depth.
[ { "docid": "a69ffe7e037a5f03d7d6e710e693f126", "score": "0.59456706", "text": "def create_bcount(\n did,\n bcounts,\n depths\n):\n bcounts_ = [bcounts, depths]\n\n with open(\n 'bcounts/{}.p'.format(did),\n 'wb'\n ) as out_file:\n pickle.dump(\n bcounts_,\n out_file\n )\n\n update_table(\n did,\n bcount=True\n )", "title": "" } ]
[ { "docid": "a591fb0a1c1f83821fcb324131b569db", "score": "0.6140474", "text": "def write_png_depth(filename: str, depth: int) -> None:\n data = struct.pack('!i', depth)\n with open(filename, 'r+b') as f:\n # seek to the beginning of the IEND chunk\n f.seek(-LEN_IEND, 2)\n # overwrite it with the depth chunk\n f.write(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START + data)\n # calculate the checksum over chunk name and data\n crc = binascii.crc32(DEPTH_CHUNK_START + data) & 0xffffffff\n f.write(struct.pack('!I', crc))\n # replace the IEND chunk\n f.write(IEND_CHUNK)", "title": "" }, { "docid": "92bb6486a14b15472f7b8f43527fbc64", "score": "0.58069056", "text": "def write_depths_to_file(output, chromosome, compressed_depths):\n keys = list(compressed_depths.keys())\n for key in keys:\n output.write(chromosome + \" \" + str(key) + \" \" + str(round(compressed_depths[key], 3)) + \"\\n\")", "title": "" }, { "docid": "20e5aa9e6d19ba18749921354f8ae44a", "score": "0.5785021", "text": "def __create_game_counter(self):\n with open(self.__counter_path, \"w+\") as f:\n f.write(\"0\")", "title": "" }, { "docid": "c548db2a7076a019a30ebb42fdcdce11", "score": "0.57663596", "text": "def write_depth_image(depth, path):\n with open(path, 'wb') as file:\n file.write(\n struct.pack(\n 'H' * np.prod(depth.shape),\n *depth.flatten().tolist()\n )\n )\n return True", "title": "" }, { "docid": "80baa0de7e629139c6a91d3062cc23be", "score": "0.5629219", "text": "async def create_til_count_file(count):\n print(\"Generating count.json\")\n with open(\"count.json\", \"w\") as json_file:\n data = {\"count\": count}\n json.dump(data, json_file, indent=\" \")", "title": "" }, { "docid": "9906c8368b4479687a54c244d0625670", "score": "0.55747205", "text": "def getDepth():", "title": "" }, { "docid": "eb4f3cd216b5ec177de0958866f17ed4", "score": "0.55517757", "text": "def writefile():", "title": "" }, { "docid": "29c9fb606c24411ba0c1327924e46f07", "score": "0.5539517", "text": "def create(filename, mode='w'):", "title": "" }, { "docid": "6daecf0a282504616d16dfe4dba3d840", "score": "0.55222416", "text": "def test_createnc(create_case):\n case_dir = create_case\n sys.argv = [\"geoclaw-landspill\", \"createnc\", str(case_dir)]\n gclandspill.__main__.main()\n\n assert case_dir.joinpath(\"_output\", \"{}-depth-lvl02.nc\".format(case_dir.name)).is_file()", "title": "" }, { "docid": "e9afdb912c42c6a6c8ca66453c49aef2", "score": "0.55156493", "text": "def dump(self, path: str):\n with open(path, 'w') as f:\n f.write(str(self._root.num()) + '\\n')\n f.write(str(self._cnt) + '\\n')\n\n rec: List[str] = [\"\" for i in range(self._cnt)]\n stack: List[_Node] = [self._root]\n while len(stack) != 0:\n x = stack[-1]\n stack.pop()\n\n if x is None:\n continue\n\n left_child = x.left_child()\n right_child = x.right_child()\n if left_child is None:\n left_child = _Node(-1, 0)\n if right_child is None:\n right_child = _Node(-1, 0)\n\n rec[x.num()] = \"%d %d %d\" % (\n x.weight(), left_child.num(), right_child.num())\n stack.append(x.left_child())\n stack.append(x.right_child())\n for s in rec:\n f.write(s + '\\n')\n\n for label in self._label2num:\n f.write(\"%s %d\\n\" % (label, self._label2num[label]))", "title": "" }, { "docid": "14b87da2046e3b65e11ddf069e86042e", "score": "0.55106896", "text": "def write(self, setup):\n self.counters.write( self.dirName )\n self.averages.write( self.dirName )", "title": "" }, { "docid": "f02122030b1226c667bca2c22ad7ac56", "score": "0.5495452", "text": "def _write_chain(file, path, depth=0, reference=None):\n\n if depth != 
0:\n offset = _calculate_offset(path, depth)\n file.write(offset * ' ' + ' --> ')\n\n for index in range(depth, len(path) - 1):\n if path[index] in discovered_modules and discovered_modules[path[index]].number != -1:\n file.write('[' + str(discovered_modules[path[index]].number) + '] ')\n file.write(path[index])\n file.write(' --> ')\n\n if path[-1] in discovered_modules and discovered_modules[path[-1]].number != -1:\n file.write('[' + str(discovered_modules[path[-1]].number) + '] ')\n file.write(path[-1])\n if reference is not None:\n file.write(' (watch ' + str(reference) + ')')\n elif path[-1] not in discovered_modules:\n file.write(' [ext]')\n elif discovered_modules[path[-1]].number == -1:\n file.write(' [end]')\n\n file.write('\\n')", "title": "" }, { "docid": "4f0ca369bf3c6204aea90c86afa9dc17", "score": "0.54871273", "text": "def create(path, purge_cycles=True): \n\n file = File(path, \"w+\", purge_cycles)\n return file.root", "title": "" }, { "docid": "b3132cb515271093d08372ec7a5cbbb3", "score": "0.54661673", "text": "def small_look_data_file(n):\n\t\n\tfile_handle = open('level_data.txt', 'w')\n\tgfile_handle = open('max_level_data.txt', 'w')\n\tmax_level = 0\n\tfor i in range(1,n,2):\n\t\tseq = [x for x in collatz_seq(i) if x%2 == 1]\n\t\tlevel = len(seq) - 1\n\t\tif level > max_level:\n\t\t\tmax_level = level\n\t\tout_str = \"%d \\t %d \\n\"%(i,level)\n\t\tfile_handle.write(out_str)\n\t\tmax_out_str = \"%d \\t %d \\n\"%(i,max_level)\n\t\tgfile_handle.write(max_out_str)\n\tfile_handle.close()\n\tgfile_handle.close()\n\treturn 'done'", "title": "" }, { "docid": "77a429e49aad481505108b7c89e626b4", "score": "0.5448138", "text": "def create_non_zero_file(self, size, path, mode=None):\n filepath = os.path.join(self.testdir, path)\n with open(filepath, 'w') as f:\n f.write('\\132' * size)\n if mode is not None:\n os.chmod(filepath, mode)\n return filepath", "title": "" }, { "docid": "4abb68eda537c0071e834e5b1fa2a053", "score": "0.5418369", "text": "def write_covering_fatgraph(self, filename):\n if not all([hasattr(v,'sheet') for v in self.V]):\n print \"This function can only be called for a fatgraph with .sheet data\"\n return\n f = open(filename, 'w')\n #write the fatgraph as normal\n f.write(str(len(self.V)) + ' ' + str(len(self.E)) + '\\n')\n for v in self.V:\n f.write(str(len(v.edges)))\n for (e,d) in v.edges:\n signed_ind = (e+1 if d else -(e+1))\n f.write(' ' + str(signed_ind))\n f.write('\\n')\n for e in self.E:\n f.write( str(e.source) + ' ' + str(e.dest) + ' ' + e.label_forward + ' ' + e.label_backward + '\\n')\n #count the number of sheets\n num_sheets = 1\n for v in self.V:\n if v.sheet+1 > num_sheets:\n num_sheets = v.sheet+1\n #get the number of vertices in each sheet (should be the same, but no assumptions)\n vertices_per_sheet = [0 for i in xrange(num_sheets)]\n vertex_pos_in_sheet = [None for v in self.V]\n for vi, v in enumerate(self.V):\n vertex_pos_in_sheet[vi] = vertices_per_sheet[v.sheet]\n vertices_per_sheet[v.sheet] += 1\n h_start = 20\n v_start = 20\n h_step = 760/num_sheets\n v_step = [760/vps for vps in vertices_per_sheet]\n #write the positions to the file\n for vi, v in enumerate(self.V):\n f.write(str(h_start + v.sheet*h_step) + ' ' + str(v_start + vertex_pos_in_sheet[vi]*v_step[v.sheet]) + '\\n')\n f.close()", "title": "" }, { "docid": "e4fc5043cbc6ef8e54f419d9ea3be8f9", "score": "0.53894186", "text": "def create_op_structure(self):\n op_create_path = os.getcwd() + \"/history/create_op_dir.txt\"\n try:\n with open(op_create_path,\"r\") as f:\n 
dat=f.read()\n return 1\n except:\n logging.info(\"op created\")\n for dir1 in range(2019,2025):\n for dir2 in range(1,13):\n os.makedirs(os.path.join(\"output\",str(dir1), str(dir2)))\n with open(op_create_path,\"w+\") as f:\n f.write(\"1\")", "title": "" }, { "docid": "17823fd0c08ed9b9c02cf5cde9c5a264", "score": "0.5384528", "text": "def save_to_file(file_path, depth_map):\n\n with open(file_path, 'wb') as f:\n depth_image = (depth_map * 256).astype(np.uint16)\n\n # pypng is used because cv2 cannot save uint16 format images\n writer = png.Writer(width=depth_image.shape[1],\n height=depth_image.shape[0],\n bitdepth=16,\n greyscale=True)\n writer.write(f, depth_image)", "title": "" }, { "docid": "5b4cd2f82cef38d710a7dc0e5eddfb80", "score": "0.5377312", "text": "def create_files(self, path, count=1, size=0, depth=0, width=0, fill='zero'):\n BLOCKSIZE = 8192\n count = int(count)\n size = int(size)\n depth = int(depth)\n width = int(width)\n\n if fill == 'zero':\n block = bytearray(BLOCKSIZE)\n elif fill == 'one':\n block = bytearray(BLOCKSIZE)\n elif fill == 'random':\n random.seed(0) # Always make the same psuedo random sequence.\n block = bytearray(random.getrandbits(8) for _ in range(BLOCKSIZE))\n elif fill == 'fixed':\n hexstring = 'deadbeef'\n ncopies = BLOCKSIZE // len(hexstring)\n block = bytearray.fromhex(hexstring * ncopies)\n else:\n raise ValueError(\"Invalid fill type: %s\" % fill)\n\n nblocks = size // BLOCKSIZE\n partial_size = size % BLOCKSIZE\n if partial_size:\n partial_block = block[0:partial_size]\n\n def make_files(p, count):\n for i in range(0, count):\n name = os.path.join(p, '%d' % (i))\n with open(name, 'wb') as f:\n for _ in range(0, nblocks):\n f.write(block)\n if partial_size:\n f.write(partial_block)\n\n def make_tree(p, d):\n if d > depth:\n return\n if not os.path.isdir(p):\n os.mkdir(p)\n if count:\n make_files(p, count)\n for i in range(0, width):\n make_tree('%s/d%d' % (p, i), d + 1)\n\n make_tree(path, 0)", "title": "" }, { "docid": "38f6f9eaa2971f1f604eb17214f998dc", "score": "0.537431", "text": "def generate(self):\n with open(self.blob_name, 'wb') as f:\n if self.VCS_Fix:\n f.write(c_uint64(0)) # VCS incorrectly reads first 16 Bytes\n f.write(c_uint64(0)) # VCS incorrectly reads first 16 Bytes\n f.write(c_uint64(0)) # VCS incorrectly reads first 16 Bytes\n f.write(c_uint64(0)) # VCS incorrectly reads first 16 Bytes\n f.write(estimate_file_size(self))\n f.write(self.version)\n\n f.write(self.name)\n f.write(self.report_dir)\n f.write(self.stage_count)\n f.write(get_buffer_start(self)) # Size of a network element.\n self.myriad_params.generate(f)\n self.network.generate_info(f)\n self.network.generate_data(f)", "title": "" }, { "docid": "59826f6c4986b13901d9ed066f7dc0b2", "score": "0.53736126", "text": "def write_coverages_and_vcf_files(components, f_cov, f_vcf, values):\r\n count_forward = len(re.findall('[.ACGTN>*]', components[4])) #Count forward depth\r\n count_reverse = len(re.findall('[,acgtn<#]', components[4])) #Count reverse depth\r\n bases=['A', 'C', 'G', 'T']\r\n if components[2] not in bases:\r\n pass\r\n elif count_forward>=values.for_limit and count_reverse>=values.rev_limit and int(components[3])>=values.depth_lim: #If forward, reverse and total depths are enough\r\n f_cov.write(components[0]+'\\t'+components[1]+'\\t\\t'+components[2]+'\\t\\t'+components[3]+'\\t\\t'+str(count_forward)+'\\t\\t'+str(count_reverse)+'\\n') #Write to coverages file\r\n if f_vcf!=None:\r\n bases.remove(components[2])\r\n for variant in bases:\r\n 
f_vcf.write(components[0]+'\\t'+components[1]+'\\t'+'.'+'\\t'+components[2]+'\\t'+variant+'\\t.\\t.\\t') #Writes chromosome, position, id and reference base\r\n f_vcf.write(\"DP=\"+components[3]) #Max depth?\r\n f_vcf.write(\"\\t\") #Separates with tab to the next column\r\n f_vcf.write(\"GT:AD:ADF:ADR\\t0/1:\"+components[3]+':'+str(count_forward)+':'+str(count_reverse)+'\\n') #Total depth, forward depth and reverse depth\r\n return True\r\n return False", "title": "" }, { "docid": "8cf6b3ee2c0c19e9cbddeeae3a9bf680", "score": "0.53608733", "text": "def write_total_ordering(graph, filename=None):\n my_str = ' '.join(str(i) for i in follow_tree(graph))\n if filename:\n outfh = open(filename, 'w')\n outfh.write(my_str)\n outfh.write(\"\\n\")\n outfh.close()\n else:\n print my_str", "title": "" }, { "docid": "3407739801f98b4bbf83313ebf2bb9eb", "score": "0.535575", "text": "def write_wrapper():\n\n while True:\n levels_to_file()\n file_to_levels()", "title": "" }, { "docid": "79922f70d6e2cd314bb13a95da646c85", "score": "0.5355488", "text": "def test_plotdepth(create_case):\n case_dir = create_case\n sys.argv = [\"geoclaw-landspill\", \"plotdepth\", \"--border\", \"--nprocs\", \"1\", str(case_dir)]\n gclandspill.__main__.main()\n plot_dir = case_dir.joinpath(\"_plots\", \"depth\", \"level02\")\n\n assert plot_dir.is_dir()\n\n for i in range(6):\n file_path = plot_dir.joinpath(\"frame0000{}.png\".format(i))\n assert file_path.is_file(), \"{} not found\".format(file_path)", "title": "" }, { "docid": "32567673f54689474e2b8f9076f061aa", "score": "0.535257", "text": "def _create_sparsed_file(self, nms, path, size):\r\n block_size_mb = 1\r\n block_count = size * units.GiB / (block_size_mb * units.MiB)\r\n\r\n nms.appliance.execute(\r\n 'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=0 seek=%(count)d' % {\r\n 'path': path,\r\n 'bs': block_size_mb,\r\n 'count': block_count\r\n }\r\n )", "title": "" }, { "docid": "e95fb32116f03d134f1057af843ccf1f", "score": "0.5343382", "text": "def create_holey_file(self, size, path, mode=None):\n filepath = os.path.join(self.testdir, path)\n with open(filepath, 'w') as f:\n if size > 0:\n f.seek(size - 1)\n f.write('\\0')\n if mode is not None:\n os.chmod(filepath, mode)\n return filepath", "title": "" }, { "docid": "cf6eeaf09d556a7c7478ad309eda4c4e", "score": "0.5337418", "text": "def writeDist(dico,pdbname):\n\n\ttxt_file_name = pdbname.replace(\".pdb\",\"_depth.txt\")\n\n\toutput = open(txt_file_name,\"w\")\n\n\tfor key in dico.keys():\n\t\toutput.write(\"{0}\\t{1}\\n\".format(key,dico[key]))\n\n\toutput.close()", "title": "" }, { "docid": "d12c31f7633c1729678efec16f0a6c49", "score": "0.5306081", "text": "def write_tree(tree, depth, root, f):\r\n if tree.is_node:\r\n f.write(\":\" + tree.classification + \"\\n\")\r\n return\r\n elif not root:\r\n depth += 1\r\n f.write(\"\\n\")\r\n for son in tree.sons:\r\n classification = son\r\n son = tree.sons[son]\r\n if not root:\r\n f.write(\"\\t\" * depth + '|')\r\n f.write(tree.attribute + \"=\" + classification)\r\n write_tree(son, depth, False, f)", "title": "" }, { "docid": "c69cb9b6291db3ba290d6bf167f0cf41", "score": "0.5274302", "text": "def test_dump_with_weight(self):\n self.edges[0].weight = 100\n temp_png_file_name = self.get_temp_filename()\n try:\n self.graph_drawer.draw_png(\n temp_png_file_name\n )\n finally:\n os.remove(temp_png_file_name)", "title": "" }, { "docid": "fe29a38ef18fabcdb8362a32363cf0d9", "score": "0.52618194", "text": "def write_tree(file_obj, vertex):\n file_obj.write(vertex.val)\n 
file_obj.write(\"\\n\")\n vertex.color = \"grey\"\n for edge in vertex.adj:\n if edge._to.color != \"grey\":\n write_tree(file_obj, edge._to)", "title": "" }, { "docid": "3cbacc54693c85f62262d5f189a159b6", "score": "0.5256339", "text": "def tofile():", "title": "" }, { "docid": "78b53333f0a895094758f535a1e56e89", "score": "0.5248561", "text": "def write_depth(filename, depth, intrinsics=None):\n # If depth is a tensor\n if is_tensor(depth):\n depth = depth.detach().squeeze().cpu()\n # If intrinsics is a tensor\n if is_tensor(intrinsics):\n intrinsics = intrinsics.detach().cpu()\n # If we are saving as a .npz\n if filename.endswith('.npz'):\n np.savez_compressed(filename, depth=depth, intrinsics=intrinsics)\n # If we are saving as a .png\n elif filename.endswith('.png'):\n depth = transforms.ToPILImage()((depth * 256).int())\n depth.save(filename)\n # Something is wrong\n else:\n raise NotImplementedError('Depth filename not valid.')", "title": "" }, { "docid": "eb3b400753065d455078bcd61fe3a8f0", "score": "0.5245517", "text": "def graph_stats_nograph(start_num, min_lg2_bound, max_lg2_bound):\n\n\tstats_file = open('graph_stats.txt',\"w\")\n\n\tprev_seen_list = [start_num]\n\t\n\tfor i in range(min_lg2_bound, max_lg2_bound+1):\n\t\tcur_level = [start_num]\n\t\tseen_list = [start_num]\n\n\t\t#Estimating the num we need for compute_up_level\n\t\tnum_est = 2+i\n\t\t\n\t\t#Creating the ith graph\n\t\twhile len(cur_level) > 0:\n\t\t\tnext_level = []\n\t\t\tfor target in cur_level:\n\t\t\t\ttemp = compute_up_level(target, num_est)\n\t\t\t\tif 1 in temp:\n\t\t\t\t\ttemp.remove(1)\n\t\t\t\ttrimmed_temp = [x for x in temp if x<= 2**i]\n\t\t\t\tseen_list.extend(trimmed_temp)\n\t\t\t\tnext_level.extend(trimmed_temp)\n\t\t\tcur_level = [x for x in next_level]\t\t\t\n\t\t\t\t\t\n\t\tprint \"%d nodes for %d bits \\n\"%(len(seen_list),i)\n\t\t\n\t\t#Create useful sub_lists of seen_list once rather than multiple times\n\t\tclass0_seen_list = [x for x in seen_list if x%3==0]\n\t\tclass1_seen_list = [x for x in seen_list if x%3==1]\n\t\tclass2_seen_list = [x for x in seen_list if x%3==2]\n\t\teven_desc1 = [x for x in class1_seen_list if get_descendant_parity(x) == 0]\n\t\todd_desc1 = [x for x in class1_seen_list if get_descendant_parity(x) == 1]\n\t\teven_desc2 = [x for x in class2_seen_list if get_descendant_parity(x) == 0]\n\t\todd_desc2 = [x for x in class2_seen_list if get_descendant_parity(x) == 1]\n\t\t\n\t\t\t\t\n\t\t#Now it's time to assemble some statistics\n\t\twrite_str = \"%3d BIT BOUND: \\n \\n\"%(i)\n\t\tstats_file.write(write_str)\n\t\twrite_str = \"\\t Number of nodes: %22d %20.2f bits\\n\"%(len(seen_list), math.log(len(seen_list),2))\n\t\tstats_file.write(write_str)\n\t\twrite_str = \"\\t Number of previously seen nodes: %6d %20.2f bits\\n\"%(len(prev_seen_list),math.log(len(prev_seen_list) ,2))\n\t\tstats_file.write(write_str)\n\t\twrite_str = \"\\t Number of new nodes: %18d %20.2f bits\\n\"%(len(seen_list)-len(prev_seen_list),math.log(len(seen_list)-len(prev_seen_list) ,2))\n\t\tstats_file.write(write_str)\n\t\twrite_str = \"\\t Percentage of new nodes: %14.2f \\n\"%(1.0*(len(seen_list)-len(prev_seen_list))/len(seen_list))\n\t\tstats_file.write(write_str)\n\t\n\t\tstats_file.write(\"\\n\")\n\t\tstats_file.write(\"\\t NUMBER OF NODES IN CONGRUENCE CLASSES MODULO 3 \\n\\n\")\n\t\tstats_file.write(\"\\t \\t 0 \\t %8d \\n\"%(len(class0_seen_list)))\n\t\tstats_file.write(\"\\t \\t 1 \\t %8d \\n\"%(len(class1_seen_list)))\n\t\tstats_file.write(\"\\t \\t 2 \\t %8d 
\\n\"%(len(class2_seen_list)))\n\t\t\n\t\tstats_file.write(\"\\n\")\n\t\tstats_file.write(\"\\t NUMBER OF NODES WITH EVEN/ODD DECENDANTS \\n\\n\")\n\t\tstats_file.write(\"\\t %8s \\t %8s \\t %8s \\t %8s \\n \"%('PARITY', 'COUNT', '1COUNT', '2COUNT'))\n\t\tstats_file.write(\"\\t %8s \\t %8d \\t %8d \\t %8d \\n \"%('EVEN', len(even_desc1) + len(even_desc2),len(even_desc1),len(even_desc2)))\n\t\tstats_file.write(\"\\t %8s \\t %8d \\t %8d \\t %8d \\n \"%('ODD', len(odd_desc1) + len(odd_desc2),len(odd_desc1),len(odd_desc2)))\n\t\t\n\t\tstats_file.write(\"\\n\")\n\t\tstats_file.write(\"\\t BREAKDOWN OF NODES\\n\\n\")\n\t\tw_str = create_stats_table(seen_list)\n\t\tstats_file.write(w_str)\n\t\t\n\t\tstats_file.write(\"\\n\")\n\t\tstats_file.write(\"\\t NUMBER OF NODES OF A GIVEN LENGTH \\n\\n\")\n\t\tstats_file.write(\"\\t %8s \\t %8s \\t %8s \\t %8s \\t %8s \\t %8s \\t %8s \\n\"%('LENGTH','ACT. NODES','MAX POSS.', '% OF POSS.', '#0 MOD3','#1 MOD3','#2 MOD3'))\n\t\tfor j in range(i):\n\t\t\tof_length = [x for x in seen_list if get_length(x)==j]\n\t\t\tnum_of_length = len(of_length)\n\t\t\tmax_poss = max_poss_of_length(j)\n\t\t\tstats_file.write(\"\\t \\t %d \\t %8d \\t %8d \\t %3.5f \\t %8d \\t %8d \\t %8d\\n\"%(j,num_of_length,max_poss,1.0*num_of_length/max_poss, len([x for x in of_length if x%3==0]), len([x for x in of_length if x%3==1]), len([x for x in of_length if x%3==2])))\n\n\t\tstats_file.write(\"\\n\")\n\t\t\n\t\tprev_seen_list = [x for x in seen_list]\n\n\tstats_file.close()\n\n\treturn 'done'", "title": "" }, { "docid": "540b856d842b40f4292bc5756b28e336", "score": "0.5218557", "text": "def writeTree(filename, seq1, seq2):\n\tassert len(seq1) == len(seq2)\n\tlength = len(seq1)\n\tfile = open(filename, 'w')\n\tfile.write(\"2 1\\n\") # 2 species, 1 tree\n\tfile.write(\"(seq_1,seq_2);\\n\")\n\tfile.close()", "title": "" }, { "docid": "71e2becf3b8d456ed1c81888d6fb28f3", "score": "0.5218214", "text": "def save_depthmap(depth,filename=None):\r\n if filename is None:\r\n raise ValueError(\"filename is None\")\r\n np.save(filename, depth)", "title": "" }, { "docid": "165a10325842c8e2896f55736a09a5da", "score": "0.52131265", "text": "def create_tree ( fname , nentries ) :\n \n import ROOT, random \n import ostap.io.root_file\n \n from array import array \n var1 = array ( 'd', [0])\n var2 = array ( 'd', [0])\n var3 = array ( 'd', [0])\n \n with ROOT.TFile.Open( fname , 'new' ) as root_file:\n \n tree = ROOT.TTree ( 'S','tree' )\n tree.SetDirectory ( root_file ) \n tree.Branch ( 'mass' , var1 , 'mass/D' )\n tree.Branch ( 'c2dtf' , var2 , 'c2dtf/D' )\n tree.Branch ( 'pt' , var3 , 'pt/D' )\n \n for i in range ( nentries ) : \n \n m = random.gauss ( 3.1 , 0.015 )\n c2 = random.gammavariate ( 2.5 , 0.5 ) / 5 \n pt = random.uniform ( 0 , 20 )\n \n var1[0] = m\n var2[0] = c2 \n var3[0] = pt\n \n tree.Fill()\n \n root_file.Write()\n \n return fname", "title": "" }, { "docid": "8ac82881169f54d332e735022129ed0b", "score": "0.5209905", "text": "def zbdg_write (trackI, subdir, fileprefix, d, log=None, single=False):\r\n if not log:\r\n log = lambda x: sys.stderr.write(x+\"\\n\")\r\n chrs = trackI.get_chr_names()\r\n os.makedirs (subdir)\r\n step = 10000000 + 2*d\r\n\r\n if single:\r\n log(\"write to a bedGraph file\")\r\n f = os.path.join(subdir,fileprefix+\"_all\"+\".bdg\")\r\n bdgfhd = open(f,\"w\")\r\n bdgfhd.write(\"track type=bedGraph name=\\\"%s_all\\\" description=\\\"Extended tag pileup from MACS version %s\\\"\\n\" % (fileprefix.replace('_afterfiting',''), MACS_VERSION)) # data type line \r\n \r\n 
for chrom in chrs:\r\n if not single:\r\n f = os.path.join(subdir,fileprefix+\"_\"+chrom+\".bdg\")\r\n log(\"write to \"+f+\" for chromosome \"+chrom)\r\n bdgfhd = open(f,\"w\")\r\n bdgfhd.write(\"track type=bedGraph name=\\\"%s_%s\\\" description=\\\"Extended tag pileup from MACS version %s\\\"\\n\" % (fileprefix.replace('_afterfiting',''), chrom, MACS_VERSION)) # data type line\r\n else:\r\n log(\"write data for chromosome \"+chrom)\r\n \r\n tags = trackI.get_locations_by_chr(chrom)[0]\r\n l = len(tags)\r\n window_counts = array(BYTE4,[0]*step)\r\n startp = -1*d\r\n endp = startp+step\r\n index_tag = 0\r\n\t\t\r\n while index_tag<l:\r\n s = tags[index_tag]-d/2 # start of tag\r\n e = s+d # end of tag\r\n \r\n if e < endp:\r\n # project tag to window_counts line\r\n ps = s-startp # projection start\r\n pe = ps+d # projection end\r\n for i in xrange(ps,pe):\r\n window_counts[i] += 1\r\n index_tag += 1\r\n else:\r\n # write it to zbdg file then reset parameters\r\n # keep this tag for next window\r\n prev = window_counts[d]\r\n left = startp+d\r\n right = left+1\r\n for i in xrange(d+1,step-d):\r\n if window_counts[i] == prev:\r\n # same value, extend\r\n right += 1\r\n else:\r\n # diff value, close\r\n if prev != 0:\r\n bdgfhd.write(\"%s\\t%d\\t%d\\t%d\\n\" % (chrom,left,right,prev))\r\n prev = window_counts[i]\r\n left = right\r\n right = left + 1\r\n # last bin\r\n if prev != 0: \r\n bdgfhd.write(\"%s\\t%d\\t%d\\t%d\\n\" % (chrom,left,right,prev))\r\n \r\n # reset\r\n window_counts_next = array(BYTE4,[0]*step)\r\n # copy d values from the tail of previous window to next window\r\n for n,i in enumerate(xrange(step-2*d,step)): # debug\r\n window_counts_next[n] = window_counts[i]\r\n window_counts = window_counts_next\r\n startp = endp - 2*d\r\n endp = startp+step\r\n # last window\r\n prev = window_counts[d]\r\n left = startp+d\r\n right = left+1\r\n for i in xrange(d+1,step-d):\r\n if window_counts[i] == prev:\r\n # same value, exrend\r\n right += 1\r\n else:\r\n # diff value, close\r\n if prev != 0: \r\n bdgfhd.write(\"%s\\t%d\\t%d\\t%d\\n\" % (chrom,left,right,prev))\r\n prev = window_counts[i]\r\n left = right\r\n right = left + 1\r\n # last bin\r\n if prev != 0: \r\n bdgfhd.write(\"%s\\t%d\\t%d\\t%d\\n\" % (chrom,left,right,prev))\r\n \r\n if not single:\r\n bdgfhd.close()\r\n log(\"compress the bedGraph file using gzip...\")\r\n os.system(\"gzip \"+f)\r\n if single:\r\n bdgfhd.close()\r\n log(\"compress the bedGraph file using gzip...\")\r\n os.system(\"gzip \"+f)", "title": "" }, { "docid": "1dab6f383cab90128f8bd7bcb6fe85a9", "score": "0.5208878", "text": "def test_write_to_fil():\n\n a = bl.Waterfall(voyager_h5)\n a.write_to_fil('test_out.fil')", "title": "" }, { "docid": "f62c3da81532e0aed426dd05db7a512c", "score": "0.5204782", "text": "def generate_file(self, filename, amount):\n #check if filename is actually a fileobject\n if hasattr(filename, \"write\"):\n target = filename\n else:\n target = open(filename, 'w')\n g = self.generate()\n for x in range(amount):\n target.write(str(next(g)))\n if self.token == Tokenization.word or self.token == Tokenization.none:\n target.write(\" \")\n\n target.close()", "title": "" }, { "docid": "df0f3ec845f9cf315b74ccea66f0fbd6", "score": "0.5203191", "text": "def DisplayDepth():", "title": "" }, { "docid": "dbd730018466045833763fb94294da35", "score": "0.52022433", "text": "def depth_imwrite(img, filepath):\n print(\"depth_imwrite to {}\".format(filepath))\n img_scaled = ((img - np.min(img)) * 65535./(np.max(img) - 
np.min(img))).astype(np.uint16)\n cv2.imwrite(filepath + \".png\", img_scaled)", "title": "" }, { "docid": "9c0e2460bb8b6e25f95a944e13244eee", "score": "0.5200233", "text": "def generateSwitchFiles():\n \"\"\" generate switch files with the spicific number of rules\"\"\"\n\n rules = readRuleFile()\n ruleList = []\n switchRulesNumber = 50\n ruleNumber = 1\n switchCounter = 1\n print(\"number of rules:\", len(rules))\n for rule in rules:\n if ruleNumber < switchRulesNumber:\n ruleList.append(rule)\n ruleNumber = ruleNumber + 1\n else:\n ruleList.append(rule)\n filename = \"SW_\" + str(switchCounter) + \".txt\"\n print(\"filename is :\", filename)\n writeFile(filename, ruleList)\n ruleList.clear()\n switchCounter = switchCounter + 1\n ruleNumber = 1", "title": "" }, { "docid": "9d6731abf637a3b77c0d811cf90fe024", "score": "0.5198833", "text": "def _generateLogs(self, num):\n with open(os.path.join(self.tempdir, 'cbuildbot.log'), 'w') as f:\n f.write(str(num + 1))\n\n for i in range(1, num + 1):\n with open(os.path.join(self.tempdir, 'cbuildbot.log.' + str(i)),\n 'w') as f:\n f.write(str(i))", "title": "" }, { "docid": "b08f40646fae322fce515b15d342b322", "score": "0.5169701", "text": "def createFile(arrPath,name='MD5SUM.txt'):\n filetext = open(name, mode='w+')\n for name, filedir in arrPath:\n print(name, filedir, file=filetext)\n print(\"File MD5SUM Created\")", "title": "" }, { "docid": "8d2b7e74f13b8c50bd6e8cd23f049b82", "score": "0.516761", "text": "def create1GBIntFile():\n file_name = 'file_of_ints.txt'\n gb_bytes = 1073741824 # number of bytes (per st_size) in a GB\n gb_goal = 1 # number of GB to generate\n\n if not os.path.isfile(file_name): # only create the file once\n\n file_conn = open(file_name, 'w')\n\n file_size = os.stat(file_name).st_size\n file_gb = file_size / gb_bytes\n while file_gb < gb_goal:\n\n file_size = os.stat(file_name).st_size\n file_gb = file_size / gb_bytes\n\n file_conn.write(\"{}\\n\".format(random.randint(0, 1000000000)))", "title": "" }, { "docid": "ab96d6943b7abaca7e7215821952ec66", "score": "0.51675045", "text": "def GetDepth(self):", "title": "" }, { "docid": "ab96d6943b7abaca7e7215821952ec66", "score": "0.51675045", "text": "def GetDepth(self):", "title": "" }, { "docid": "ab96d6943b7abaca7e7215821952ec66", "score": "0.51675045", "text": "def GetDepth(self):", "title": "" }, { "docid": "ab96d6943b7abaca7e7215821952ec66", "score": "0.51675045", "text": "def GetDepth(self):", "title": "" }, { "docid": "8c6d76e4e7d59422051fc101a7f89783", "score": "0.5159907", "text": "def save_description_file(output_file_path, count):\n with open(output_file_path, 'w') as f:\n f.write(str(count))", "title": "" }, { "docid": "5773d529493bf54e0c478a86682be961", "score": "0.5154662", "text": "def produce_and_write_out_traces(self):\n # Determine the path to write out to (using global variables specified at the top of this file)\n path_to_write_out_to = \"{dir}/{filename}\".format(\n dir=DIRECTORY_TO_WRITE_OUT_TO, filename=TRACES_OUT_FILENAME\n )\n self.out_file = open(path_to_write_out_to, 'w')\n one_percent_increment = TARGETED_NUMBER_OF_TERMINAL_DERIVATIONS / 100\n for i in xrange(TARGETED_NUMBER_OF_TERMINAL_DERIVATIONS):\n if i > 0 and i % one_percent_increment == 0:\n print \"\\t{}% complete\".format(i/one_percent_increment)\n for symbol in self.top_level_symbols:\n symbol.probabilistically_expand(write_out=True, produce_trace=True)\n self.out_file.close()\n print \"100% complete\"", "title": "" }, { "docid": "3f6c989939edb8c91e3aca2899ded9e8", "score": "0.5153557", 
"text": "def create_file(path):\n with open(path, \"w\") as test_file:\n test_file.write(\"test\")", "title": "" }, { "docid": "5bada6c9fa94b2577d40ca5571ee5af0", "score": "0.5107401", "text": "def create_files():\n pop_size = 32\n\n # create the random number generators\n rng_weights = p.NumpyRNG(seed=369121518)\n rng_connections = p.NumpyRNG(seed=453276549)\n\n min_weight = 0.1\n max_weight = 30.0\n weight_scale_ex_in = 0.25\n weight_dependence_e = \\\n p.RandomDistribution(distribution='uniform',\n parameters=[min_weight, max_weight],\n rng=rng_weights)\n weight_dependence_i = \\\n p.RandomDistribution(distribution='uniform',\n parameters=[-(max_weight * weight_scale_ex_in),\n -min_weight], rng=rng_weights)\n connection_dependence_i_p2 = \\\n p.RandomDistribution(distribution='uniform', parameters=[0, 1],\n rng=rng_connections).next(pop_size * pop_size)\n connection_dependence_p1_p2 = \\\n p.RandomDistribution(distribution='uniform', parameters=[0, 1],\n rng=rng_connections).next(pop_size * pop_size)\n\n for form_I_p2 in range(4):\n i_p2_file = open('List_I_p2_form_%d.txt' % (form_I_p2 + 1,), 'w')\n list_i_p2 = [(i, j, weight_dependence_i.next(), 4.0)\n for i in range(pop_size) for j in range(pop_size)\n if connection_dependence_i_p2[pop_size * i + j] > 0.5]\n if form_I_p2 == 0:\n i_p2_file.write(\"%s\" % list_i_p2)\n elif form_I_p2 == 1:\n i_p2_file.writelines([\"%s\\n\" % (conn,) for conn in list_i_p2])\n elif form_I_p2 == 2:\n i_p2_file.writelines([\"%s %s %.17f %.17f\\n\" %\n (conn[0], conn[1], conn[2], conn[3])\n for conn in list_i_p2])\n elif form_I_p2 == 3:\n i_p2_file.writelines([\"%s, %s, %.17f, %.17f\\n\" %\n (conn[0], conn[1], conn[2], conn[3])\n for conn in list_i_p2])\n i_p2_file.close()\n for form_p1_p2 in range(4):\n p1_p2_file = open('List_p1_p2_form_%d.txt' % (form_p1_p2 + 1,), 'w')\n list_p1_p2 = [(i, j, weight_dependence_e.next(), 4.0)\n for i in range(pop_size) for j in range(pop_size)\n if connection_dependence_p1_p2[pop_size*i+j] > 0.5]\n if form_p1_p2 == 0:\n p1_p2_file.write(\"%s\" % list_p1_p2)\n elif form_p1_p2 == 1:\n p1_p2_file.writelines([\"%s\\n\" % (conn,) for conn in list_p1_p2])\n elif form_p1_p2 == 2:\n p1_p2_file.writelines([\"%s %s %.17f %.17f\\n\" %\n (conn[0], conn[1], conn[2], conn[3])\n for conn in list_p1_p2])\n elif form_p1_p2 == 3:\n p1_p2_file.writelines([\"%s, %s, %.17f, %.17f\\n\" %\n (conn[0], conn[1], conn[2], conn[3])\n for conn in list_p1_p2])\n p1_p2_file.close()", "title": "" }, { "docid": "48933304cd4a65eaf5640fc3688b3b61", "score": "0.5101875", "text": "def test_create_big(self):\n FILENAME = str(uuid.uuid1())\n write_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=1000000*2))\n with open('mountdir/{}'.format(FILENAME), 'w+') as f:\n f.write(write_string)\n #f.seek(0, 0)\n #read_string = f.read()\n #self.assertEqual(read_string, write_string)\n # re-open and read\n with open('mountdir/{}'.format(FILENAME), 'r+') as f:\n #f.seek(len(write_string), 0)\n read_string = f.read()\n #print(\"written string: {}\\t read string: {}\".format(write_string, read_string))\n self.assertEqual(read_string, write_string)", "title": "" }, { "docid": "2bbb779da79c503ce902f89ff9f3bfd4", "score": "0.5093689", "text": "def make_count_file(self, names=None):\n self.get_counts()\n if self.outfile is not None:\n self.save(names)\n return self.counts", "title": "" }, { "docid": "9ebde7e8e5f5209d2e667b5b5ac7be25", "score": "0.5069963", "text": "def create(path,initial_nodesize=10):\n if exists(path):\n raise FileExistsError(path)\n\n 
treefile = open(path,\"w+b\")\n player = PLayer(treefile,None,initial_nodesize,False)\n player.close()\n treefile.close()", "title": "" }, { "docid": "e2654f106c4aee9cca47d53cf6d7634c", "score": "0.50664955", "text": "def write(self, filename):\n code_a = ord(\"A\")\n last_modified = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n\n with codecs.open(filename, mode=\"w\", encoding=\"ISO-8859-1\") as fp:\n\n for index_tier, tier in enumerate(self):\n\n # fix information about this tier/level\n point = tier.is_point()\n level = \"LEVEL_{:s}{:s}\".format(\n chr(code_a + int(index_tier / 26)),\n chr(code_a + int(index_tier % 26)))\n\n # Write information about the tier\n fp.write(\"[DSC_{:s}]\\n\".format(level))\n fp.write(\"DSC_LEVEL_NAME=\\\"{:s}\\\"\\n\"\n \"\".format(tier.get_name()))\n fp.write(\"DSC_LEVEL_SOFTWARE={:s} {:s}\\n\"\n \"\".format(sg.__name__, sg.__version__))\n fp.write(\"DSC_LEVEL_LASTMODIF_DATE={:s}\\n\"\n \"\".format(last_modified))\n\n # Write annotations\n fp.write(\"[LBL_{:s}]\\n\".format(level))\n for index_ann, ann in enumerate(tier):\n\n text = ann.serialize_labels(separator=\" \",\n empty=\"\",\n alt=True)\n fp.write(\"LBL_{:s}_{:06d}=\\\"{:s}\\\"\"\n \"\".format(level, index_ann, text))\n\n if point:\n # Phonedit supports degenerated intervals\n # (instead of points)\n b = sppasMRK.format_point(\n ann.get_lowest_localization().get_midpoint())\n e = b\n else:\n b = sppasMRK.format_point(\n ann.get_lowest_localization().get_midpoint())\n e = sppasMRK.format_point(\n ann.get_highest_localization().get_midpoint())\n fp.write(\" {:s} {:s}\\n\".format(str(b), str(e)))\n\n fp.close()", "title": "" }, { "docid": "d44384cf252d3dc4c3b27bcf05f50166", "score": "0.5056024", "text": "def to_file(self, fname, precision=16, comment_lines = [], \n foldmaskinfo=True):\n # Open the file object.\n if fname.endswith('.gz'):\n fid = gzip.open(fname, 'wb')\n else:\n fid = open(fname, 'w')\n\n # Write comments\n for line in comment_lines:\n fid.write('# ')\n fid.write(line.strip())\n fid.write('\\n')\n\n # Write out the shape of the fs\n for elem in self.data.shape:\n fid.write('%i ' % elem)\n\n if foldmaskinfo:\n if not self.folded:\n fid.write('unfolded')\n else:\n fid.write('folded')\n if self.pop_ids is not None:\n for label in self.pop_ids:\n fid.write(' \"%s\"' % label)\n\n fid.write('\\n')\n\n # Write the data to the file. 
The obnoxious ravel call is to\n # ensure compatibility with old version that used self.data.tofile.\n np.savetxt(fid, [self.data.ravel()], delimiter=' ',\n fmt='%%.%ig' % precision)\n\n if foldmaskinfo:\n # Write the mask to the file\n np.savetxt(fid, [np.asarray(self.mask, int).ravel()],\n delimiter=' ', fmt='%d')\n\n fid.close()", "title": "" }, { "docid": "f75b6b14d7c89d941831eb4aa5d66565", "score": "0.5050335", "text": "def get_object_depth_write(object_id: int) -> int:\n pass", "title": "" }, { "docid": "d99c4233ae43efde7d4d0e87382849e2", "score": "0.5043191", "text": "def NewLevel(self, depth, datapoint):\n oneLevel = \" |\";\n depthLevel = oneLevel * depth;\n\n startPositionOfNumbers = 120;\n\n # add name and spaces to result\n result = \"\";\n if depth > 0:\n result += depthLevel + \"__ \";\n result += datapoint[\"name\"] + (\" \" * (startPositionOfNumbers - len(depthLevel) - len(datapoint[\"name\"]) - len(\"__ \")))\n\n # Percent of type\n percentOfType = 0;\n if datapoint[\"total\"] > 0:\n percentOfType = 100 * datapoint[\"total-of-type\"] / float(datapoint[\"total\"]);\n percentOfType = round(percentOfType, 1)\n\n\n # Add total, total-of-type, and percent-of-type\n result += str(datapoint[\"total\"]) + \"\\t\" + str(datapoint[\"total-of-type\"]) + \"\\t\" + str(percentOfType);\n\n # add info for classes A and B\n if self.group == 'A' or self.group == 'B':\n\n # sum of percentage of fgio\n percentOfFGIOInTraining = 0;\n percentOfFGIOInPrediction = 0;\n if datapoint['total-of-type'] > 0:\n percentOfFGIOInTraining = round(float(datapoint[\"sum-of-percent-of-fgio-in-training\"]) / float(datapoint[\"total-of-type\"]) , 1);\n percentOfFGIOInPrediction = round(float(datapoint[\"sum-of-percent-of-fgio-in-prediction\"]) / float(datapoint[\"total-of-type\"]) , 1);\n\n # result += \"\\t\" + str(percentOfFGIOInTraining);\n result += \"\\t\" + str(percentOfFGIOInPrediction);\n\n # percentage of leaderless in fgio\n if self.group == \"B\":\n percentOfLeaderlessInFGIOInTraining = 0;\n if float(datapoint['total-of-type']) > 0:\n percentOfLeaderlessInFGIOInTraining = round(float(datapoint[\"sum-of-percent-of-leaderless-in-fgio-in-training\"]) / float(datapoint['total-of-type']), 1);\n\n # result += \"\\t\" + str(percentOfLeaderlessInFGIOInTraining);\n\n percentOfLeaderlessInFGIOInPrediction = 0;\n if float(datapoint[\"total-of-type\"]) > 0:\n percentOfLeaderlessInFGIOInPrediction = round(float(datapoint[\"sum-of-percent-of-leaderless-in-fgio-in-prediction\"]) / float(datapoint[\"total-of-type\"]), 1);\n\n result += \"\\t\" + str(percentOfLeaderlessInFGIOInPrediction);\n\n\n # percentage of leaderless in all genes\n if self.group == \"B\":\n percentOfLeaderlessInAllGenesInTraining = 0;\n if float(datapoint['total-of-type']) > 0:\n percentOfLeaderlessInAllGenesInTraining = round(float(datapoint[\"sum-of-percent-of-leaderless-in-all-genes-in-training\"]) / float(datapoint['total-of-type']), 1);\n\n # result += \"\\t\" + str(percentOfLeaderlessInAllGenesInTraining);\n\n percentOfLeaderlessInAllGenesInPrediction = 0;\n if float(datapoint[\"total-of-type\"]) > 0:\n percentOfLeaderlessInAllGenesInPrediction = round(float(datapoint[\"sum-of-percent-of-leaderless-in-all-genes-in-prediction\"]) / float(datapoint[\"total-of-type\"]), 1);\n\n result += \"\\t\" + str(percentOfLeaderlessInAllGenesInPrediction);\n\n\n # result += \"\\t\" + str(round(average,1));\n\n if \"consensus-rbs\" in datapoint:\n result += \"\\t\" + datapoint[\"consensus-rbs\"];\n\n if \"consensus-promoter\" in datapoint:\n result += 
\"\\t\" + datapoint[\"consensus-promoter\"];\n\n return result;", "title": "" }, { "docid": "3d1f5b696038f7c1ac8fcaab215ec47f", "score": "0.50359046", "text": "def _create_regular_file(self, nms, path, size):\r\n block_size_mb = 1\r\n block_count = size * units.GiB / (block_size_mb * units.MiB)\r\n\r\n LOG.info(_('Creating regular file: %s.'\r\n 'This may take some time.') % path)\r\n\r\n nms.appliance.execute(\r\n 'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {\r\n 'path': path,\r\n 'bs': block_size_mb,\r\n 'count': block_count\r\n }\r\n )\r\n\r\n LOG.info(_('Regular file: %s created.') % path)", "title": "" }, { "docid": "5b0730ed975d8064a4462f89a1f7f564", "score": "0.5032028", "text": "def dumpCorrectionLogicToFile(whichADC,fileName=\"correction_logic.txt\"):\n file = open(fileName, 'w+')\n print \"dumping...\"\n for stage in range(0,8):\n for weight in (0,1):\n value = readValueFromCorrectionLogic(whichADC,stage,weight)\n print >> file, hex(value)\n if (weight == 0):\n whichWeight = \"w0\"\n else:\n whichWeight = \"w2\"\n print \"ADC\",whichADC,\" stage \",stage,' ',whichWeight,' ',hex(value)", "title": "" }, { "docid": "33ee730e82a742a6258c70e14e72015b", "score": "0.5026949", "text": "def output(path, output_file):\n\n lines = []\n states = {\"A\": 0, \"B\": 0}\n current_line = \"{:>10}\".format(1)\n\n for i in range(1, len(path)):\n\n if path[i - 1] != path[i]:\n\n current_line += \"{:>10}\".format(i-1) + \" state \" + path[i-1]\n\n lines.append(current_line)\n\n current_line = \"{:>10}\".format(i)\n\n states[path[i-1]] += 1\n\n current_line += \"{:>10}\".format(len(path)) + \" state \" + path[i - 1]\n\n lines.append(current_line)\n\n states[path[i-1]] += 1\n\n file_name = \"problem_2_output_\" + str(output_file) + \".txt\"\n\n text_file = open(file_name, \"w\")\n\n first_line = \"State A: {}, State B: {}\\n\".format(states[\"A\"], states[\"B\"])\n\n text_file.write(first_line)\n\n for line in lines:\n\n text_file.write(line + \"\\n\")", "title": "" }, { "docid": "3037e75acbdbb904fdf53acfb61dce02", "score": "0.5025668", "text": "def count_furn():\n furn_folders = [\n d for d in os.scandir(\"env/models/assets/objects/complete\") if d.is_dir()\n ]\n furn_folders += [\n d for d in os.scandir(\"env/models/assets/objects/incomplete\") if d.is_dir()\n ]\n furn_count = defaultdict(list)\n total = 0\n total_parts = 0\n for folder in furn_folders:\n for f in os.scandir(folder.path):\n if f.is_file() and f.path.endswith(\"xml\"):\n tree = ET.parse(f.path)\n wb = tree.getroot().find(\"worldbody\")\n furn_count[len(wb)].append(f.name)\n total += 1\n total_parts += len(wb)\n\n with open(\"count.txt\", \"w\") as f:\n f.write(f\"Total furniture: {total}\\n\")\n f.write(f\"Total parts: {total_parts}\\n\")\n f.write(\"-\" * 80 + \"\\n\")\n for count in sorted(furn_count.keys()):\n f.write(f\"{count}-part furnitures: {len(furn_count[count])}\\n\")\n for furn in furn_count[count]:\n f.write(furn + \"\\n\")\n f.write(\"-\" * 80 + \"\\n\")", "title": "" }, { "docid": "c19692659e228d0b51f897c1d2a73aa6", "score": "0.5025213", "text": "def __write(self):\n if not self.oobject:\n raise ValueError(\"No output filepath specified\")\n self.top.save()\n def save_tree(obj):\n \"\"\"recursive save\"\"\"\n obj.save()\n for child in obj.children.values():\n save_tree(child)\n child.close()\n del child\n obj.close()\n del obj\n for child in self.top.children.values():\n save_tree(child)\n self.top.close()", "title": "" }, { "docid": "8b2b964cb8612a2fb85b2f3d7a012907", "score": "0.50245446", "text": "def 
compress_depth_file( filename, outfile=None, delete_file=True):\n\n if ( outfile is None):\n outfile = \"{}.gz\".format( filename )\n\n pysam.tabix_compress( filename, outfile , force=True )\n\n if ( delete_file ):\n os.unlink( filename )\n\n return outfile", "title": "" }, { "docid": "d15325b7008a918ae6451d776d61d648", "score": "0.50156206", "text": "def make(name, path):\n with PFBReader(path) as reader:\n json.dump(reader.make_empty_record(name), sys.stdout)\n sys.stdout.write(\"\\n\")", "title": "" }, { "docid": "942bcd9e770cd5184fbc231a3ac04e30", "score": "0.5015201", "text": "def make_graph(tree, file_name):\n\n def make_node(n_id, label):\n f.write(\"{} [label=\\\"{}\\\"]\\n\".format(n_id, label))\n\n def make_edge(n1, n2):\n f.write(\"{} -> {}\\n\".format(n1, n2))\n\n def make_graphviz(node):\n make_node(id(node), node.value)\n for n in node.children:\n make_edge(id(node), id(n))\n make_graphviz(n)\n\n with open(file_name, 'w') as f:\n f.write(\"digraph Tree {\\n\")\n f.write(\"node [color=black, ordering=\\\"out\\\"];\\n\")\n if tree is not None:\n if tree.root is not None:\n make_graphviz(tree.root)\n f.write(\"}\\n\")", "title": "" }, { "docid": "d6043ab5a9d7954d137c4eb5325444f1", "score": "0.5011831", "text": "def save(self):\n\n def _write(attempts):\n \"\"\"A helper function to recursively attempt to write to an appropriately named and numbered outfile.\"\"\"\n try:\n with open(self.name + \"_\" + self._class + \"_\" + str(attempts) + \".txt\", \"x\") as outfile:\n outfile.write(self.name + \"\\n\")\n outfile.write(self._class + \"\\n\")\n outfile.write(str(self.stamina_max) + \"\\n\")\n outfile.write(str(self.recharge) + \"\\n\")\n outfile.write(str(self.strength) + \"\\n\")\n outfile.write(str(self.health) + \"\\n\")\n outfile.write(str(self.stamina) + \"\\n\")\n outfile.write(str(self.exp) + \"\\n\")\n outfile.write(str(self.level) + \"\\n\")\n for b in self.backpack:\n outfile.write(str(b) + \"\\n\")\n for e in self.effects:\n outfile.write(str(e) + \"\\n\")\n except FileExistsError:\n choice = input(\"Character of that class by that name already exists. [O]verwrite or create [N]ew file? 
\")\n if choice == \"O\":\n with open(self.name + \"_\" + self._class + \"_\" + str(attempts) + \".txt\", \"w\") as outfile:\n outfile.write(self.name + \"\\n\")\n outfile.write(self._class + \"\\n\")\n outfile.write(str(self.stamina_max) + \"\\n\")\n outfile.write(str(self.recharge) + \"\\n\")\n outfile.write(str(self.strength) + \"\\n\")\n outfile.write(str(self.health) + \"\\n\")\n outfile.write(str(self.stamina) + \"\\n\")\n outfile.write(str(self.exp) + \"\\n\")\n outfile.write(str(self.level) + \"\\n\")\n for b in self.backpack:\n outfile.write(str(b) + \"\\n\")\n for e in self.effects:\n outfile.write(str(e) + \"\\n\")\n if choice == \"N\":\n _write(attempts+1)\n \n #Attempt to write to a file with a simple name.\n try:\n with open(self.name + \"_\" + self._class + \".txt\", \"x\") as outfile:\n outfile.write(self.name + \"\\n\")\n outfile.write(self._class + \"\\n\")\n outfile.write(str(self.stamina_max) + \"\\n\")\n outfile.write(str(self.recharge) + \"\\n\")\n outfile.write(str(self.strength) + \"\\n\")\n outfile.write(str(self.health) + \"\\n\")\n outfile.write(str(self.stamina) + \"\\n\")\n outfile.write(str(self.exp) + \"\\n\")\n outfile.write(str(self.level) + \"\\n\")\n for b in self.backpack:\n outfile.write(str(b) + \"\\n\")\n for e in self.effects:\n outfile.write(str(e) + \"\\n\")\n #If simple name already exists, recursively choose to overwrite or add a unique number to the end\n except FileExistsError:\n choice = input(\"Character of that class by that name already exists. [O]verwrite or create [N]ew file? \")\n if choice == \"O\":\n with open(self.name + \"_\" + self._class + \".txt\", \"w\") as outfile:\n outfile.write(self.name + \"\\n\")\n outfile.write(self._class + \"\\n\")\n outfile.write(str(self.stamina_max) + \"\\n\")\n outfile.write(str(self.recharge) + \"\\n\")\n outfile.write(str(self.strength) + \"\\n\")\n outfile.write(str(self.health) + \"\\n\")\n outfile.write(str(self.stamina) + \"\\n\")\n outfile.write(str(self.exp) + \"\\n\")\n outfile.write(str(self.level) + \"\\n\")\n for b in self.backpack:\n outfile.write(str(b) + \"\\n\")\n for e in self.effects:\n outfile.write(str(e) + \"\\n\")\n if choice == \"N\":\n _write(1)", "title": "" }, { "docid": "c6b98f8e3aab7cf87dd7cfcbb6249d21", "score": "0.5009134", "text": "def getDepth(self):\n return len(self._getFilepaths())", "title": "" }, { "docid": "1aa5bd2fef6b6c4e606b3ef84c6b5d79", "score": "0.5002546", "text": "def write_heade(number):", "title": "" }, { "docid": "3de7429776d7d3841cb088c9a39e22f3", "score": "0.4998305", "text": "def append_debug_level_to_file(path_name):\n\tdef write_to_file(file_name):\n\t\tlines = []\n\t\tbase_name = os.path.basename(file_name)\n\t\twith open(file_name, \"r\", encoding='utf-8') as f:\n\t\t\tfor i, line in enumerate(f):\n\t\t\t\tdebug_level = WeightedChoice([\"V\", \"D\", \"I\", \"W\", \"E\", \"F\"], [0.5, 0.35, 0.06, 0.04, 0.02, 0.03])\n\t\t\t\tlines.append(f'{debug_level.run()} {base_name}: {line}')\n\t\twith open(file_name, \"w\", encoding='utf-8') as f:\n\t\t\tfor line in lines:\n\t\t\t\tf.write(line)\n\n\tif os.path.isfile(path_name):\n\t\twrite_to_file(path_name)\n\telse:\n\t\tfiles = glob.glob(path_name + '/**/*.txt', recursive=True)\n\t\tfor file in files:\n\t\t\tif os.path.isfile(file):\n\t\t\t\twrite_to_file(file)", "title": "" }, { "docid": "88f7a457a52a07376a400a48bdd6fb9f", "score": "0.49954858", "text": "def save_depth(batch, output, args, dataset, save):\n # If there is no save folder, don't save\n if save.folder is '':\n return\n\n # If we want to 
save\n if save.depth.rgb or save.depth.viz or save.depth.npz or save.depth.png:\n # Retrieve useful tensors\n rgb = batch['rgb']\n pred_inv_depth = output['inv_depth']\n\n # Prepare path strings\n filename = batch['filename']\n dataset_idx = 0 if len(args) == 1 else args[1]\n save_path = os.path.join(save.folder, 'depth',\n prepare_dataset_prefix(dataset, dataset_idx),\n os.path.basename(save.pretrained).split('.')[0])\n # Create folder\n os.makedirs(save_path, exist_ok=True)\n\n # For each image in the batch\n length = rgb.shape[0]\n for i in range(length):\n # Save numpy depth maps\n if save.depth.npz:\n write_depth('{}/{}_depth.npz'.format(save_path, filename[i]),\n depth=inv2depth(pred_inv_depth[i]),\n intrinsics=batch['intrinsics'][i] if 'intrinsics' in batch else None)\n # Save png depth maps\n if save.depth.png:\n write_depth('{}/{}_depth.png'.format(save_path, filename[i]),\n depth=inv2depth(pred_inv_depth[i]))\n # Save rgb images\n if save.depth.rgb:\n rgb_i = rgb[i].permute(1, 2, 0).detach().cpu().numpy() * 255\n write_image('{}/{}_rgb.png'.format(save_path, filename[i]), rgb_i)\n # Save inverse depth visualizations\n if save.depth.viz:\n viz_i = viz_inv_depth(pred_inv_depth[i]) * 255\n write_image('{}/{}_viz.png'.format(save_path, filename[i]), viz_i)", "title": "" }, { "docid": "b88451888dc8fdbfe56dab3c2122467a", "score": "0.49937168", "text": "def create_with_path():\n open(\"/Users/David/foundations-2-python/sea-c34-python/students/DavidShoubridge/session03/new.txt\", \"wb\")", "title": "" }, { "docid": "4c3e3c10ac5a837e42c1b3e61a06c09d", "score": "0.49910554", "text": "def make_test_file( filename, file_size = 0):\r\n import random\r\n\r\n if (file_size == 0):\r\n file_size = random.randint(100, 100000)\r\n \r\n len = 0\r\n fp = open(filename, 'wb')\r\n while len < file_size:\r\n fp.write(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') )\r\n len += 1\r\n fp.flush()\r\n fp.close( ) \r\n return", "title": "" }, { "docid": "5cd567c191eec3ef5c7029cb11ae7c95", "score": "0.4985659", "text": "def set_depth(base_path):\n scene = bpy.context.scene\n nodes = scene.node_tree.nodes\n links = bpy.context.scene.node_tree.links\n\n map_value_node = bpy.context.scene.node_tree.nodes.new(\"CompositorNodeMapValue\")\n map_value_node.size[0] = 1 / 20\n links.new(nodes[\"Render Layers\"].outputs[\"Depth\"], map_value_node.inputs[0])\n\n file_output_node = bpy.context.scene.node_tree.nodes.new('CompositorNodeOutputFile')\n file_output_node.base_path = base_path\n # use uint 16 for higher accuracy\n file_output_node.format.color_depth = \"16\"\n links.new(map_value_node.outputs[0], file_output_node.inputs[0])\n\n return file_output_node", "title": "" }, { "docid": "d2e6b10789c169ad3e689125a3847dc4", "score": "0.49854177", "text": "def create_file(path: str, dtype: str):\n profile = {\n \"driver\": \"GTiff\",\n \"dtype\": dtype,\n \"count\": 1,\n \"crs\": CRS.from_wkt(wkt),\n \"transform\": Affine(30.0, 0.0, -2493045.0, 0.0, -30.0, 3310005.0),\n \"height\": SIZE,\n \"width\": SIZE,\n \"compress\": \"lzw\",\n \"predictor\": 2,\n }\n\n allowed_values = [0, 11, 12, 21, 22, 23, 24, 31, 41, 42, 43, 52, 71, 81, 82, 90, 95]\n\n Z = np.random.choice(allowed_values, size=(SIZE, SIZE))\n\n with rasterio.open(path, \"w\", **profile) as src:\n src.write(Z, 1)", "title": "" }, { "docid": "4f96d057ebaab25ebb20196dc382ce2c", "score": "0.49825984", "text": "def gen_numbers_to_file(file_name : str):\n with open(file_name, \"w\") as f:\n for _ in range(0, 100):\n f.write(str(random.randint(0,1000)) + 
\"\\n\")\n f.close()", "title": "" }, { "docid": "539eef944a146dd4f0a8615f1a932cbf", "score": "0.49817994", "text": "def create_fold_file(filename, d, append=False, frame_class=\"foldedForm\"):\n if append:\n with open(filename) as f:\n data = json.load(f)\n # need to offset the indices of the new vertices\n num_existing_vertices = len(data[\"vertices_coords\"])\n num_existing_edges = len(data[\"edges_vertices\"])\n for e in range(len(d[\"edges_vertices\"])):\n for i in range(2):\n d[\"edges_vertices\"][e][i] += num_existing_vertices\n for f in range(len(d[\"faces_vertices\"])):\n for i in range(4):\n d[\"faces_vertices\"][f][i] += num_existing_vertices\n data[\"vertices_coords\"].extend(d[\"vertices_coords\"])\n data[\"edges_vertices\"].extend(d[\"edges_vertices\"])\n data[\"faces_vertices\"].extend(d[\"faces_vertices\"])\n data[\"edges_assignment\"].extend(\"B\" for i in range(num_existing_edges))\n\n else:\n # start with default stuff\n data = {\"file_spec\":1,\n \"file_creator\": \"Python script\",\n \"file_author\": \"pwgreene\",\n \"frame_title\": filename,\n \"file_classes\": [\"singleModel\"],\n \"frame_classes\": [frame_class],\n \"frame_attributes\": [\"3D\"],\n }\n data.update(d)\n # we're not dealing with foldings, so edge assignments don't matter\n if \"edges_vertices\" in d:\n num_edges = len(d[\"edges_vertices\"])\n edges_assignment = {\"edges_assignment\": [\"B\" for x in range(num_edges)]}\n data.update(edges_assignment)\n\n with open(filename, 'w') as f:\n json.dump(data, f, ensure_ascii=True, indent=2)", "title": "" }, { "docid": "8e00dfd0fd5d82b98da74364237c8929", "score": "0.4980143", "text": "def _writeToFile(self, fname, iter, isave, thin):\n\n self._chainfile = open(fname, 'a+')\n for jj in range((iter-isave), iter, thin):\n ind = int(jj/thin)\n self._chainfile.write('%.17e\\t %.17e\\t %.17e\\t'%(self._lnprob[ind], self._lnlike[ind],\\\n self.naccepted/iter))\n self._chainfile.write('\\t'.join([\"%.17e\"%(self._chain[ind,kk]) \\\n for kk in range(self.ndim)]))\n self._chainfile.write('\\n')\n self._chainfile.close()", "title": "" }, { "docid": "96ad7aa5a2980876b5f3f372eddf213e", "score": "0.49775776", "text": "def getDepth(self):\n return 1", "title": "" }, { "docid": "e5058b6b8c325d722d96c9f8b6e9e8c7", "score": "0.49768692", "text": "def create_death_counter_file(character, deaths, character_class=\"Survivor\"):\n file_exists = Path.is_file(Path(f\"{deathcounter_path}\\\\{character}.txt\"))\n if not file_exists:\n with open(f\"{deathcounter_path}\\\\{character}.txt\", \"w\") as dcf:\n dcf.writelines(\n \"\\n\".join(\n [\n character.split(\"-\")[0].title(),\n character_class.title(),\n str(deaths),\n ]\n )\n )\n\n # Copy new tracked character to the standard file displayed on twitch.\n character_file = f\"{deathcounter_path}\\\\{character}.txt\"\n display_file = f\"{deathcounter_path}\\current_tracked_character.txt\"\n shutil.copyfile(character_file, display_file)", "title": "" }, { "docid": "8d2e6ab5d789611d2288f339206119b3", "score": "0.49737346", "text": "def write_adder_tree(file_ptr, signal_list, exp_prime, number_pp):\n for i in range(1, len(signal_list)):\n j = 0\n for pp in signal_list[i]:\n if j == len(signal_list[i-1])-1:\n file_ptr.write(\"\\t\\t\"+pp+\" <= std_ulogic_vector(resize(unsigned(\"+signal_list[i-1][j]+\"), \"+pp+\"'length));\\n\")\n else:\n file_ptr.write(\"\\t\\t\"+pp+\" <= std_ulogic_vector(resize(unsigned(\"+signal_list[i-1][j]+\"), \"+pp+\"'length) + resize(unsigned(\"+signal_list[i-1][j+1]+\"), \"+pp+\"'length));\\n\")\n j += 2\n 
file_ptr.write(\"\\t\\t c_red <= std_ulogic_vector(resize(unsigned(\"+pp+\"(EXP_PRIME+\"+str(len(signal_list)-2)+\" downto EXP_PRIME)), c_red'length) + resize(unsigned(\"+pp+\"(EXP_PRIME-1 downto 0)), c_red'length));\\n\")\n file_ptr.write(\"\\t\\t c_o <= std_ulogic_vector(resize(unsigned(c_red\"\"(EXP_PRIME downto EXP_PRIME)), c_o'length) + resize(unsigned(c_red(EXP_PRIME-1 downto 0)), c_o'length));\\n\\n\")", "title": "" }, { "docid": "1f6a56a2ed076b57333c88883c4d7034", "score": "0.49723423", "text": "def _randomize_branch_lengths(amount, tree_type):\n\n f = open(\"tree_types/random.tre\", \"w\")\n f = open(\"tree_types/random.tre\", \"a\")\n\n amount_counter = 0\n while amount_counter < amount:\n #loop over all tree structures\n for tree_structure in tree_struct[tree_type]:\n\n #protect from adding too many trees\n if amount_counter < amount:\n amount_counter += 1\n\n rand_tree_struct = tree_structure\n #loop over all randomly chosen branch lengths\n for _ in range(6):\n branch_len = str(max(random.gauss(0.2, 0.2), 0.01))\n rand_tree_struct = rand_tree_struct.replace(\"_\", branch_len, 1)\n\n f.write(rand_tree_struct + \"\\n\")\n\n else:\n break #exits for loop and will also end while loop\n\n assert amount_counter == amount, \"Not correct number of trees\"\n\n f.close()", "title": "" }, { "docid": "21228a73c70b94fd329be91ba501b007", "score": "0.49721837", "text": "def create_solution_file(best_board, path, run_number):\n\t# Open file, create directories if needed\n\topened_file = open_file(path)\n\n\t# Write x,y,rotation for each shape\n\tfor solution in best_board.get_info():\n\t\topened_file.write(solution + '\\n')\n\n\t# Close file\n\topened_file.close()", "title": "" }, { "docid": "4b3da90f8c4e7cac10d7e03d52622f11", "score": "0.49714807", "text": "def create_file(self) -> None:\n open(self.folder + self.name, 'w').close()", "title": "" }, { "docid": "7df8e1c36d5e491998f5c6e6b9050668", "score": "0.49710217", "text": "def create_journal(self, filename, path):\n with open(\"{}/{}\".format(path, filename), 'w') as f:\n f.write(\"{} - {}\\n\".format(self.name, self.abbreviation))\n for page in self.pages:\n f.write(\"Page {}\\n\".format(self.pages.index(page) + 1))\n for trans in page:\n f.write(\"{}\\n\".format(str(trans)))", "title": "" }, { "docid": "8985191673a09260912009079950e3e7", "score": "0.49676597", "text": "def create_pcap_file(self):\n\n fragments = self.create_fragments()\n self.write_shuffled_pcap(fragments)", "title": "" }, { "docid": "8fb7aa51d391668f2ca9ab226600c8c2", "score": "0.49658108", "text": "def depth(self):\n return 0", "title": "" }, { "docid": "bc0944d5235f6039d3085c3633a6749b", "score": "0.49654287", "text": "def __init__(self, file):\n self.file = open(file, 'w')\n self.counter = 0", "title": "" }, { "docid": "898c85d8015b785ac1441655abeacb5c", "score": "0.49606234", "text": "def test_depth_many():\n from bst import Node\n a = Node(20)\n a.left = Node(16)\n assert a.depth() == 2", "title": "" }, { "docid": "e7ce7bd22e5b260e7546467787a5cce7", "score": "0.49498278", "text": "def __writegrtofile__(gr=None, fn=None):\n f = open(fn, 'w')\n f.writelines('dummy\\ndummy\\ndummy\\nB %i\\n'%len(gr))\n for i in range(len(gr)):\n f.writelines('%13.9f %13.9f %13.9f %13.8e\\n'%(\n gr[i][0], gr[i][1], gr[i][2], gr[i][3]))", "title": "" }, { "docid": "0ef4093dd88607322f8fd45ad9935677", "score": "0.4949458", "text": "def writefile(dic, outfile, total, class_name):\n res = sortResult(dic)\n if class_name == \"o\":\n outfile.write(\"TOP_OCCUPATIONS;\")\n else:\n 
outfile.write(\"TOP_STATES;\")\n outfile.write(\"NUMBER_CERTIFIED_APPLICATIONS;PERCENTAGE\\n\")\n for i in range(len(res)-1, -1, -1):\n outfile.write(res[i].output(total))", "title": "" }, { "docid": "bd479062ed8264dd4e445c90510aa434", "score": "0.4948102", "text": "def create_zeroed_hdr_file(self, size, path, mode=None):\n filepath = os.path.join(self.testdir, path)\n with open(filepath, 'w') as f:\n f.write('\\0' * HEADER_SIZE)\n f.write('\\132' * (size - HEADER_SIZE))\n if mode is not None:\n os.chmod(filepath, mode)\n return filepath", "title": "" }, { "docid": "6089fa1a9bbaa71b071f793e8b0ffde9", "score": "0.4941692", "text": "def generate_fwf_file(\n spec: FWFSpec,\n number_of_lines: int,\n output_file: pathlib.Path,\n rnd_value_generator: Callable[[FWFColumnSpec], str] = rnd_fwf_value,\n) -> None:\n if output_file.parent:\n output_file.parent.mkdir(parents=True, exist_ok=True)\n with open(output_file, \"w\", 1024, encoding=spec.encoding, newline=\"\") as f:\n lines = generate_fwf_lines(spec, number_of_lines, rnd_value_generator)\n f.writelines((line + \"\\n\" for line in lines))", "title": "" }, { "docid": "0975948bf41ae51aabb0b376051c96c7", "score": "0.4935499", "text": "def prepare_file():\n with open(filepath, 'w') as f:\n for x in range(0, 10000000): # 10 million lines\n f.write(\"This is line number \" + str(x) + \"\\n\")", "title": "" }, { "docid": "f155bb7b670460ff6931eb67843a1ff9", "score": "0.49260944", "text": "def dump(self, fname):\n template = '%s\\t%i\\n'\n with codecs.open(fname, 'w', encoding='utf-8') as f:\n for word, count in self._counter.most_common():\n f.write(template % (word, count))", "title": "" }, { "docid": "672fc1a4d78ba2c5e201b2a5709cc83d", "score": "0.492481", "text": "def output_basic_info (graph, path):\n with open(path, 'w') as out:\n out.write('***Basic***\\n')\n out.write(nx.info(graph))", "title": "" }, { "docid": "5100672a4cf5d7fbe317a560fd375108", "score": "0.49232996", "text": "def to_file(self, path, mode=\"w\"):\n with open(path, mode=mode) as f:\n for tree in self:\n for label, line in tree.to_labeled_lines():\n f.write(line + \"\\n\")", "title": "" }, { "docid": "af7ab825ca6d5125cc28494a59ef83a8", "score": "0.4923042", "text": "def disk_save(self, filename):\n\n # Assign each node a line number\n self.assign_line_number()\n\n with open(filename, 'w') as fh:\n self._disk_save(self.root, fh)\n\n # Now pad the lines with whitespaces so they are all the same length\n file_pad_lines(filename)", "title": "" } ]
0520fe6183a36493bd169bfdd893568e
Perform a Bayesian linear fit for a heteroscedastic set of points with uncertainties along both axes. See the accompanying article and D'Agostini (2005) for further details of the method. The method allows uncertainties to be attached not only to the data points but also to the functional relation itself. In other words, the relation is a (normal) distribution around its mean rather than a deterministic function relating x and y. If the fit converges to sigmaV = 0, the supplied variance of the data already explains all of the observed variance, and there is no need to attach extra variance to the equation. By default, we use a flat prior for sigmaV and the intercept, and a uniform angle prior for the slope. A custom prior function can be supplied as a parameter.
[ { "docid": "298708af0c0b338d80b4ce04e9f63123", "score": "0.7031217", "text": "def bayesian_linear_fit(x, y, Vx, Vy, c=True, prior=None):\n\n sigmaV_guess = 0\n m_guess = 1\n plot = False\n\n if c:\n # If not passing through the origin\n phi = posterior(x, y, Vx, Vy, c=True, prior=prior)\n guess = (m_guess, 0, sigmaV_guess)\n min = opt.minimize(phi, guess)\n m_est, c_est, sigmaV_est = min.x\n\n # Calculate the uncertainty on the estimates by calculating the Hessian inverse\n mV_est, cV_est, sigmaVV_est = min.hess_inv.diagonal()\n\n else:\n # If passing through the origin\n phi = posterior(x, y, Vx, Vy, c=False, prior=prior)\n guess = (m_guess, sigmaV_guess)\n min = opt.minimize(phi, guess)\n m_est, sigmaV_est = min.x\n mV_est, sigmaVV_est = min.hess_inv.diagonal()\n\n c_est = 0\n cV_est = 0\n\n if plot:\n plt.figure(clear=True, num=1)\n plt.errorbar(x, y, xerr=np.sqrt(Vx), yerr=np.sqrt(Vy), fmt='.', elinewidth=0.5)\n plt.ylim(ymin=0)\n plt.xlim(xmin=0)\n\n xfit = [np.nanmin(x), np.nanmax(x)]\n yfit = np.array(xfit) * m_est + c_est\n plt.plot(xfit, yfit, 'r')\n\n # Construct the result dictionary\n estimates = {'m': m_est, 'mV': mV_est, 'c': c_est,\n 'cV': cV_est, 'sigmaV': sigmaV_est, 'sigmaVV': sigmaVV_est}\n\n return estimates", "title": "" } ]
[ { "docid": "e2fa5000de7ab8b17be83cb851c11008", "score": "0.59692013", "text": "def fit_line(\n x, y, xhat=None, fitprobs=None, fitlogs=None, dist=None, through_origin=False\n):\n\n fitprobs = validate.fit_arguments(fitprobs, \"fitprobs\")\n fitlogs = validate.fit_arguments(fitlogs, \"fitlogs\")\n\n x = numpy.asarray(x)\n y = numpy.asarray(y)\n\n if xhat is None:\n xhat = numpy.array([numpy.min(x), numpy.max(x)])\n\n if dist is None:\n dist = stats.norm\n\n if fitprobs in [\"x\", \"both\"]:\n x = dist.ppf(x / 100.0)\n xhat = dist.ppf(numpy.array(xhat) / 100.0)\n\n if fitprobs in [\"y\", \"both\"]:\n y = dist.ppf(y / 100.0)\n\n if fitlogs in [\"x\", \"both\"]:\n x = numpy.log(x)\n\n if fitlogs in [\"y\", \"both\"]:\n y = numpy.log(y)\n\n x = sm.add_constant(x)\n\n # fix the y-intercept at 0 if requested\n # note that this is a nuance of the statsmodels package.\n # `x[:, 0] = 5` doesn't fix the intercept at 5, for instance.\n if through_origin:\n x[:, 0] = 0\n\n results = sm.OLS(y, x).fit()\n yhat = _estimate_from_fit(\n xhat,\n results.params[1],\n results.params[0],\n xlog=fitlogs in [\"x\", \"both\"],\n ylog=fitlogs in [\"y\", \"both\"],\n )\n\n if fitprobs in [\"y\", \"both\"]:\n yhat = 100.0 * dist.cdf(yhat)\n if fitprobs in [\"x\", \"both\"]:\n xhat = 100.0 * dist.cdf(xhat)\n\n return xhat, yhat, results", "title": "" }, { "docid": "ee14f4e8e6dddd6efa927b8e7c4ba9f3", "score": "0.58584356", "text": "def iterfit(xdata, ydata, invvar=None, upper=5, lower=5, x2=None,\r\n maxiter=10, **kwargs):\r\n from pydl.pydlutils.math import djs_reject\r\n nx = xdata.size\r\n if ydata.size != nx:\r\n raise ValueError('Dimensions of xdata and ydata do not agree.')\r\n if invvar is not None:\r\n if invvar.size != nx:\r\n raise ValueError('Dimensions of xdata and invvar do not agree.')\r\n else:\r\n #\r\n # This correction to the variance makes it the same\r\n # as IDL's variance()\r\n #\r\n var = ydata.var()*(float(nx)/float(nx-1))\r\n if var == 0:\r\n var = 1.0\r\n invvar = np.ones(ydata.shape, dtype=ydata.dtype)/var\r\n if x2 is not None:\r\n if x2.size != nx:\r\n raise ValueError('Dimensions of xdata and x2 do not agree.')\r\n yfit = np.zeros(ydata.shape)\r\n if invvar.size == 1:\r\n outmask = True\r\n else:\r\n outmask = np.ones(invvar.shape, dtype='bool')\r\n xsort = xdata.argsort()\r\n maskwork = (outmask & (invvar > 0))[xsort]\r\n if 'oldset' in kwargs:\r\n sset = kwargs['oldset']\r\n sset.mask = True\r\n sset.coeff = 0\r\n else:\r\n if not maskwork.any():\r\n raise ValueError('No valid data points.')\r\n # return (None,None)\r\n if 'fullbkpt' in kwargs:\r\n fullbkpt = kwargs['fullbkpt']\r\n else:\r\n sset = bspline(xdata[xsort[maskwork]], **kwargs)\r\n if maskwork.sum() < sset.nord:\r\n print('Number of good data points fewer than nord.')\r\n return (sset, outmask)\r\n if x2 is not None:\r\n if 'xmin' in kwargs:\r\n xmin = kwargs['xmin']\r\n else:\r\n xmin = x2.min()\r\n if 'xmax' in kwargs:\r\n xmax = kwargs['xmax']\r\n else:\r\n xmax = x2.max()\r\n if xmin == xmax:\r\n xmax = xmin + 1\r\n sset.xmin = xmin\r\n sset.xmax = xmax\r\n if 'funcname' in kwargs:\r\n sset.funcname = kwargs['funcname']\r\n xwork = xdata[xsort]\r\n ywork = ydata[xsort]\r\n invwork = invvar[xsort]\r\n if x2 is not None:\r\n x2work = x2[xsort]\r\n else:\r\n x2work = None\r\n iiter = 0\r\n error = 0\r\n qdone = False\r\n while (error != 0 or qdone == False) and iiter <= maxiter:\r\n # print iiter\r\n goodbk = sset.mask.nonzero()[0]\r\n if maskwork.sum() <= 1 or not sset.mask.any():\r\n sset.coeff = 0\r\n iiter = maxiter + 
1\r\n else:\r\n if 'requiren' in kwargs:\r\n i = 0\r\n while xwork[i] < sset.breakpoints[goodbk[sset.nord]] and i < nx-1:\r\n i += 1\r\n ct = 0\r\n for ileft in range(sset.nord, sset.mask.sum()-sset.nord+1):\r\n while (xwork[i] >= sset.breakpoints[goodbk[ileft]] and\r\n xwork[i] < sset.breakpoints[goodbk[ileft+1]] and\r\n i < nx-1):\r\n ct += invwork[i]*maskwork[i] > 0\r\n i += 1\r\n if ct >= kwargs['requiren']:\r\n ct = 0\r\n else:\r\n sset.mask[goodbk[ileft]] = False\r\n error, yfit = sset.fit(xwork, ywork, invwork*maskwork,\r\n x2=x2work)\r\n\r\n iiter += 1\r\n inmask = maskwork\r\n if error == -2: ##all breaks dropped out\r\n return (sset, outmask)\r\n elif error == 0:\r\n maskwork, qdone = djs_reject(ywork, yfit, invvar=invwork,\r\n inmask=inmask, outmask=maskwork,\r\n upper=upper, lower=lower)\r\n else:\r\n pass\r\n outmask[xsort] = maskwork\r\n temp = yfit\r\n yfit[xsort] = temp\r\n return (sset, outmask)", "title": "" }, { "docid": "66275978c45f961f3f1c0b3e6d28a84f", "score": "0.5835382", "text": "def gaussfit(x,y,initpar,sigma=None, bounds=None, binned=False):\n #gmodel = Model(gaussian)\n #result = gmodel.fit(y, x=x, amp=initpar[0], cen=initpar[1], sig=initpar[2], const=initpar[3])\n #return result\n func = gaussian\n if binned is True: func=gaussbin\n return curve_fit(func, x, y, p0=initpar, sigma=sigma, bounds=bounds)", "title": "" }, { "docid": "9afd59d9654d734ee2429e8960d1227f", "score": "0.5784328", "text": "def func(beta, x):\n # Expression of the line that we want to fit to the data\n y = beta[0] + beta[1] * x\n return y", "title": "" }, { "docid": "466c72266b01de6f40030ec234833174", "score": "0.5637101", "text": "def fit_func_linear(x, a, b):\n return a * x + b", "title": "" }, { "docid": "c353d72c105e66c6fcb69793260af83d", "score": "0.56264573", "text": "def fitLine(xdata, ydata, ysigma=None, xsigma=None):\n if xsigma is not None:\n w=1.0/(ysigma**2+xsigma**2)\n elif ysigma is None:\n w = np.ones(len(ydata)) # Each point is equally weighted.\n else:\n w=1.0/(ysigma**2)\n\n w=w/sum(w)\n sw = sum(w)\n wx = w*xdata # this product gets used to calculate swxy and swx2\n swx = sum(wx)\n swy = sum(w*ydata)\n swxy = sum(wx*ydata)\n swx2 = sum(wx*xdata)\n \n a = (sw*swxy - swx*swy)/(sw*swx2 - swx*swx)\n b = (swy*swx2 - swx*swxy)/(sw*swx2 - swx*swx)\n \n \n if ysigma is None:\n chi2 = sum(((a*xdata + b)-ydata)**2)\n else:\n chi2 = sum((((a*xdata + b)-ydata)/ysigma)**2)\n dof = len(ydata) - 2\n rchi2 = np.sqrt(chi2/dof)\n \n sa = rchi2*np.sqrt(sw/(sw*swx2 - swx*swx))\n sb = rchi2*np.sqrt(swx2/(sw*swx2 - swx*swx))\n return a, b, sa, sb", "title": "" }, { "docid": "568417c99c1813cd98daac78f355debf", "score": "0.5621997", "text": "def test_linear_regression():\n from mle import Normal, var\n import numpy as np\n\n x = var('x', vector=True, observed=True)\n y = var('y', vector=True, observed=True)\n\n a = var('a')\n b = var('b')\n sigma = var('sigma')\n\n model = Normal(y, a * x + b, sigma)\n np.random.seed(42)\n\n xs = np.linspace(0, 1, 20)\n ys = 0.5 * xs - 0.3 + np.random.normal(0, 0.2, 20)\n\n results = model.fit({'x': xs, 'y':ys}, {'a': 2, 'b': 1, 'sigma': 1})\n print(results)", "title": "" }, { "docid": "17d7deaf67a0b45737c9c6b0f5cea570", "score": "0.5613875", "text": "def fit(self, xdata, ydata, invvar, x2=None):\r\n goodbk = self.mask[self.nord:]\r\n nn = goodbk.sum()\r\n if nn < self.nord:\r\n yfit = np.zeros(ydata.shape, dtype='f')\r\n return (-2, yfit)\r\n nfull = nn * self.npoly\r\n bw = self.npoly * self.nord\r\n a1, lower, upper = self.action(xdata, x2=x2)\r\n\r\n foo = 
np.tile(invvar, bw).reshape(bw, invvar.size).transpose()\r\n a2 = a1 * foo\r\n alpha = np.zeros((bw, nfull+bw), dtype='d')\r\n beta = np.zeros((nfull+bw,), dtype='d')\r\n bi = np.arange(bw, dtype='i4')\r\n bo = np.arange(bw, dtype='i4')\r\n for k in range(1, bw):\r\n bi = np.append(bi, np.arange(bw-k, dtype='i4')+(bw+1)*k)\r\n bo = np.append(bo, np.arange(bw-k, dtype='i4')+bw*k)\r\n\r\n for k in range(nn-self.nord+1):\r\n itop = k*self.npoly\r\n ibottom = min(itop, nfull) + bw - 1\r\n ict = upper[k] - lower[k] + 1\r\n \r\n if ict > 0:\r\n work = np.dot(a1[lower[k]:upper[k]+1, :].T, a2[lower[k]:upper[k]+1, :])\r\n wb = np.dot(ydata[lower[k]:upper[k]+1], a2[lower[k]:upper[k]+1, :])\r\n alpha.T.flat[bo+itop*bw] += work.flat[bi]\r\n beta[itop:ibottom+1] += wb\r\n min_influence = 1.0e-10 * invvar.sum() / nfull\r\n \r\n errb = cholesky_band(alpha, mininf=min_influence) # ,verbose=True)\r\n if isinstance(errb[0], int) and errb[0] == -1:\r\n a = errb[1]\r\n else:\r\n if type(errb[0]) == type(0): errb = (np.array([errb[0]]),errb[1])\r\n yfit, foo = self.value(xdata, x2=x2, action=a1, upper=upper, lower=lower)\r\n return (self.maskpoints(errb[0]), yfit)\r\n errs = cholesky_solve(a, beta)\r\n if isinstance(errs[0], int) and errs[0] == -1:\r\n sol = errs[1]\r\n else:\r\n #\r\n # It is not possible for this to get called, because cholesky_solve\r\n # has only one return statement, & that statement guarantees that\r\n # errs[0] == -1\r\n #\r\n yfit, foo = self.value(xdata, x2=x2, action=a1, upper=upper, lower=lower)\r\n return (self.maskpoints(errs[0]), yfit)\r\n if self.npoly > 1:\r\n self.icoeff[:, goodbk] = np.array(a[0, 0:nfull].reshape(self.npoly, nn), dtype=a.dtype)\r\n self.coeff[:, goodbk] = np.array(sol[0:nfull].reshape(self.npoly, nn), dtype=sol.dtype)\r\n else:\r\n self.icoeff[goodbk] = np.array(a[0, 0:nfull], dtype=a.dtype)\r\n self.coeff[goodbk] = np.array(sol[0:nfull], dtype=sol.dtype)\r\n yfit, foo = self.value(xdata, x2=x2, action=a1, upper=upper, lower=lower)\r\n return (0, yfit)", "title": "" }, { "docid": "4450403c98a8dfca30f960a35a99dcba", "score": "0.55504596", "text": "def fit(self,xdata,ydata,invvar,x2=None):\n from . 
import cholesky_band, cholesky_solve\n goodbk = self.mask[self.nord:]\n nn = goodbk.sum()\n if nn < self.nord:\n yfit = np.zeros(ydata.shape,dtype='f')\n return (-2,yfit)\n nfull = nn * self.npoly\n bw = self.npoly * self.nord\n a1,lower,upper = self.action(xdata,x2=x2)\n foo = np.tile(invvar,bw).reshape(bw,invvar.size).transpose()\n a2 = a1 * foo\n alpha = np.zeros((bw,nfull+bw),dtype='d')\n beta = np.zeros((nfull+bw,),dtype='d')\n bi = np.arange(bw,dtype='i4')\n bo = np.arange(bw,dtype='i4')\n for k in range(1,bw):\n bi = np.append(bi, np.arange(bw-k,dtype='i4')+(bw+1)*k)\n bo = np.append(bo, np.arange(bw-k,dtype='i4')+bw*k)\n for k in range(nn-self.nord+1):\n itop = k*self.npoly\n ibottom = min(itop,nfull) + bw - 1\n ict = upper[k] - lower[k] + 1\n if ict > 0:\n work = np.dot(a1[lower[k]:upper[k]+1,:].T,a2[lower[k]:upper[k]+1,:])\n wb = np.dot(ydata[lower[k]:upper[k]+1],a2[lower[k]:upper[k]+1,:])\n alpha.T.flat[bo+itop*bw] += work.flat[bi]\n beta[itop:ibottom+1] += wb\n min_influence = 1.0e-10 * invvar.sum() / nfull\n errb = cholesky_band(alpha,mininf=min_influence) # ,verbose=True)\n if isinstance(errb[0],int) and errb[0] == -1:\n a = errb[1]\n else:\n yfit,foo = self.value(xdata,x2=x2,action=a1,upper=upper,lower=lower)\n return (self.maskpoints(errb[0]),yfit)\n errs = cholesky_solve(a,beta)\n if isinstance(errs[0],int) and errs[0] == -1:\n sol = errs[1]\n else:\n #\n # It is not possible for this to get called, because cholesky_solve\n # has only one return statement, & that statement guarantees that\n # errs[0] == -1\n #\n yfit,foo = self.value(xdata,x2=x2,action=a1,upper=upper,lower=lower)\n return (self.maskpoints(errs[0]),yfit)\n if self.npoly > 1:\n self.icoeff[:,goodbk] = np.array(a[0,0:nfull].reshape(self.npoly,nn),dtype=a.dtype)\n self.coeff[:,goodbk] = np.array(sol[0:nfull].reshape(self.npoly,nn),dtype=sol.dtype)\n else:\n self.icoeff[goodbk] = np.array(a[0,0:nfull],dtype=a.dtype)\n self.coeff[goodbk] = np.array(sol[0:nfull],dtype=sol.dtype)\n yfit,foo = self.value(xdata,x2=x2,action=a1,upper=upper,lower=lower)\n return (0,yfit)", "title": "" }, { "docid": "48e58c03974e65e8a18d1ba3748e89e0", "score": "0.55496144", "text": "def test_simple_fit():\n from mle import Normal, var\n import numpy as np\n\n x = var('x', vector=True, observed=True)\n mu = var('mu')\n sigma = var('sigma')\n\n dist = Normal(x, mu, sigma)\n np.random.seed(42)\n try:\n data = dist.sample(1e6, {'mu': 0, 'sigma': 1})\n except:\n assert False, 'Generating data failed'\n \n try:\n results = dist.fit(data, {'mu': 1, 'sigma': 2}, method='BFGS')\n except:\n assert False, 'Fitting generated data failed'", "title": "" }, { "docid": "d714b4b4cd5314b9c0168ac98e6359eb", "score": "0.55444497", "text": "def fit_linear(mu,I):\n alpha_1_1 = sum((1.0-mu)**2)\n beta_1 = sum((1.0-mu)*(1.0-I))\n a = beta_1/alpha_1_1\n return a", "title": "" }, { "docid": "5300dbd4653cffabbf8368f1730064ab", "score": "0.55261654", "text": "def base_fit(dataset):\n popt, pcov = curve_fit(base_log_func, prepare_data(dataset)[0][0:crossover_point], prepare_data(dataset)[1][0:crossover_point])\n base_fit_fn = np.array(list(zip(prepare_data(dataset)[0],base_log_func(prepare_data(dataset)[0], *popt))))\n base_variance = variance(base_fit_fn,dataset)\n return popt,pcov,base_fit_fn,base_variance", "title": "" }, { "docid": "fe3cb82d575d814f7a644188da4a2b3b", "score": "0.5502055", "text": "def fit(self, x, y):", "title": "" }, { "docid": "fe3cb82d575d814f7a644188da4a2b3b", "score": "0.5502055", "text": "def fit(self, x, y):", "title": "" }, { "docid": 
"bb38f3d0a27e705f10f497858654a90c", "score": "0.54320246", "text": "def least_squares_fit(x, y): \n beta = correlation(x, y) * standard_deviation(y) / standard_deviation(x)\n alpha = mean(y) - beta * mean(x)\n return alpha, beta", "title": "" }, { "docid": "ed168c5875f81501115ec310f8a27c87", "score": "0.53922683", "text": "def fit_curvefit(p0, xdat, ydat, func, yerr=None, **kwargs):\n\n method = kwargs.get('lsqmethod','lm')\n if (_scipyversion >= 0.17) and (yerr is not None):\n pfit, pcov = curve_fit(func, xdat, ydat, p0=p0, sigma=yerr,\n absolute_sigma = True, method=method)\n else:\n pfit, pcov = curve_fit(func, xdat, ydat, p0=p0, sigma=yerr, **kwargs)\n\n if (len(ydat) > len(p0)) and (pcov is not None):\n pcov = pcov *(((func(pfit, xdat, ydat)-ydat)**2).sum()\n / (len(ydat)-len(p0)))\n else:\n pcov = _np.inf\n # endif\n # endif\n\n return pfit, pcov\n \"\"\"\n The below uncertainty is not a real uncertainty. It assumes that there\n is no covariance in the fitting parameters.\n perr = []\n for ii in range(len(pfit)):\n try:\n #This assumes uncorrelated uncertainties (no covariance)\n perr.append(_np.absolute(pcov[ii][ii])**0.5)\n except:\n perr.append(0.00)\n # end try\n # end for\n return pfit, _np.array(perr)\n\n perr - Estimated uncertainty in fitting parameters\n (scaled by residuals)\n \"\"\"", "title": "" }, { "docid": "f4fc3135f2309489d007141de4cfb49f", "score": "0.53780216", "text": "def linalgfit(x, y): \n # To avoid undefined 1/y\n maxy =np.max(y)\n ind = np.arange(len(y))\n mody = np.array(map(lambda i: maxy/100.0 if y[i] < maxy/100.0 else y[i], ind)) \n \n # Common variables\n N = len(x)\n Sx = np.sum(x)\n Sx2 = np.sum(x**2)\n Sx3 = np.sum(x**3)\n Sx4 = np.sum(x**4)\n \n S1_y = np.sum(1/mody)\n Sx_y = np.sum(x/mody)\n Sx2_y = np.sum(x**2/mody)\n \n # Create a 3x3 matrix and a 3 dimensional vector\n X = np.array( [[N, Sx, Sx2], [Sx, Sx2, Sx3], [Sx2, Sx3, Sx4]] )\n Y = np.vstack(np.array( [S1_y, Sx_y, Sx2_y] ))\n \n # Solve linear equation using least square\n a, b, c = np.linalg.lstsq(X,Y)[0]\n \n # Calculate fitted values of height, position and width\n height = (4 * c) / (4*a*c - b**2)\n position = - b/(2 * c)\n\n if (4*a*c - b**2) < 0:\n print >> sys.stdout, '\\n Warning: Non-physical Fitting parameters. Try weighted or iterative least square methods.' 
\n width = - np.sqrt(np.absolute(4*a - b**2/c)) / np.sqrt(c)\n else:\n width = np.sqrt(4*a - (b**2/c)) / np.sqrt(c)\n\n return np.array( [height, position, width] )", "title": "" }, { "docid": "97ae46ea3169d3142311a697ad19ddd7", "score": "0.5348616", "text": "def fit(self, X, y):", "title": "" }, { "docid": "97ae46ea3169d3142311a697ad19ddd7", "score": "0.5348616", "text": "def fit(self, X, y):", "title": "" }, { "docid": "3daaa2414dea6756443d72cc3840eb55", "score": "0.53331953", "text": "def bet_fit(pressure, bet_points):\n slope, intercept, corr_coef, p, stderr = stats.linregress(pressure, bet_points)\n return slope, intercept, corr_coef", "title": "" }, { "docid": "b1386e74fd305a481bce65a8986d9359", "score": "0.53230685", "text": "def nonLinearFit(X, Y, fitfunction, initial_guess=None, \n bounds=(-np.infty, np.infty), dY=None, \n showplot=True, plot_some_errors=(False, 20), \n **kwargs):\n \n if not isinstance(X, np.ndarray):\n raise TypeError(\"X should be a np.array\")\n if not isinstance(Y, np.ndarray):\n raise TypeError(\"Y should be a np.array\")\n if not isinstance(dY, np.ndarray) and dY is not None:\n raise TypeError(\"dY shouuld be a np.array\")\n if len(X) != len(Y):\n raise IndexError(\"X and Y must have same lenght\")\n if dY is not None and len(dY) != len(Y):\n raise IndexError(\"dY and Y must have same lenght\")\n \n if dY is None:\n W = None\n else:\n W = 1/dY**2\n \n parameters, covariance = curve_fit(fitfunction, X, Y,\n p0=initial_guess, bounds=bounds, \n sigma=W) \n rsq = sum( (Y - fitfunction(X, *parameters))**2 )\n rsq = rsq/sum( (Y - np.mean(Y))**2 )\n rsq = 1 - rsq\n n = len(parameters)\n\n if showplot:\n \n plt.figure()\n if dY is None:\n plt.plot(X, Y, 'b.', zorder=0)\n else:\n if plot_some_errors[0] == False:\n plt.errorbar(X, Y, yerr=dY, linestyle='b', marker='.',\n ecolor='b', elinewidth=1.5, zorder=0)\n else:\n plt.errorbar(X, Y, yerr=dY, linestyle='-', marker='.',\n color='b', ecolor='b', elinewidth=1.5,\n errorevery=len(Y)/plot_some_errors[1], \n zorder=0)\n plt.plot(X, fitfunction(X, *parameters), 'r-', zorder=100) \n plt.legend([\"Ajuste lineal ponderado\",\"Datos\"])\n \n kwargs_list = ['text_position', 'par_units', 'par_string_scale', \n 'par_error_digits', 'rsq_decimal_digits']\n kwargs_default = [(.02,'up'), ['' for i in range(n)], \n [False for i in range(n)], \n [3 for i in range(n)], 3]\n for key, value in zip(kwargs_list, kwargs_default):\n try:\n kwargs[key]\n if key != 'text_position':\n try:\n if len(kwargs[key]) != n:\n print(\"Wrong number of parameters\",\n \"on '{}'\".format(key))\n kwargs[key] = value\n except TypeError:\n kwargs[key] = [kwargs[key] for i in len(n)]\n except KeyError:\n kwargs[key] = value\n \n if kwargs['text_position'][1] == 'up':\n vertical = [.9-i*.08 for i in range(n+1)]\n elif kwargs['text_position'][1] == 'down':\n vertical = [.05+i*.08 for i in range(n+1)]\n else:\n if kwargs['text_position'][1] <= .08:\n fact = .08\n else:\n fact = -.08\n vertical = [\n kwargs['text_position'][1]+fact*i for i in range(n+1)]\n \n for i in range(n):\n plt.annotate(\n 'a{} = {}'.format(\n i,\n ivu.errorValueLatex(\n parameters[i], \n sqrt(covariance[i,i]),\n error_digits=kwargs['par_error_digits'][i],\n units=kwargs['par_units'][i],\n string_scale=kwargs['par_string_scale'][i],\n one_point_scale=True)),\n (kwargs['text_position'][0], vertical[i]),\n xycoords='axes fraction')\n rsqft = r'$R^2$ = {:.' 
+ str(kwargs['rsq_decimal_digits'])+'f}'\n plt.annotate(rsqft.format(rsq),\n (kwargs['text_position'][0], vertical[-i]),\n xycoords='axes fraction')\n \n plt.show()\n \n parameters_error = np.array(\n [sqrt(covariance[i,i]) for i in range(n)])\n parameters = list(zip(parameters, parameters_error))\n \n return rsq, parameters", "title": "" }, { "docid": "d02c81cfa2861ca6e8466751acf7dfe3", "score": "0.5311644", "text": "def _fit(self, X: pd.Series, y, **fit_parmas):\r\n raise NotImplementedError", "title": "" }, { "docid": "150bb9bc6330ae518ba6decb5a49263c", "score": "0.53019285", "text": "def fit(self, X, y=None, method='lyapunov', **kwargs):\n \n if (not type(X) == np.ndarray) or (not X.ndim == 2):\n raise TypeError(\"\"\"Argument X must be matrix (time x nodes).\"\"\")\n n_T, self.n_nodes = np.shape(X)\n\n # Make sure a correct optimization method was entered\n if method not in ['lyapunov', 'moments']:\n raise ValueError(\"\"\"Please enter a valid method: 'lyapunov' or 'moments'.\"\"\")\n\n # Decide number of time shifts for covariances Q_emp\n if 'i_tau_opt' in kwargs.keys():\n if (not type(kwargs['i_tau_opt']) == np.int):\n raise TypeError(\"\"\"Argument Xi_tau_opt must be integer.\"\"\")\n # calculate time lags from 0 up to i_tau_opt\n n_tau = int(kwargs['i_tau_opt']) + 1\n else:\n n_tau = 2\n \n # Create a dictionary to store the diagnostics of fit\n self.d_fit = dict()\n\n # Lag (time-shifted) FC matrices\n Q_emp = np.zeros([n_tau, self.n_nodes, self.n_nodes], dtype=np.float)\n # Remove mean in the time series\n centered_X = X - X.mean(axis=0)\n # Calculate the lagged FC matrices\n n_T_span = n_T - n_tau + 1\n for i_tau in range(n_tau):\n Q_emp[i_tau] = np.tensordot(centered_X[0:n_T_span], \\\n centered_X[i_tau:n_T_span+i_tau], \\\n axes=(0,0))\n Q_emp /= float(n_T_span - 1)\n\n # Call adequate method for optimization and check for specific arguments\n if method == 'lyapunov':\n return self.fit_LO(Q_emp, **kwargs)\n elif method == 'moments':\n return self.fit_moments(Q_emp[0,:,:], Q_emp[1,:,:])", "title": "" }, { "docid": "7e4e4974973a5cb5cc4c3945e3958b6c", "score": "0.52972066", "text": "def fit(self, X, y):\n self.shape_fit_ = X.shape\n\n res_compute_weights = self._compute_weights(X, y)\n intercept_already_computed = type(res_compute_weights) == tuple\n if intercept_already_computed:\n lagrange_multipliers, intercept = res_compute_weights\n self.intercept_ = intercept\n else:\n lagrange_multipliers = res_compute_weights\n\n support_vector_indices = lagrange_multipliers > self.support_vector_tol\n \n self.support_ = (support_vector_indices * range(self.shape_fit_[0])).nonzero()[0]\n if support_vector_indices[0]:\n self.support_ = np.insert(self.support_,0,0)\n \n self.dual_coef_ = lagrange_multipliers[support_vector_indices] * y[support_vector_indices]\n self.support_vectors_ = X[support_vector_indices]\n self.n_support_ = np.array([sum(y[support_vector_indices] == -1),\n sum(y[support_vector_indices] == 1)])\n\n if not intercept_already_computed:\n self.intercept_ = np.mean(y[support_vector_indices] - self.predict(self.support_vectors_))", "title": "" }, { "docid": "55bf3346bfe8d4eec6093f0359354e30", "score": "0.527925", "text": "def curveFit(f, x, y, p0 = None, sigma = None, **kw):\n if p0 is None or isscalar(p0):\n # determine number of parameters by inspecting the function\n import inspect\n args, varargs, varkw, defaults = inspect.getargspec(f)\n if len(args) < 2:\n raise (ValueError, \"p0 not given as a sequence and inspection\"\\\n \" cannot determine the number of fit 
parameters\")\n if p0 is None:\n p0 = 1.0\n p0 = [p0]*(len(args)-1)\n\n args = (x, y, f)\n\n def resid(p, x, y, f):\n return y - f(x, *p)\n\n def wresid(p, x, y, f, weights):\n return weights*(y - f(x, *p))\n\n if sigma is None:\n r = resid\t\t\n else:\n args += (1.0/ asarray(sigma), )\n r = wresid\n\n plsqFull = optimize.leastsq(r, p0, args = args, full_output=True, **kw)\n\n p = plsqFull[0]\n cov = plsqFull[1]\n success = plsqFull[4]\n# \tprint p, success\n # For no weighting,\n # \t Weights(i) = 1.0.\n # For instrumental (Gaussian) weighting,\n # \t Weights(i)=1.0/sigma(i)^2\n # For statistical (Poisson) weighting,\n # \t Weights(i) = 1.0/y(i), etc.\n\n # calculate the error factor according to section 15.2 of Numerical Recipes (which I got from the IDL routine gaussfit)\n\n # if sigma is passed in, it is put in the denominator of r() and then squared, so we get our (y(x) - y)^2 / sigma^2. This assumes sigma is std of measurements. If we want poisson stats, pass in sqrt(y), then we get (y(x) - y)^2/ y (equation 4.33 from bevington). Otherwise \n if (len(y) > len(p)) and cov is not None:\n chi2 = sum( r(p, *args)**2 )\n chi2 = chi2 / (len(x) - len(p)) # reduced chi2\n # If no weighting, then we need to boost error estimates by\n # sqrt(chisq). we are multiplying the errors by sqrt(chi2) - so\n # multiply the covariance matrix by chi2 and when you sqrt it later it\n # works out.\n #\n # I think this also corresponds to equation 6.23 in Bevington. The\n # basic idea is that we have 'common uncertainties', so sigma_i =\n # sigma. The sigma_xx (covariance bits) reduce to 6.23. Notice there\n # is a sigma^2 in both of those that are not in 6.21 and 6.22. So we\n # need to mult by that. sigma^2 is defined in 6.14, which is...\n # chi2_reduced.\n if sigma is None:\n cov = cov * chi2\n else:\n chi2 = inf\n cov = inf\n\n return p, cov, chi2, success", "title": "" }, { "docid": "0eaa64e2a2c8ccc7794e4d0706bfb7e9", "score": "0.52746844", "text": "def nlmFit(x,a,b,y0):\n return y0+(a*(b**x))", "title": "" }, { "docid": "496eb686e560b7eb960daa1ad3fa5f30", "score": "0.52743185", "text": "def fit(self, X_train, y_train):\n x = []\n for sample in X_train:\n x.append(sample[0])\n\n y = y_train\n mean_x = np.mean(x)\n mean_y = np.mean(y)\n \n m = sum([(x[i] - mean_x) * (y[i] - mean_y) for i in range(len(x))]) / sum([(x[i] - mean_x) ** 2 for i in range(len(x))])\n b = mean_y - m * mean_x \n self.intercept = b\n self.slope = m \n \n pass # TODO: fix this", "title": "" }, { "docid": "bb4aedb4d936e1bf180ce2cd62064b55", "score": "0.5259882", "text": "def fitGaussian(x, data, baseline=0.0, verb=False):\n\n tmp = np.copy(data)\n tmp -= baseline\n peakx = x[np.argmax(data)]\n tmp[np.where(tmp < 0)] = 0.0\n\n mod = GaussianModel(missing='drop', independent_vars=['x'])\n\n # pars = mod.guess(data, x=x)\n out = mod.fit(tmp, center=peakx, amplitude=1.0, sigma=1.0, x=x)\n\n if verb:\n print(out.fit_report(min_correl=0.25))\n\n return out", "title": "" }, { "docid": "f7dcc9b4366e63a3fe25894f43aaf495", "score": "0.5254696", "text": "def least_squares_fit(x, y):\n beta = stats.correlation(x, y) * \\\n stats.standard_deviation(y) / stats.standard_deviation(x)\n alpha = stats.mean(y) - beta * stats.mean(x)\n return alpha, beta", "title": "" }, { "docid": "71252ff75219660ad548ee556285d819", "score": "0.5254289", "text": "def normal_fit(x,mu, sigma, A):\r\n n = A*mlab.normpdf(x,mu,sigma)\r\n return n", "title": "" }, { "docid": "3960a319c98c171de302f6dd68219a13", "score": "0.52333933", "text": "def fit(self, x, y):\n x_mean = 
np.mean(x)\n y_mean = np.mean(y)\n\n aux = (x -x_mean) * (y - y_mean)\n self.b1 = np.sum(aux) / np.sum((x - x_mean) ** 2)\n self.b0 = y_mean - self.b1 * x_mean", "title": "" }, { "docid": "2125a8d484a158d7807f35cd2d87e32b", "score": "0.52310485", "text": "def fit(self, X, y=None):\n # Fit with one less component\n self.n_components -= 1\n self._second_stage = False\n super(FirstFixedGaussianMixtureDensity, self).fit(X, y)\n\n # Update means, etc. with single Gaussian component\n self.n_components += 1\n n_samples, n_features = X.shape\n self.means_ = np.append(self.means_, np.zeros(n_features).reshape(1, -1), axis=0)\n if self.covariance_type == 'full':\n self._append_standard_covariance(\n np.eye(n_features, n_features).reshape(n_features, n_features, 1)\n )\n elif self.covariance_type == 'tied':\n raise RuntimeError('FirstFixed cannot support covariance_type=\"tied\"')\n elif self.covariance_type == 'diag':\n self._append_standard_covariance(np.ones(n_features).reshape(1, -1))\n elif self.covariance_type == 'spherical':\n self._append_standard_covariance(np.array([1]))\n else:\n raise RuntimeError('Invalid covariance_type \"%s\"' % self.covariance_type)\n\n # Rescale weights\n if self.fixed_weight < 0 or self.fixed_weight > 1:\n raise ValueError('weight should be between 0 and 1')\n self.weights_ *= (1 - self.fixed_weight)\n self.weights_ = np.append(self.weights_, np.array([self.fixed_weight]))\n\n # Reset parameters for second fitting\n old_warm_start = self.warm_start\n self.warm_start = True\n self.converged_ = False\n self.lower_bound_ = -np.infty\n\n # Refit with second stage set (i.e. fixed standard normal)\n self._second_stage = True\n super(FirstFixedGaussianMixtureDensity, self).fit(X, y)\n self.warm_start = old_warm_start\n\n return self", "title": "" }, { "docid": "abec7876f7e09b5ac1593723013eb70a", "score": "0.5222782", "text": "def fit_sigmoid(data, xdata_range=None, gamma_eq_lambda=False,\n bounds=(-np.inf,np.inf), no_lapses=False):\n # unpack data\n x = list(data[0,:])\n ratio = list(data[1,:])\n freqs = list(data[2,:])\n xdata = []\n ydata = []\n for ind in range(len(x)):\n xlist = [x[ind]]\n ylist = [ratio[ind]]\n xdata.append(xlist*int(freqs[ind]))\n ydata.append(ylist*int(freqs[ind]))\n xdata = list(chain.from_iterable(xdata))\n ydata = list(chain.from_iterable(ydata))\n params = []\n psycurve = []\n \n if xdata_range is None:\n xdata_range = xdata\n if no_lapses:\n # boundaries set such that slope must be positive (lower bound=0)\n params, covar = curve_fit(psyfun_alpha_beta, xdata, ydata, bounds=([-np.inf, 0], np.inf), max_nfev=5000)\n psycurve = psyfun_alpha_beta(xdata_range, params[0], params[1])\n elif gamma_eq_lambda:\n params, covar = curve_fit(psyfun_gam_eq_lamb, xdata, ydata, absolute_sigma=True, bounds=bounds)\n psycurve = psyfun_gam_eq_lamb(xdata_range, *params)\n elif not gamma_eq_lambda:\n params, covar = curve_fit(psyfun, xdata, ydata, absolute_sigma=True, bounds=bounds)\n psycurve = psyfun(xdata_range, *params)\n return psycurve, params", "title": "" }, { "docid": "dd3490792d987e945849de10f5df75b4", "score": "0.52215856", "text": "def fit(self, X, y, overlapping=False):\n # Check parameters\n if not isinstance(self.n_coefs, int):\n raise TypeError(\"'n_coefs' must be an integer.\")\n if isinstance(self.n_coefs, int) and self.n_coefs < 2:\n raise ValueError(\"'n_coefs' must be greater than or equal to 2.\")\n if not isinstance(self.window_sizes, (list, tuple, np.ndarray)):\n raise TypeError(\"'window_sizes' must be array-like.\")\n if 
(isinstance(self.n_coefs, int) and\n self.n_coefs > np.min(self.window_sizes)):\n raise ValueError(\"'n_coefs' must be lower than or equal to the \"\n \"minimum value in 'window_sizes'.\")\n if not isinstance(self.norm_mean, (int, float)):\n raise TypeError(\"'norm_mean' must be a boolean.\")\n if not isinstance(self.norm_std, (int, float)):\n raise TypeError(\"'norm_std' must be a boolean.\")\n if not isinstance(self.n_bins, int):\n raise ValueError(\"'n_bins' must be an integer.\")\n if self.n_bins < 2:\n raise ValueError(\"'n_bins' must be greater than or equal to 2.\")\n if not isinstance(self.variance_selection, (int, float)):\n raise TypeError(\"'variance_selection' must be a boolean.\")\n if not isinstance(self.variance_threshold, (int, float)):\n raise TypeError(\"'variance_threshold' must be a float.\")\n if not isinstance(self.pvalue_threshold, (int, float)):\n raise TypeError(\"'pvalue_threshold' must be a float or an \"\n \"integer.\")\n if (self.pvalue_threshold < 0) or (self.pvalue_threshold > 1):\n raise ValueError(\"'pvalue_threshold' must be between 0 and 1.\")\n if not isinstance(overlapping, (int, float)):\n raise TypeError(\"'overlapping' must be a boolean.\")\n\n # Check X and y\n X, y = check_X_y(X, y)\n check_classification_targets(y)\n le = LabelEncoder()\n y_ind = le.fit_transform(y)\n n_samples, n_features = X.shape\n\n # Save parameters\n self._sfa_list = []\n self._count_list = []\n self._relevant_features_list = []\n self.vocabulary_ = {}\n\n for window_size in self.window_sizes:\n if overlapping:\n n_windows = n_features - window_size + 1\n X_window = np.asarray([X[:, i: i + window_size]\n for i in range(n_windows)])\n X_window = X_window.reshape(n_samples * n_windows,\n -1,\n order='F')\n else:\n n_windows = n_features // window_size\n remainder = n_features % window_size\n if remainder == 0:\n window_idx = np.array_split(np.arange(0, n_features),\n n_windows)\n else:\n split_idx = np.arange(window_size,\n (n_windows + 1) * window_size,\n window_size)\n window_idx = np.split(np.arange(0, n_features),\n split_idx)[:-1]\n X_window = X[:, window_idx].reshape(n_samples * n_windows, -1)\n\n sfa = SFA(self.n_coefs, True, self.norm_mean,\n self.norm_std, self.n_bins, 'entropy',\n self.variance_selection, self.variance_threshold)\n count = CountVectorizer(ngram_range=(1, 2))\n\n y_window = np.repeat(y_ind, n_windows)\n X_sfa = sfa.fit_transform(X_window, y_window)\n X_sfa = np.apply_along_axis(lambda x: ''.join(x),\n 1,\n X_sfa).reshape(n_samples, -1)\n word_size = len(X_sfa[0, 0])\n if word_size == 1:\n count.set_params(tokenizer=self._tok)\n X_sfa = np.apply_along_axis(lambda x: ' '.join(x), 1, X_sfa)\n\n tf = count.fit_transform(X_sfa)\n _, pval = chi2(tf, y_ind)\n relevant_features = np.where(pval > self.pvalue_threshold)[0]\n\n old_size = len(self.vocabulary_)\n for key, value in count.vocabulary_.items():\n idx = np.where(relevant_features == value)[0]\n if idx.size == 1:\n word = str(window_size) + \" \" + key\n self.vocabulary_[idx[0] + old_size] = word\n\n self._relevant_features_list.append(relevant_features)\n self._sfa_list.append(sfa)\n self._count_list.append(count)\n\n return self", "title": "" }, { "docid": "cdd3fafcf3f3097faf1c744d85ff6a7c", "score": "0.5211461", "text": "def fit(data, pdf, init_pars=None, par_bounds=None, **kwargs):\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs)", 
"title": "" }, { "docid": "33bb2d7957aa2cc8223c2e6a4bbb35c3", "score": "0.52058464", "text": "def fit(self, X, y=None, overlapping=True):\n # Check input data\n X = check_array(X)\n n_samples, n_features = X.shape\n\n # Check parameters\n if (not isinstance(self.n_coefs, int)) and (self.n_coefs is not None):\n raise TypeError(\"'n_coefs' must be None or an integer.\")\n if isinstance(self.n_coefs, int) and self.n_coefs < 2:\n raise ValueError(\"'n_coefs' must be greater than or equal to 2.\")\n if isinstance(self.n_coefs, int) and self.n_coefs % 2 != 0:\n raise ValueError(\"'n_coefs' must be an even integer.\")\n if not isinstance(self.window_size, int):\n raise TypeError(\"'window_size' must be an integer.\")\n if self.window_size > n_features:\n raise ValueError(\"'window_size' must be lower than or equal to \"\n \"the size of each time series.\")\n if isinstance(self.n_coefs, int) and self.n_coefs > self.window_size:\n raise ValueError(\"'n_coefs' must be lower than or equal to \"\n \"'window_size'.\")\n if not isinstance(self.norm_mean, (int, float)):\n raise TypeError(\"'norm_mean' must be a boolean.\")\n if not isinstance(self.norm_std, (int, float)):\n raise TypeError(\"'norm_std' must be a boolean.\")\n if not isinstance(self.n_bins, int):\n raise TypeError(\"'n_bins' must be an integer.\")\n if self.n_bins < 2:\n raise ValueError(\"'n_bins' must be greater than or equal to 2.\")\n if self.quantiles not in ['empirical', 'gaussian']:\n raise ValueError(\"'quantiles' must be either 'gaussian' or \"\n \"'empirical'.\")\n if not isinstance(self.variance_selection, (int, float)):\n raise TypeError(\"'variance_selection' must be a boolean.\")\n if not isinstance(self.variance_threshold, (int, float)):\n raise TypeError(\"'variance_threshold' must be a float.\")\n if not isinstance(self.numerosity_reduction, (int, float)):\n raise TypeError(\"'numerosity_reduction' must be a boolean.\")\n if not isinstance(overlapping, (int, float)):\n raise TypeError(\"'overlapping' must be a boolean.\")\n\n self.vocabulary_ = {}\n\n if overlapping:\n n_windows = n_features - self.window_size + 1\n X_window = np.asarray([X[:, i: i + self.window_size]\n for i in range(n_windows)])\n X_window = X_window.reshape(n_samples * n_windows, -1, order='F')\n else:\n n_windows = n_features // self.window_size\n remainder = n_features % self.window_size\n if remainder == 0:\n window_idx = np.array_split(np.arange(0, n_features),\n n_windows)\n else:\n split_idx = np.arange(self.window_size,\n n_windows * (self.window_size + 1),\n self.window_size)\n window_idx = np.split(np.arange(0, n_features), split_idx)[:-1]\n X_window = X[:, window_idx].reshape(n_samples * n_windows, -1)\n\n sfa = SFA(self.n_coefs, False, self.norm_mean,\n self.norm_std, self.n_bins, self.quantiles,\n self.variance_selection, self.variance_threshold)\n count = CountVectorizer(ngram_range=(1, 1))\n\n X_sfa = sfa.fit_transform(X_window)\n X_sfa = np.apply_along_axis(lambda x: ''.join(x),\n 1,\n X_sfa).reshape(n_samples, -1)\n word_size = len(X_sfa[0, 0])\n if word_size == 1:\n count.set_params(tokenizer=self._tok)\n if self.numerosity_reduction:\n X_sfa = np.apply_along_axis(numerosity_reduction, 1, X_sfa)\n else:\n X_sfa = np.apply_along_axis(lambda x: ' '.join(x), 1, X_sfa)\n count.fit(X_sfa)\n\n for key, value in count.vocabulary_.items():\n self.vocabulary_[value] = key\n\n self._sfa = sfa\n self._count = count\n return self", "title": "" }, { "docid": "ab9b590b73bd407b158b595b97b622e9", "score": "0.5203047", "text": "def fit(self, y, 
data):\n if self.intercept:\n data = np.pad(data, ((0, 0), (1, 0)), \"constant\", constant_values=1)\n\n # Process\n y = np.matrix(y)\n self.y = y\n data = np.matrix(data)\n self.data = data\n\n # Fit\n _, n_features = data.shape\n self.beta = np.linalg.inv(np.transpose(data) * data + self.alpha * np.eye(n_features)) * np.transpose(data) * y\n return self.beta", "title": "" }, { "docid": "5e0af77d55c5b1fc8a20cda34aeaafba", "score": "0.5170345", "text": "def fit(self, Vr, X, Xdot, U=None, P=0):\n return _InferredMixin.fit(self, Vr, X, Xdot, U, P)", "title": "" }, { "docid": "4d379dbe958059b2041c4023be71e614", "score": "0.5169992", "text": "def fit(self, x: np.ndarray, y: np.ndarray = None) -> Any:\n if min(x.shape[0], x.shape[1] - 1) >= 3:\n self._fuzzy_index = min(x.shape[0], x.shape[1] - 1) / (min(x.shape[0], x.shape[1] - 1) - 2)\n else:\n self._fuzzy_index = 2\n\n n = x.shape[0]\n self._dimensions = x.shape[1]\n\n u = np.random.rand(self._n_clusters, n)\n\n self._losses = []\n\n for t in range(self._max_iters):\n u, v, loss, signal = self._update(x, u)\n self._losses.append(loss)\n print('\\titer: {} - loss: {:.4f}'.format(t, loss))\n if signal:\n break\n\n self._fitted = True\n self._center = v\n self._train_u = u\n self._variance = np.zeros(self._center.shape)\n\n for i in range(self._dimensions):\n self._variance[:, i] = np.sum(\n u * ((x[:, i][:, np.newaxis] - self._center[:, i].transpose()) ** 2).T, axis=1\n ) / np.sum(u, axis=1)\n\n self._variance = np.fmax(self._variance, np.finfo(np.float64).eps)\n\n return self", "title": "" }, { "docid": "2e5d3dadd111719e19724f724ee76c62", "score": "0.5154077", "text": "def robust_slope(x,y,sigma,limits=None,npt=15,reweight=False):\n # Maybe add sigma outlier rejection in the future\n n = len(x)\n if n==2:\n return wtslope(x,y,sigma,error=True,reweight=reweight)\n # Calculate weighted pmx/pmxerr\n wt_slp,wt_slperr = wtslope(x,y,sigma,error=True,reweight=reweight)\n wt_y, wt_yerr = wtmean(y,sigma,error=True,reweight=reweight)\n # Unweighted slope\n uwt_slp = wtslope(x,y,sigma*0+1,reweight=reweight)\n # Calculate robust loss metric for range of slope values\n # chisq = Sum( abs(y-(x*slp-mean(x*slp)))/sigma )\n if limits is None:\n limits = np.array([np.min([0.5*wt_slp,0.5*uwt_slp]), np.max([1.5*wt_slp,1.5*uwt_slp])])\n slp_step = (np.max(limits)-np.min(limits))/(npt-1)\n slp_arr = np.arange(npt)*slp_step + np.min(limits)\n # Vectorize it\n resid = np.outer(y,np.ones(npt))-np.outer(x,np.ones(npt))*np.outer(np.ones(n),slp_arr)\n mnresid = np.mean(resid,axis=0)\n resid -= np.outer(np.ones(n),mnresid) # remove the mean\n chisq = np.sum( np.abs(resid) / np.outer(sigma,np.ones(npt)) ,axis=0)\n bestind = np.argmin(chisq)\n best_slp = slp_arr[bestind]\n # Get parabola bisector\n lo = np.maximum(0,bestind-2)\n hi = np.maximum(bestind+2,n)\n quad_slp = quadratic_bisector(slp_arr[lo:hi],chisq[lo:hi])\n # Problem with parabola bisector, use best point instead \n if np.isnan(quad_slp) | (np.abs(quad_slp-best_slp)> slp_step):\n best_slp = best_slp\n else:\n best_slp = quad_slp\n return best_slp, wt_slperr", "title": "" }, { "docid": "a185d5cdc96523b99e5a3f098b725865", "score": "0.51132536", "text": "def fit(self, X, y):\n if len(X) != len(y):\n raise ValueError(f\"Found input variables with inconsistent numbers of samples: {len(X)}, {len(y)}\")\n\n X, y = np.array(X), np.array(y)\n self.coef_ = np.ones(len(X[0]), dtype=np.float32)\n self.intercept_ = 0.0\n for self.n_iter_ in range(1, self.max_iter + 1):\n if self.shuffle:\n shuffled_indices = 
np.arange(len(X))\n np.random.shuffle(shuffled_indices)\n X = X[shuffled_indices]\n y = y[shuffled_indices]\n update = False\n for i in range(len(X)):\n if y[i] * (np.dot(self.coef_, X[i]) + self.intercept_) <= -self.tol:\n self.coef_ += self.eta0 * y[i] * X[i]\n self.intercept_ += self.eta0 * y[i]\n update = True\n if not update:\n break\n return self", "title": "" }, { "docid": "d6186c924644beab9cd535b79fa4325e", "score": "0.51043093", "text": "def _wls_linearfit_predict(x, w, wx, wy, wxx, wxy, select):\n\n # linear fit\n k = w[select].sum()\n kx = wx[select].sum()\n ky = wy[select].sum()\n kxx = wxx[select].sum()\n kxy = wxy[select].sum()\n delta = k * kxx - kx ** 2\n m = 1. / delta * (k * kxy - kx * ky)\n b = 1. / delta * (kxx * ky - kx * kxy)\n b_var = kxx / delta\n m_var = k / delta\n bm_covar = - kx / delta\n\n # estimation\n y = b + m * x\n dy2 = b_var + 2 * bm_covar * x + m_var * x**2\n\n return y, dy2", "title": "" }, { "docid": "d887c5665bd2282fd42e6916f15bd941", "score": "0.5093072", "text": "def fit(self, x, y, lamda=0.0):\n phi = np.zeros((x.shape[0], self.w.shape[0]))\n\n for i in range(phi.shape[1]):\n phi[:, i] = self.base_functions[i](x)\n\n # solve normal equation\n self.w = np.linalg.inv(phi.T @ phi + lamda * np.eye(phi.shape[1])) @ phi.T @ y", "title": "" }, { "docid": "26060611ca7a3543bd73eafc1969b05e", "score": "0.5081235", "text": "def WeightedLinearLeastSquaresFit(x,y,w):\n wsum = sum(w)\n wx = sum(x*w)\n wx2 = sum(w*(x**2))\n wy = sum(w*y)\n wxy = sum(w*x*y)\n m = (wsum*wxy-wx*wy)/(wsum*wx2 - (wx**2))\n b = (wx2*wy-wx*wxy)/(wsum*wx2 - (wx**2))\n if sum(w)/len(w) == 1:#if this equals 1, should behave like question 2\n averagex = average(x)\n averagey = average(y)\n averagex2 = average(x**2)\n averagexy = average(x*y)\n dev = y - (m*x+b)\n dev1 = average(dev**2)\n slerr = ( (1.0/(len(x)-2)) * (dev1/((averagex2 - averagex**2))) )**(0.5)\n interr = ( (1.0/(len(x)-2)) * ((dev1*averagex2)/((averagex2 - averagex**2))) )**(0.5)\n else:\n slerr = (wsum/((wsum*wx2) - (wx**2)))**0.5 #weighted slope error\n interr = (wx2/(wsum*wx2 - wx**2))**0.5 #weighted intercept error\n return m,b,slerr,interr", "title": "" }, { "docid": "70c22a862ff4e0e333704694241116e4", "score": "0.5066629", "text": "def fit(self, X, y):\n ...", "title": "" }, { "docid": "70c22a862ff4e0e333704694241116e4", "score": "0.5066629", "text": "def fit(self, X, y):\n ...", "title": "" }, { "docid": "8dfbaae178cdf98f430f6720706cf881", "score": "0.50650406", "text": "def fit(X: np.ndarray, y: np.ndarray, **kwargs) -> any:\n raise NotImplementedError(\"You must implement this method.\")", "title": "" }, { "docid": "fcba6459c88b04d9e2ba99b8bd432723", "score": "0.5033081", "text": "def fit(self, X, constraints, bounds=None):\n a,b,c,d = self._process_inputs(X, constraints, bounds)\n gamma = self.gamma\n num_pos = len(a)\n num_neg = len(c)\n _lambda = np.zeros(num_pos + num_neg)\n lambdaold = np.zeros_like(_lambda)\n gamma_proj = 1. 
if gamma is np.inf else gamma/(gamma+1.)\n pos_bhat = np.zeros(num_pos) + self.bounds_[0]\n neg_bhat = np.zeros(num_neg) + self.bounds_[1]\n A = self.A_\n\n for it in xrange(self.max_iter):\n # update positives\n vv = self.X_[a] - self.X_[b]\n for i,v in enumerate(vv):\n wtw = v.dot(A).dot(v) # scalar\n alpha = min(_lambda[i], gamma_proj*(1./wtw - 1./pos_bhat[i]))\n _lambda[i] -= alpha\n beta = alpha/(1 - alpha*wtw)\n pos_bhat[i] = 1./((1 / pos_bhat[i]) + (alpha / gamma))\n Av = A.dot(v)\n A += beta * np.outer(Av, Av)\n\n # update negatives\n vv = self.X_[c] - self.X_[d]\n for i,v in enumerate(vv):\n wtw = v.dot(A).dot(v) # scalar\n alpha = min(_lambda[i+num_pos], gamma_proj*(1./neg_bhat[i] - 1./wtw))\n _lambda[i+num_pos] -= alpha\n beta = -alpha/(1 + alpha*wtw)\n neg_bhat[i] = 1./((1 / neg_bhat[i]) - (alpha / gamma))\n Av = A.dot(v)\n A += beta * np.outer(Av, Av)\n\n normsum = np.linalg.norm(_lambda) + np.linalg.norm(lambdaold)\n if normsum == 0:\n conv = np.inf\n break\n conv = np.abs(lambdaold - _lambda).sum() / normsum\n if conv < self.convergence_threshold:\n break\n lambdaold = _lambda.copy()\n if self.verbose:\n print('itml iter: %d, conv = %f' % (it, conv))\n\n if self.verbose:\n print('itml converged at iter: %d, conv = %f' % (it, conv))\n self.n_iter_ = it\n return self", "title": "" }, { "docid": "254b323a0aeca643c809520bdc75e1e2", "score": "0.5028507", "text": "def fit(self, X):", "title": "" }, { "docid": "8b8dd519426b7b5678cba035270fa85c", "score": "0.5024564", "text": "def von_mises_fitfunc(x, A, kappa, l, s):\r\n return A*stats.vonmises.pdf(x, kappa, loc=l, scale=s)", "title": "" }, { "docid": "c8620b286c1507f4744c25631b937fef", "score": "0.5014953", "text": "def fit(self, v, X):\n raise Exception('DensityEstimator is an abstract class')", "title": "" }, { "docid": "fd24e6e36dbb7c1ce3114870ab33dfb4", "score": "0.5012165", "text": "def fit(self, X, y):\n X = check_array(X, force_all_finite=True, accept_sparse=False,\n ensure_2d=True)\n y = check_array(y, force_all_finite=False, accept_sparse=False,\n ensure_2d=False)\n if y.ndim == 1:\n y = check_array(y, force_all_finite=True, accept_sparse=False,\n ensure_2d=False)\n self._validate_params()\n\n self.linop_ = self._get_kernel_map(X, y)\n Gram = self.linop_._Gram(X)\n if self.lbda > 0:\n self.dual_coefs_ = dlyap(-Gram / self.lbda, self.linop_.A,\n y / self.lbda)\n else:\n # TODO: Check A is invertible!!\n self.dual_coefs_ = solve(Gram, y)\n return self", "title": "" }, { "docid": "8bf24de006a2ff3e6b598c1120ee6da3", "score": "0.50076723", "text": "def fit(self, X, y):\r\n pass", "title": "" }, { "docid": "b1874d9bd80d048ebf017538e6601959", "score": "0.4998761", "text": "def fit_UVslope(filternames, mags, magerrs, z, beta0=-2.0, normband=filters['f850lp'], normmag=25.0, verbose=False, fit='flux'):\n x = UVSlope(1500., beta0, z)\n mag = S.Observation(x.rspec, normband).effstim('abmag')\n A0 = 10. 
** (normmag - mag)\n bands = [filters[b] for b in filternames]\n def func((beta, A)):\n y = UVSlope(1500., beta, z, A, verbose=verbose)\n return y.chi2(bands, mags, magerrs, fit=fit)\n # print beta0, A0, func((beta0, A0))\n output = scipy.optimize.fmin(func, np.array([beta0, A0]), maxiter=1000)\n return output", "title": "" }, { "docid": "672d2845614d6693e6b5125067d4f0eb", "score": "0.49959633", "text": "def fit_leastsq(p0, xdat, ydat, func, **kwargs):\n\n def errf(*args):\n p,x,y=(args[:-2],args[-2],args[-1])\n return func(x, _np.asarray(p)) - y\n # end def errf\n # errf = lambda p, x, y: func(p,x) - y\n\n pfit, pcov, infodict, errmsg, success = \\\n leastsq(errf, p0, args=(xdat, ydat), full_output=1,\n epsfcn=0.0001, **kwargs)\n\n # end if\n\n if (len(ydat) > len(p0)) and pcov is not None:\n pcov = pcov * ((errf(pfit, xdat, ydat)**2).sum()\n / (len(ydat)-len(p0)))\n else:\n pcov = _np.inf\n # endif\n\n return pfit, pcov\n\n \"\"\"\n The below uncertainty is not a real uncertainty. It assumes that there\n is no covariance in the fitting parameters.\n perr = []\n for ii in range(len(pfit)):\n try:\n #This assumes uncorrelated uncertainties (no covariance)\n perr.append(_np.absolute(pcov[ii][ii])**0.5)\n except:\n perr.append(0.00)\n # end try\n # end for\n return pfit, _np.array(perr)\n\n perr - Estimated uncertainty in fitting parameters\n (scaled by residuals)\n \"\"\"", "title": "" }, { "docid": "ef0c5fe690e675cacbe26346fb1dfb20", "score": "0.49937546", "text": "def linear(x, params=(0,1)):\n return params[1]*x+params[0]+np.random.normal(size=len(x))", "title": "" }, { "docid": "44bda71a73c5c203a8414bcc50d2c683", "score": "0.49880266", "text": "def fit(self, x, y):\n pass", "title": "" }, { "docid": "7f6c1458167a8f0750341e7cb57477d3", "score": "0.49797046", "text": "def test_linear_fitter_1set(self):\n expected = np.array([0, 1, 1, 1])\n p1 = models.Poly1DModel(3)\n p1.parameters = [0, 1, 1, 1]\n y1 = p1(self.x1)\n pfit = fitting.LinearLSQFitter(p1)\n pfit(self.x1, y1)\n utils.assert_allclose(p1.parameters, expected, atol=10 ** (-7))", "title": "" }, { "docid": "790ce785e38885a294823f97735d790d", "score": "0.4978268", "text": "def test_linefit(self):\n\n\t\tx = numpy.linspace(-10, 10, 256)\n\t\ty = 1.34*x + 0.56\n\n\t\tcc = robust.linefit(x, y)\n\t\tself.assertAlmostEqual(cc[0], 1.34, 2)\n\t\tself.assertAlmostEqual(cc[1], 0.56, 2)\n\n\t\tcc = robust.linefit(x, y, Bisector=True)\n\t\tself.assertAlmostEqual(cc[0], 1.34, 2)\n\t\tself.assertAlmostEqual(cc[1], 0.56, 2)\n\n\t\tx = numpy.linspace(-10, 10, 2048)\n\t\ty = 2.86*x - 0.56\n\n\t\tcc = robust.linefit(x, y)\n\t\tself.assertAlmostEqual(cc[0], 2.86, 2)\n\t\tself.assertAlmostEqual(cc[1], -0.56, 2)\n\n\t\tcc = robust.linefit(x, y, Bisector=True)\n\t\tself.assertAlmostEqual(cc[0], 2.86, 2)\n\t\tself.assertAlmostEqual(cc[1], -0.56, 2)", "title": "" }, { "docid": "ccc6811aef6801239d8a425e5b2b6071", "score": "0.497563", "text": "def fit_line(data):\n left_mean = data[:10].mean()\n right_mean = data[-10:].mean()\n slope = (right_mean - left_mean) / data.shape[0]\n \n x = np.arange(data.shape[0])\n return slope * x + left_mean", "title": "" }, { "docid": "ccc6811aef6801239d8a425e5b2b6071", "score": "0.497563", "text": "def fit_line(data):\n left_mean = data[:10].mean()\n right_mean = data[-10:].mean()\n slope = (right_mean - left_mean) / data.shape[0]\n \n x = np.arange(data.shape[0])\n return slope * x + left_mean", "title": "" }, { "docid": "f3de41d088ea1fca6791f8f8fa22a16b", "score": "0.49718767", "text": "def fit(self, X, y):\r\n positive = 
X.T[y == 1].T\r\n negative = X.T[y == -1].T\r\n self.model['y_pos_probability'] = np.mean(y == 1)\r\n self.model['mu_p_hat'] = np.mean(positive, axis=1)\r\n self.model['mu_n_hat'] = np.mean(negative, axis=1)\r\n self.model['cov_hat'] = (((positive.shape[1] - 1) * np.cov(positive) +\r\n (negative.shape[1] - 1) * np.cov(negative))\r\n / (X.shape[1] - 2))", "title": "" }, { "docid": "8ba283329d968101d28f6005738feea0", "score": "0.49701786", "text": "def fit(self, X, y):\n self._means = np.mean(X, 0)\n self._stds = np.std(X, 0)\n\n Xold = X\n X = self._standardize_inputs(X)\n ymat = y.reshape((y.shape[0], 1))\n\n if self._lambda is None:\n self._lambda = internal_cross_validation(\n LogisticRegression, {'schema': self._schema}, 'lambda',\n [0, 0.001, 0.01, 0.1, 1, 10, 100], 'auc', Xold, y\n )\n\n optres = scipy.optimize.minimize(self.loss,\n np.append(self._w, [self._b]),\n args=(X, ymat), jac=self.jac,\n method='Newton-CG')\n\n self._w = optres.x[:-1]\n self._w = self._w.reshape((self._w.shape[0], 1)) # make it a matrix\n self._b = optres.x[-1]", "title": "" }, { "docid": "741dbb8ea053b6c6758c6f966ef19124", "score": "0.49688393", "text": "def voigtfit(x,y,initpar=None,sigma=None,bounds=(-np.inf,np.inf)):\n if initpar is None:\n initpar = [np.max(y),x[np.argmax(y)],1.0,1.0,np.median(y),0.0]\n func = voigt\n return curve_fit(func, x, y, p0=initpar, sigma=sigma, bounds=bounds)", "title": "" }, { "docid": "97658eb3512c07ae7151462ad7686a95", "score": "0.49626175", "text": "def fit(self, X, y, lambda_=0, bias=True):\n if bias == True:\n X = self.add_bias(X)\n\n penalisation = lambda_ * np.eye(X.shape[1])\n\n beta = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X) + penalisation), X.T), y)\n self.beta = beta", "title": "" }, { "docid": "db50aef4cb152f11cb4f6d53befd439d", "score": "0.4954584", "text": "def fit_line1(x, y):\n # Remove entries where either x or y is NaN.\n clean_data = pd.concat([x, y], 1).dropna(0) # row-wise\n (_, x), (_, y) = clean_data.iteritems()\n slope, intercept, r, p, stderr = linregress(x, y)\n return slope, intercept # could also return stderr", "title": "" }, { "docid": "3e3101620f48352663080469d4b40023", "score": "0.49531314", "text": "def fit(self, X: object, y: np.array):\n solver_obj = self._solver_obj\n model_obj = self._model_obj\n prox_obj = self._prox_obj\n prox_intercepts_obj = self._prox_intercepts_obj\n fit_intercept = self.fit_intercept\n\n X = LearnerGLM._safe_array(X)\n y = LearnerGLM._safe_array(y)\n\n # Pass the data to the model\n model_obj.fit(X, y)\n\n n_samples = model_obj.n_samples\n n_features = model_obj.n_features\n\n if self.step is None:\n self.step = 1. 
/ model_obj.get_lip_best()\n\n # Range of the sample intercepts prox is always the same\n if fit_intercept:\n prox_intercepts_obj.range = (n_features + 1,\n n_features + n_samples + 1)\n else:\n prox_intercepts_obj.range = (n_features, n_features + n_samples)\n prox_obj.range = (0, n_features)\n\n if self.penalty == 'none':\n # No penalization is used on the model weights, so the prox applied\n # overall is only ProxSlope on the sample intercepts\n solver_prox = prox_intercepts_obj\n else:\n solver_prox = ProxMulti([prox_obj, prox_intercepts_obj])\n\n # Now, we can pass the model and prox objects to the solver\n solver_obj.set_model(model_obj).set_prox(solver_prox)\n # Make sure that there is no linesearch\n solver_obj.linesearch = False\n\n coeffs_start = None\n if self.warm_start and self.coeffs is not None:\n if self.coeffs.shape == (model_obj.n_coeffs,):\n coeffs_start = self.coeffs\n else:\n raise ValueError('Cannot warm start, coeffs don\\'t have the '\n 'right shape')\n\n # Launch the solver\n coeffs = solver_obj.solve(coeffs_start)\n\n self._set(\"coeffs\", coeffs)\n self._set(\"weights\", coeffs[:n_features])\n if fit_intercept:\n self._set(\"intercept\", coeffs[n_features])\n self._set(\"sample_intercepts\",\n coeffs[(n_features + 1):(n_features + n_samples + 1)])\n else:\n self._set(\"intercept\", None)\n self._set(\"sample_intercepts\",\n coeffs[n_features:(n_features + n_samples)])\n self._set(\"_fitted\", True)\n return self", "title": "" }, { "docid": "24798c8636d04d8a20043bd5cde26691", "score": "0.4952788", "text": "def fit(model_func, x, y, y_err=None, model_guess_func=None, return_fit=\"lin\", xlog=False, ylog=False, **kwargs):\r\n \r\n free_param_keys = []\r\n param_guess = []\r\n fixed_params = {}\r\n \r\n # parse kwargs:\r\n # key: (free?, value)\r\n for k,v in kwargs.items():\r\n t = type(v)\r\n if t is tuple:\r\n free, v = v\r\n elif t is bool:\r\n free = v\r\n v = None\r\n else:\r\n free = True\r\n if free:\r\n free_param_keys.append(k)\r\n param_guess.append(v)\r\n else:\r\n fixed_params[k] = v\r\n \r\n _model_func = model_func\r\n _x = x\r\n _y = y\r\n _y_err = y_err\r\n if xlog:\r\n _x = np.log(x)\r\n if ylog:\r\n _y = np.log(y)\r\n _y_err = y_err/y if y_err is not None else None\r\n if xlog and ylog:\r\n _model_func = lambda logx, *args, **kwargs: np.log(model_func(np.exp(logx), *args, **kwargs))\r\n elif xlog:\r\n _model_func = lambda logx, *args, **kwargs: model_func(np.exp(logx), *args, **kwargs)\r\n elif ylog:\r\n _model_func = lambda x, *args, **kwargs: np.log(model_func(x, *args, **kwargs))\r\n \r\n \r\n # build fit function with some fixed parameters\r\n fit_func = lambda x, *fit_params: _model_func(x, **fixed_params, **dict(zip(free_param_keys,fit_params)))\r\n \r\n \r\n # Find initial guess for free fit parameters\r\n if None in param_guess:\r\n param_guess = model_guess_func(x, y, y_err=y_err, **fixed_params, **dict(zip(free_param_keys,param_guess)))\r\n\r\n for k in fixed_params:\r\n del param_guess[k]\r\n param_guess = [param_guess[k] for k in free_param_keys]\r\n \r\n \r\n # perform the fit\r\n try:\r\n if y_err is not None:\r\n param, cov = curve_fit(fit_func, _x, _y, sigma=_y_err, p0=param_guess)\r\n else:\r\n param, cov = curve_fit(fit_func, _x, _y, p0=param_guess)\r\n param_err = np.sqrt(np.diag(cov))\r\n success = True\r\n except: #TODO be more restrictive on what Exception might be thrown\r\n # Fit failed!\r\n # Return some parameters that might prevent some follow-up errors\r\n param = param_guess\r\n param_err = [0] * len(param)\r\n \r\n # 
make sure, though, you indicate clearly that the fit failed\r\n success = False\r\n \r\n # build result dictionary\r\n result = fixed_params.copy()\r\n result.update(dict(zip([k+\"_err\" for k in fixed_params.keys()],[0] * len(fixed_params))))\r\n result.update(dict(zip(free_param_keys,param)))\r\n result.update(dict(zip([k+\"_err\" for k in free_param_keys],param_err)))\r\n result[\"success\"] = success\r\n result[\"good_fit\"] = success and np.all(param_err < np.abs(param))\r\n \r\n if return_fit is not None:\r\n ty = type(return_fit)\r\n if (ty is tuple and len(return_fit) == 2) \\\r\n or ty is str or ty is int:\r\n if ty is tuple:\r\n return_fit, N_points = return_fit\r\n elif ty is str:\r\n N_points = 150\r\n else:\r\n N_points = int(return_fit)\r\n return_fit = \"lin\"\r\n if return_fit == \"log\" or xlog:\r\n fitx = np.logspace(np.log10(min(x)), np.log10(max(x)), N_points)\r\n #if return_fit == \"lin\":\r\n else:\r\n fitx = np.linspace(min(x), max(x), N_points)\r\n elif hasattr(return_fit, \"__iter__\"):\r\n fitx = np.array(return_fit)\r\n \r\n if xlog:\r\n fitx = np.log(fitx)\r\n fity = fit_func(fitx, *param)\r\n result[\"fitx\"] = fitx\r\n result[\"fity\"] = fity\r\n result[\"fity_guess\"] = fit_func(fitx, *param_guess)\r\n \r\n if xlog:\r\n result[\"fitx\"] = np.exp(result[\"fitx\"])\r\n if ylog:\r\n result[\"fity\"] = np.exp(result[\"fity\"])\r\n result[\"fity_guess\"] = np.exp(result[\"fity_guess\"])\r\n \r\n return result", "title": "" }, { "docid": "eaa6b904f74868f0e0638e3549fd4cfd", "score": "0.49488974", "text": "def fitProfile(inp_x, inp_y, fit_center_in, fit_width=8, sigma=None,\n func='fgauss_const', return_residuals=False,p0=None,bounds=(-np.inf,np.inf)):\n\n # select out the region to fit\n # this will be only consistent to +- integer pixels\n fit_center = copy.copy(fit_center_in)\n xx_index = np.arange(len(inp_x))\n assert len(inp_x) == len(inp_y)\n \n j1 = int(np.round(np.amax([0, fit_center - fit_width])))\n j2 = int(round(np.amin([np.amax(xx_index), fit_center + fit_width])))\n\n # define sub-arrays to fit\n sub_x1 = inp_x[j1:j2]\n sub_y1 = inp_y[j1:j2]\n\n tot_counts_in_line = float(np.nansum(sub_y1))\n\n # normalize the sub-array\n try:\n scale_value = np.nanmax(sub_y1)\n except ValueError as e:\n print(e,j1,j2,sub_x1,sub_y1)\n sub_y_norm1 = sub_y1 / scale_value\n\n # select out the finite elements\n ii_good = np.isfinite(sub_y_norm1)\n sub_x = sub_x1[ii_good]\n sub_y_norm = sub_y_norm1[ii_good]\n if sigma is not None:\n sub_sigma1 = sigma[j1:j2]\n ii_good = np.isfinite(sub_y_norm1) & (np.isfinite(sub_sigma1))\n sub_sigma = sub_sigma1[ii_good]\n sub_y_norm = sub_y_norm1[ii_good]\n else:\n sub_sigma = None\n\n # note whether any NaNs were present\n if len(sub_x) == len(sub_x1):\n nanflag = False\n else:\n nanflag = True\n\n # set up initial parameter guesses, function names, and bounds. 
\n # initial guess assumes that the gaussian is centered at the middle of the input array\n # the sigma is \"1\" in x units\n # the amplitude is -0.1.\n # for the functions with an additional constant and line, the constant defaults to 1.\n if func == 'fgauss':\n if p0 is None:\n p0 = (np.mean(sub_x), 5., -0.5)\n use_function = fgauss\n elif func == 'fgauss_const':\n if p0 is None:\n p0 = (np.mean(sub_x),1., -np.ptp(sub_y_norm), np.nanmedian(sub_y_norm))\n use_function = fgauss_const\n elif func == 'fgauss_line':\n if p0 is None:\n p0 = (np.mean(sub_x), 1., -0.5, 1., 0.)\n use_function = fgauss_line\n elif func == 'fgauss_from_1':\n if p0 is None:\n p0 = (np.mean(sub_x),1., -np.ptp(sub_y_norm))\n use_function = fgauss_from_1\n else:\n raise ValueError\n\n # perform the least squares fit\n try:\n popt, pcov = scipy.optimize.curve_fit(use_function,\n sub_x,\n sub_y_norm,\n p0=p0,\n sigma=sub_sigma,\n maxfev=10000,\n bounds=bounds)\n\n # Pull out fit results\n # fitted values (0 is the centroid, 1 is the sigma, 2 is the amp)\n # lists used to facilitate json recording downstream\n errs = np.diag(pcov)\n centroid = popt[0]\n centroid_error = np.sqrt(errs[0])\n width = popt[1]\n width_error = np.sqrt(errs[1])\n fit_successful = True\n pcov_list = pcov.tolist()\n popt_list = popt.tolist()\n\n except RuntimeError:\n errs = np.NaN\n centroid = np.NaN\n centroid_error = np.NaN\n width = np.NaN\n width_error = np.NaN\n fit_successful = False\n pcov_list = []\n popt_list = []\n\n except ValueError as e:\n print('ValueError: {}'.format(e))\n errs = np.NaN\n centroid = np.NaN\n centroid_error = np.NaN\n width = np.NaN\n width_error = np.NaN\n fit_successful = False\n pcov_list = []\n popt_list = []\n\n except TypeError as e:\n print('TypeError: {}'.format(e))\n errs = np.NaN\n centroid = np.NaN\n centroid_error = np.NaN\n width = np.NaN\n width_error = np.NaN\n fit_successful = False\n pcov_list = []\n popt_list = []\n\n except:\n print('unknown error')\n errs = np.NaN\n centroid = np.NaN\n centroid_error = np.NaN\n width = np.NaN\n width_error = np.NaN\n fit_successful = False\n pcov_list = []\n popt_list = []\n\n\n if np.isnan(centroid_error) or np.isnan(centroid):\n fit_successful = False\n\n # build the returned dictionary\n retval = {'centroid': centroid,\n 'e_centroid': centroid_error,\n 'sigma': width,\n 'e_sigma': width_error,\n 'nanflag': nanflag,\n 'pcov': pcov_list,\n 'popt': popt_list,\n 'indices_used': (j1, j2),\n 'function_used': func,\n 'tot_counts_in_line': tot_counts_in_line,\n 'fit_successful': fit_successful,\n 'scale_value':float(scale_value)}\n\n # since residual array can be large, optionally include it\n if return_residuals:\n if fit_successful:\n predicted = use_function(sub_x, *popt)\n residuals = (predicted - sub_y_norm).tolist()\n else:\n residuals = np.NaN\n retval['residuals'] = residuals\n\n #return(retval['popt'][0], retval['popt'][1], retval['popt'][2], retval)\n return(retval)", "title": "" }, { "docid": "7351e6c8cd45c7f710115965e3aa4e61", "score": "0.49468344", "text": "def line_fit():\n data = np.load(\"housing.npy\")\n b=np.array([row[1] for row in data])\n a1=np.array([row[0] for row in data])\n a=np.stack((a1,np.ones(33)),axis=-1)\n\t\n x=[row[0] for row in data]\n y=[row[1] for row in data]\n\t\n slope,intercept=least_squares(a,b)\n abline_values = [slope * i + intercept for i in x]\n \n plt.plot(x, abline_values, 'r')\n plt.plot(x,y,'bo')\n plt.ylabel('housing prices')\n plt.xlabel('year')\n plt.show()", "title": "" }, { "docid": 
"19859ef665c1c0cc51bf07411ccafd1f", "score": "0.494575", "text": "def fit(self, X, a, y, sample_weight=None):\n raise NotImplementedError", "title": "" }, { "docid": "28f5f2d43ad983ea705435942a2173fb", "score": "0.49332556", "text": "def trainSVMlinear(X,y):\n clf = SVC(kernel=\"linear\")\n clf.fit(X,y)\n return clf", "title": "" }, { "docid": "41457ad6f9a8663a1a7bbb2bdd2dba78", "score": "0.49320874", "text": "def gauss_poly_fit(data,window_left,window_right,guess_override=[None,None,None,None,None,None,None,None,None]):\n\tx_data = range( window_left, window_right+1 )\n\ty_data = data[ window_left: window_right+1 ]\n\tslope_guess = ( numpy.average( y_data[-3:] ) - numpy.average( y_data[0:3] ) ) / ( x_data[-2] - x_data[1] )\n\tif( slope_guess < 0.0 ):\n\t\tslope_guess = 0.0\n\tyint_guess = numpy.average( y_data[0:3] ) - slope_guess * x_data[1]\n\tif( yint_guess < 0.0 ):\n\t\tyint_guess = 0.0\n\tmag_guess = max(y_data)\n\tcentroid_guess = numpy.average( x_data, weights=y_data )\n\tsigma_guess = sqrt( numpy.average( (x_data-centroid_guess)**2.0, weights=y_data ) )\n\tbeta_guess = sigma_guess/2.0\n\tguess = [ mag_guess, centroid_guess, sigma_guess, beta_guess, yint_guess, slope_guess, 0.0, 0.0, 0.0 ]\n\t\n\tfor i in range(0,len(guess_override)):\n\t\tif( guess_override[i] != None ):\n\t\t\tguess[i] = guess_override[i]\n\n\tbound_by = ( [0.0,0.0,0.0,0.0,0.0,-numpy.inf,-numpy.inf,-numpy.inf,-numpy.inf], [numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf] )\n\tsigmas = numpy.sqrt( y_data )\n\tsigmas[numpy.isnan(sigmas)]=1.0\n\n\ttry:\n\t\tB,B_cov = list( curve_fit(gauss_poly,x_data,y_data,p0=guess,bounds=bound_by,sigma=sigmas) )\n\texcept RuntimeError as e:\n\t\ttry:\n\t\t\tB,B_cov = list( curve_fit(gauss_poly,x_data,y_data,p0=guess,bounds=bound_by,sigma=sigmas,maxfev=10000) )\n\t\texcept RuntimeError as e:\n\t\t\tB = [ 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n\tres = list(B)\n\n\treturn res", "title": "" }, { "docid": "9fbdef0c9e415ea81a048ab1d4ac7a5f", "score": "0.4931322", "text": "def fit(sigmaA: np.array, sigmaB: np.array, functionType, verbose=False):\n sigmaA.sort()\n sigmaB.sort()\n loss_func = lambda alpha: function_loss(functionType, sigmaB, sigmaA, alpha)\n res = opt.minimize(fun=loss_func, x0=np.array([0.]), bounds=np.array([(0., 1.)]))\n return lambda sigmaA: functionType(res.x[0], sigmaA)", "title": "" }, { "docid": "c498e6ff6647392b45d0df319ba4ba84", "score": "0.4931107", "text": "def fitPSF(d,x0,y0,sigma,A):\n mesh = np.meshgrid(np.arange(d.shape[0]),np.arange(d.shape[1]))\n flattened_d = d.flatten()\n\n def residuals(params):\n return flattened_d - (modelPSF(params,mesh)).flatten()\n\n prms = lmfit.Parameters()\n prms.add('x0', value = x0, min = 0, max = d.shape[0], vary = True)\n prms.add('y0', value = y0, min = 0, max = d.shape[1], vary = True)\n prms.add('W' , value = 0.5, min = 0, max = 1., vary = True)\n prms.add('Ag', value = A, min = 0, max = np.sum(d-np.median(d)), vary = True)\n prms.add('Am', value = A, min = 0, max = np.sum(d-np.median(d)), vary = True)\n prms.add('sigma_x', value = sigma, min = 0, max = d.shape[0]/3., vary = True)\n prms.add('sigma_y', value = sigma, min = 0, max = d.shape[1]/3., vary = True)\n prms.add('sigma_m', value = sigma, min = 0, max = d.shape[1]/3., vary = True)\n prms.add('beta', value = 1., min = 0, max = 10.)\n prms.add('bkg', value = np.median(d), min = np.median(d)-10*get_sigma_mad(d), \\\n max = np.median(d)+10*get_sigma_mad(d), vary = True)\n prms.add('theta', value = np.pi/4., min = 0, 
max = np.pi)\n result = lmfit.minimize(residuals, prms)\n return result.params", "title": "" }, { "docid": "05c0b4568e20359376b77fa2b666d0db", "score": "0.49259126", "text": "def fit_regression_line(x, y):\n \n assert x.size == y.size\n assert isinstance(x, np.ndarray)\n assert isinstance(y, np.ndarray)\n\n sumX = np.sum(x)\n sumY = np.sum(y)\n sumXY = np.sum(x*y)\n sumSqr = np.sum(x**2)\n numDataPoints = x.size\n\n A = (numDataPoints * sumXY - sumX * sumY) /\\\n (numDataPoints * sumSqr - (sumX ** 2))\n\n B = (sumY - A * sumX) / numDataPoints\n \n print(\"Y = %dx + %d\" % (A, B))\n\n return lambda x: A * x + B", "title": "" }, { "docid": "b403723f70f7746b90d8f6e0a43da899", "score": "0.49245006", "text": "def fit_transform(self, X, y=None, overlapping=True):\n # Check input data\n X = check_array(X)\n n_samples, n_features = X.shape\n\n # Check parameters\n if (not isinstance(self.n_coefs, int)) and (self.n_coefs is not None):\n raise TypeError(\"'n_coefs' must be None or an integer.\")\n if isinstance(self.n_coefs, int) and self.n_coefs < 2:\n raise ValueError(\"'n_coefs' must be greater than or equal to 2.\")\n if isinstance(self.n_coefs, int) and self.n_coefs % 2 != 0:\n raise ValueError(\"'n_coefs' must be an even integer.\")\n if not isinstance(self.window_size, int):\n raise TypeError(\"'window_size' must be an integer.\")\n if self.window_size > n_features:\n raise ValueError(\"'window_size' must be lower than or equal to \"\n \"the size of each time series.\")\n if isinstance(self.n_coefs, int) and self.n_coefs > self.window_size:\n raise ValueError(\"'n_coefs' must be lower than or equal to \"\n \"'window_size'.\")\n if not isinstance(self.norm_mean, (int, float)):\n raise TypeError(\"'norm_mean' must be a boolean.\")\n if not isinstance(self.norm_std, (int, float)):\n raise TypeError(\"'norm_std' must be a boolean.\")\n if not isinstance(self.n_bins, int):\n raise TypeError(\"'n_bins' must be an integer.\")\n if self.n_bins < 2:\n raise ValueError(\"'n_bins' must be greater than or equal to 2.\")\n if self.quantiles not in ['empirical', 'gaussian']:\n raise ValueError(\"'quantiles' must be either 'gaussian' or \"\n \"'empirical'.\")\n if not isinstance(self.variance_selection, (int, float)):\n raise TypeError(\"'variance_selection' must be a boolean.\")\n if not isinstance(self.variance_threshold, (int, float)):\n raise TypeError(\"'variance_threshold' must be a float.\")\n if not isinstance(self.numerosity_reduction, (int, float)):\n raise TypeError(\"'numerosity_reduction' must be a boolean.\")\n if not isinstance(overlapping, (int, float)):\n raise TypeError(\"'overlapping' must be a boolean.\")\n\n self.vocabulary_ = {}\n\n if overlapping:\n n_windows = n_features - self.window_size + 1\n X_window = np.asarray([X[:, i: i + self.window_size]\n for i in range(n_windows)])\n X_window = X_window.reshape(n_samples * n_windows, -1, order='F')\n else:\n n_windows = n_features // self.window_size\n remainder = n_features % self.window_size\n if remainder == 0:\n window_idx = np.array_split(np.arange(0, n_features),\n n_windows)\n else:\n split_idx = np.arange(self.window_size,\n n_windows * (self.window_size + 1),\n self.window_size)\n window_idx = np.split(np.arange(0, n_features), split_idx)[:-1]\n X_window = X[:, window_idx].reshape(n_samples * n_windows, -1)\n\n sfa = SFA(self.n_coefs, False, self.norm_mean,\n self.norm_std, self.n_bins, self.quantiles,\n self.variance_selection, self.variance_threshold)\n count = CountVectorizer(ngram_range=(1, 1))\n\n X_sfa = 
sfa.fit_transform(X_window)\n X_sfa = np.apply_along_axis(lambda x: ''.join(x),\n 1,\n X_sfa).reshape(n_samples, -1)\n word_size = len(X_sfa[0, 0])\n if word_size == 1:\n count.set_params(tokenizer=self._tok)\n if self.numerosity_reduction:\n X_sfa = np.apply_along_axis(numerosity_reduction, 1, X_sfa)\n else:\n X_sfa = np.apply_along_axis(lambda x: ' '.join(x), 1, X_sfa)\n tf = count.fit_transform(X_sfa)\n\n for key, value in count.vocabulary_.items():\n self.vocabulary_[value] = key\n return tf", "title": "" }, { "docid": "7521f36b607a0deef8f900076e444d61", "score": "0.4900169", "text": "def fit(x, h1, w1, c1, h2, w2, c2, h3, w3, c3, noise_lvl):\n return (gaussian.fit(x, h1, w1, c1, 0) +\n gaussian.fit(x, h2, w2, c2, 0) +\n gaussian.fit(x, h3, w3, c3, 0) + noise_lvl)", "title": "" }, { "docid": "8c9163f5dd032cf8d9ab2ff329615706", "score": "0.4896803", "text": "def fit(self, xs, ys):\n raise NotImplementedError", "title": "" }, { "docid": "86d52dea3758aac473718e3ca4499363", "score": "0.48953307", "text": "def _fit_kde_prior_sklearn(self):\n from sklearn.neighbors import KernelDensity\n from sklearn.model_selection import GridSearchCV\n grid = GridSearchCV(KernelDensity(), {'bandwidth': np.linspace(0.1, 1.0, 30)}, cv=10, refit=True)\n kde = grid.fit(self.X).best_estimator_\n def log_prior(x):\n if x.ndim == 1:\n x = x.reshape(1,-1)\n return kde.score_samples(x)\n self.log_prior = log_prior\n self.prior = lambda x: np.exp(self.log_prior(x))", "title": "" }, { "docid": "eaff883ea5dede74b86b37f155cbcb7b", "score": "0.48877016", "text": "def fit(self, X, y, check_input=True):\n\n if self.alpha == 0:\n warnings.warn(\n \"With alpha=0, this algorithm does not converge \"\n \"well. You are advised to use the LinearRegression \"\n \"estimator\",\n stacklevel=2,\n )\n\n if isinstance(self.precompute, six.string_types):\n raise ValueError(\"precompute should be one of True, False or\" \" array-like. 
Got %r\" % self.precompute)\n\n # We expect X and y to be float64 or float32 Fortran ordered arrays\n # when bypassing checks\n if check_input:\n X, y = check_X_y(\n X,\n y,\n accept_sparse=\"csc\",\n order=\"F\",\n dtype=[np.float64, np.float32],\n copy=self.copy_X and self.fit_intercept,\n multi_output=True,\n y_numeric=True,\n )\n y = check_array(y, order=\"F\", copy=False, dtype=X.dtype.type, ensure_2d=False)\n\n X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(\n X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=False\n )\n\n if y.ndim == 1:\n y = y[:, None]\n if Xy is not None and Xy.ndim == 1:\n Xy = Xy[:, None]\n\n n_samples, n_features = X.shape\n n_targets = y.shape[1]\n\n if self.selection not in [\"cyclic\", \"random\"]:\n raise ValueError(\"selection should be either random or cyclic.\")\n\n if not self.warm_start or self.coef_ is None:\n coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order=\"F\")\n else:\n coef_ = self.coef_\n if coef_.ndim == 1:\n coef_ = coef_[None, :]\n\n dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)\n if self.n_jobs == 1:\n self.n_iter_ = []\n history = []\n for k in range(n_targets):\n if self.mode == \"admm\":\n this_coef, hist, this_iter = group_lasso_overlap(\n X,\n y[:, k],\n lamda=self.alpha,\n groups=self.groups,\n rho=self.rho,\n max_iter=self.max_iter,\n tol=self.tol,\n verbose=self.verbose,\n rtol=self.rtol,\n )\n elif self.mode == \"paspal-matlab\":\n this_coef, hist, this_iter = group_lasso_overlap_paspal(\n X,\n y[:, k],\n lamda=self.alpha,\n groups=self.groups,\n rho=self.rho,\n max_iter=self.max_iter,\n tol=self.tol,\n verbose=self.verbose,\n rtol=self.rtol,\n matlab_engine=self.matlab_engine,\n )\n elif self.mode == \"paspal\": # paspal wrapper\n this_coef, hist, this_iter = glopridu_algorithm(\n X,\n y[:, k],\n tau=self.alpha,\n blocks=self.groups,\n max_iter_ext=self.max_iter,\n tol_ext=self.tol,\n verbose=self.verbose,\n tol_int=self.rtol,\n )\n else:\n raise ValueError(self.mode)\n coef_[k] = this_coef.ravel()\n history.append(hist)\n self.n_iter_.append(this_iter)\n else:\n import joblib as jl\n\n if self.mode == \"admm\":\n coef_, history, self.n_iter_ = zip(\n *jl.Parallel(n_jobs=self.n_jobs)(\n jl.delayed(group_lasso_overlap)(\n X,\n y[:, k],\n lamda=self.alpha,\n groups=self.groups,\n rho=self.rho,\n max_iter=self.max_iter,\n tol=self.tol,\n verbose=self.verbose,\n rtol=self.rtol,\n )\n for k in range(n_targets)\n )\n )\n elif self.mode == \"paspal-matlab\": # paspal wrapper\n coef_, history, self.n_iter_ = zip(\n *jl.Parallel(n_jobs=self.n_jobs)(\n jl.delayed(group_lasso_overlap_paspal)(\n X,\n y[:, k],\n lamda=self.alpha,\n groups=self.groups,\n rho=self.rho,\n max_iter=self.max_iter,\n tol=self.tol,\n verbose=self.verbose,\n rtol=self.rtol,\n matlab_engine=self.matlab_engine,\n )\n for k in range(n_targets)\n )\n )\n elif self.mode == \"paspal\": # paspal wrapper\n coef_, history, self.n_iter_ = zip(\n *jl.Parallel(n_jobs=self.n_jobs)(\n jl.delayed(glopridu_algorithm)(\n X,\n y[:, k],\n tau=self.alpha,\n blocks=self.groups,\n max_iter_ext=self.max_iter,\n tol_ext=self.tol,\n verbose=self.verbose,\n tol_int=self.rtol,\n )\n for k in range(n_targets)\n )\n )\n else:\n raise ValueError(self.mode)\n\n if n_targets == 1:\n self.n_iter_ = self.n_iter_[0]\n\n self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])\n self._set_intercept(X_offset, y_offset, X_scale)\n\n # workaround since _set_intercept will cast self.coef_ into float64\n self.coef_ = np.asarray(self.coef_, 
dtype=X.dtype)\n\n self.history_ = history\n\n # return self for chaining fit and predict calls\n return self", "title": "" }, { "docid": "49f022b7d35ca187e2d0eebabd97e7a4", "score": "0.4887551", "text": "def fit_function(self, parms, x):\n # We reorder the needed variables to only use these that are\n # not fixed for minimization\n index = 0\n for i in np.arange(len(self.values)):\n if self.valuestofit[i]:\n self.values[i] = parms[index]\n index = index + 1\n # Only allow physically correct parameters\n self.values = self.check_parms(self.values)\n tominimize = (self.function(self.values, x) - self.data)\n # Check if we have a weighted fit\n if self.weightedfit is True:\n # Check dataweights for zeros and don't use these\n # values for the least squares method.\n with np.errstate(divide='ignore'):\n tominimize = np.where(self.dataweights!=0, \n tominimize/self.dataweights, 0)\n ## There might be NaN values because of zero weights:\n #tominimize = tominimize[~np.isinf(tominimize)]\n return tominimize", "title": "" }, { "docid": "622c474a8d0c6c291dac26a72f9bf8d0", "score": "0.4882291", "text": "def estimate_gaussianlinearoffset_peak(self, x_axis, data, params):\n\n error = self._check_1D_input(x_axis=x_axis, data=data, params=params)\n\n # try at first a fit with the ordinary gauss function\n res_ordinary_gauss = self.make_gaussian_fit(\n x_axis=x_axis,\n data=data,\n units=None,\n estimator=self.estimate_gaussian_peak\n )\n\n # subtract the result and perform again a linear fit:\n data_subtracted = data - res_ordinary_gauss.best_fit\n\n res_linear = self.make_linear_fit(\n x_axis=x_axis,\n data=data_subtracted,\n estimator=self.estimate_linear\n )\n\n # this way works much better than performing at first a linear fit,\n # subtracting the fit and make an ordinary gaussian fit. 
Especially for a\n # peak at the borders, this method is much more beneficial.\n\n # assign the obtained values for the initial fit:\n params['offset'] = res_ordinary_gauss.params['offset']\n params['center'] = res_ordinary_gauss.params['center']\n params['amplitude'] = res_ordinary_gauss.params['amplitude']\n params['sigma'] = res_ordinary_gauss.params['sigma']\n params['slope'] = res_linear.params['slope']\n\n return error, params", "title": "" }, { "docid": "d3c340f57a1bb5769486abe8be63c392", "score": "0.4876994", "text": "def fit(self, X, y=None):\n raise NotImplementedError", "title": "" }, { "docid": "87c3ad9d0413827f657cb25f19be2840", "score": "0.48768333", "text": "def fit(self, X, y=None):\n check_array(X, allow_nd=True)\n if self.func is None:\n self._func = _identity\n else:\n self._func = self.func\n\n if self.func_params is None:\n self.effective_func_params_ = {}\n else:\n self.effective_func_params_ = self.func_params.copy()\n\n validate_params({**self.get_params(), '_func': self._func},\n self._hyperparameters)\n\n return self", "title": "" }, { "docid": "8d81495d344ebd6a58a63d91ac9bf27d", "score": "0.4876039", "text": "def fit(self, X, y):\n pass", "title": "" }, { "docid": "8d81495d344ebd6a58a63d91ac9bf27d", "score": "0.4876039", "text": "def fit(self, X, y):\n pass", "title": "" }, { "docid": "8d81495d344ebd6a58a63d91ac9bf27d", "score": "0.4876039", "text": "def fit(self, X, y):\n pass", "title": "" }, { "docid": "8d81495d344ebd6a58a63d91ac9bf27d", "score": "0.4876039", "text": "def fit(self, X, y):\n pass", "title": "" }, { "docid": "8d81495d344ebd6a58a63d91ac9bf27d", "score": "0.4876039", "text": "def fit(self, X, y):\n pass", "title": "" }, { "docid": "8d81495d344ebd6a58a63d91ac9bf27d", "score": "0.4876039", "text": "def fit(self, X, y):\n pass", "title": "" }, { "docid": "a001c3cf42f29a4806fc72adb638391d", "score": "0.48739702", "text": "def fit(self, X, y):\n raise NotImplementedError('Not implemented in base ({}) class'.format(self.__class__.__name__))", "title": "" }, { "docid": "e09b35fa0ffad1b2d6561c6bdcc42310", "score": "0.48733622", "text": "def fit(x, h1, w1, c1, h2, w2, c2, noise_lvl):\n return (gaussian.fit(x, h1, w1, c1, 0) +\n gaussian.fit(x, h2, w2, c2, 0) + noise_lvl)", "title": "" }, { "docid": "d189c1e05e9da82f77aad6b1a409dafa", "score": "0.48680598", "text": "def fit_gauss(self, X):\n mu = np.mean(X, axis=0)\n cov = np.cov(X.T)\n mod = multivariate_normal(mu, cov)\n return(mod)", "title": "" }, { "docid": "10a997c2f114004d6ea4a7d91defc29f", "score": "0.48591095", "text": "def fit(self, x: Iterable[Iterable], y: Iterable = None) -> BaseEstimator:\n return self # pragma: no cover", "title": "" }, { "docid": "930db06624460e9d064998e33633828e", "score": "0.48551887", "text": "def fit(residual_function, params, args=None, kws=None, algo_choice=\"simplex\",\r\n **fit_kws):\r\n # Differentiate between custom fitting and [lmfit] fitting\r\n if algo_choice in custom_algos:\r\n return __custom_fit(residual_function, params, algo_choice, args=args,\r\n kws=kws, **fit_kws)\r\n elif algo_choice in lmfit_algos:\r\n return __lmfit_fit(residual_function, params, algo_choice, args=args,\r\n kws=kws, **fit_kws)\r\n else:\r\n raise ValueError(\"algo_choice invalid\")", "title": "" }, { "docid": "3ac56bfc88e2087359c2eed3cf01d6f4", "score": "0.48540604", "text": "def fit(\n self,\n X,\n Xk,\n y,\n groups=None,\n zstat=\"coef\",\n use_pyglm=True,\n group_lasso=False,\n antisym=\"cd\",\n group_agg=\"avg\",\n cv_score=False,\n debias=False,\n Ginv=None,\n **kwargs,\n ):\n\n 
# Possibly set default groups\n n = X.shape[0]\n p = X.shape[1]\n if groups is None:\n groups = np.arange(1, p + 1, 1)\n\n # Check if y_dist is gaussian, binomial, poisson\n kwargs[\"y_dist\"] = parse_y_dist(y)\n\n # Step 1: Calculate Z statistics\n zstat = str(zstat).lower()\n if zstat == \"coef\":\n\n # Fit (possibly group) lasso\n gl, inds, rev_inds = fit_group_lasso(\n X=X,\n Xk=Xk,\n y=y,\n groups=groups,\n use_pyglm=use_pyglm,\n group_lasso=group_lasso,\n **kwargs,\n )\n\n # Parse the expected output format based on which\n # lasso package we are using\n reg_lasso_flag = use_reg_lasso(groups) or (not group_lasso)\n logistic_flag = parse_logistic_flag(kwargs)\n\n # Retrieve Z statistics\n if use_pyglm and not reg_lasso_flag:\n Z = gl.beta_[rev_inds]\n elif reg_lasso_flag and logistic_flag:\n if gl.coef_.shape[0] != 1:\n raise ValueError(\n \"Unexpected shape for logistic lasso coefficients (sklearn)\"\n )\n Z = gl.coef_[0, rev_inds]\n else:\n Z = gl.coef_[rev_inds]\n\n # Possibly debias the lasso\n if debias:\n if Ginv is None:\n raise ValueError(f\"To debias the lasso, Ginv must be provided\")\n elif logistic_flag:\n raise ValueError(\n f\"Debiased lasso is not implemented for binomial data\"\n )\n else:\n features = np.concatenate([X, Xk], axis=1)\n debias_term = np.dot(Ginv, features.T)\n debias_term = np.dot(debias_term, y - np.dot(features, Z))\n Z = Z + debias_term / n\n\n # Save lasso class and reverse inds\n self.model = gl\n self.inds = inds\n self.rev_inds = rev_inds\n\n # Try to save cv accuracy for logistic lasso\n if isinstance(self.model, linear_model.LogisticRegressionCV):\n self.score = self.model.scores_[1].mean(axis=0).max()\n self.score_type = \"accuracy_cv\"\n # Save cv mse for lasso\n elif isinstance(self.model, linear_model.LassoCV):\n self.score = self.model.mse_path_.mean(axis=1).min()\n self.score_type = \"mse_cv\"\n # Else compute the score\n else:\n features = np.concatenate([X, Xk], axis=1)[:, inds]\n self.cv_score_model(\n features=features,\n y=y,\n cv_score=cv_score,\n logistic_flag=logistic_flag,\n )\n\n elif zstat == \"lars_path\":\n Z = calc_lars_path(X, Xk, y, groups, **kwargs)\n\n else:\n raise ValueError(f'zstat ({zstat}) must be one of \"coef\", \"lars_path\"')\n\n # Combine Z statistics\n W_group = combine_Z_stats(Z, groups, antisym=antisym, group_agg=group_agg)\n\n # Save values for later use\n self.Z = Z\n self.groups = groups\n self.W = W_group\n return W_group", "title": "" }, { "docid": "270f76d58831d7cb4bed30aa23fe06e0", "score": "0.48507693", "text": "def fit(self, obs, x, y):\n with torch.autograd.set_detect_anomaly(True):\n network_output = self.__call__(x, obs)\n self.zero_grad()\n # l1_loss = self.lambda1 * torch.norm(final_param, 1)\n loss = self.loss(network_output, y)\n # print(loss[:5,:].data.cpu().numpy())\n loss = loss.sum(dim=1).mean()\n loss.backward()\n self.opt.step()\n return loss.data.cpu().numpy()", "title": "" }, { "docid": "0561287215c289ec7301654ac05fcdd7", "score": "0.48443666", "text": "def model(x, beta):\n\n return polyval(x, beta)", "title": "" }, { "docid": "cb2020db27057baf4f76704c957c5a3a", "score": "0.48416653", "text": "def fit(self, y, **kwargs):\n pass", "title": "" } ]
9bc9e8fd60f198e6e2715832dcc25ddd
Reads a codon MSA file and translates it to an integer array coded for amino acids.
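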
[ { "docid": "1ac40774e3fb5be53773cc01e88e2d93", "score": "0.7213606", "text": "def get_aa_msa_from_codon_msa(filename):\n msa = get_codon_msa_as_int_array(filename)\n return CODON_AA_MAP[msa.long()]", "title": "" } ]
[ { "docid": "7d60cdf54d31288c30a114ffd553da6f", "score": "0.7629135", "text": "def get_codon_msa_as_int_array(filename, as_torch=True):\n seq_iter = get_msa_from_aln_iter(filename)\n ret = np.array([codon_seq_to_int_list(seq) for seq in seq_iter], \n dtype=np.uint8) \n if as_torch: \n ret = torch.from_numpy(ret)\n return ret", "title": "" }, { "docid": "ff7b4bf5dceee9428cae4e11e37df39c", "score": "0.7510381", "text": "def get_aa_msa_as_int_array(filename, as_torch=True):\n seq_iter = get_msa_from_aln_iter(filename)\n aa_map = AA_MAP.copy()\n aa_map['-'] = 20 # add the gap character at the end\n ret = np.array([list(map(aa_map.get, seq)) for seq in seq_iter], \n dtype=np.uint8) \n if as_torch: \n ret = torch.from_numpy(ret)\n return ret", "title": "" }, { "docid": "f71ce8969cd1470d35779edd9ac65697", "score": "0.60501117", "text": "def dope_to_arr_ca(input_dope_file):\n\n nb_aa = 20\n nb_coef = int(nb_aa*(nb_aa+1)/2) # Low-memory storage of symetric array\n dope_arr = np.full((nb_coef, 30), np.NaN) # 2D array\n k = 0\n index_aa = {} # To make correspond amino acids with index inside the array\n iterat = it.product(range(nb_aa), range(nb_aa))\n\n for one_line in input_dope_file:\n splitted_line = one_line.split()\n\n if splitted_line[1] == \"CA\" and splitted_line[3] == \"CA\":\n # We get the aa name with its corresponding index:\n if k < 20:\n index_aa[three_to_one(splitted_line[2])] = k\n k += 1\n\n # Corresponding index in list with index in array (file):\n idx_l, idx_c = next(iterat)\n i, j = min(idx_l, idx_c), max(idx_l, idx_c)\n ind_list = i*nb_aa - int((i-1)*i/2) + j-i\n\n # Assignation only if it was not already assigned\n if np.isnan(dope_arr[ind_list][0]):\n nrgies = list(map(float, splitted_line[4:]))\n dope_arr[ind_list] = nrgies\n\n return (dope_arr, index_aa)", "title": "" }, { "docid": "356efc6745dfa0db56b4fa09a28f0587", "score": "0.60237855", "text": "def parse_fasta(seq_path):\n\n # parse sequence and chromosome from fasta file\n num_data = np.round(sum(1 for line in open(seq_path))/2).astype(int)\n fin = open(seq_path, \"r\")\n sequences = []\n for j in range(num_data):\n coord = fin.readline()\n line = fin.readline()[:-1].upper()\n sequences.append(line)\n sequences = np.array(sequences)\n return sequences", "title": "" }, { "docid": "8ac0160241b4e955c1253d618419ce3f", "score": "0.5857021", "text": "def read_ints(file_name: str) -> np.array:\n res = []\n with open(file_name) as f:\n for line in f:\n res.append(int(line))\n return np.array(res, dtype='int')", "title": "" }, { "docid": "63bafb3315d8d0d91e7e2bf09f7c070e", "score": "0.58541524", "text": "def write_amino_acids(seq_dna, out_file):\n codon_table = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W'} \n # gene dictionary reference: https://pythonforbiologists.com/dictionaries\n\n input_pos = [0,1,2]\n for pos in input_pos:\n amino_acids = 
find_reading_frame(seq=seq_dna, pos=pos, codon_table=codon_table)\n # print(amino_acids)\n comment = \"5'3' Frame \" + str(pos+1) + \"\\n\"\n write_frames(gene=comment, filename=out_file)\n write_frames(gene=amino_acids + \"\\n\\n\", filename=out_file)\n\n comp_dict = {'A': 'T', 'T':'A', 'C':'G', 'G':'C'}\n comp_rev_seq = complement(seq=seq_dna,comp=comp_dict)\n for pos in input_pos:\n rev_amino_acids = find_reading_frame(seq=comp_rev_seq, pos=pos, codon_table=codon_table)\n # print(rev_amino_acids)\n comment = \"3'5' Frame \" + str(pos+1) + \"\\n\"\n write_frames(gene=comment, filename=out_file)\n write_frames(gene=rev_amino_acids + \"\\n\\n\",filename=out_file)", "title": "" }, { "docid": "2b7574d5ef5941a555f4bc7266b032ae", "score": "0.5811241", "text": "def mrcToArray(filename, msg=True):\n\tnumer = mrc.read(filename)\n\tif msg is True:\n\t\tapDisplay.printMsg(\"reading MRC: \"+apDisplay.short(filename)+\\\n\t\t\t\" size:\"+str(numer.shape)+\" dtype:\"+str(numer.dtype))\n\treturn numer", "title": "" }, { "docid": "35e178030df1affcc73e1c4d9ab5d51e", "score": "0.5753705", "text": "def _align_dna_amino(self):\n # Create codons\n start = self.codon_start - 1\n s = self.coding_sequence[start:]\n split_seq = [s[i:i + 3] for i in range(0, len(s), 3)]\n dna_to_aa = []\n\n for i, v in enumerate(split_seq):\n try:\n dna = split_seq[i]\n amino = codontable[dna.upper()]\n amino_min = aminoacidtable[amino.upper()]\n\n if i >= len(self.aa_seq):\n break\n\n aa = self.aa_seq[i]\n dna_to_aa.append({dna: aa})\n\n if aa is not amino_min:\n print('error')\n\n except IndexError as e:\n print('Seq: {} doesnt have AA'.format(dna))\n\n self.dna_to_aa = dna_to_aa", "title": "" }, { "docid": "87a5a2c7ef09e1f7ae33095d8ef568ec", "score": "0.5701671", "text": "def getAminoAcids(codons):\r\n for codon in codons:\r\n aminoAcid = codonToAminoAcid(codon)\r\n yield aminoAcid + (\" \" * (27 - len(aminoAcid))) + codon", "title": "" }, { "docid": "c0ede54672db3335b1e52b223329cb83", "score": "0.56998825", "text": "def parse_naca_code(naca_code: str) -> map:\n digits = naca_code.upper().strip(\"NACA\")\n if len(digits) == 4 and all(d.isdigit() for d in digits):\n return map(int, digits)\n else:\n raise ValueError(\"NACA code must contain 4 numbers\")", "title": "" }, { "docid": "9abdbae4021df7975944ecdfc3ecaac1", "score": "0.56819904", "text": "def read_seq_ids(fasta_fp):\n return [record.id for record in SeqIO.parse(str(fasta_fp), \"fasta\")]", "title": "" }, { "docid": "3c82e14f6f88eb79677b244d5dea7b1a", "score": "0.56713694", "text": "def ReadFASTA(fastafile):\n seqs =[]\n header = None\n for seq_record in SeqIO.parse(fastafile, \"fasta\"):\n seq_record.seq.alphabet=IUPAC.unambiguous_dna\n seqs.append(seq_record)\n\n return seqs", "title": "" }, { "docid": "537b2d61bad1368176da6f1b72cae2d2", "score": "0.5665044", "text": "def load(self, file):\n\n address = 0\n\n f = open(file, \"r\")\n\n for line in f.readlines():\n split_line = line.split(\"#\")[0]\n if split_line is \"\" or split_line is \"\\n\":\n continue\n conversion = int(split_line, 2)\n self.ram[address] = conversion\n address += 1\n\n f.close()", "title": "" }, { "docid": "00d5d3101623a090ab4d425f728b57fb", "score": "0.56195194", "text": "def get_alignment_int_form(file_name, biomolecule='protein'):\n\n biomolecule=biomolecule.strip().upper()\n if biomolecule not in ['PROTEIN','RNA']:\n logger.error(\n '\\n\\t{} entered. 
Biomolecule must be either PROTEIN or RNA'.format(\n biomolecule))\n raise ValueError\n NUM_SITE_STATES = 21 if biomolecule == 'PROTEIN' else 5\n RES_TO_INT = RES_TO_INT_ALL[biomolecule]\n alignment_int_form = []\n alignment = get_alignment_from_fasta_file(file_name)\n\n num_seqs_with_non_standard_res = 0\n num_non_standard_res = 0\n total_num_seqs_in_msa = 0\n for seq in alignment:\n try:\n seq_int = [RES_TO_INT[res.upper()] for res in seq]\n except KeyError:\n num_seqs_with_non_standard_res += 1\n seq_int = []\n for res in seq:\n res = res.upper()\n if res in RES_TO_INT.keys():\n seq_int.append(RES_TO_INT[res.upper()])\n else:\n num_non_standard_res += 1\n seq_int.append(NUM_SITE_STATES)\n total_num_seqs_in_msa += 1\n if seq_int not in alignment_int_form:\n alignment_int_form.append(seq_int)\n if num_seqs_with_non_standard_res > 0:\n logger.info('\\n\\tFound {} non-standard residues in {} sequences'\n ''.format(num_non_standard_res, num_seqs_with_non_standard_res)\n )\n logger.info('\\n\\tTotal number of sequences read from file: {}'.format(total_num_seqs_in_msa))\n if not alignment_int_form:\n logger.error('\\n\\tNo data found in alignment in integer representation')\n raise ValueError\n return alignment_int_form", "title": "" }, { "docid": "6d0933aa678a55d4f8068e96bfde4f19", "score": "0.5598939", "text": "def test_read_file(self):\n test_filename = os.path.join(_dir_path, 'test.fasta')\n sequences = fastapy.read_file(test_filename)\n self.assertEqual(len(sequences), 3)\n self.assertEqual(sequences[0].sequence, 'ACD')", "title": "" }, { "docid": "417afb99d1ed330794aab3c823458ac6", "score": "0.5589094", "text": "def read_fasta(fasta_file):\n aa1 = list(\"ACDEFGHIKLMNPQRSTVWY\")\n aa3 = \"ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR\".split()\n aa123 = dict(zip(aa1, aa3))\n # aa321 = dict(zip(aa3, aa1))\n with open(fasta_file) as fasta:\n seq = ''\n for line in fasta:\n if line[0] == '>':\n pass\n else:\n seq += line.strip()\n seq = [aa123[r] for r in seq]\n return seq", "title": "" }, { "docid": "fadeb33db1a541837351a22a3e7f3154", "score": "0.5551192", "text": "def parse_fasta_file(fasta_path, clades, use_codons=True, margin_width=0, trans_dict=None, remove_stop_rows=False):\n trans_dict = {} if trans_dict is None else trans_dict\n species = [leaf_order(c,use_alternatives=True) for c in clades] if clades != None else []\n \n entries = [rec for rec in SeqIO.parse(fasta_path, \"fasta\")]\n # parse the species names\n spec_in_file = [e.id.split('|')[0] for e in entries]\n\n # translate species name from file to taxon ids\n translator = lambda s : trans_dict[s] if s in trans_dict else s\n msa_taxon_ids = list(map(translator, spec_in_file))\n \n # compare them with the given references\n ref_ids = [[(r,i) for r in range(len(species)) for i in range(len(species[r])) if s in species[r][i] ] for s in msa_taxon_ids]\n\n # check if these are contained in exactly one reference clade\n n_refs = [len(x) for x in ref_ids]\n\n if 0 == min(n_refs) or max(n_refs) > 1:\n return -1, 0, None\n\n ref_ids = [x[0] for x in ref_ids]\n\n if len(set(r for (r,i) in ref_ids)) > 1:\n return -1, 0, None\n\n # the first entry of the fasta file has the header informations\n header_fields = entries[0].id.split(\"|\")\n # allow noninformative fasta headers as well\n frame = 0\n if len(header_fields) > 2:\n try:\n frame = int(header_fields[2][-1])\n except ValueError:\n pass # leave frame at 0 by default\n\n # read the sequences and trim them if wanted\n sequences = [str(rec.seq).lower() 
for rec in entries]\n sequences = sequences[margin_width:-margin_width] if margin_width > 0 else sequences\n\n msa = MSA(\n model = None,\n chromosome_id = None, \n start_index = None,\n end_index = None,\n is_on_plus_strand = True if len(header_fields) < 5 or header_fields[4] != 'revcomp' else False,\n frame = frame,\n spec_ids = ref_ids,\n offsets = [],\n sequences = sequences,\n use_codons = use_codons\n )\n # Use the correct onehot encoded sequences\n coded_sequences = msa.coded_codon_aligned_sequences if use_codons else msa.coded_sequences\n\n # remove all rows with an in-frame stop codon (except last col)\n stops = msa.in_frame_stops\n # print (msa, stops)\n remove_stop_rows = False\n if stops and remove_stop_rows :\n msa.delete_rows(stops)\n coded_sequences = coded_sequences[np.invert(stops)]\n # print (\"after stop deletion:\", msa, \"\\ncoded_sequences=\", coded_sequences)\n\n if len(msa.sequences) < 2:\n return -2, 0, None\n \n sequence_length = len(coded_sequences[0])\n if sequence_length == 0:\n return -2, 0, None\n\n # cardinality of the alphabet that has been onehot-encoded\n s = coded_sequences.shape[-1]\n \n # get the id of the used clade and leaves inside this clade\n clade_id = msa.spec_ids[0][0]\n num_species = max([len(specs) for specs in species])\n leaf_ids = [l for (c,l) in msa.spec_ids]\n \n \n # embed the coded sequences into a full MSA for the whole leaf-set of the given clade\n S = np.ones((num_species, sequence_length, s), dtype = np.int32)\n S[leaf_ids,...] = coded_sequences\n \n # make the shape conform with the usual way datasets are structured,\n # namely the columns of the MSA are the examples and should therefore\n # be the first axis\n S = np.transpose(S, (1,0,2))\n\n return clade_id, sequence_length, S", "title": "" }, { "docid": "92bcc99b6991e4618e0609984f9e75c8", "score": "0.5537557", "text": "def read_input(filename):\n\n with open(args.input, \"r\") as infile:\n raw_intcodes = infile.readlines()[0].strip().split(\",\")\n\n return raw_intcodes", "title": "" }, { "docid": "0f31317380cc55a6c5a397870147c797", "score": "0.5506271", "text": "def read_input_atm_man(filename):\n infile = open(filename,'r')\n trova_spip(infile,hasha='$')\n n_alt = int(infile.readline())\n trova_spip(infile,hasha='$')\n prof = []\n while len(prof) < n_alt:\n line = infile.readline()\n prof += list(map(float, line.split()))\n alts = np.array(prof)\n\n trova_spip(infile,hasha='$')\n prof = []\n while len(prof) < n_alt:\n line = infile.readline()\n prof += list(map(float, line.split()))\n pres = np.array(prof)\n\n trova_spip(infile,hasha='$')\n prof = []\n while len(prof) < n_alt:\n line = infile.readline()\n prof += list(map(float, line.split()))\n temp = np.array(prof)\n\n return alts, temp, pres", "title": "" }, { "docid": "4098f691b99e96fa1dd215c7b5f2ad32", "score": "0.54922396", "text": "def load_sequences( fasta_file ):\n\t\n\tsequences = {}\n\t\n\twith open( fasta_file ) as f:\n\t\theader = f.readline()[1:].strip()\n\t\tseq = []\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] == '>':\n\t\t\t\t\tsequences.update( { header: \"\".join( seq ).upper() } )\n\t\t\t\t\theader = line.strip()[1:]\n\t\t\t\t\tseq = []\n\t\t\telse:\n\t\t\t\tseq.append( line.strip() )\n\t\t\tline = f.readline()\n\t\tsequences.update( { header: \"\".join( seq ).upper() } )\t\n\treturn sequences", "title": "" }, { "docid": "8ac53509c8ffa1461a0a2b967362110f", "score": "0.5489592", "text": "def read_scoring_matrix(filename):\n with open(filename, \"r\") as file_:\n lines = []\n\n # Take 
uncommented lines\n for line in file_:\n if line[0] == \"#\":\n continue\n else:\n lines.append(line[1:].strip())\n\n # Make 2D array\n lines = [line.split() for line in lines]\n\n # Get amino acids names\n aminoacids = lines[0]\n\n # Get scoring matrix\n scores = lines[1:]\n scores = [[int(score) for score in score_line] for score_line in scores]\n\n # Construct scoring map\n scoring_function = {}\n for i, amino1 in enumerate(aminoacids):\n for j, amino2 in enumerate(aminoacids):\n scoring_function[(amino1, amino2)] = scores[i][j]\n\n return scoring_function", "title": "" }, { "docid": "16afc6b7ff407a7d9277e4a9c9e743b0", "score": "0.54728174", "text": "def read_lab1_input_sequence(filename):\n file = open(filename, 'r')\n input_sequence = []\n\n line = file.readline().strip()\n while line != \"\":\n input_sequence.append(int(line))\n line = file.readline().strip()\n\n file.close()\n return input_sequence", "title": "" }, { "docid": "642ab3d63ca1aa7b781799c2fa1e8761", "score": "0.54639226", "text": "def create_array_from_txt(file_name):\n f = open(file_name, \"r\")\n content = f.read()\n data = content.split()\n data = list(map(int, data))\n array_data = np.array(data)\n f.close()\n return array_data", "title": "" }, { "docid": "b1ac926afef07db906b230ae37faa30d", "score": "0.5442396", "text": "def read_fasta(file_path):\n seq_dict = {}\n for sequence in SeqIO.parse(file_path, \"fasta\"): #\n s = str(sequence.seq)\n seq_dict[sequence.id] = s\n return seq_dict", "title": "" }, { "docid": "0300dc5c25fd3a4b4977eaa83eba76bf", "score": "0.5435637", "text": "def ReadALN(self):\r\n \r\n alnName = self.fastaname[0:-6] + '.aln'\r\n\r\n reFilter = re.compile('Seq(\\d*) *(.*)')\r\n #returns the 'n' of Seqn and the subsequent alignment string\r\n\r\n alnHandle = open(alnName, 'r+t')\r\n for line in alnHandle:\r\n reOutput = reFilter.findall(line)\r\n if len(reOutput) == 1:\r\n self.alignedseqs[int(reOutput[0][0])] += reOutput[0][1]\r\n\r\n\r\n alnHandle.close()", "title": "" }, { "docid": "35fc05862cb5ba80de83e37d44a5bf1b", "score": "0.5435416", "text": "def _load_table_iana(filename):\n\n table = bytearray(0x110000)\n\n with open(filename) as fp:\n for line in fp:\n # Ignore csv header.\n if line == 'Codepoint,Property,Description\\n':\n continue\n\n m = IANA_LINE_REGEX.match(line)\n assert m, 'Unexpected format: %s' % line\n\n lo = int(m.group(1), 16)\n if m.group(2):\n hi = int(m.group(2)[1:], 16)\n else:\n hi = lo\n\n prop = PROPS[m.group(3)]\n for cp in range(lo, hi + 1):\n table[cp] = prop\n\n # Check that all codepoints are assigned.\n for cp in range(0x110000):\n assert table[cp] != 0, 'Codepoint missing: %d' % cp\n\n return table", "title": "" }, { "docid": "860e938d054d4af7891dbc316ee9b6e2", "score": "0.54304993", "text": "def coding_strand_to_AA(dna):\n \n amino_acid = \"\" #Sets a blank string to be filled\n count = len(dna)/3 #determines the number of actual codons \n for iteration in range(count):\n #breaks string up into codons\n codon_start = iteration*3\n codon_stop = 3 + codon_start\n codon = dna[codon_start: codon_stop]\n length = len(codons)\n for index in range(length):\n #searches which amino acid matches the codon\n if codon in codons[index]:\n amino_acid += aa[index]\n #print amino_acid\n return amino_acid", "title": "" }, { "docid": "40a2efb5fdb6fd29ae50deb33869d49e", "score": "0.54044086", "text": "def coding_strand_to_AA(dna):\n codons = divide_to_codons(dna)\n amino_acid = ''\n for codon in codons:\n if len(codon) == 3:\n amino_acid = amino_acid + aa_table[codon]\n return 
amino_acid", "title": "" }, { "docid": "a3bd8350e8da0ea86c26fd57d32455b8", "score": "0.53959894", "text": "def fasta_reader(fasname):\n location_marks = dict() \n for rec in SeqIO.parse(fasname, \"fasta\"):\n\n location_marks[rec.id] = 0 \n\n return location_marks", "title": "" }, { "docid": "bbb3b071cdcfa83f2dacd248e2da5211", "score": "0.53652775", "text": "def init_from_file(self, name_file=\"data.txt\"):\n file = open(name_file, mode='r')\n self.sequence = np.array([int(x) for x in file.readline().split(\" \") if x.isdigit()])\n self.T = len(self.sequence)\n self.A = np.math.floor(max(self.sequence) + 1)\n self.CMM_S = None", "title": "" }, { "docid": "ac613d67a5082965058d289a63b96349", "score": "0.53616405", "text": "def read_file():\n with open(\"../euler_files/p082_matrix.txt\") as file:\n\n return [list(map(int, line.strip().split(\",\"))) for line in file]", "title": "" }, { "docid": "4df5549d75ecd0eae88c97025df42928", "score": "0.53505486", "text": "def fasta2nuc(filename):\n\n if filename.split('.')[1] != 'fasta':\n raise ValueError('wrong file format .' + filename.split('.')[1] + ' => .fasta expected')\n\n with open(filename) as file:\n id = file.readline()[1:].strip() # identifier of sequence\n nuc = '' # sequence of nucleotides\n for line in file:\n if line[0] == '>': # take only first seq in case file contains more than 1\n break\n nuc += line.strip()\n return id, nuc", "title": "" }, { "docid": "5905b660a03e8de20cfd43a4842d89cf", "score": "0.5350113", "text": "def aa(codon):\n codon = codon.upper()\n aa = {\"ATT\": \"I\", \"ATC\": \"I\", \"ATA\": \"I\",\n \"CTT\": \"L\", \"CTC\": \"L\", \"CTA\": \"L\", \"CTG\": \"L\", \"TTA\": \"L\",\n \"TTG\": \"L\",\n \"GTT\": \"V\", \"GTC\": \"V\", \"GTA\": \"V\", \"GTG\": \"V\",\n \"TTT\": \"F\", \"TTC\": \"F\",\n \"ATG\": \"M\",\n \"TGT\": \"C\", \"TGC\": \"C\",\n \"GCT\": \"A\", \"GCC\": \"A\", \"GCA\": \"A\", \"GCG\": \"A\",\n \"GGT\": \"G\", \"GGC\": \"G\", \"GGA\": \"G\", \"GGG\": \"G\",\n \"CCT\": \"P\", \"CCC\": \"P\", \"CCA\": \"P\", \"CCG\": \"P\",\n \"ACT\": \"T\", \"ACC\": \"T\", \"ACA\": \"T\", \"ACG\": \"T\",\n \"TCT\": \"S\", \"TCC\": \"S\", \"TCA\": \"S\", \"TCG\": \"S\", \"AGT\": \"S\",\n \"AGC\": \"S\",\n \"TAT\": \"Y\", \"TAC\": \"Y\",\n \"TGG\": \"W\",\n \"CAA\": \"Q\", \"CAG\": \"Q\",\n \"AAT\": \"N\", \"AAC\": \"N\",\n \"CAT\": \"H\", \"CAC\": \"H\",\n \"GAA\": \"E\", \"GAG\": \"E\",\n \"GAT\": \"D\", \"GAC\": \"D\",\n \"AAA\": \"K\", \"AAG\": \"K\",\n \"CGT\": \"R\", \"CGC\": \"R\", \"CGA\": \"R\", \"CGG\": \"R\", \"AGA\": \"R\",\n \"AGG\": \"R\",\n \"TAA\": \"*\", \"TAG\": \"*\", \"TGA\": \"*\"}\n # Translate valid codon\n try:\n amino_a = aa[codon]\n except KeyError:\n amino_a = \"?\"\n return amino_a", "title": "" }, { "docid": "f0ea5fad419d16bbcb827b2d30ebf580", "score": "0.5328463", "text": "def readInput(fileName):\r\n with open(fileName, 'r') as file:\r\n fileContent = file.readlines()\r\n\r\n return [int(number) for number in fileContent[0]]", "title": "" }, { "docid": "ac6d7a4866afdedd67791c902ad33541", "score": "0.5317118", "text": "def coding_strand_to_AA(dna):\n length = len(dna)\n a = 0\n b = 3\n\n amino_acids = '' \n\n while b < length+1:\n amino_acids = amino_acids + aa_table[dna[a:b]]\n a += 3\n b += 3\n\n return amino_acids", "title": "" }, { "docid": "cf88583f0d10acbcbd706fd4fbbd5e54", "score": "0.531233", "text": "def readFasta(self, fastapath):\r\n \r\n with open(fastapath) as inp:\r\n \r\n inp.readline() # pop off the header\r\n seq = inp.readline().strip()\r\n\r\n self.seq = 
list(seq.replace('T','U'))", "title": "" }, { "docid": "764a61c1c309e5232995d2108d12cc88", "score": "0.5312016", "text": "def socnumber(fastas, nlag=30, **kw):\n if min_seq_len_norm_aa(fastas) < nlag + 1:\n print('Error: all the sequence length should be larger than the nlag+1: ' + str(nlag + 1) + '\\n\\n')\n return 0\n\n data_dir = os.path.dirname(os.path.realpath(__file__))\n data_file = os.path.join(data_dir, 'data', 'Schneider-Wrede.txt')\n data_file1 = os.path.join(data_dir, 'data', 'Grantham.txt')\n\n AA = 'ACDEFGHIKLMNPQRSTVWY'\n AA1 = 'ARNDCQEGHILKMFPSTWYV'\n\n DictAA = {}\n for i in range(len(AA)):\n DictAA[AA[i]] = i\n\n DictAA1 = {}\n for i in range(len(AA1)):\n DictAA1[AA1[i]] = i\n\n with open(data_file) as f:\n records = f.readlines()[1:]\n AADistance = []\n for i in records:\n array = i.rstrip().split()[1:] if i.rstrip() != '' else None\n AADistance.append(array)\n AADistance = np.array(\n [float(AADistance[i][j]) for i in range(len(AADistance)) for j in range(len(AADistance[i]))]).reshape((20, 20))\n\n with open(data_file1) as f:\n records = f.readlines()[1:]\n AADistance1 = []\n for i in records:\n array = i.rstrip().split()[1:] if i.rstrip() != '' else None\n AADistance1.append(array)\n AADistance1 = np.array(\n [float(AADistance1[i][j]) for i in range(len(AADistance1)) for j in range(len(AADistance1[i]))]).reshape(\n (20, 20))\n\n encodings = []\n\n for i in fastas:\n name, sequence = i[0], re.sub('-', '', i[1])\n code = [name]\n for n in range(1, nlag + 1):\n code.append(sum(\n [AADistance[DictAA[sequence[j]]][DictAA[sequence[j + n]]] ** 2 for j in range(len(sequence) - n)]) / (\n len(sequence) - n))\n\n for n in range(1, nlag + 1):\n code.append(sum([AADistance1[DictAA1[sequence[j]]][DictAA1[sequence[j + n]]] ** 2 for j in\n range(len(sequence) - n)]) / (len(sequence) - n))\n encodings.append(code)\n return encodings", "title": "" }, { "docid": "a1d02da7f0d3b52f48c0593a672e9d2a", "score": "0.5310091", "text": "def load_seq(fasta_file):\n retval = \"\"\n f = open(fasta_file)\n lines = f.readlines()\n for l in lines[1:]:\n retval += l[0:-1]\n f.close()\n return retval", "title": "" }, { "docid": "cb8d13e07543c672876f670e502bc1f3", "score": "0.52994066", "text": "def readAntennasFromCalPosition(xmlfile):\n if (not os.path.exists(xmlfile)):\n print(\"Could not find file\")\n return\n if (os.path.isdir(xmlfile)):\n xmlfile += '/CalPosition.xml'\n f = open(xmlfile,'r')\n antennas = []\n for line in f.readlines():\n loc = line.find('<antennaName>')\n if (loc >= 0):\n antennas.append(line[loc+len('<antennaName>'):].split('</antennaName>')[0].strip())\n f.close()\n return(np.unique(antennas))", "title": "" }, { "docid": "6e260f5cbde0a3d857539256575f7da8", "score": "0.5292423", "text": "def parse_fasta(fasta_file):\n identifiers = []\n sequences = []\n for line in fasta_file:\n line = line.rstrip()\n if line.startswith(\">\"):\n # Get rid of the \">\"\n line = line[1:]\n identifiers.append(line)\n else:\n sequences.append(line)\n return identifiers, sequences", "title": "" }, { "docid": "58a54ab7de410e7e9915e5871d143678", "score": "0.5281491", "text": "def load_fasta_file(filename):\n\n with open(filename, \"r\") as handle:\n records = list(SeqIO.parse(handle, \"fasta\"))\n return records", "title": "" }, { "docid": "a7376de043ef0dccc5b30015a48af257", "score": "0.52788895", "text": "def get_anatomical():\n return nib.load(os.path.join(get_resource_path(), \"MNI152_T1_2mm.nii.gz\"))", "title": "" }, { "docid": "cde9d722389c82da6a4439d9cdc3906c", "score": "0.52781785", "text": 
"def aminoAcid(dnaCodon):\n return(dnaCode[dnaCodon])", "title": "" }, { "docid": "d9fcde0dd831f89d5f6ba3c059b3c369", "score": "0.52779984", "text": "def read_FASTA(self, fasta_file):\n\t\tsequences = []\n\t\theaders = []\n\t\theader = None\n\t\tseq = \"\"\n\t\tfor line in fasta_file:\n\t\t\tif line.startswith(\">\"):\n\t\t\t\tif header is None:\n\t\t\t\t\theader = line.strip()[1:]\n\t\t\t\telse:\n\t\t\t\t\theaders.append(header)\n\t\t\t\t\tsequences.append(seq)\n\t\t\t\t\theader = line.strip()[1:]\n\t\t\t\t\tseq = \"\"\n\t\t\telse:\n\t\t\t\tseq += line.strip()\n\t\theaders.append(header)\n\t\tsequences.append(seq)\n\t\treturn headers, sequences", "title": "" }, { "docid": "910f702061d2d79c5f480260439e48c1", "score": "0.5276948", "text": "def readFASTA(inputfile):\n with open(inputfile, \"r\") as seqfile:\n # skip the name line \n seq = seqfile.readline()\n seq = seqfile.read()\n seq = seq.replace(\"\\n\", \"\")\n seq = seq.replace(\"\\t\", \"\") \n return seq", "title": "" }, { "docid": "910f702061d2d79c5f480260439e48c1", "score": "0.5276948", "text": "def readFASTA(inputfile):\n with open(inputfile, \"r\") as seqfile:\n # skip the name line \n seq = seqfile.readline()\n seq = seqfile.read()\n seq = seq.replace(\"\\n\", \"\")\n seq = seq.replace(\"\\t\", \"\") \n return seq", "title": "" }, { "docid": "910f702061d2d79c5f480260439e48c1", "score": "0.5276948", "text": "def readFASTA(inputfile):\n with open(inputfile, \"r\") as seqfile:\n # skip the name line \n seq = seqfile.readline()\n seq = seqfile.read()\n seq = seq.replace(\"\\n\", \"\")\n seq = seq.replace(\"\\t\", \"\") \n return seq", "title": "" }, { "docid": "9d7de27865f42ca1051a79e95a944970", "score": "0.52696174", "text": "def parse_fasta(filename):\n input_file = open(filename, 'r')\n id_list = []\n seq_list = []\n for line in input_file:\n if line.startswith('>'):\n id_list.append(line.strip())\n else:\n seq_list.append(line.strip())\n return id_list, seq_list", "title": "" }, { "docid": "529c3dbc79e66310f09e8d3244cab930", "score": "0.5258627", "text": "def open_asm(filename):\n # datapath = '/Users/carlyhendrickson/git/microsoft_malware_challenge/data/train/' + filename + '.asm'\n # datapath = '/nfs/research/agonzales/git/microsoft_malware_challenge/data/train/' + filename + '.asm'\n # datapath = '/home/afruizc/Documents/hacks/microsoft_malware_challenge/mmc_data/all_data/train/' + filename + '.asm'\n datapath = '/home/afruizc/Documents/hacks/microsoft_malware_challenge/data/all_data/train/' + filename + '.asm'\n with open(datapath, 'rb') as f:\n asm = [to_utf(line) for line in f.readlines()]\n return asm", "title": "" }, { "docid": "22631eb01479fd9b410250e8a16b7d2b", "score": "0.525636", "text": "def _readCMASS1(self, data, n):\r\n s = Struct(b'6i')\r\n nelements = (len(data) - n) // 24\r\n for i in xrange(nelements):\r\n eData = data[n:n + 24] # 6*4\r\n out = s.unpack(eData)\r\n #(eid,pid,g1,g2,c1,c2) = out\r\n elem = CMASS1(None, out)\r\n self.addOp2Element(elem)\r\n n += 24\r\n self.card_count['CMASS1'] = nelements\r\n return n", "title": "" }, { "docid": "5bb6451c2e786542bb27fabbdc1a8f51", "score": "0.52398986", "text": "def read_fasta(fastafile):\r\n with open(fastafile, 'r') as f:\r\n content = [l.strip() for l in f.readlines()]\r\n\r\n res = {}\r\n seq, seq_id = '', None\r\n for line in content:\r\n if line.startswith('>'):\r\n \r\n if len(seq) > 0:\r\n res[seq_id] = seq\r\n \r\n seq_id = line.replace('>', '')\r\n seq = ''\r\n else:\r\n seq += line\r\n res[seq_id] = seq\r\n return res", "title": "" }, { "docid": 
"91f25f458ab23d36f6df99f9d597b446", "score": "0.5233035", "text": "def parse_fasta_file (inPath, outPath):\n s = list(SeqIO.parse(str(inPath), 'fasta'))\n with io.open(outPath, \"w\") as fout:\n fout.write('\\t'.join(['ID', 'Length', 'Sequence']) + '\\n')\n for i, row in enumerate(s):\n fout.write('\\t'.join([row.id, str(len(row.seq)), str(row.seq)]) + '\\n')\n print('%d sequences extracted from file ' % (i + 1) + str(inPath) +\n ' and written to file ' + str(outPath))", "title": "" }, { "docid": "9116710c15c2787c2452d9a576bb876d", "score": "0.5232661", "text": "def read_states(file):\n f = open(file, 'r')\n content = f.readlines()\n for i in range(len(content)):\n content[i] = content[i].split(':')[1]\n content[i] = content[i][:-1]\n content[i] = list(content[i])\n content[i] = list(map(int,content[i]))\n states = state_to_num(np.array(content))\n return states", "title": "" }, { "docid": "d401cb53d42c2bb0e1485e388515d958", "score": "0.52263063", "text": "def get_char_data(name='infile'):\n regex = re.compile('[ATCG]')\n # transform = {'A': 0, 'T': 1, 'C': 2, 'G': 3}\n sequences, N, M = parse_data(name)\n numeric_sequences = np.chararray((N, M), unicode=True)\n n = 0\n for seq in sequences:\n sites = re.findall(regex, seq)\n if len(sites) == 0 or sites is None:\n continue\n sites = sites[-M:]\n numeric_sequences[n, :] = np.array([site for site in sites])\n n += 1\n return numeric_sequences, N, M", "title": "" }, { "docid": "80d60f673e82aeba3ee807ede0a975b4", "score": "0.5219522", "text": "def map_aln( alnfile : Path ) -> dict:\n\n aln = MultiFASTA_extra()\n mapping = {}\n\n try:\n aln.read_fasta(alnfile)\n for it in range(len(aln.df.Accession)):\n id = aln.df.Accession[it]\n seq = aln.df.Sequence[it]\n\n mapid = []\n count = 0\n for aa in seq:\n if aa != '-':\n count += 1\n mapid.append(count)\n\n mapping[id] = {'seq': seq, 'mapid': mapid}\n\n except OSError as e:\n print(\"File error:\", sys.exc_info()[0])\n raise\n\n except:\n print(\"Unrecognised header type by bioservices MultiFasta... 
try using something like Uniprot headers 'sp|UkbID|Blabla' ...\", sys.exc_info()[0])\n raise\n\n return mapping", "title": "" }, { "docid": "3cabb12b52566a2c63ef34015c7cca9a", "score": "0.52193844", "text": "def read_aps_13bm(fname, format, proj=None, sino=None):\n\n if format == 'netcdf4':\n files = glob.glob(fname[0:-5] + '*[1-3].nc')\n files.sort()\n #print('files', files)\n tomo = dxreader.read_netcdf4(files[1], 'array_data', slc=(proj, sino))\n\n flat1 = dxreader.read_netcdf4(files[0], 'array_data', slc=(proj, sino))\n flat2 = dxreader.read_netcdf4(files[2], 'array_data', slc=(proj, sino))\n flat = np.concatenate((flat1, flat2), axis = 0)\n del flat1, flat2\n\n setup = glob.glob(fname[0:-5] + '*.setup')\n setup = open(setup[0], 'r')\n setup_data = setup.readlines()\n result = {}\n for line in setup_data:\n words = line[:-1].split(':',1)\n result[words[0].lower()] = words[1]\n\n dark = float(result['dark_current'])\n dark = flat*0+dark\n\n #theta = np.linspace(0.0, np.pi, tomo.shape[0])\n\n theta = dxreader.read_netcdf4(files[1], 'Attr_SampleOmega')[:]/180.0*np.pi\n\n return tomo, flat, dark, theta", "title": "" }, { "docid": "01cb4126bf7b15625dd96664394f2348", "score": "0.5216532", "text": "def read_rannou_aer(filename):\n infile = open(filename, 'r')\n data = [line.split() for line in infile]\n data_arr = np.array(data)\n freq = np.array([float(r) for r in data_arr[:, 0]])\n extco = np.array([float(r) for r in data_arr[:, 1]])\n ssa = np.array([float(r) for r in data_arr[:, 2]])\n leg_coeff = data_arr[:, 3:]\n leg_coeff = leg_coeff.astype(np.float)\n infile.close()\n\n return freq,extco,ssa,leg_coeff", "title": "" }, { "docid": "ff3fb68a6c6529de1a7760b5e337ccb2", "score": "0.5213028", "text": "def read_fc_phonopy(FC_file,natom,masses):\n\n with open(FC_file,'r') as fc:\n lines=fc.readlines()\n\n nlineblock=4\n fc_all=np.zeros((natom,natom,3,3))\n start=1\n for i in range(natom):\n for j in range(natom):\n fc_block=lines[start+1:start+nlineblock]\n fc=np.loadtxt(fc_block)\n fc_all[i,j]=fc/np.sqrt(masses[i]*masses[j])\n #fc_all[i,j]=fc\n start=start+nlineblock\n \n return fc_all", "title": "" }, { "docid": "035ffe2aea33190a1f5a8f4edcbe675a", "score": "0.52113706", "text": "def load_sequences( fasta_file ):\n\t\n\tsequences = {}\n\t\n\twith open( fasta_file ) as f:\n\t\theader = f.readline()[1:].strip().split('.')[0]\n\t\tseq = []\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] == '>':\n\t\t\t\t\tsequences.update( { header: \"\".join( seq ) } )\n\t\t\t\t\theader = line.strip()[1:].split('.')[0]\n\t\t\t\t\tseq = []\n\t\t\telse:\n\t\t\t\tseq.append( line.strip() )\n\t\t\tline = f.readline()\n\t\tsequences.update( { header: \"\".join( seq ) } )\t\n\treturn sequences", "title": "" }, { "docid": "c93472367e362012f7cc19f63f327e95", "score": "0.5208727", "text": "def load(self, fileName):\n address = 0\n with open(fileName) as file:\n for line in file:\n line = line.split('#')[0].strip()\n try:\n instruction = int(line, 2)\n self.ram[address] = instruction\n address += 1\n except ValueError:\n continue", "title": "" }, { "docid": "fb5fb389a4cffda0eaed5cb7b1f0e09b", "score": "0.5202765", "text": "def read_data(filename):\n with open(filename) as f:\n nums = [int(line.split()[0]) for line in f]\n return nums", "title": "" }, { "docid": "c951089dd1db826d92958e68160a29fc", "score": "0.5202053", "text": "def readSequenceFastaFile(fasta_file):\n\n sequences = {}\n sequence = ''\n with open(fasta_file) as ff:\n for l in ff:\n if l.startswith('>'):\n if sequence != '':\n 
sequences[identifier] = sequence\n identifier = l.strip().replace('>','')\n sequence = ''\n else:\n sequence += l.strip()\n if sequence != '':\n sequences[identifier] = sequence\n\n return sequences", "title": "" }, { "docid": "7499a647b17f8af4782e76806edc851a", "score": "0.52014714", "text": "def read_assembly(filename):\n return open_asm(filename)\n # asm = [to_utf(line) for line in my_asm]", "title": "" }, { "docid": "72ca75b03bcd208b56741997ff4ed08e", "score": "0.5198437", "text": "def read_FASTA(fname):\r\n sequence=\"\"\r\n sequences=[]\r\n sequence_name=\"\"\r\n file=open(fname,'r')\r\n for line in file:\r\n if line[0]=='>':\r\n sequences.append((sequence_name,sequence))\r\n sequence_name=line[1:-1]\r\n sequence=\"\"\r\n else:\r\n sequence=sequence+line[:-1]\r\n else:\r\n sequences.append((sequence_name,sequence))\r\n sequences.pop(0)\r\n return sequences # a list of (sequence_name , sequence) tuples\r", "title": "" }, { "docid": "5aea31112ddfbd50a7862deaad446c08", "score": "0.5179237", "text": "def read_abinit_in(fd):\n\n tokens = []\n\n for line in fd:\n meat = line.split('#', 1)[0]\n tokens += meat.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index + 4].lower()[:3] != 'ang'):\n unit = Bohr\n acell = [unit * float(tokens[index + 1]),\n unit * float(tokens[index + 2]),\n unit * float(tokens[index + 3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index + 1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index + 1])\n\n index = tokens.index(\"typat\")\n typat = []\n while len(typat) < natom:\n token = tokens[index + 1]\n if '*' in token: # e.g. typat 4*1 3*2 ...\n nrepeat, typenum = token.split('*')\n typat += [int(typenum)] * int(nrepeat)\n else:\n typat.append(int(token))\n index += 1\n assert natom == len(typat)\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index + 1 + i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i] * float(tokens[index + 3 * i + 1]),\n acell[i] * float(tokens[index + 3 * i + 2]),\n acell[i] * float(tokens[index + 3 * i + 3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index + 3 * i + 1]),\n float(tokens[index + 3 * i + 2]),\n float(tokens[index + 3 * i + 3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit * float(tokens[index + 3 * i + 1]),\n unit * float(tokens[index + 3 * i + 2]),\n unit * float(tokens[index + 3 * i + 3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n\n try:\n ii = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[ii + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n assert len(atoms) == natom\n return atoms", "title": "" }, { "docid": 
"61f3606bad79a4197697ac5a4702789b", "score": "0.5169646", "text": "def bam_to_sRNA_counts(contents):\n\t# set up dataframe for sRNA data\n\tsRNA_data = pd.DataFrame(columns = [\"Sequence\", \"Strand\"])\n\t# read file contents\n\tbam_contents = pysam.AlignmentFile(contents, \"rb\")\n\tsense_seqs = []\n\tantisense_seqs = []\n\tfor line in bam_contents:\n\t\t# isolate the cigar string (6th field)\n\t\tline_contents = str(line).split(\"\\t\")\n\t\tcigar = line_contents[5]\n\t\t# keep only mapped reads\n\t\tif cigar.endswith(\"M\"):\n\t\t\t# isolate their strand and sequence fields\n\t\t\tstrand = line_contents[1]\n\t\t\tseq = line_contents[9]\n\t\t\tif line_contents[1] == \"16\":\n\t\t\t\tsRNA_data = sRNA_data.append({\"Sequence\": str(Seq(seq).reverse_complement()), \"Strand\": \"-\"}, ignore_index = True)\n\t\t\telse:\n\t\t\t\tsRNA_data = sRNA_data.append({\"Sequence\": seq, \"Strand\": \"+\"}, ignore_index = True)\n\t# add a length column\n\tsRNA_data[\"Length\"] = sRNA_data.apply(lambda row: len(row[\"Sequence\"]), axis = 1)\n\t# add a 5' base column\n\tsRNA_data[\"5base\"] = sRNA_data.apply(lambda row: row[\"Sequence\"][0], axis = 1)\n\t# create blank dataframe to hold counts of each base-strand combination for each length\n\tsRNA_counts = pd.DataFrame({\n\t\t\"Length\": list(range(17,36)),\n\t\t\"A_sense\": [0 for i in range(17,36)],\n\t\t\"C_sense\": [0 for i in range(17,36)],\n\t\t\"G_sense\": [0 for i in range(17,36)],\n\t\t\"U_sense\": [0 for i in range(17,36)],\n\t\t\"A_antisense\": [0 for i in range(17,36)],\n\t\t\"C_antisense\": [0 for i in range(17,36)],\n\t\t\"G_antisense\": [0 for i in range(17,36)],\n\t\t\"U_antisense\": [0 for i in range(17,36)]\n\t})\n\t# set the index as the sRNA lengths\n\tsRNA_counts = sRNA_counts.set_index(\"Length\")\n\t# go through the bam_contents_df dataframe, updating the counts in the sRNA_counts dataframe\n\t# outer loop iterates over each length\n\tfor length in sRNA_counts.index:\n\t\t# grab all sRNAs that have this length\n\t\tsRNAs_of_desired_length = sRNA_data[sRNA_data[\"Length\"]==length]\n\t\t# check the strand of each sRNA\n\t\tfor index, row in sRNAs_of_desired_length.iterrows():\n\t\t\tif row[\"Strand\"] == \"+\" and row[\"5base\"] == \"A\":\n\t\t\t\tsRNA_counts.at[length, \"A_sense\"] += 1\n\t\t\telif row[\"Strand\"] == \"+\" and row[\"5base\"] == \"C\":\n\t\t\t\tsRNA_counts.at[length, \"C_sense\"] += 1\n\t\t\telif row[\"Strand\"] == \"+\" and row[\"5base\"] == \"G\":\n\t\t\t\tsRNA_counts.at[length, \"G_sense\"] += 1\n\t\t\telif row[\"Strand\"] == \"+\" and row[\"5base\"] == \"T\":\n\t\t\t\tsRNA_counts.at[length, \"U_sense\"] += 1\n\t\t\telif row[\"Strand\"] == \"-\" and row[\"5base\"] == \"A\":\n\t\t\t\tsRNA_counts.at[length, \"A_antisense\"] -= 1\n\t\t\telif row[\"Strand\"] == \"-\" and row[\"5base\"] == \"C\":\n\t\t\t\tsRNA_counts.at[length, \"C_antisense\"] -= 1\n\t\t\telif row[\"Strand\"] == \"-\" and row[\"5base\"] == \"G\":\n\t\t\t\tsRNA_counts.at[length, \"G_antisense\"] -= 1\n\t\t\telif row[\"Strand\"] == \"-\" and row[\"5base\"] == \"T\":\n\t\t\t\tsRNA_counts.at[length, \"U_antisense\"] -= 1\n\t# put Length back as a column\n\tsRNA_counts.reset_index(inplace = True)\n\treturn sRNA_counts", "title": "" }, { "docid": "34d674ca08ba664dd284893a875e41b4", "score": "0.5154352", "text": "def get_amino_acid_handler(addr, tags, stuff, source):\n\tglobal out_c_max\n\tglobal cn\n\tmsg = OSC.OSCMessage()\n\tmsg.setAddress(\"/aminoAcidCounts\")\n\tfor cnt in cn.getAminoAcidCounts():\n\t\tmsg.append(cnt)\n\tout_c_max.send(msg)", "title": 
"" }, { "docid": "c97e6e3f611ad163e46dc16caced9362", "score": "0.5150127", "text": "def read_data(filename):\n \n with open(filename, 'r') as f:\n first = f.readline()\n C, n = first.split()\n C = int(C)\n n = int(n)\n data = []\n for i in range(n):\n line = f.readline()\n data.append([int(i) for i in line.split()])\n return C, n, data", "title": "" }, { "docid": "706d8614bc70268b4fedaaf9a506dd69", "score": "0.5148143", "text": "def read_fasta(n_lines, file):\n mapping = {letter: index for index, letter in enumerate(['A', 'C', 'G', 'T'])}\n \n with open(file, 'r') as fasta_file:\n for line_no, line in enumerate(fasta_file):\n if line[0] == '>':\n continue\n elif n_lines == line_no:\n break\n for number in [mapping[i] for i in str(line).upper().strip()]:\n yield number", "title": "" }, { "docid": "dcf121a89fac238c3fbfef1418d3f82d", "score": "0.51404965", "text": "def load(self, file):\n try:\n address = 0\n\n with open(file) as f:\n for line in f:\n # strip out white space, split at inline comment\n cleaned_line = line.strip().split(\"#\")\n # grab number\n value = cleaned_line[0].strip()\n\n # check if blank or not, if blank skip to next line\n if value != \"\":\n # convert from binary to num\n num = int(value, 2)\n self.ram[address] = num\n address += 1\n\n else:\n continue\n\n except FileNotFoundError:\n # exit and give error\n print(\"ERROR: File not found\")\n sys.exit(1)", "title": "" }, { "docid": "436930055ef6c0193ec5986fc35c5368", "score": "0.514043", "text": "def acc2npy(filename, nant=96, npol=2):\n nantpol = nant * npol\n nsb = 512 # ACC have 512 subbands\n nints = 1 # ACC only have a single integration\n corrMatrix = np.fromfile(filename, dtype='complex') # read in the correlation matrix\n return np.reshape(corrMatrix, (nints, nsb, nantpol, nantpol))", "title": "" }, { "docid": "59a56524346b75a525007166a7c79ad5", "score": "0.51296645", "text": "def read_fasta(file):\n if not os.path.exists(file):\n raise ValueError('Error: \"' + file + '\" does not exist.')\n\n with open(file) as f:\n records = f.read()\n\n if re.search('>', records) is None:\n raise TypeError('The input file does not seem to be in fasta format.')\n\n records = records.split('>')[1:]\n myFasta = []\n for fasta in records:\n array = fasta.split('\\n')\n name, sequence = array[0].split()[0], re.sub('[^ARNDCQEGHILKMFPSTWYV-]', '-', ''.join(array[1:]).upper())\n myFasta.append([name, sequence])\n return myFasta", "title": "" }, { "docid": "cc75d053123b2672d5bdc65e59c934aa", "score": "0.5124804", "text": "def open_asm2(filename):\n datapath = os.path.abspath(filename + '.asm')\n with open(datapath, 'rb') as f:\n asm = [line for line in f.readlines()]\n return asm", "title": "" }, { "docid": "1dd4033d726a13de97b0f6c8f082e645", "score": "0.51190424", "text": "def ReadFasta(aPath):\n fr = open(aPath)\n dictOfsequences = {}\n seqID = ''\n seq = ''\n flag = 0\n for line in fr:\n if '>' in line:\n if flag != 0:\n dictOfsequences[seqID] = seq\n seq = ''\n seqID=line.replace('\\n','')\n flag += 1\n else:\n seq += line.replace('\\n','')\n dictOfsequences[seqID] = seq\n fr.close()\n return dictOfsequences", "title": "" }, { "docid": "6854073fba1d28828900aa63f53c49e7", "score": "0.5116047", "text": "def read_file( filename, format = None ) :\n ret = []\n for ct in structure.StructureReader( filename, format = format ) :\n struc = SchrodStruc( ct )\n for i in range( 1, len( struc.atom ) + 1 ) :\n struc.atom_prop[i][\"orig_index\"] = i\n ret.append( struc )\n \n return ret", "title": "" }, { "docid": 
"f55ac77904baca0d87ae58518c85e988", "score": "0.5109424", "text": "def aa_mass():\n amino_acid_mass = {\n 'G': 57,\n 'A': 71,\n 'S': 87,\n 'P': 97,\n 'V': 99,\n 'T': 101,\n 'C': 103,\n 'I': 113,\n 'L': 113,\n 'N': 114,\n 'D': 115,\n 'K': 128,\n 'Q': 128,\n 'E': 129,\n 'M': 131,\n 'H': 137,\n 'F': 147,\n 'R': 156,\n 'Y': 163,\n 'W': 186\n }\n return amino_acid_mass", "title": "" }, { "docid": "87e2a216f23010650fb2891c69892d3d", "score": "0.51050353", "text": "def read_abacus_output(filename):\n force = None\n with open(filename, \"r\") as file:\n for line in file:\n if re.search(r\"TOTAL ATOM NUMBER = [0-9]+\", line):\n natom = int(re.search(\"[0-9]+\", line).group())\n if re.search(r\"TOTAL-FORCE \\(eV/Angstrom\\)\", line):\n force = np.zeros((natom, 3))\n for i in range(4):\n file.readline()\n for i in range(natom):\n _, fx, fy, fz = file.readline().split()\n force[i] = (float(fx), float(fy), float(fz))\n if force is None:\n raise ValueError(\"Force data not found.\")\n\n return force", "title": "" }, { "docid": "3755ca8b4d9fe86e736bc98ff0c48c8c", "score": "0.5097515", "text": "def readfasta(ffile):\n record_dict = SeqIO.to_dict(SeqIO.parse(ffile, \"fasta\"))\n return record_dict", "title": "" }, { "docid": "0d47612cbc8139b3f48a9d2c4189a9e7", "score": "0.5091523", "text": "def read_fci_space_index(filename):\n detlist = np.loadtxt(filename)[:, 1]\n detlist = detlist.astype(np.int32)\n return detlist", "title": "" }, { "docid": "0127b8f41fcf4de07971adceacaae095", "score": "0.5090024", "text": "def load_numbers(file_path: str) -> [int]:\n with open(file_path, 'br') as f:\n header, l = read_header(f)\n assert Metadata.f_coding in header, \\\n \"File {0} metadata does not contain encoding information\".format(file_path)\n assert 'count' in header, \\\n \"File {0} metadata does not contain the count\".format(file_path)\n decoder = research.utils.get_class_of(header[Metadata.f_coding]).Decoder(f)\n return [decoder.decode() for i in range(header['count'])]", "title": "" }, { "docid": "3442906d60975cb5a77f9fc4cca2e9d4", "score": "0.50861484", "text": "def load_sampa_ipa():\n conversion = dict()\n ipa_sampa_mapfile = os.path.join(paths.resources,\n \"dict\",\n \"sampa-ipa.txt\")\n\n with codecs.open(ipa_sampa_mapfile, \"r\", 'utf-8') as f:\n for line in f.readlines():\n tab_line = line.split()\n if len(tab_line) > 1:\n # 1: IPA; 0: SAMPA\n conversion[tab_line[1].strip()] = tab_line[0].strip()\n f.close()\n\n return conversion", "title": "" }, { "docid": "5b700bd41d528b0f380871f4d7af4752", "score": "0.50795937", "text": "def get_alignment_from_fasta_file(file_name):\n alignment = []\n try:\n record_iterator = AlignIO.read(file_name, 'fasta')\n #biopython just reads the records if there are tags (>some key).\n #It doesn't know if the file is really a biological sequence or not\n except Exception as expt:\n error_msg='\\n\\tError occured while reading from fasta file: {}.' 
+\\\n '\\n\\tError type:{}\\n\\tArguments:{!r}'\n logger.error(error_msg.format(file_name, type(expt).__name__, expt.args))\n raise\n else:\n if any(True for _ in record_iterator):\n for record in record_iterator:\n seq = record.seq.strip()\n if seq: alignment.append(seq.upper())\n if not alignment:\n logger.error(\n '\\n\\trecord_iterator returned by AlignIO.read()'\n ' has no sequences',\n )\n raise ValueError\n\n else:\n logger.error(\n '\\n\\trecord_iterator returned by AlignIO.read() is empty',\n )\n raise ValueError\n return alignment", "title": "" }, { "docid": "2eafab59223f4be9b289607a4069a69d", "score": "0.50759774", "text": "def readCDIAC(filename):\n regex = r\"([0-9]+)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\\s+(-?[0-9]*\\.[0-9]*)\"\n # date format seems to be month/day/year\n data = []\n for i in range(12): \n data.append([])\n in_file =open(filename,'r')\n for line in in_file:\n #print line\n matchedcontent = re.match(regex,line)\n # #print matchedcontent\n if(matchedcontent<> None):\n # #print matchedcontent.group(1)\n year = int(matchedcontent.group(1))\n #print date\n for i in range(12):\n data[i].append((year,float(matchedcontent.group(2+i))))\n #print \"read \", len(data), \" times 12 data points!\"\n in_file.close()\n assert len(data)>0\n data.sort()\n #data.reverse()\n return data", "title": "" }, { "docid": "84d266ad2c636072d22740a662ed81d3", "score": "0.5074481", "text": "def find_reading_frame(seq,pos,codon_table):\n amino_acid = \"\"\n for i in range(pos, len(seq)-2, 3):\n if i+2 < len(seq):\n codon = seq[i:i+3]\n aa = codon_table[codon]\n amino_acid += aa\n \n return amino_acid", "title": "" }, { "docid": "df932ce03bf68f96d20080beb16a83d0", "score": "0.50627875", "text": "def read_file(file_name):\n with open(file_name) as f:\n content = f.readlines()\n length = int(content[0])\n seq = [int(s) for s in content[1].split(\" \")]\n return (length, seq)", "title": "" }, { "docid": "9df5d019bc2a71e4c50cd4ad1bdf5d2e", "score": "0.50526357", "text": "def nmbroto(fastas, props=('CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',\n 'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201'),\n nlag=30, **kw):\n if min_seq_len_norm_aa(fastas) < nlag + 1:\n raise AssertionError(f\"All sequences should have length greater than nlag+1: {str(nlag + 1)}\")\n\n AA = 'ARNDCQEGHILKMFPSTWYV'\n data_dir = os.path.dirname(os.path.realpath(__file__))\n data_file = os.path.join(data_dir, 'data', 'AAidx.txt')\n\n with open(data_file) as f:\n records = f.readlines()[1:]\n myDict = {}\n for i in records:\n array = i.rstrip().split('\\t')\n myDict[array[0]] = array[1:]\n\n aa_idx = []\n aa_idx_name = []\n for i in props:\n if i in myDict:\n aa_idx.append(myDict[i])\n aa_idx_name.append(i)\n else:\n print('\"' + i + '\" properties not exist.')\n return None\n\n aa_idx1 = np.array([float(j) for i in aa_idx for j in i])\n aa_idx = aa_idx1.reshape((len(aa_idx), 20))\n pstd = np.std(aa_idx, axis=1)\n pmean = np.average(aa_idx, axis=1)\n\n for i in range(len(aa_idx)):\n for j in range(len(aa_idx[i])):\n aa_idx[i][j] = (aa_idx[i][j] - pmean[i]) / pstd[i]\n\n index = {}\n for i in range(len(AA)):\n index[AA[i]] = i\n\n encodings = []\n\n for i in fastas:\n name, sequence = i[0], re.sub('-', '', i[1])\n code = [name]\n N = len(sequence)\n for prop in range(len(props)):\n for n in range(1, 
nlag + 1):\n if len(sequence) > nlag:\n # if key is '-', then the value is 0\n rn = sum(\n [aa_idx[prop][index.get(sequence[j], 0)] * aa_idx[prop][index.get(sequence[j + n], 0)] for j in\n range(len(sequence) - n)]) / (N - n)\n else:\n rn = 'NA'\n code.append(rn)\n encodings.append(code)\n return encodings", "title": "" }, { "docid": "b83855ab18391ea858d31813239395b6", "score": "0.5048227", "text": "def loadAnnotatedPrions(ifile):\n from Bio import SeqIO\n S=[]\n with open(ifile, \"rU\") as fhandle:\n for record in SeqIO.parse(fhandle, \"fasta\"): \n seq=record.seq.tostring()\n try:\n idx=[int(i)-1 for i in record.description.split('#')[1].split('-')]\n except IndexError:\n idx=[0,len(seq)-1] \n \n S.append((seq,idx))\n return tuple(map(list,[a for a in zip(*S)]))", "title": "" }, { "docid": "5ca4c55ec566b76509179f060d70abd7", "score": "0.5046303", "text": "def create_anndata(path):\n with tf.io.gfile.GFile(os.path.join(path, 'matrix.mtx'), mode='rb') as f:\n matrix = scipy.io.mmread(f)\n matrix = scipy.sparse.csr_matrix(matrix)\n adata = anndata.AnnData(matrix)\n adata = adata.transpose()\n with tf.io.gfile.GFile(os.path.join(path, 'barcodes.tsv'), mode='r') as f:\n barcodes = pd.read_csv(f, sep='\\t', header=None)[0]\n adata.obs_names = barcodes\n with tf.io.gfile.GFile(os.path.join(path, 'bins.tsv'), mode='r') as f:\n bins = pd.read_csv(f, sep='\\t', header=None)[0]\n adata.var_names = bins\n return adata", "title": "" }, { "docid": "85adb0a3033f540ca49619de530962af", "score": "0.5034957", "text": "def read(self):\n try:\n f = open(self.datfile, 'r')\n except IOError:\n print('ERROR: data file not found!')\n exit()\n\n # Get rid of the header\n for _ in range(self.header_length):\n f.readline()\n\n # Read nuclide mass data\n for line in f:\n ls = line.strip()\n if ls:\n # Pull data out of the line\n line, _ = str_head_pop(line, 4)\n line, n = str_head_pop(line, 5)\n line, z = str_head_pop(line, 5)\n line, a = str_head_pop(line, 5)\n line, _ = str_head_pop(line, 1)\n line, element = str_head_pop(line, 3)\n line, origin = str_head_pop(line, 4)\n line, _ = str_head_pop(line, 1)\n line, mexcess = str_head_pop(line, 13)\n mexcess = cleanup(mexcess)\n line, d_mexcess = str_head_pop(line, 11)\n d_mexcess = cleanup(d_mexcess)\n line, nucbind = str_head_pop(line, 11)\n nucbind = cleanup(nucbind)\n line, d_nucbind = str_head_pop(line, 9)\n d_nucbind = cleanup(d_nucbind)\n line, _ = str_head_pop(line, 1)\n line, decay_type = str_head_pop(line, 2)\n line, ebeta = str_head_pop(line, 11)\n ebeta = cleanup(ebeta)\n line, d_ebeta = str_head_pop(line, 9)\n d_ebeta = cleanup(d_ebeta)\n line, _ = str_head_pop(line, 1)\n line, mass = str_head_pop(line, 16)\n mass = cleanup(mass)\n line, d_mass = str_head_pop(line, 11)\n d_mass = cleanup(d_mass)\n\n # Store nuclide data\n nuclide = AMENuclide(n, z, a, element.strip(), origin.strip(),\n mexcess, d_mexcess, nucbind,\n d_nucbind, decay_type.strip(), ebeta,\n d_ebeta, mass, d_mass)\n self.nuclides.append(nuclide)\n f.close()", "title": "" }, { "docid": "eb1b57dfc59b2277cbe57951c301117d", "score": "0.503341", "text": "def histo(namefile):\n amino_acid_dictionary = {\"ALA\":0, \"ARG\":0, \"ASN\":0, \"ASP\":0, \"ASX\":0, \"CYS\":0, \"GLU\":0, \"GLN\":0, \"GLX\":0, \"GLY\":0, \"HIS\":0, \"ILE\":0, \"LEU\":0, \"LYS\":0, \"MET\":0, \"PHE\":0, \"PRO\":0, \"SER\":0, \"THR\":0, \"TRP\":0, \"TYR\":0, \"VAL\":0}\n with open(namefile, 'r') as myfile: \n whole_file_list = myfile.readlines()\n seq_string_a = \"\"\n for i in range(len(whole_file_list)):\n whole_file_record = 
whole_file_list[i]\n if whole_file_record.startswith(\"SEQRES\"):\n seq_string = whole_file_record[17:70]\n seq_string_a = seq_string_a + seq_string\n new_seq = seq_string_a.split(' ')\n new_seq_clean = []\n\n for liny in range(len(new_seq)):#loop through the string of amino acids to remove '' and \\n and populate the list new_seq_clean with ONLY amino acids\n if new_seq[liny] != '':\n new_seq_clean.append(new_seq[liny])\n\n\n for amino_acid_a in range(len(new_seq_clean)):\n amino_acid_dictionary[new_seq_clean[amino_acid_a]] += 1 ##Iterate through the dictionary to count and record every occurance of each amino acid\n \n return amino_acid_dictionary", "title": "" }, { "docid": "3365700381fb52d15e0ad52d4c70fa7d", "score": "0.50183314", "text": "def read_midi(file):\n \n print(\"Loading:\", file)\n notes = []\n notes_to_parse = None\n \n #parsing a midi file\n midi = converter.parse(file)\n \n #grouping based on different instruments\n s2 = instrument.partitionByInstrument(midi)\n\n #Looping over all the instruments\n for part in s2.parts:\n #select elements of only piano\n if 'Piano' in str(part): \n notes_to_parse = part.recurse() \n \n #finding whether a particular element is note or a chord\n for element in notes_to_parse:\n #note\n if isinstance(element, note.Note):\n notes.append(str(element.pitch))\n #chord\n elif isinstance(element, chord.Chord):\n notes.append('.'.join(str(n) for n in element.normalOrder))\n return np.array(notes)", "title": "" }, { "docid": "d7fc6e0de1a70c334c8e795b04ce215b", "score": "0.5016206", "text": "def read_cfa_or_bsnip(fname):\n spectra = np.loadtxt(fname)\n return spectra", "title": "" }, { "docid": "3f517b898279a09e6477301d15a697e5", "score": "0.50151235", "text": "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n chr = int(line.strip().split()[0][-2:])\n start = int(line.strip().split()[1])\n end = int(line.strip().split()[2])\n chr_list[chr].append([start,end])\n else:\n pass\n infile.close()\n return chr_list", "title": "" }, { "docid": "60d098952b3cdbe5c25835f83ee48912", "score": "0.5009998", "text": "def fastaParser(filename):\r\n seqs = []\r\n genome = ''\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n if not line[0] == '>':\r\n seqs.append(list(line.rstrip()))\r\n return seqs", "title": "" }, { "docid": "44b0ffdd072bb7a0f1b9617c784cf9d3", "score": "0.5008174", "text": "def read_abacus(filename):\n fd = open(filename, \"r\")\n contents = fd.read()\n title_str = (\n r\"(?:LATTICE_CONSTANT|NUMERICAL_ORBITAL|ABFS_ORBITAL|\"\n + r\"LATTICE_VECTORS|LATTICE_PARAMETERS|ATOMIC_POSITIONS)\"\n )\n\n # remove comments and empty lines\n contents = re.compile(r\"#.*|//.*\").sub(\"\", contents)\n contents = re.compile(r\"\\n{2,}\").sub(\"\\n\", contents)\n\n # specie, mass, pps\n specie_pattern = re.compile(rf\"ATOMIC_SPECIES\\s*\\n([\\s\\S]+?)\\s*\\n{title_str}\")\n specie_lines = np.array(\n [line.split() for line in specie_pattern.search(contents).group(1).split(\"\\n\")]\n )\n symbols = specie_lines[:, 0]\n ntype = len(symbols)\n try:\n atom_potential = dict(zip(symbols, specie_lines[:, 2].tolist()))\n except IndexError:\n atom_potential = None\n\n # basis\n aim_title = \"NUMERICAL_ORBITAL\"\n aim_title_sub = title_str.replace(\"|\" + aim_title, \"\")\n orb_pattern = re.compile(rf\"{aim_title}\\s*\\n([\\s\\S]+?)\\s*\\n{aim_title_sub}\")\n orb_lines = orb_pattern.search(contents)\n if orb_lines:\n atom_basis = 
dict(zip(symbols, orb_lines.group(1).split(\"\\n\")))\n else:\n atom_basis = None\n\n # ABFs basis\n aim_title = \"ABFS_ORBITAL\"\n aim_title_sub = title_str.replace(\"|\" + aim_title, \"\")\n abf_pattern = re.compile(rf\"{aim_title}\\s*\\n([\\s\\S]+?)\\s*\\n{aim_title_sub}\")\n abf_lines = abf_pattern.search(contents)\n if abf_lines:\n atom_offsite_basis = dict(zip(symbols, abf_lines.group(1).split(\"\\n\")))\n else:\n atom_offsite_basis = None\n\n # lattice constant\n aim_title = \"LATTICE_CONSTANT\"\n aim_title_sub = title_str.replace(\"|\" + aim_title, \"\")\n a0_pattern = re.compile(rf\"{aim_title}\\s*\\n([\\s\\S]+?)\\s*\\n{aim_title_sub}\")\n a0_lines = a0_pattern.search(contents)\n atom_lattice_scale = float(a0_lines.group(1))\n\n # lattice vector\n aim_title = \"LATTICE_VECTORS\"\n aim_title_sub = title_str.replace(\"|\" + aim_title, \"\")\n vec_pattern = re.compile(rf\"{aim_title}\\s*\\n([\\s\\S]+?)\\s*\\n{aim_title_sub}\")\n vec_lines = vec_pattern.search(contents)\n if vec_lines:\n atom_lattice = np.array(\n [line.split() for line in vec_pattern.search(contents).group(1).split(\"\\n\")]\n ).astype(float)\n atom_lattice = atom_lattice * atom_lattice_scale\n\n aim_title = \"ATOMIC_POSITIONS\"\n type_pattern = re.compile(rf\"{aim_title}\\s*\\n(\\w+)\\s*\\n\")\n # type of coordinates\n atom_pos_type = type_pattern.search(contents).group(1)\n assert atom_pos_type in [\n \"Direct\",\n \"Cartesian\",\n ], \"Only two type of atomic coordinates are supported: 'Direct' or 'Cartesian'.\"\n\n block_pattern = re.compile(rf\"{atom_pos_type}\\s*\\n([\\s\\S]+)\")\n block = block_pattern.search(contents).group()\n if block[-1] != \"\\n\":\n block += \"\\n\"\n atom_magnetism = []\n atom_symbol = []\n atom_block = []\n for i, symbol in enumerate(symbols):\n pattern = re.compile(rf\"{symbol}\\s*\\n({_re_float})\\s*\\n(\\d+)\")\n sub_block = pattern.search(block)\n number = int(sub_block.group(2))\n\n # symbols, magnetism\n sym = [symbol] * number\n atom_mags = [float(sub_block.group(1))] * number\n for j in range(number):\n atom_symbol.append(sym[j])\n # atom_mass.append(masses[j])\n atom_magnetism.append(atom_mags[j])\n\n if i == ntype - 1:\n lines_pattern = re.compile(\n rf\"{symbol}\\s*\\n{_re_float}\\s*\\n\\d+\\s*\\n([\\s\\S]+)\\s*\\n\"\n )\n else:\n lines_pattern = re.compile(\n rf\"{symbol}\\s*\\n{_re_float}\\s*\\n\\d+\\s*\\n([\\s\\S]+?)\"\n + rf\"\\s*\\n\\w+\\s*\\n{_re_float}\"\n )\n lines = lines_pattern.search(block)\n for j in [line.split() for line in lines.group(1).split(\"\\n\")]:\n atom_block.append(j)\n atom_block = np.array(atom_block)\n atom_magnetism = np.array(atom_magnetism)\n\n # position\n atom_positions = atom_block[:, 0:3].astype(float)\n\n def _get_index(labels, num):\n index = None\n res = []\n for label in labels:\n if label in atom_block:\n index = np.where(atom_block == label)[-1][0]\n if index is not None:\n res = atom_block[:, index + 1 : index + 1 + num].astype(float)\n\n return res, index\n\n # magnetism\n m_labels = [\"mag\", \"magmom\"]\n if \"angle1\" in atom_block or \"angle2\" in atom_block:\n import warnings\n\n warnings.warn(\n \"Non-colinear angle-settings are not yet supported for this interface.\"\n )\n mags, m_index = _get_index(m_labels, 1)\n try: # non-colinear\n if m_index:\n atom_magnetism = atom_block[:, m_index + 1 : m_index + 4].astype(float)\n except IndexError: # colinear\n if m_index:\n atom_magnetism = mags\n\n # to ase\n if atom_pos_type == \"Direct\":\n atoms = PhonopyAtoms(\n symbols=atom_symbol,\n cell=atom_lattice,\n 
scaled_positions=atom_positions,\n magnetic_moments=atom_magnetism,\n )\n elif atom_pos_type == \"Cartesian\":\n atoms = PhonopyAtoms(\n symbols=atom_symbol,\n cell=atom_lattice,\n positions=atom_positions * atom_lattice_scale,\n magnetic_moments=atom_magnetism,\n )\n\n fd.close()\n return atoms, atom_potential, atom_basis, atom_offsite_basis", "title": "" }, { "docid": "56c73c9f7d1fc28747872f6159c0e991", "score": "0.500809", "text": "def mrna_sequence(self):\r\n return self._cds_sequence(self.coding_exons)", "title": "" }, { "docid": "9d50c1c6608051cc11e3b5619b72c190", "score": "0.5004634", "text": "def encode_file(self, pssm_file):\n if not os.path.exists(pssm_file):\n print(\"The specified PSSM file does not exist\\n\\\n Please specify a valid PSSM file\")\n sys.exit(0)\n\n with open(pssm_file) as fh:\n matrix = [line.strip().split() for line in fh][3:-6]\n\n pssm = np.array(matrix)[:, 2:22].astype(np.int32)\n seq = \"\".join(np.array(matrix)[:, 1])\n\n idx_list = [aa_dict[aa] if aa in aa_dict else 19 for aa in seq]\n seq_vec = np.zeros((len(seq), 22), dtype=np.int32)\n seq_vec[np.arange(len(seq)), idx_list] = 1\n\n return seq, np.concatenate([seq_vec, pssm], axis=1).reshape(1, -1, 42)", "title": "" }, { "docid": "e453a4ca96eda182b37fe5257910d9ce", "score": "0.5002104", "text": "def convert_ali(self, fasta, pir):\n assert pir != \"output.pir\" # Assumption\n logging.captureWarnings(True)\n aln = modeller.alignment(self.env)\n aln.append(file=fasta, alignment_format=\"FASTA\", remove_gaps=False)\n aln.write(file=\"output.pir\", alignment_format='PIR')\n fasta_h = open(fasta, \"r\")\n sequences = SeqIO.parse(fasta_h, \"fasta\")\n values = []\n for record in sequences:\n pdb_id = self._extract_id(record.id)\n values.append([len(record), pdb_id])\n# print(record.id)\n # Download the pdb to build the model\n # modeller search for all the posible names of the file\n try:\n pdb = plots.pdb_download(pdb_id, os.getcwd())\n except urllib.error.URLError:\n pass\n except ftplib.error_perm:\n pass\n# finally:\n# parser = PDBParser(PERMISSIVE=1)\n# structure = parser.get_structure(pdb_id, pdb)\n# print(parser.get_trailer())\n self.pir = pir # Set the pir as an attribute\n # Convert the pir into a understandable pir format?\n with open(pir, \"w\") as out:\n with open(\"output.pir\", \"r\") as fl:\n records = fl.read()\n records = records.split(\">\")\n for n, record in enumerate(records, start=-1):\n lines = record.split(\"\\n\")\n if lines[0] == \"\":\n continue\n id_pdb = self._extract_id(lines[0])\n lines[0] = \">\"+lines[0].split(\";\")[0]+\";\"+id_pdb\n fields = lines[1].split(\":\")\n fields[0] = \"structureX\"\n fields[1] = id_pdb\n fields[2] = \"1\"\n fields[3] = \"A\"\n if values[n][1] == id_pdb.rstrip():\n fields[4] = str(values[n][0])\n else:\n fields[4] = \"500\" # Default length of the sequence\n fields_a = []\n for field in fields[:]:\n if field == \"\":\n fields_a.append(\".\")\n else:\n fields_a.append(field)\n lines[1] = \":\".join(fields_a)\n lines_o = \"\\n\".join(lines)\n out.write(lines_o)\n os.remove(\"output.pir\")\n logging.captureWarnings(False)", "title": "" }, { "docid": "375f9cad45168b7f830d0886a1a2bd36", "score": "0.500058", "text": "def readCMFile(filePath):\r\n if not os.path.exists(filePath):\r\n print(\"The file \" + filePath + \" doesn't exist.\\nIt means there was an error calling the comparator.\")\r\n raise Exception('error')\r\n \r\n with open(filePath) as f:\r\n for line in f.readlines():\r\n if line.startswith('cm:'):\r\n numbers = line.split()[1:]\r\n return 
[int(nb) for nb in numbers[:5]]", "title": "" }, { "docid": "0a96897d8ba184e583a540188cf09d1f", "score": "0.4998792", "text": "def readAcode_file(filename):\n AcodeDic = {}\n file=filename # #os.path.join(os.path.sep,'etc','ethers')\n # print file\n #if ( os.path.exists(file) == False ) :\n # logger.debug(\"Ethers file: \" + file + \" - Not present.\")\n # return hostlist\n\n inFile = open(file, 'rb')\n for line in inFile:\n # use line only starting with #define and have ACDKEY within them\n if (line.startswith('#define') and ('ACDKEY' in line)):\n line = line.strip(' \\n') # removecarrage returns, white spaces\n #print\n #print \"Acode line: \" + line\n #print line.split()\n list = line.split()\n if (len(list) < 5):\n continue\n #print list[2]\n #print list[2].strip('(')\n #print list[4]\n #print list[4].strip(' )')\n #print tsplit(line, (' ', '\\t','\\n','(',')','|' ) )\n name = list[1].strip()\n value = list[4].strip(' )')\n ival = int(value)\n # print \"Acode: '%s' - '%s'\" % (name,value)\n #print \"Acode: '%s' - '%d'\" % (name,ival)\n\n AcodeDic[ival] = name\n\n return AcodeDic", "title": "" } ]
60ce49f563c0f7593b8c42628f149bc4
Takes data from a dictionary with a particular structure, and stores it in several Problem instances.
[ { "docid": "d9f4542ee50cec3b083462f12883bad2", "score": "0.59248495", "text": "def problems_from_dict(\n data: Mapping[str, Any], yaml_filename: str\n) -> Mapping[str, Problem]:\n problems, _ = _problems_and_ids_from_dict(data, yaml_filename)\n return problems", "title": "" } ]
[ { "docid": "7612f7c7c51a296b0cfd6ad809783f41", "score": "0.6503", "text": "def _problems_and_ids_from_dict(\n data: Mapping[str, Any], yaml_filename: str\n) -> Tuple[Mapping[str, Problem], Dict[Any, Any]]:\n\n # Mapping to remember which dictionaries were already converted to objects\n # Keys are object ids of dictionaries, values are the corresponding malloovia objects\n ids_to_objects: Dict[int, Any] = {}\n\n def create_if_neccesary(_class, _dict):\n \"\"\"Auxiliary function to instantiate a new object from a dict only\n if the same dict was not already instantiated\"\"\"\n # If already created, return the stored object\n if id(_dict) in ids_to_objects:\n return ids_to_objects[id(_dict)]\n\n # If _dict is not a dict, it is an already created object, return it\n if not isinstance(_dict, dict):\n return _dict\n\n # Else, create the object, store it and return it\n new = _class(**_dict)\n ids_to_objects[id(_dict)] = new\n return new\n\n def copy_id_to_name(_dict):\n \"\"\"Helper function to set the name equal to id, if missing\"\"\"\n if isinstance(_dict, dict) and \"name\" not in _dict:\n _dict[\"name\"] = _dict[\"id\"]\n\n def create_instance_classes(_list):\n \"\"\"Helper function which creates all required Instance_classes from\n a list of InstanceClasses, and the Limiting_sets referenced from\n those Instance_classes\"\"\"\n for ic_data in _list:\n copy_id_to_name(ic_data)\n limiting_sets = []\n for lset_data in ic_data[\"limiting_sets\"]:\n copy_id_to_name(lset_data)\n limiting_sets.append(create_if_neccesary(LimitingSet, lset_data))\n ic_data[\"limiting_sets\"] = tuple(limiting_sets)\n create_if_neccesary(InstanceClass, ic_data)\n\n def create_workloads(_list):\n \"\"\"Helper function which creates all required Workloads from a list\n of workloads, and the Apps referenced from those workloads\"\"\"\n for w_data in _list:\n w_data[\"app\"] = create_if_neccesary(App, w_data[\"app\"])\n if w_data.get(\"filename\"):\n values = read_from_relative_csv(\n filename=w_data[\"filename\"], relative_to=yaml_filename\n )\n else:\n values = tuple(w_data[\"values\"])\n w_data.update(values=values)\n create_if_neccesary(Workload, w_data)\n\n def create_performances(_dict):\n \"\"\"Helper function which creates a Performances object from a list\n of performance dictionaries whose keys are instance_classes and apps\"\"\"\n # Check if this set of performances was already converted to\n # a Performances object, and reuse it\n if id(_dict) in ids_to_objects:\n return ids_to_objects[id(_dict)]\n\n # Else, create a dictionary suited for Performances constructor\n _list = _dict[\"values\"]\n perf_dict = {}\n for p_data in _list:\n # Get references to instance_class and app objects. 
Hence all\n # required instance types and apps were already created by now,\n # their ids should be present in ids_to_objects.\n # Otherwise it would be a internal error, and an exception\n # will be raised\n ic_object = ids_to_objects[id(p_data[\"instance_class\"])]\n app_object = ids_to_objects[id(p_data[\"app\"])]\n value = p_data[\"value\"]\n if ic_object not in perf_dict:\n perf_dict[ic_object] = {}\n perf_dict[ic_object][app_object] = float(value)\n perf = PerformanceSet(\n id=_dict[\"id\"],\n values=PerformanceValues(perf_dict),\n time_unit=_dict[\"time_unit\"],\n )\n ids_to_objects[id(_dict)] = perf\n return perf\n\n # The main program only instantiates problems, and the other objects\n # referenced from those problems\n problems = {}\n\n # First pass: traverse all problems to ensure that all ics and apps\n # referenced from the problems are converted to namedtuples\n for problem in data[\"Problems\"]:\n create_instance_classes(problem[\"instance_classes\"])\n create_workloads(problem[\"workloads\"])\n\n # Now traverse again to create the performances and problems\n for problem in data[\"Problems\"]:\n performances = create_performances(problem[\"performances\"])\n problem.update(\n workloads=tuple(ids_to_objects[id(w)] for w in problem[\"workloads\"]),\n instance_classes=tuple(\n ids_to_objects[id(i)] for i in problem[\"instance_classes\"]\n ),\n performances=performances,\n )\n new_problem = Problem(**problem)\n problems[new_problem.id] = new_problem\n ids_to_objects[id(problem)] = new_problem\n return problems, ids_to_objects", "title": "" }, { "docid": "f025c78993e9a5231d8f288ab682e713", "score": "0.5979477", "text": "def get_all_problems() -> Dict[str, Problem]:\n regularization_strength = 0.001\n\n housing_distributions = [\"gaussian\", \"gamma\", \"binomial\"]\n housing_load_funcs = {\n \"intermediate-housing\": generate_housing_dataset,\n }\n\n insurance_distributions = [\n \"gaussian\",\n \"poisson\",\n \"gamma\",\n \"tweedie-p=1.5\",\n \"binomial\",\n ]\n insurance_load_funcs = {\n \"intermediate-insurance\": generate_intermediate_insurance_dataset,\n \"narrow-insurance\": generate_narrow_insurance_dataset,\n \"wide-insurance\": generate_wide_insurance_dataset,\n }\n if os.path.isfile(git_root(\"data\", \"X.parquet\")):\n insurance_load_funcs[\"real-insurance\"] = generate_real_insurance_dataset\n\n problems = {}\n for penalty_str, l1_ratio in [(\"l2\", 0.0), (\"net\", 0.5), (\"lasso\", 1.0)]:\n # Add housing problems\n for distribution in housing_distributions:\n suffix = penalty_str + \"-\" + distribution\n dist = distribution\n for problem_name, load_fn in housing_load_funcs.items():\n for data_setup in [\"no-weights\", \"offset\"]:\n problems[\"-\".join((problem_name, data_setup, suffix))] = Problem(\n data_loader=partial(\n load_data, load_fn, distribution=dist, data_setup=data_setup\n ),\n distribution=distribution,\n regularization_strength=regularization_strength,\n l1_ratio=l1_ratio,\n )\n # Add insurance problems\n for distribution in insurance_distributions:\n suffix = penalty_str + \"-\" + distribution\n dist = distribution\n for problem_name, load_fn in insurance_load_funcs.items():\n for data_setup in [\"weights\", \"no-weights\", \"offset\"]:\n problems[\"-\".join((problem_name, data_setup, suffix))] = Problem(\n data_loader=partial(\n load_data, load_fn, distribution=dist, data_setup=data_setup\n ),\n distribution=distribution,\n regularization_strength=regularization_strength,\n l1_ratio=l1_ratio,\n )\n\n return problems", "title": "" }, { "docid": 
"ee36ac24e266c377fc3b1e33a8a7e4b2", "score": "0.57386434", "text": "def __init__(self, problem):\n self.clouds = problem[\"clouds\"]\n self.fogs = problem[\"fogs\"]\n self.peers = problem[\"peers\"]\n self.tasks = problem[\"tasks\"]\n\n self.n_clouds = len(self.clouds)\n self.n_fogs = len(self.fogs)\n self.n_peers = len(self.peers)\n self.n_tasks = len(self.tasks)\n\n self.schedule_clouds_tasks = {} # key: cloud_id, val: list_task_id []\n self.schedule_fogs_tasks = {}\n self.schedule_peers_tasks = {}", "title": "" }, { "docid": "8e5d2935e7f0f82a053eca0ffd5a1fc0", "score": "0.5586101", "text": "def solutions_from_dict(\n data: Mapping[str, Any], yaml_filename: str\n) -> Mapping[str, Union[SolutionI, SolutionII]]:\n\n # Mapping to remember which dictionaries were already converted to objects\n # Keys are object ids of dictionaries, values are the corresponding malloovia objects\n ids_to_objects: Dict[int, Any] = {}\n\n def _is_phase_i_solution(solution_dict):\n \"\"\"Receives a solution as a dict generated by yaml_load() and returns\n true if is a phase I solution and false otherwise\"\"\"\n if not \"previous_phase\" in solution_dict:\n return True\n\n return False\n\n def _create_phase_i_solution(solution_dict):\n return SolutionI(**solution_dict)\n\n def _create_phase_ii_solution(solution_dict):\n solution_dict[\"previous_phase\"] = ids_to_objects[\n id(solution_dict[\"previous_phase\"])\n ]\n return SolutionII(**solution_dict)\n\n def _dict_list_to_id_list(dict_list):\n id_list = []\n for item in dict_list:\n id_list.append(ids_to_objects[id(item)])\n return id_list\n\n def _convert_allocation(solution_dict):\n alloc = solution_dict[\"allocation\"]\n alloc[\"apps\"] = tuple(_dict_list_to_id_list(alloc[\"apps\"]))\n alloc[\"instance_classes\"] = tuple(_dict_list_to_id_list(alloc[\"instance_classes\"]))\n alloc[\"values\"] = alloc.pop(\"vms_number\")\n alloc[\"values\"] = tuple(\n tuple(tuple(vms) for vms in app) for app in alloc[\"values\"]\n )\n if \"units\" not in alloc:\n alloc[\"units\"] = \"vms\"\n if \"workload_tuples\" not in alloc:\n alloc[\"workload_tuples\"] = tuple()\n else:\n alloc[\"workload_tuples\"] = list(tuple(wl) for wl in alloc[\"workload_tuples\"])\n solution_dict[\"allocation\"] = AllocationInfo(**alloc)\n\n def _convert_reserved_allocation(solution_dict):\n alloc = solution_dict[\"reserved_allocation\"]\n alloc[\"instance_classes\"] = tuple(_dict_list_to_id_list(alloc[\"instance_classes\"]))\n alloc[\"vms_number\"] = tuple(alloc[\"vms_number\"])\n solution_dict[\"reserved_allocation\"] = ReservedAllocation(**alloc)\n\n def _status_to_enum(status: str) -> Status:\n status_enum = Status.__members__.get(status)\n if status_enum is None:\n raise ValueError(\"Invalid status '{}' in solving_stats\".format(status))\n return status_enum\n\n def _convert_malloovia_stats(data: Dict[str, Any]) -> MallooviaStats:\n status = data[\"status\"]\n data[\"status\"] = _status_to_enum(status)\n return MallooviaStats(**data)\n\n def _convert_solving_stats(solving_stats: Dict[str, Any]) -> SolvingStats:\n alg_stats = solving_stats.get(\"algorithm\")\n if alg_stats and alg_stats.get(\"malloovia\"):\n solving_stats[\"algorithm\"] = _convert_malloovia_stats(alg_stats.get(\"malloovia\"))\n return SolvingStats(**solving_stats)\n\n\n def _convert_solving_stats_phase_i(solution_dict):\n solving_stats = solution_dict.get(\"solving_stats\")\n if solving_stats:\n solution_dict[\"solving_stats\"] = _convert_solving_stats(solving_stats)\n\n def _convert_malloovia_stats_phase_ii(solution_dict):\n 
solving_stats = solution_dict.get(\"solving_stats\")\n if solving_stats:\n result = []\n for stats in solving_stats:\n result.append(_convert_solving_stats(stats))\n solution_dict[\"solving_stats\"] = result\n\n def _convert_global_solving_stats(solution_dict):\n g_solving_stats = solution_dict.get(\"global_solving_stats\")\n if g_solving_stats:\n status = g_solving_stats[\"status\"]\n g_solving_stats[\"status\"] = _status_to_enum(status)\n solution_dict[\"global_solving_stats\"] = GlobalSolvingStats(**g_solving_stats)\n\n def _create_solution(solution_dict):\n solution_dict[\"problem\"] = ids_to_objects[id(solution_dict[\"problem\"])]\n\n if \"allocation\" in solution_dict:\n _convert_allocation(solution_dict)\n \n if _is_phase_i_solution(solution_dict):\n _convert_solving_stats_phase_i(solution_dict)\n _convert_reserved_allocation(solution_dict)\n result = _create_phase_i_solution(solution_dict)\n else:\n _convert_malloovia_stats_phase_ii(solution_dict)\n _convert_global_solving_stats(solution_dict)\n result = _create_phase_ii_solution(solution_dict)\n\n ids_to_objects[id(solution_dict)] = result\n return result\n\n _, ids_to_objects = _problems_and_ids_from_dict(data, yaml_filename)\n\n solutions = {}\n\n # Create solutions for phase I. They have to be created before solutions for\n # phase II because the latter reference the former\n for solution_dict in data[\"Solutions\"]:\n if _is_phase_i_solution(solution_dict):\n solution = _create_solution(solution_dict)\n solutions[solution.id] = solution\n\n # Create solutions for phase II\n for solution_dict in data[\"Solutions\"]:\n if not _is_phase_i_solution(solution_dict):\n solution = _create_solution(solution_dict)\n solutions[solution.id] = solution\n\n return solutions", "title": "" }, { "docid": "4d92644796b23d5f660a622ae6e8eb3d", "score": "0.5542194", "text": "def from_json(data, benchmark, batch_pos):\n problem_part = json.loads(data[\"problem\"])\n return Problem(problem_part[\"grid\"], problem_part[\"width\"], problem_part[\"height\"], problem_part[\"starts\"],\n problem_part[\"goals\"], problem_part[\"waypoints\"], benchmark, data[\"id\"], batch_pos)", "title": "" }, { "docid": "222024e7ebe4cfdb8944ae5948c22a68", "score": "0.5505686", "text": "def data_from_dict(self, data):\n\n nvars = []\n for key, val in data.items():\n self.__dict__[key].extend(val)\n\n # assure the same parameter matrix size\n if len(nvars) > 1 and len(val) != nvars[-1]:\n raise IndexError(\n 'Model <{}> parameter <{}> must have the same length'.\n format(self._name, key))\n nvars.append(len(val))\n\n # assign idx-uid mapping\n for i, idx in zip(range(self.n), self.idx):\n self.uid[idx] = i", "title": "" }, { "docid": "86ad3e7236817e5a32a60b1dfc1e1f01", "score": "0.51949054", "text": "def from_dictionary(self, data):\n self.general_params.set_from_dictionary(data['general parameters'])\n for name in data['pair parameters'].keys():\n self.pair_params[name] = PairParams(name)\n self.pair_params[name].set_from_dictionary(data['pair parameters'][name])", "title": "" }, { "docid": "67359a3fbd0fa3d57d6a9f4c45d90d0a", "score": "0.5154958", "text": "def create_unit(self, dict_with_new_data_for_creating_a_unit):\n errors_found = self.__errors_in_incoming_data(dict_with_new_data_for_creating_a_unit, bool_full_unit=False)\n if not errors_found:\n self.dict_algolia_unit = copy.deepcopy(self.__dct_template)\n for a_key in dict_with_new_data_for_creating_a_unit:\n self.dict_algolia_unit[a_key][self.key_for_values_current] = dict_with_new_data_for_creating_a_unit[\n 
a_key]\n self.__unit_loaded_or_created = True\n else:\n logging.error('Errors were found in incoming data. The \"creation\" method did not complete successfully.')", "title": "" }, { "docid": "82ba4164f5d37abed29d2d7fbfddf21b", "score": "0.51174545", "text": "def load_problem(instance):\n accounts = deepcopy(instance['accounts'])\n\n orders = [\n Order.load_from_dict(order_dict, str(index))\n for index, order_dict in enumerate(instance['orders'])\n ]\n\n orders = restrict_order_sell_amounts_by_balances(orders, accounts)\n\n fee = load_fee(instance['fee'])\n\n return accounts, orders, fee", "title": "" }, { "docid": "73b2df3972f9b742971d3612e3436f93", "score": "0.50930434", "text": "def formulate(self):\n print \"Starting problem formulation\"\n self.print_matrix()\n self.i_consistency()\n self.form_heap()\n self.create_constraint_graph()\n #self.print_constraint_graph()\n self.solve()\n #self.solveBasic()\n self.print_matrix_final()\n #print self.heap\n\n # formulate the problem and add data structures which might help solve the problem\n # instrument the matrix to populate the data structures", "title": "" }, { "docid": "d5dc2cfef826b059497b138e16ec6210", "score": "0.50851357", "text": "def from_dict(self, dictionary):", "title": "" }, { "docid": "9df16424e4b6736a1686dcbcb12bab89", "score": "0.50519365", "text": "def __init__(self, columns, types, description=None):\n\n self.log = []\n if description is None:\n self.description = ''\n elif len(description) > 80:\n raise ValueError('The dictionary description cannot be more than '\n '80 characters')\n else:\n self.description = description\n\n # Adds the question objects to the dictionary\n for col_, type_ in zip(*(columns, types)):\n self.add_question(question_data=col_,\n question_type=type_,\n record=False,\n check=False)\n self.columns = list(self.keys())", "title": "" }, { "docid": "fa0c2ac807b5117b87db43ff59a27499", "score": "0.504272", "text": "def _from_dict(self, data):\n pass", "title": "" }, { "docid": "141dcfce84e625a7a3476b5b73f0aaf7", "score": "0.50232834", "text": "def get_problem_info(pid: tuple) -> dict:\n url = \"https://codeforces.com/problemset/problem/%s/%s\" % (pid[0], pid[1])\n r = requests.get(url)\n html = r.content.decode('utf-8')\n bs = BeautifulSoup(html, features=\"html.parser\")\n info = {}\n info[\"题目名称\"] = bs.find(class_=\"title\").string\n time_limit: str = bs.find(class_=\"time-limit\").get_text()\n info[\"时间限制\"] = time_limit.replace(\"time limit per test\", \"\")\n memory_limit: str = bs.find(class_=\"memory-limit\").get_text()\n info[\"空间限制\"] = memory_limit.replace(\"memory limit per test\", \"\")\n bs_tags = bs.find_all(class_=\"tag-box\")\n tags = [i.string.strip(\"\\n\\r\\t \") for i in bs_tags if not i.string.strip(\n \"\\n\\r\\t \").startswith('*')]\n info[\"标签  \"] = \", \".join(tags)\n info[\"难度  \"] = bs.find(title=\"Difficulty\").string.strip(\"\\n\\r\\t* \")\n info[\"来源  \"] = bs.th.string.strip(\"\\n\\r\\t* \")\n return info", "title": "" }, { "docid": "102c8d8ccd97eff01d74c60e6800ee45", "score": "0.5018158", "text": "def from_json(cls, json_data):\n data = json.loads(json_data)\n operations = [op_from_json(json.dumps(item))\n for item in data['operations']]\n problem = PredictionProblem(operations)\n problem.filter_column_order_of_types = data[\n 'filter_column_order_of_types']\n problem.label_generating_column_order_of_types = data[\n 'label_generating_column_order_of_types']\n return problem", "title": "" }, { "docid": "7fc7c7a10f9782c8bd9f6a610cf51bb7", "score": "0.5005901", "text": 
"def create_lp(\n input_dict: Dict[str, object], mu: float, args: argparse.Namespace\n) -> None:\n # Get values needed from the input_dict\n room_capacities = {room[\"id\"]: room[\"capacity\"] for room in input_dict[\"rooms\"]}\n ak_durations = {ak[\"id\"]: ak[\"duration\"] for ak in input_dict[\"aks\"]}\n\n # dict of real participants only (without dummy participants) with their preferences dicts\n real_preferences_dict = {\n participant[\"id\"]: participant[\"preferences\"]\n for participant in input_dict[\"participants\"]\n }\n\n # dict of real participants only (without dummy participants) with numerical preferences\n weighted_preference_dict = {\n participant[\"id\"]: {\n pref[\"ak_id\"]: process_pref_score(\n pref[\"preference_score\"],\n pref[\"required\"],\n mu=mu,\n )\n for pref in participant[\"preferences\"]\n }\n for participant in input_dict[\"participants\"]\n }\n\n # Get constraints from input_dict\n participant_time_constraint_dict = {\n participant[\"id\"]: set(participant[\"time_constraints\"])\n for participant in input_dict[\"participants\"]\n }\n\n participant_room_constraint_dict = {\n participant[\"id\"]: set(participant[\"room_constraints\"])\n for participant in input_dict[\"participants\"]\n }\n\n ak_time_constraint_dict = {\n ak[\"id\"]: set(ak[\"time_constraints\"]) for ak in input_dict[\"aks\"]\n }\n ak_room_constraint_dict = {\n ak[\"id\"]: set(ak[\"room_constraints\"]) for ak in input_dict[\"aks\"]\n }\n\n room_time_constraint_dict = {\n room[\"id\"]: set(room[\"time_constraints\"]) for room in input_dict[\"rooms\"]\n }\n fulfilled_time_constraints = {\n timeslot[\"id\"]: set(timeslot[\"fulfilled_time_constraints\"])\n for block in input_dict[\"timeslots\"][\"blocks\"]\n for timeslot in block\n }\n fulfilled_room_constraints = {\n room[\"id\"]: set(room[\"fulfilled_room_constraints\"])\n for room in input_dict[\"rooms\"]\n }\n\n # Get ids from input_dict\n def _retrieve_val_set(object_key: str, val_key: str) -> Set:\n return {obj[val_key] for obj in input_dict[object_key]}\n\n ak_ids = _retrieve_val_set(\"aks\", \"id\")\n room_ids = _retrieve_val_set(\"rooms\", \"id\")\n timeslot_ids = {\n timeslot[\"id\"]\n for block in input_dict[\"timeslots\"][\"blocks\"]\n for timeslot in block\n }\n\n participant_ids = _retrieve_val_set(\"participants\", \"id\")\n participant_ids = (\n participant_ids.union( # contains all participants ids (incl. 
dummy ids)\n {get_dummy_participant_id(ak_id) for ak_id in ak_ids}\n )\n )\n\n timeslot_block_ids = {\n timeslot[\"id\"]: (block_idx, timeslot_idx)\n for block_idx, block in enumerate(input_dict[\"timeslots\"][\"blocks\"])\n for timeslot_idx, timeslot in enumerate(block)\n }\n\n # Create problem\n prob = LpProblem(\"MLPKoMa\", sense=LpMinimize)\n\n # Create decision variables\n dec_vars = LpVariable.dicts(\n \"DecVar\", (ak_ids, timeslot_ids, room_ids, participant_ids), cat=LpBinary\n )\n\n # Set objective function\n # ∑ᴬ⋅ᵀ⋅ᴿ⋅ᴾ -Pᴬ⋅ᴾ / (Sᴬ ∑_{Pᴬ⋅ᴾ≠0} 1) Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ\n cost_func = LpAffineExpression()\n for participant_id, preferences in real_preferences_dict.items():\n normalizing_factor = len(preferences)\n if normalizing_factor == 0:\n continue\n for ak_id in ak_ids:\n coeff = -weighted_preference_dict[participant_id].get(ak_id, 0)\n coeff /= ak_durations[ak_id] * normalizing_factor\n affine_constraint = lpSum(\n [\n dec_vars[ak_id][timeslot_id][room_id][participant_id]\n for timeslot_id, room_id in product(timeslot_ids, room_ids)\n ]\n )\n cost_func += coeff * affine_constraint\n\n prob += cost_func, \"cost_function\"\n\n # Add constraints\n\n # E1: MaxOneAKperPersonAndTime\n # ∀ T,P≠Pᴬ: ∑ᴬ⋅ᵀ Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ ≤ 1\n for timeslot_id, participant_id in product(\n timeslot_ids, real_preferences_dict.keys()\n ):\n affine_constraint = lpSum(\n [\n dec_vars[ak_id][timeslot_id][room_id][participant_id]\n for ak_id, room_id in product(ak_ids, room_ids)\n ]\n )\n prob += affine_constraint <= 1, _construct_constraint_name(\n \"MaxOneAKperPersonAndTime\", timeslot_id, participant_id\n )\n\n # E2: AKLength\n # ∀ A: ∑ᵀ⋅ᴿ Yᴬ⋅ᵀ⋅ᴿ⋅ᴾᴬ = Sᴬ\n for ak_id in ak_ids:\n affine_constraint = lpSum(\n [\n dec_vars[ak_id][timeslot_id][room_id][get_dummy_participant_id(ak_id)]\n for timeslot_id, room_id in product(timeslot_ids, room_ids)\n ]\n )\n prob += affine_constraint == ak_durations[ak_id], _construct_constraint_name(\n \"AKLength\", ak_id\n )\n\n ## TODO FIXME BUG: Muss =1 oder =0 sein!\n # E3: NoPartialParticipation\n # ∀ A,P≠Pᴬ: 1/Sᴬ ∑ᵀ⋅ᴿ Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ ≤ 1\n # Z2: PersonNeededForAK\n # ∀ A,P≠Pᴬ if P essential for A: ∑ᵀ⋅ᴿ Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ = Sᴬ\n for ak_id in ak_ids:\n for participant_id, preferences in real_preferences_dict.items():\n affine_constraint = lpSum(\n [\n dec_vars[ak_id][timeslot_id][room_id][participant_id]\n for timeslot_id, room_id in product(timeslot_ids, room_ids)\n ]\n )\n for pref in preferences:\n if pref[\"ak_id\"] == ak_id:\n if pref[\n \"required\"\n ]: # participant is essential for ak -> set constraint for \"PersonNeededForAK\"\n prob += (\n affine_constraint == ak_durations[ak_id],\n _construct_constraint_name(\n \"PersonNeededForAK\",\n ak_id,\n participant_id,\n ),\n ) ## TODO Check for fixed value\n else: # participant is not essential -> set constraint for \"NoPartialParticipation\"\n affine_constraint *= 1 / ak_durations[ak_id]\n prob += affine_constraint <= 1, _construct_constraint_name(\n \"NoPartialParticipation\",\n ak_id,\n participant_id,\n )\n break\n else: # participant is not essential -> set constraint for \"NoPartialParticipation\"\n affine_constraint *= 1 / ak_durations[ak_id]\n prob += affine_constraint <= 1, _construct_constraint_name(\n \"NoPartialParticipation\",\n ak_id,\n participant_id,\n )\n\n ## TODO FIXME BUG: Muss =1 oder =0 sein!\n # E4: FixedAKRooms\n # ∀ A,R: 1 / Sᴬ ∑ᵀ Yᴬ⋅ᵀ⋅ᴿ⋅ᴾᴬ ≤ 1\n for ak_id, room_id in product(ak_ids, room_ids):\n affine_constraint = lpSum(\n [\n dec_vars[ak_id][timeslot_id][room_id][get_dummy_participant_id(ak_id)]\n for timeslot_id in timeslot_ids\n ]\n 
)\n affine_constraint *= 1 / ak_durations[ak_id]\n prob += affine_constraint <= 1, _construct_constraint_name(\n \"FixedAKRooms\", ak_id, room_id\n )\n\n # E5: AKConsecutive\n # ∀ A,R,Tᵃᵇ,Tᶜᵈ s.t. (a≠c ∨ |b-d|≥Sᴬ): Yᴬ⋅ᵀᵃᵇ⋅ᴿ⋅ᴾᴬ + Yᴬ⋅ᵀᶜᵈ⋅ᴿ⋅ᴾᴬ ≤ 1\n for ak_id, room_id in product(ak_ids, room_ids):\n for timeslot_id_a, timeslot_id_b in combinations(timeslot_ids, 2):\n block_idx_a, slot_in_block_idx_a = timeslot_block_ids[timeslot_id_a]\n block_idx_b, slot_in_block_idx_b = timeslot_block_ids[timeslot_id_b]\n if (\n block_idx_a != block_idx_b\n or abs(slot_in_block_idx_a - slot_in_block_idx_b) >= ak_durations[ak_id]\n ): # if two timeslots are too far apart to be consecutive\n affine_constraint = lpSum(\n [\n dec_vars[ak_id][timeslot_id_a][room_id][\n get_dummy_participant_id(ak_id)\n ],\n dec_vars[ak_id][timeslot_id_b][room_id][\n get_dummy_participant_id(ak_id)\n ],\n ]\n )\n prob += (\n affine_constraint <= 1,\n _construct_constraint_name( # forbid the ak to happen in both of them\n \"AKConsecutive\", ak_id, room_id, timeslot_id_a, timeslot_id_b\n ),\n )\n\n # E6: PersonVisitingAKAtRightTimeAndRoom\n # ∀ A,T,R,P≠Pᴬ: Yᴬ⋅ᵀ⋅ᴿ⋅ᴾᴬ - Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ ≥ 0\n for ak_id, timeslot_id, room_id, participant_id in product(\n ak_ids, timeslot_ids, room_ids, real_preferences_dict.keys()\n ):\n affine_constraint = LpAffineExpression(\n dec_vars[ak_id][timeslot_id][room_id][get_dummy_participant_id(ak_id)]\n )\n affine_constraint -= LpAffineExpression(\n dec_vars[ak_id][timeslot_id][room_id][participant_id]\n )\n prob += affine_constraint >= 0, _construct_constraint_name(\n \"PersonVisitingAKAtRightTimeAndRoom\",\n ak_id,\n timeslot_id,\n room_id,\n participant_id,\n )\n\n # E7: Roomsizes\n # ∀ R,T: ∑_{A, P≠Pᴬ} Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ ≤ Kᴿ\n for room_id, timeslot_id in product(room_ids, timeslot_ids):\n affine_constraint = lpSum(\n [\n dec_vars[ak_id][timeslot_id][room_id][participant_id]\n for ak_id, participant_id in product(ak_ids, real_preferences_dict)\n ]\n )\n prob += affine_constraint <= room_capacities[\n room_id\n ], _construct_constraint_name(\"Roomsizes\", room_id, timeslot_id)\n\n # E8: DummyPersonOneAk\n # ∀ T,R,B≠A: Yᴮ⋅ᵀ⋅ᴿ⋅ᴾᴬ = 0\n for timeslot_id, room_id, ak_id, dummy_ak_id in product(\n timeslot_ids, room_ids, ak_ids, ak_ids\n ):\n if ak_id == dummy_ak_id:\n continue\n _set_decision_variable(\n dec_vars,\n ak_id,\n timeslot_id,\n room_id,\n get_dummy_participant_id(dummy_ak_id),\n value=0,\n name=\"DummyPersonOneAk\",\n )\n\n # Z1: PersonNotInterestedInAK\n # ∀ A,T,R,P: If Pᴾ⋅ᴬ=0: Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ = 0 (non-dummy P)\n for participant_id, preferences in real_preferences_dict.items():\n pref_aks = {\n pref[\"ak_id\"] for pref in preferences\n } # aks not in pref_aks have Pᴾ⋅ᴬ=0 implicitly\n for ak_id, timeslot_id, room_id in product(\n ak_ids.difference(pref_aks), timeslot_ids, room_ids\n ):\n _set_decision_variable(\n dec_vars,\n ak_id,\n timeslot_id,\n room_id,\n participant_id,\n value=0,\n name=\"PersonNotInterestedInAK\",\n )\n\n for participant_id in real_preferences_dict:\n # Z3: TimeImpossibleForPerson (real person P cannot attend AKs with timeslot T)\n # ∀ A,T,R,P: If P cannot attend at T: Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ=0\n for timeslot_id in timeslot_ids:\n if participant_time_constraint_dict[participant_id].difference(\n fulfilled_time_constraints[timeslot_id]\n ):\n for ak_id, room_id in product(ak_ids, room_ids):\n _set_decision_variable(\n dec_vars,\n ak_id,\n timeslot_id,\n room_id,\n participant_id,\n value=0,\n name=\"TimeImpossibleForPerson\",\n )\n\n # Z4: RoomImpossibleForPerson (Real person P cannot attend AKs with room R)\n # 
∀ A,T,R,P: If P cannot attend in R: Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ=0\n for room_id in room_ids:\n if participant_room_constraint_dict[participant_id].difference(\n fulfilled_room_constraints[room_id]\n ):\n for ak_id, timeslot_id in product(ak_ids, timeslot_ids):\n _set_decision_variable(\n dec_vars,\n ak_id,\n timeslot_id,\n room_id,\n participant_id,\n value=0,\n name=\"RoomImpossibleForPerson\",\n )\n\n for ak_id in ak_ids:\n # Z5: TimeImpossibleForAK\n # ∀ A,T,R,P: If A cannot happen in timeslot T: Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ=0\n for timeslot_id in timeslot_ids:\n if ak_time_constraint_dict[ak_id].difference(\n fulfilled_time_constraints[timeslot_id]\n ):\n for participant_id, room_id in product(participant_ids, room_ids):\n _set_decision_variable(\n dec_vars,\n ak_id,\n timeslot_id,\n room_id,\n participant_id,\n value=0,\n name=\"TimeImpossibleForAK\",\n )\n # Z6: RoomImpossibleForAK\n # ∀ A,T,R,P: If A cannot happen in room R: Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ=0\n for room_id in room_ids:\n if ak_room_constraint_dict[ak_id].difference(\n fulfilled_room_constraints[room_id]\n ):\n for participant_id, timeslot_id in product(\n participant_ids, timeslot_ids\n ):\n _set_decision_variable(\n dec_vars,\n ak_id,\n timeslot_id,\n room_id,\n participant_id,\n value=0,\n name=\"RoomImpossibleForAK\",\n )\n\n # Z7: TimeImpossibleForRoom\n # ∀ A,T,R,P: If room R is not available in timeslot T: Yᴬ⋅ᵀ⋅ᴿ⋅ᴾ=0\n for room_id, timeslot_id in product(room_ids, timeslot_ids):\n if room_time_constraint_dict[room_id].difference(\n fulfilled_time_constraints[timeslot_id]\n ):\n for participant_id, ak_id in product(participant_ids, ak_ids):\n _set_decision_variable(\n dec_vars,\n ak_id,\n timeslot_id,\n room_id,\n participant_id,\n value=0,\n name=\"TimeImpossibleForRoom\",\n )\n\n # Z8: NoAKCollision\n # ∀ T, AKs A,B with A and B may not overlap: ∑ᴿ Yᴬ⋅ᵀ⋅ᴿ⋅ᴾᴬ + Yᴮ⋅ᵀ⋅ᴿ⋅ᴾᴮ ≤ 1\n ## TODO: Not implemented yet\n\n # The problem data is written to an .lp file\n prob.writeLP(\"koma-plan.lp\")\n\n kwargs_dict = {}\n if args.solver_path:\n kwargs_dict[\"path\"] = args.solver_path\n if args.warm_start:\n kwargs_dict[\"warmStart\"] = True\n if args.timelimit:\n kwargs_dict[\"timeLimit\"] = args.timelimit\n if args.gap_rel:\n kwargs_dict[\"gapRel\"] = args.gap_rel\n if args.gap_abs:\n kwargs_dict[\"gapAbs\"] = args.gap_abs\n if args.threads:\n kwargs_dict[\"Threads\"] = args.threads\n\n if args.solver:\n solver = getSolver(args.solver, **kwargs_dict)\n else:\n solver = None\n # The problem is solved using PuLP's choice of Solver\n res = prob.solve(solver)\n\n # The status of the solution is printed to the screen\n print(\"Status:\", LpStatus[prob.status])\n\n tmp_res_dir = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))\n for ak_id, timeslot_id, room_id, participant_id in product(\n ak_ids, timeslot_ids, room_ids, real_preferences_dict.keys()\n ):\n if value(dec_vars[ak_id][timeslot_id][room_id][participant_id]) == 1:\n tmp_res_dir[ak_id][room_id][\"timeslot_ids\"].add(timeslot_id)\n tmp_res_dir[ak_id][room_id][\"participant_ids\"].add(participant_id)\n\n output_dict = {}\n output_dict[\"scheduled_aks\"] = [\n {\n \"ak_id\": ak_id,\n \"room_id\": room_id,\n \"timeslot_ids\": list(subsubdict[\"timeslot_ids\"]),\n \"participant_ids\": list(subsubdict[\"participant_ids\"]),\n }\n for ak_id, subdict in tmp_res_dir.items()\n for room_id, subsubdict in subdict.items()\n ]\n output_dict[\"input\"] = input_dict\n\n with open(\"output.json\", \"w\") as output_file:\n json.dump(output_dict, output_file)", "title": "" }, { "docid": "d4f268f93c36c64929a3badf0dba522a", "score": 
"0.4996614", "text": "def create_problem(self, **kwargs):\n return Problem(**kwargs)", "title": "" }, { "docid": "03dc467945c00756b4325602a7c9d235", "score": "0.49830803", "text": "def problems_to_yaml(\n problems: Mapping[str, Problem]\n) -> str: # pylint: disable=too-many-locals\n\n def collect_instance_classes_and_limiting_sets(\n problem\n ): # pylint: disable=invalid-name\n \"\"\"Populates and returns instance_classes and limiting_sets sets\"\"\"\n instance_classes = set()\n limiting_sets = set()\n for i_c in problem.instance_classes:\n instance_classes.add(i_c)\n limiting_sets.update(set(i_c.limiting_sets))\n return instance_classes, limiting_sets\n\n def collect_workloads_and_apps(problem):\n \"\"\"Populates and returns workloads and apps sets\"\"\"\n workloads = set()\n apps = set()\n for wld in problem.workloads:\n workloads.add(wld)\n apps.add(wld.app)\n return workloads, apps\n\n def collect_performances(problem):\n \"\"\"Populates and returns performances set\"\"\"\n performances = set()\n performances.add(problem.performances)\n return performances\n\n def lsets_to_yaml(limiting_sets):\n \"\"\"Returns an array of lines to add to the yaml array, representing the\n Limiting_sets part\"\"\"\n lines = []\n lines.append(\"Limiting_sets:\")\n for l_s in sorted(limiting_sets):\n lines.append(\" - &{}\".format(_anchor_from_id(l_s)))\n lines.extend(_namedtuple_to_yaml(l_s, level=2))\n lines.append(\"\")\n return lines\n\n def iclasses_to_yaml(instance_classes):\n \"\"\"Returns an array of lines to add to the yaml array, representing the\n Instance_classes part\"\"\"\n lines = []\n lines.append(\"Instance_classes:\")\n for i_c in sorted(instance_classes):\n anchor = _anchor_from_id(i_c)\n aux = i_c._replace(\n limiting_sets=\"[{}]\".format(\n \", \".join(\"*{}\".format(_anchor_from_id(ls)) for ls in i_c.limiting_sets)\n )\n )\n lines.append(\" - &{}\".format(anchor))\n lines.extend(_namedtuple_to_yaml(aux, level=2))\n lines.append(\"\")\n return lines\n\n def apps_to_yaml(apps):\n \"\"\"Returns an array of lines to add to the yaml array, representing the\n Apps part\"\"\"\n lines = []\n lines.append(\"Apps:\")\n for app in sorted(apps):\n lines.append(\" - &{}\".format(_anchor_from_id(app)))\n lines.extend(_namedtuple_to_yaml(app, level=2))\n lines.append(\"\")\n return lines\n\n def wloads_to_yaml(workloads):\n \"\"\"Returns an array of lines to add to the yaml array, representing the\n Workloads part\"\"\"\n lines = []\n # It is necessary to remove \"filename\" if it is None, or \"values\" if not\n # But fields cannot be removed from namedtuples, so we convert it to dict\n lines.append(\"Workloads:\")\n for w_l in sorted(workloads):\n anchor = _anchor_from_id(w_l)\n aux = w_l._asdict()\n if aux[\"filename\"]:\n aux.pop(\"values\")\n else:\n aux.pop(\"filename\")\n aux.update(values=list(w_l.values))\n aux.update(app=\"*{}\".format(_anchor_from_id(w_l.app)))\n lines.append(\" - &{}\".format(anchor))\n lines.extend(_dict_to_yaml(aux, level=2))\n lines.append(\"\")\n return lines\n\n def probs_to_yaml(problems):\n \"\"\"Returns an array of lines to add to the yaml array, representing the\n Problems part\"\"\"\n lines = []\n lines.append(\"Problems:\")\n for prob in problems.values():\n anchor = _anchor_from_id(prob)\n aux = prob._replace(\n instance_classes=\"[{}]\".format(\n \", \".join(\"*{}\".format(_anchor_from_id(ic)) for ic in prob.instance_classes)\n ),\n workloads=\"[{}]\".format(\n \", \".join(\"*{}\".format(_anchor_from_id(wl)) for wl in prob.workloads)\n ),\n 
performances=\"*{}\".format(_anchor_from_id(prob.performances)),\n )\n lines.append(\" - &{}\".format(anchor))\n lines.extend(_namedtuple_to_yaml(aux, level=2))\n lines.append(\"\")\n return lines\n\n def perfs_to_yaml(performances):\n \"\"\"Returns an array of lines to add to the yaml array, representing the\n Performances part\"\"\"\n lines = []\n lines.append(\"Performances:\")\n for perfset in sorted(performances):\n lines.append(\" - &{}\".format(_anchor_from_id(perfset)))\n lines.append(\" id: {}\".format(perfset.id))\n lines.append(\" time_unit: {}\".format(perfset.time_unit))\n lines.append(\" values:\")\n for iclass, app, perf in perfset.values:\n lines.append(\" - instance_class: *{}\".format(_anchor_from_id(iclass)))\n lines.append(\" app: *{}\".format(_anchor_from_id(app)))\n lines.append(\" value: {}\".format(perf))\n return lines\n\n # \"main\" body of the function\n yam: List[str] = [] # List of lines of the resulting yaml\n apps: Set[App] = set() # set of App objects indirectly referenced from the problems\n # (via the workloads)\n workloads: Set[\n Workload\n ] = set() # set of Workload objects directly referenced from the problems\n limiting_sets: Set[\n LimitingSet\n ] = set() # set of Limiting_set objects indirectly referenced from the problems\n # (via instance classes)\n instance_classes: Set[\n InstanceClass\n ] = set() # set of Instance_class objects directly referenced from the problems\n performances: Set[\n PerformanceSet\n ] = set() # set of Performance objects directly referenced from the problem\n\n for prob in problems.values():\n _wls, _apps = collect_workloads_and_apps(prob)\n _ics, _ls = collect_instance_classes_and_limiting_sets(prob)\n apps.update(_apps)\n workloads.update(_wls)\n limiting_sets.update(_ls)\n instance_classes.update(_ics)\n for prob in problems.values():\n performances.update(collect_performances(prob))\n\n yam.extend(lsets_to_yaml(limiting_sets))\n yam.extend(iclasses_to_yaml(instance_classes))\n yam.extend(apps_to_yaml(apps))\n yam.extend(wloads_to_yaml(workloads))\n yam.extend(perfs_to_yaml(performances))\n yam.extend(probs_to_yaml(problems))\n return \"\\n\".join(yam)", "title": "" }, { "docid": "ef5e4d77046d8311651d6ed29020ad14", "score": "0.4971023", "text": "def make_problems(settings):\n problem_dicts = []\n\n for setting in settings:\n setting = add_missing_defaults(setting)\n devices = setting[\"device\"]\n\n for dev in devices:\n problem = copy.deepcopy(setting)\n problem[\"device\"] = dev\n problem_dicts.append(problem)\n\n return [Problem(**p) for p in problem_dicts]", "title": "" }, { "docid": "9fe70a80d04c81e7b34de8bdec53f586", "score": "0.4968154", "text": "def __init__(self,variables, constraints):\n self.constraints = constraints\n \"list of constraints\"\n self.variables = {}\n \"dictionary of variable names to variable instances\"\n self.domains = {}\n \"dictionary of variable names to DiscreteSet/IntervalSet with admissible values\"\n for var in variables:\n self.variables[var.name] = var\n self.domains[var.name] = var.domain", "title": "" }, { "docid": "1f6203365aab44f0c6c8cc10df3d83d9", "score": "0.49666953", "text": "def populate_from_data(self, data_lines):\n poorSigLev = None\n eegPow = None\n attention = None\n for line in data_lines:\n message = simplejson.loads(line)\n if \"rawEeg\" in message:\n self.add_raw_eeg(message[\"timestamp\"], message[\"rawEeg\"])\n if \"poorSignalLevel\" in message:\n poorSigLev = message\n if \"eegPower\" in message:\n eegPow = message\n if \"eSense\" in message:\n e_sense = 
message[\"eSense\"]\n if \"attention\" in e_sense:\n attention = message\n # This assumes meditation always comes last\n elif \"meditation\" in e_sense:\n # If we've cached all the data we need to store power levels\n if poorSigLev and eegPow and attention:\n # Make sure our time-sequencing-based assumptions hold\n assert(poorSigLev[\"timestamp\"] ==\n eegPow[\"timestamp\"] ==\n attention[\"timestamp\"] ==\n message[\"timestamp\"])\n self.add_power_levels(message[\"timestamp\"],\n poorSigLev[\"poorSignalLevel\"],\n eegPow[\"eegPower\"][\"lowAlpha\"], \n eegPow[\"eegPower\"][\"highAlpha\"], \n eegPow[\"eegPower\"][\"lowBeta\"],\n eegPow[\"eegPower\"][\"highBeta\"], \n eegPow[\"eegPower\"][\"lowGamma\"],\n eegPow[\"eegPower\"][\"highGamma\"],\n attention[\"eSense\"][\"attention\"],\n e_sense[\"meditation\"])\n # Always reset the cached messages at this point as we may\n # have reached meditation near the start of the data\n # without having received the other data first\n poorSigLev = None\n eegPow = None\n attention = None", "title": "" }, { "docid": "9f053ce82b0cd7cddaa98339057fcec6", "score": "0.49634072", "text": "def freshservice_problem_create(self, **kwargs) -> Dict[str, Any]:\n kwargs = locals().pop('kwargs', None)\n data = remove_empty_elements(kwargs)\n\n return self._http_request(\n 'POST',\n 'api/v2/problems',\n json_data=data,\n )", "title": "" }, { "docid": "f2ada9ae5ee5568ee70fb484c49b73a3", "score": "0.4953322", "text": "def setUp(self):\n self.list_keys = [\"depth\", \"x_divisions\", \"time_total\",\n \"temperature_ambient\",\n \"temperature_initial\"]\n self.problem_description_test = {key: 10 for key in self.list_keys}\n self.problem_description_test.update({entry[0]: entry[1] for entry in [\n (\"material\", \"pmma\"), (\"problem_type\", \"direct\"),\n (\"properties_type\", \"constant\"), (\"boundcond_surface\", \"robin\"),\n (\"surface_losses_type\", \"non-linear\"), (\"absorptivity\", 0.9),\n (\"emissivity\", 0.9), (\"h_convective\", 10),\n (\"boundcond_back\", \"insulated\"), (\"ihf_type\", \"constant\"),\n (\"conductivity_subs\", 0), (\"material_type\", \"inert\"),\n (\"pre_exp_factor\", 1), (\"activation_energy\", 1),\n (\"heat_reaction\", 1), (\"reaction_order\", 1),\n (\"ihf_coefficients\", 4000)]})\n for property_name in [\"conductivity_coeff\", \"density_coeff\",\n \"heat_capacity_coeff\"]:\n self.problem_description_test[property_name] = [1, None]", "title": "" }, { "docid": "8d5238ee4df4217e52be2c87785738e6", "score": "0.49411058", "text": "def test_submit_problem_manual():\n CONSTANT_DATA = get_CONSTANT_DATA()\n path_to_data = CONSTANT_DATA['path_to_data']\n roster_name = CONSTANT_DATA['roster_name']\n S = SheetObject(path_to_data + roster_name,'submissions')\n user_id = \"jschmoe\"\n assignment = 1\n problem = 1\n timestamp = 129837\n query = {'netid':user_id, 'assignment':assignment,'problem':problem}\n old_entries = S.get(query)\n old_entry = old_entries[0]\n \n #this is the shitty way to make a new dictionary\n new_entry = old_entry\n new_submission_number = get_submission_count()\n #one would think these modifications below don't matter...\n new_entry['netid'] = user_id\n new_entry['assignment'] = assignment\n new_entry['problem'] = problem\n new_entry['submission_number'] = new_submission_number\n new_entry['submission_time'] = timestamp\n new_entry['new_submission']=1\n new_entry['submission_locked']=0\n \n #the modifications modify old_entry\n #and the query is actually empty.\n feedback_entries = S.get(old_entry)\n print(feedback_entries)\n \n assert 
len(feedback_entries) == 0\n \n #message,write_file = submit_problem(user_id,assignment,problem,timestamp)\n #print(message)", "title": "" }, { "docid": "11e7a2e6f3e4f4994f2c744f2d63bca8", "score": "0.4940365", "text": "def save_information(inputs):\n # active here will hold either an individual or a family\n active_entity = None\n error=[]\n # to keep track of active tag at level 0 and 1. 2 only has a single tag DATE\n active_tags = {\n 0: None,\n 1: None\n }\n\n individuals = {}\n families = {}\n tag_positions = {}\n for num, level, tag, arguments in inputs:\n active_tags[level] = tag\n \n # Saving previous 0 level entity.\n if tag == 'INDI' or tag == 'FAM':\n if active_entity:\n if type(active_entity) is Individual:\n individuals[active_entity.id] = active_entity\n else:\n families[active_entity.id] = active_entity\n \n # assign active entity new individual or family based on the tag\n active_entity = Individual(arguments) if tag == 'INDI' else Family(arguments)\n # creating a new entity in tags dictionary so that we can add tags to it in the future\n tag_positions[arguments] = defaultdict(set)\n\n # if level == 0 and arguments == '@I37@':\n # print(arguments, tag, individuals)\n if tag == 'INDI' and arguments in individuals:\n error.append(f'ERROR US22, line {num}, An Individual with ID {arguments} already exists!')\n #print(f'ERROR US22, line {num}, An Individual with ID {arguments} already exists!')\n elif tag == 'FAM' and arguments in families:\n error.append(f'ERROR US22, line {num}, A Family with ID {arguments} already exists!')\n #print(f'ERROR US22, line {num}, A Family with ID {arguments} already exists!')\n continue\n \n if level == 0:\n continue\n tag_positions[active_entity.id][tag].add(num)\n\n if tag in ['DEAT', 'BIRT', 'MARR', 'DIV']:\n continue\n \n # Since all the three fields are sets we add elements to them.\n if tag in ['FAMC', 'FAMS', 'CHIL']:\n active_entity.map[tag].add(arguments)\n else:\n # If we have a date tag convert argument to datetime\n if level == 2:\n tag = active_tags[1]\n arguments = check_and_convert_string_to_date(arguments, num)\n\n setattr(active_entity, active_entity.map[tag], arguments)\n \n if type(active_entity) is Individual:\n individuals[active_entity.id] = active_entity\n else:\n families[active_entity.id] = active_entity\n \n # Update age for everyone, since its None in the beginning\n for id in individuals:\n current = individuals[id]\n current.name = ''.join(current.name.split('/'))\n \n deathday = None\n if current.birth:\n birthday = current.birth\n\n if current.death:\n deathday = current.death\n current.alive = False if deathday < datetime.now() else True\n if deathday > datetime.now():\n deathday = datetime.now()\n else:\n current.alive = True\n deathday = datetime.now()\n difference = deathday - birthday\n current.age = (difference.days + difference.seconds//86400)//365\n \n # Update the husband and wife name for everyone, since its None in the beginning.\n for id in families:\n current = families[id]\n if current.hid:\n current.hname = individuals[current.hid].name\n \n if current.wid:\n current.wname = individuals[current.wid].name\n \n return individuals, families, tag_positions, error", "title": "" }, { "docid": "b78d377740ee49f1b3448d61352ab708", "score": "0.49338743", "text": "def set_from_dict(self, optd):\n\n # Common variables\n self.fasta = optd['fasta']\n self.sequence_length = optd['fasta_length']\n self.name = optd['name']\n self.nmodels = optd['nmodels']\n self.nproc = optd['nproc']\n\n # Directories\n self.ample_dir 
= optd['work_dir']\n self.work_dir = os.path.join(self.ample_dir, 'modelling')\n if not optd['models_dir']:\n self.models_dir = os.path.join(self.ample_dir, \"models\")\n else:\n self.models_dir = optd['models_dir']\n\n # psipred secondary structure prediction\n if optd['psipred_ss2']:\n if not os.path.isfile(optd['psipred_ss2']):\n raise RuntimeError(\"Cannot find psipred_ss2 file: {}\".format(optd['psipred_ss2']))\n self.psipred_ss2 = optd['psipred_ss2']\n\n if not optd['make_frags']:\n self.frags_3mers = optd['frags_3mers']\n self.frags_9mers = optd['frags_9mers']\n if not (os.path.exists(self.frags_3mers) and os.path.exists(self.frags_9mers)):\n raise RuntimeError(\n \"Cannot find both fragment files:\\n{0}\\n{1}\\n\".format(self.frags_3mers, self.frags_9mers)\n )\n else:\n # Fragment variables\n self.use_homs = optd['use_homs']\n self.fragments_directory = os.path.join(self.work_dir, 'rosetta_fragments')\n\n # Extra modelling options\n self.all_atom = optd['all_atom']\n self.domain_termini_distance = optd['domain_termini_distance']\n self.rad_gyr_reweight = optd['rg_reweight']\n\n if optd['improve_template']:\n if not os.path.exists(optd['improve_template']):\n raise RuntimeError('cant find template to improve')\n self.improve_template = optd['improve_template']\n if optd['restraints_file']:\n if not os.path.exists(optd['restraints_file']):\n raise RuntimeError(\"Cannot find restraints file: {0}\".format(optd['restraints_file']))\n self.restraints_file = optd['restraints_file']\n self.restraints_weight = optd['restraints_weight']\n if optd['disulfide_constraints_file']:\n if not os.path.exists(optd['disulfide_constraints_file']):\n raise RuntimeError(\n \"Cannot find disulfide constraints file: {0}\".format(optd['disulfide_constraints_file'])\n )\n self.disulfide_constraints_file = optd['disulfide_constraints_file']\n if optd['rosetta_flagsfile']:\n self.rosetta_flagsfile = optd['rosetta_flagsfile']\n\n # NMR options\n self.nmr_remodel = optd['nmr_remodel']\n self.nmr_process_ntimes = optd['nmr_process']\n self.nmr_alignment_file = optd['alignment_file']\n self.mnr_remodel_fasta = optd['nmr_remodel_fasta']\n\n # Multimer modelling\n self.multimer_modelling = optd['multimer_modelling']\n self.num_chains = optd['nmasu']\n\n # Runtime options\n self.submit_qtype = optd['submit_qtype']\n self.nprocesses = optd['nproc']\n self.submit_max_array = optd['submit_max_array']\n self.submit_queue = optd['submit_queue']\n self.submit_array = optd['submit_array']\n self.submit_pe = optd['submit_pe']\n\n if optd['transmembrane_old']:\n self.transmembrane_old = True\n if optd['blast_dir']:\n blastpgp = os.path.join(optd['blast_dir'], \"bin/blastpgp\")\n self.blastpgp = ample_util.find_exe(blastpgp)\n if self.blastpgp:\n logger.debug(\"Using user-supplied blast_dir for blastpgp executable: {0}\".format(self.blastpgp))\n\n # nr database\n if optd['nr']:\n if not os.path.exists(optd['nr'] + \".pal\"):\n raise RuntimeError(\n \"Cannot find the nr database: {0}\\n\"\n \"Please give the location with the nr argument to the script.\".format(optd['nr'])\n )\n else:\n self.nr = optd['nr']\n if self.nr:\n logger.debug(\"Using user-supplied nr database: {0}\".format(self.nr))\n\n self.spanfile = optd['transmembrane_spanfile']\n self.lipofile = optd['transmembrane_lipofile']\n self.octopusTopology = optd['transmembrane_octopusfile']\n\n # Check if we've been given files\n if self.octopusTopology and not (os.path.isfile(self.octopusTopology)):\n msg = \"Cannot find provided transmembrane octopus topology 
prediction: {0}\".format(self.octopusTopology)\n raise RuntimeError(msg)\n\n if self.spanfile and not os.path.isfile(self.spanfile):\n msg = \"Cannot find provided transmembrane spanfile: {0}\".format(self.spanfile)\n raise RuntimeError(msg)\n\n if self.lipofile and not (os.path.isfile(self.lipofile)):\n msg = \"Cannot find provided transmembrane lipofile: {0}\".format(self.lipofile)\n raise RuntimeError(msg)\n\n if (self.spanfile and not self.lipofile) or (self.lipofile and not self.spanfile):\n msg = \"You need to provide both a spanfile and a lipofile\"\n raise RuntimeError(msg)\n elif optd['transmembrane']:\n self.transmembrane = True\n # End transmembrane checks\n\n return", "title": "" }, { "docid": "bebca68b57da1fd3901771fc0869becd", "score": "0.49326622", "text": "def fromdict(cls, data, fields):\n ex = cls()\n for key, field in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if field is not None:\n setattr(ex, key, field.preprocess(data[key])) \n else:\n setattr(ex, key, data[key])\n return ex", "title": "" }, { "docid": "397595a4fba890f6434911b9b830788e", "score": "0.4930427", "text": "def __init__(self):\n self.data = {\"species\": {}, \"individual\": None, \"settings\": [None, None, None, 0.0], \"crossfeeding\": {}, \"uptake\": {}, \"essentials\": set()}", "title": "" }, { "docid": "f415e3242b273345597b3386b8cf91c0", "score": "0.49293983", "text": "def process_problem(problem_type3, problem, error_param):\r\n # checks if the problem should be solved, by guessing(use same model regardless of problem)\r\n if error_param[7]:\r\n model1 = {(0, 0): 'A', (1, 0): 'B', (2, 0): 'C', (3, 0): 'D'}\r\n return answer_question(model1, problem[3])\r\n if problem_type3:\r\n return process_problem_type3(problem, error_param)\r\n return process_problem_type1(problem, error_param)", "title": "" }, { "docid": "605fbf08a4a09192202da35dc3257e79", "score": "0.49283937", "text": "def __init__(self, input_dict=None):\n\n if input_dict is None:\n self.manager = {}\n self.total_ic = 0\n self.chemistry = 0\n self.rating = 0\n self.strength = 0\n self.price = 0\n self.formation = {}\n else:\n self.manager = copy.deepcopy(input_dict['manager'])\n self.total_ic = input_dict['total_ic']\n self.chemistry = input_dict['chemistry']\n self.rating = input_dict['rating']\n #self.strength = input_dict['strength']\n #self.price = input_dict['price']\n self.formation = copy.deepcopy(input_dict['formation'])\n\n # TEMPORARY UNTIL ALL TEAMS HAVE STRENGTHS------------------------------------------------------------------\n if 'strength' in input_dict:\n self.strength = input_dict['strength']\n else:\n self.strength = 0\n # TEMPORARY UNTIL ALL TEAMS HAVE STRENGTHS------------------------------------------------------------------\n\n # TEMPORARY UNTIL ALL TEAMS HAVE PRICES---------------------------------------------------------------------\n if 'price' in input_dict:\n self.price = input_dict['price']\n else:\n self.price = 0\n # TEMPORARY UNTIL ALL TEAMS HAVE PRICES---------------------------------------------------------------------", "title": "" }, { "docid": "235b212fddd1d3e531b00e4583ea4d8d", "score": "0.49279243", "text": "def dict_setup(working_directory, persistent_files):\n ###### IPC to WIPO lookup ######\n\n # input ipc_technology data from local, which contains IPC technology class definitions\n\n # create a dictionary mapping IPC code to field number\n # key: IPC_code\n # value: Field_number\n ipc_data = 
pd.read_csv('{}/ipc_technology.csv'.format(persistent_files))\n ipc_data['clean_IPC'] = ipc_data['IPC_code'].str.replace(\"%\",\"\").replace(' ','')\n ipc_to_field = dict(zip(ipc_data['clean_IPC'], ipc_data['Field_number']))\n\n ###### CPC to IPC mapping ######\n\n # input ipc_concordance data from local\n ipc_concordance = open(working_directory + \"/ipc_concordance.txt\").read().split(\"\\n\")\n\n # create a dictionary mapping CPC to IPC\n # key: IPC_code: first column\n # value: CPC_code: second column\n cpc_to_ipc = {}\n for row in ipc_concordance:\n row = row.split(\"\\t\\t\")\n # keep only rows that do not have null value in the dataset\n if len(row) > 1 and row[1] != \"CPCONLY\":\n cpc_to_ipc[row[0]] = row[1]\n return ipc_to_field, cpc_to_ipc", "title": "" }, { "docid": "7503a3f23bf6d3123d476e66bd69a629", "score": "0.49183396", "text": "def process_input(values, puzzle_input, u_input):\n puzzle_input[values[0]] = u_input\n return puzzle_input", "title": "" }, { "docid": "4b77782c8f40e631d7e03c88f887bb82", "score": "0.4901898", "text": "def create_metadata_dict(self):\n problems_students = []\n for i in range(len(self.text_files)):\n my_list = self.read_all_txt_files(i)\n naam_student = my_list[0]\n q_nummer = re.findall(r'\\(.+?\\)', naam_student)\n if len(my_list) > 1:\n opdracht = my_list[1]\n bestands_naam = my_list[-2]\n extension = re.findall(r'.\\w+$', bestands_naam)\n datum = my_list[2]\n hour = re.findall(r'[0-9][0-9]:[0-9][0-9]:[0-9][0-9]', datum)\n self.students_dict[\"naam_student\"].append(naam_student[6:-11])\n try:\n self.students_dict[\"extensie\"].append(extension[0])\n except IndexError:\n self.students_dict[\"extensie\"].append(\"null\")\n try:\n self.students_dict[\"opdracht\"].append(opdracht[10:])\n except IndexError:\n self.students_dict[\"opdracht\"].append(\"null\")\n problems_students.append(naam_student[6:-11])\n try:\n self.students_dict[\"bestands_naam\"].append(bestands_naam[15:-4]) #CHANGE!!\n except IndexError:\n self.students_dict[\"bestands_naam\"].append(\"null\")\n problems_students.append(naam_student[6:-11])\n try:\n self.students_dict[\"bestands_naam_full\"].append(bestands_naam[15:])\n except IndexError:\n self.students_dict[\"bestands_naam_full\"].append(\"null\")\n self.students_dict[\"datum\"].append(datum[14:-17])\n try:\n self.students_dict[\"q_nummer\"].append(q_nummer[0][1:-1])\n except IndexError:\n self.students_dict[\"q_nummer\"].append(\"null\")\n problems_students.append(naam_student[6:-11])\n if len(hour) != 0:\n self.students_dict[\"hour\"].append(hour[0])\n else:\n self.students_dict[\"hour\"].append(\"00:00:00\")\n self.students_dict[\"punt\"].append(0)\n else:\n pass\n self.students = pd.DataFrame(data=self.students_dict)\n print (self.students.head(n=20))", "title": "" }, { "docid": "ca17eebbb3b3b766ad4637014baa308c", "score": "0.48964462", "text": "def build_all(data):\n from .i import I, P\n for i, v in data.items():\n P[i] = I(i, v)", "title": "" }, { "docid": "d19bd0ce963bb9853fd2a80aba4c3c5b", "score": "0.48962823", "text": "def infotodict(seqinfo):\n\n t1w = create_key('sub-{subject}/anat/sub-{subject}_T1w')\n run1 = create_key('sub-{subject}/func/sub-{subject}_task-InstrAct_run-1_bold')\n run2 = create_key('sub-{subject}/func/sub-{subject}_task-InstrAct_run-2_bold')\n run3 = create_key('sub-{subject}/func/sub-{subject}_task-InstrAct_run-3_bold')\n run4 = create_key('sub-{subject}/func/sub-{subject}_task-InstrAct_run-4_bold')\n locdecl = create_key('sub-{subject}/func/sub-{subject}_task-locdecl_run-1_bold')\n locproc = 
create_key('sub-{subject}/func/sub-{subject}_task-locproc_run-1_bold')\n fm1 = create_key('sub-{subject}/fmap/sub-{subject}_magnitude')\n fm2 = create_key('sub-{subject}/fmap/sub-{subject}_phasediff')\n info = {t1w: [],\n run1: [],\n run2: [],\n run3: [],\n run4: [],\n locdecl: [],\n locproc: [],\n fm1: [],\n fm2: [],\n }\n\n for s in seqinfo:\n \"\"\"\n The namedtuple `s` contains the following fields:\n\n * total_files_till_now\n * example_dcm_file\n * series_id\n * dcm_dir_name\n * unspecified2\n * unspecified3\n * dim1\n * dim2\n * dim3\n * dim4\n * TR\n * TE\n * protocol_name\n * is_motion_corrected\n * is_derived\n * patient_id\n * study_description\n * referring_physician_name\n * series_description\n * image_type\n \"\"\"\n\n if s.protocol_name == 'GIfMI_T1_MPRAGE':\n info[t1w].append(s.series_id)\n if s.protocol_name == 'ep2d_bold_RUN_1':\n info[run1].append(s.series_id)\n if s.protocol_name == 'ep2d_bold_RUN_2':\n info[run2].append(s.series_id)\n if s.protocol_name == 'ep2d_bold_RUN_3':\n info[run3].append(s.series_id)\n if s.protocol_name == 'ep2d_bold_RUN_4':\n info[run4].append(s.series_id)\n if s.protocol_name == 'ep2d_bold_LOC_DECL':\n info[locdecl].append(s.series_id)\n if s.protocol_name == 'ep2d_bold_LOC_PROC':\n info[locproc].append(s.series_id)\n if s.protocol_name == 'gre_field_mapping_2.5mm' and s.dcm_dir_name =='GRE_FIELD_MAPPING_2_5MM_0006': #4 for subject 1\n info[fm1].append(s.series_id)\n if s.protocol_name == 'gre_field_mapping_2.5mm' and s.dcm_dir_name =='GRE_FIELD_MAPPING_2_5MM_0007': #5 for subject 1\n info[fm2].append(s.series_id)\n return info", "title": "" }, { "docid": "cf33197bd2ef6079f2b11f6001b2e866", "score": "0.48907223", "text": "def read_problem(problem_path):\n logging.debug(\"Start reading from {}\".format(problem_path))\n lines = open(problem_path, 'r').readlines()\n\n base, ext = os.path.splitext(problem_path)\n if \"/\" in base:\n _, problem_base_name = base.split(\"/\")\n else:\n problem_base_name = base\n\n problem_dict = dict()\n\n for line in lines:\n # Divide lines by key and values.\n key, values = line.strip().split(' ')\n logging.debug(\"Line: {} and {}\".format(key, values))\n if key not in problem_dict:\n # Save each key in a dict.\n problem_dict[key] = [values.split(',')]\n else:\n problem_dict[key].append(values.split(','))\n\n G = build_graph(problem_dict)\n cars = build_cars(problem_dict)\n obs = build_obs(problem_dict, problem_base_name)\n enfs = build_enfs(problem_dict)\n\n return G, cars, obs, enfs, problem_base_name", "title": "" }, { "docid": "8f0d9e02613f8c5052504f774f9d5b3d", "score": "0.48893455", "text": "def load(self, load_path, data_path):\n # load path into dict\n problem = json.loads(open(load_path).read())\n\n # read problem description\n self.label = problem['label']\n self.problem_type = problem['problem_type']\n self.model_label = problem['model_label']\n\n # load data\n self.data = pd.read_json(data_path)", "title": "" }, { "docid": "a4924ff0a0884cc42a054f3f82e62a82", "score": "0.48758548", "text": "def __init__(self):\n self.data = {}\n self.data['_old_words'] = []\n self.data['_new_words'] = []\n self.data['_remove_words'] = []\n self.data['_data_seg'] = []\n self.data['_key_value_sep'] = []\n\n self.data['_post_new_item'] = []\n self.data['_post_item_01'] = []\n self.data['_post_op_code'] = []\n self.data['_post_item_02'] = []\n\n self.data['_trans_item'] = []\n self.data['_trans_op_code'] = []\n\n self.data['_format_new_item'] = []\n self.data['_format_format'] = []\n self.data['_format_item_01'] = []\n 
self.data['_format_item_02'] = []\n\n self.data['_alias_new_item'] = []\n self.data['_alias_ori_item'] = []\n\n self.data['_time_step_sec'] = []", "title": "" }, { "docid": "90ed16f5c6dd9ae070bd8e3b5dee6e91", "score": "0.48567048", "text": "def import_data(self, data):\n self.clear()\n for coord in data[\"rectangle_obstacles\"]:\n self.create_rect_obstacle((coord[2] + coord[0])/2, (coord[3] + coord[1])/2, width=(coord[2] - coord[0]),\n height=(coord[3] - coord[1]))\n for par in data[\"circle_obstacles\"]:\n self.create_circle_obstacle(*par)\n for par in data[\"sources\"]:\n self.create_source(*par)\n for par in data[\"goals\"]:\n self.create_goal(*par)", "title": "" }, { "docid": "d34e673cdc17c84dbb48395845c09341", "score": "0.48490128", "text": "def load_from_dict(self, dict_with_existing_algolia_unit):\n errors_found = self.__errors_in_incoming_data(dict_with_existing_algolia_unit, bool_full_unit=True)\n if not errors_found:\n self.dict_algolia_unit = dict_with_existing_algolia_unit\n self.__unit_loaded_or_created = True\n else:\n logging.error('Errors were found in incoming data. Unable to load Algolia unit.')", "title": "" }, { "docid": "d507b452485246ae13918ac11aab6d69", "score": "0.48430243", "text": "def __init__(self):\n\n self.people = {'Jane': {'willingness to travel': 0.1596993,\n 'desire for new experience':0.67131344,\n 'cost':0.15006726,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.01892123,\n },\n 'Bob': {'willingness to travel': 0.63124581,\n 'desire for new experience':0.20269888,\n 'cost':0.01354308,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.15251223,\n },\n 'Mary': {'willingness to travel': 0.49337138 ,\n 'desire for new experience': 0.41879654,\n 'cost': 0.05525843,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.03257365,\n },\n 'Mike': {'willingness to travel': 0.08936756,\n 'desire for new experience': 0.14813813,\n 'cost': 0.43602425,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.32647006,\n },\n 'Alice': {'willingness to travel': 0.05846052,\n 'desire for new experience': 0.6550466,\n 'cost': 0.1020457,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.18444717,\n },\n 'Skip': {'willingness to travel': 0.08534087,\n 'desire for new experience': 0.20286902,\n 'cost': 0.49978215,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.21200796,\n },\n 'Kira': {'willingness to travel': 0.14621567,\n 'desire for new experience': 0.08325185,\n 'cost': 0.59864525,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.17188723,\n },\n 'Moe': {'willingness to travel': 0.05101531,\n 'desire for new experience': 0.03976796,\n 'cost': 0.06372092,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.84549581,\n },\n 'Sara': {'willingness to travel': 0.18780828,\n 'desire for new experience': 0.59094026,\n 'cost': 0.08490399,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.13634747,\n },\n 'Tom': {'willingness to travel': 0.77606127,\n 'desire for new experience': 0.06586204,\n 'cost': 0.14484121,\n #'indian food':1,\n #'Mexican food':1,\n #'hipster points':3,\n 'vegetarian': 0.01323548,\n }\n }\n\n # Transform the user data into a matrix(M_people). 
Keep track of column and row ids.\n\n # convert each person's values to a list\n\n peopleKeys, peopleValues = [], []\n lastKey = 0\n for k1, v1 in self.people.items():\n row = []\n\n for k2, v2 in v1.items():\n peopleKeys.append(k1+'_'+k2)\n if k1 == lastKey:\n row.append(v2)\n lastKey = k1\n\n else:\n peopleValues.append(row)\n row.append(v2)\n lastKey = k1\n\n\n #here are some lists that show column keys and values\n# print(peopleKeys)\n# print(peopleValues)\n\n\n\n self.peopleMatrix = np.array(peopleValues)\n\n# peopleMatrix.shape\n\n\n # Next you collected data from an internet website. You got the following information.\n\n #1 is bad, 5 is great\n\n self.restaurants = {'flacos':{'distance' : 2,\n 'novelty' : 3,\n 'cost': 4,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 5\n },\n 'Joes':{'distance' : 5,\n 'novelty' : 1,\n 'cost': 5,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 3\n },\n 'Poke':{'distance' : 4,\n 'novelty' : 2,\n 'cost': 4,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 4\n },\n 'Sush-shi':{'distance' : 4,\n 'novelty' : 3,\n 'cost': 4,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 4\n },\n 'Chick Fillet':{'distance' : 3,\n 'novelty' : 2,\n 'cost': 5,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 5\n },\n 'Mackie Des':{'distance' : 2,\n 'novelty' : 3,\n 'cost': 4,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 3\n },\n 'Michaels':{'distance' : 2,\n 'novelty' : 1,\n 'cost': 1,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 5\n },\n 'Amaze':{'distance' : 3,\n 'novelty' : 5,\n 'cost': 2,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 4\n },\n 'Kappa':{'distance' : 5,\n 'novelty' : 1,\n 'cost': 2,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 3\n },\n 'Mu':{'distance' : 3,\n 'novelty' : 1,\n 'cost': 5,\n #'average rating': 5,\n #'cuisine': 5,\n 'vegetarian': 3\n }\n }\n\n\n # Transform the restaurant data into a matrix(M_resturants) use the same column index.\n\n\n restaurantsKeys, restaurantsValues = [], []\n\n for k1, v1 in self.restaurants.items():\n for k2, v2 in v1.items():\n restaurantsKeys.append(k1+'_'+k2)\n restaurantsValues.append(v2)\n\n #here are some lists that show column keys and values\n# print(restaurantsKeys)\n# print(restaurantsValues)\n\n# len(restaurantsValues)\n #reshape to 2 rows and 4 columns\n\n #converting lists to np.arrays is easy\n self.restaurantsMatrix = np.reshape(restaurantsValues, (10,4))", "title": "" }, { "docid": "c3d46e693b67396e1eea88c95a6ac26b", "score": "0.48392472", "text": "def __init__(self, loc):\n self.runs = []\n self.vars = []\n self.loc = loc\n self.data = Vividict()", "title": "" }, { "docid": "db42bea3b8af56b80da0326e895eaa3a", "score": "0.48305348", "text": "def structured_solver_data(id_, cat='qpu', incomplete=False):\n obj = {\n \"properties\": {\n \"supported_problem_types\": [\"qubo\", \"ising\"],\n \"qubits\": [1, 2, 3],\n \"couplers\": [[1, 2], [1, 3], [2, 3]],\n \"num_qubits\": 3,\n \"category\": cat,\n \"parameters\": {\"num_reads\": \"Number of samples to return.\"}\n },\n \"id\": id_,\n \"description\": \"A test solver\",\n \"status\": \"ONLINE\"\n }\n\n if incomplete:\n del obj['properties']['supported_problem_types']\n\n return json.dumps(obj)", "title": "" }, { "docid": "2aad861ee70823bda85abeb7ae6753a5", "score": "0.48243952", "text": "def populate(self, data_in):\n # Initialize data\n data = deepcopy(data_in)\n\n # Validate base_type\n if len(data) != 1 or isinstance(data, defaultdict) is False:\n log_message = 
'Agent data \"{}\" is invalid'.format(data)\n log.log2die(1025, log_message)\n\n # Get a description to use for label value\n for label in data.keys():\n description = self._lang.label_description(label)\n data[label]['description'] = description\n break\n\n # Add data to appropriate self._data key\n if data[label]['base_type'] is not None:\n self._data['devices'][self._devicename]['timeseries'].update(data)\n else:\n self._data['devices'][self._devicename]['timefixed'].update(data)", "title": "" }, { "docid": "b6aa0388f160d00d44e854f7e42cbcaf", "score": "0.48243672", "text": "def _get_dict(self, what, list_of_elements):\n group = self._met2gr.get(what, None) # get the h5group name\n if not group:\n if what in self._met2gr.values(): # if directly the h5group name was specified\n group = what\n # inverse to always have what as correct specifier\n what = {v: k for k, v in self._met2gr.items()}[group]\n else:\n raise DataNotInFile(\"'{}' does not exist in file.\".format(what))\n dg = self._data.setdefault(group, {}) # data group container dictionary\n # if no elements or None passed get all\n if len(list_of_elements) == 0 or \\\n (len(list_of_elements) == 1 and list_of_elements[0] is None):\n list_of_elements = self.select_axis.all_for(what)\n # check what elements have to be loaded from disk (no real load)\n set_of_new_elements = set(list_of_elements) - set(dg.keys()) # filter might be faster\n if len(set_of_new_elements) != 0: # if new elements are requested\n gr = self.file.get(group) # the h5data group\n for elem in set_of_new_elements:\n ax = self.select_axis(elem, what) # get the axis name for the specified one\n if self.version == version9_1:\n if elem == Axis.TIME:\n dg[elem] = AttributedNPArray(gr.get(ax)[1:],\n gr.get(ax).attrs, gr.get(ax).name)\n elif elem == Axis.DATA and Axis.TIME in self.select_axis.all_for(what):\n if what == \"bunch_length\":\n dg[elem] = dg[elem] = AttributedNPArray(np.sqrt(gr.get(ax)[1:]),\n gr.get(ax).attrs,\n gr.get(ax).name)\n else:\n dg[elem] = dg[elem] = AttributedNPArray(gr.get(ax)[1:],\n gr.get(ax).attrs,\n gr.get(ax).name)\n elif elem == Axis.XAXIS or elem == Axis.EAXIS or elem == Axis.FAXIS:\n dg[elem] = dg[elem] = AttributedNPArray(gr.get(ax)[0], gr.get(ax).attrs,\n gr.get(ax).name)\n else:\n dg[elem] = gr.get(ax)\n elif self.version < version14_1:\n # fix bunch_length sqrt bug: Inovesa bug 24\n if elem == Axis.DATA and what == \"bunch_length\":\n dg[elem] = AttributedNPArray(np.sqrt(gr.get(ax)), gr.get(ax).attrs,\n gr.get(ax).name)\n else:\n dg[elem] = gr.get(ax)\n elif self.version < (0, 15, -2): # fix bunch_length sqrt bug: Inovesa bug 24\n if elem == Axis.DATA and what == \"bunch_length\":\n dg[elem] = AttributedNPArray(calc_bl(self.bunch_profile(Axis.XAXIS),\n self.bunch_profile(Axis.DATA)),\n gr.get(ax).attrs, gr.get(ax).name)\n else:\n dg[elem] = gr.get(ax)\n else:\n dg[elem] = gr.get(ax)\n return DataContainer(dg, list_of_elements)", "title": "" }, { "docid": "c47a9c4d7b26805c91fa8576b02cf94b", "score": "0.48196632", "text": "def __init__(self, dictionary):\n for key in dictionary:\n setattr(self, key, dictionary[key])\n \n \"\"\" Dictionaries with of all possible answers based on the intention of the question \"\"\"\n\n self.greet = {\n \"key\": (f\"Hey\", f\"Hello\",f\"Mhh, do you need something?\")\n }\n\n self.affirm = {\n \"key\": (f\"Ok, do you want anything else\", f\"Ok\",f\"Mhh\")\n }\n\n self.deny = {\n \"key\": (f\"Ok, do you need anything else\", f\"Ok\",f\"Mhh\")\n }\n\n self.thanking = {\n \"key\": (f\"Don't worry\", 
f\"Ok\",f\"Mhh\")\n }\n\n \"\"\"\n self.mood_great = {\n \"key\": (f\" \", f\"Ok\",f\"Mhh\")\n }\n \"\"\"\n self.lore = {\n \"key\": (f\"{self.Entity} is {self.Info}\")\n }\n\n # Right now quest confirmation works the same, not sure how to differentiate them. \n # How to connect the action witht this, any idea?\n self.quest = {\n \"key\": (f\"We should {self.Info}\", f\"I would suggest {self.Info}\",f\"Let's {self.Info}\")\n }\n\n # The ingredient needs to be the entity her. Maybe we should divide this in two: where to get the ingredient, and how to craft\n # So then the possible answers would be \"To craft this you need...\" / This can be found in .... /// difficult to make it general\n # altough if we include the type of question (Where/Why/How would be easier and just in one method)\n self.craft = {\n \"key\": (f\"{self.Entity} is {self.Info}\")\n }\n\n # For questions about Where are WE? or were I AM? How to anser?\n self.location = {\n \"key\": (f\"We are at {self.Info}\", f\"This is {self.Info}\", f\"This place sounds familiar, I think we are at {self.Info}\")\n }\n\n # Again, generalize questions here or if asks Where to kill, access to the question WHERE.\n self.combat = {\n \"key\": (f\"To kill {self.Entity}, you need to {self.Info}\", f\"You need to {self.Info}\", \n f\"To defeat {self.Entity} you need to {self.Info}\")\n }\n\n self.inventory = {\n \"key\": (f\"We have various items: {self.Info}\", f\"This is what you have in the inventory: {self.Info}\", f\"{self.Info}, that is what you have\")\n }\n\n self.metagame = {\n }\n\n self.chitchat = {\n }\n\n self.botchallenge = {\n\n }", "title": "" }, { "docid": "4161efadcd901e2aeecf6aed5aa8fab3", "score": "0.48192248", "text": "def create_data_model(hotels, workers, from_raw_data):\n data = {}\n n_workers = len(workers)\n data[\"num_vehicles\"] = n_workers\n\n # Precise start and end locations of the workers\n # The number_workers-th first line correspond to the start locations of the workers\n start_locations = [idx for idx in range(n_workers)]\n\n # The number_workers-th to the 2*number_workers-th line correspond to the end locations of the workers\n end_locations = [idx for idx in range(n_workers, 2 * n_workers)]\n data[\"start_locations\"] = start_locations\n data[\"end_locations\"] = end_locations\n\n # Matrix of distances between locations.\n if from_raw_data:\n hotels_data = parse_csv(hotels, \"hotel\", write=False)\n else:\n hotels_data = hotels\n _distances, labels = get_distances_matrix(hotels_data, workers)\n data[\"distances\"] = _distances\n data[\"labels\"] = labels\n num_locations = len(_distances)\n data[\"num_locations\"] = num_locations\n\n # The problem is to find an assignment of routes to vehicles that has the shortest total distance\n # and such that the total amount a vehicle is carrying never exceeds its capacity. 
Capacities can be understood\n # as the max number of visits that a worker can do in a day\n demands = [1] * num_locations\n capacities = [MAX_VISIT_PER_DAY] * n_workers\n data[\"demands\"] = demands\n data[\"vehicle_capacities\"] = capacities\n\n return data", "title": "" }, { "docid": "3be157c5ebfdcaa65e68f4868bed1d72", "score": "0.4817514", "text": "def dataValidating(self, fields_required, fields_optional):\n for field in fields_required:\n if not self.parameters.get(field): ### Means must be provided and its value should not be empty.\n return \"ERROR: Field '%s' is required.\" % field\n\n legal_data = {}\n for field in fields_required + fields_optional:\n has_collected = 0\n if field in self.parameters:\n if self.fields_defination[field]['value_type'] == str:\n if not isinstance(self.parameters[field], str):\n return \"ERROR: Value of field '%s' must be a string.\" % field\n if not re.search(self.fields_defination[field]['value_regex'], self.parameters[field]):\n return \"ERROR: Value of field '%s' contains illegal characters.\" % field\n\n if self.fields_defination[field]['value_type'] == 'choices' and self.parameters[field] not in self.fields_defination[field]['value_choices']:\n return \"ERROR: Illegal value '%s' found for choices field '%s'.\" % (self.parameters[field],field)\n\n if self.fields_defination[field]['value_type'] == list:\n if not isinstance(self.parameters[field], list):\n return \"ERROR: Value of field '%s' must be a JSON list.\" % field\n if self.fields_defination[field]['item_type'] == str:\n for item in self.parameters[field]:\n if not isinstance(item, str):\n return \"ERROR: Item '%s' of list field '%s' must be a string.\" % (item, field)\n if not re.search(self.fields_defination[field]['item_regex'], item):\n return \"ERROR: Item '%s' of list field '%s' contains illegal characters.\" % (item,field)\n if self.fields_defination[field]['item_type'] == 'choices':\n for item in self.parameters[field]:\n if item not in self.fields_defination[field]['item_choices']:\n return \"ERROR: Illegal item '%s' found for item-choice-type list field '%s' .\" % (item,field)\n\n if self.fields_defination[field]['item_type'] == 'object_name_list':\n model = self.fields_defination[field]['item_model']\n object_list = []\n for n in self.parameters[field]:\n obj = self.getObject(model, n)\n if obj is None:\n return \"ERROR: Object not found for name '%s' in field '%s'.\" % (n, field)\n object_list.append(obj)\n legal_data[field] = object_list\n has_collected = 1\n\n if self.fields_defination[field]['value_type'] == 'object_name':\n model = self.fields_defination[field]['object_model']\n obj = self.getObject(model, self.parameters[field])\n if obj is None:\n return \"ERROR: Object not found for field '%s'.\" % field\n legal_data[field] = obj\n has_collected = 1\n\n if self.fields_defination[field]['value_type'] == dict:\n if not isinstance(self.parameters[field], dict):\n return \"ERROR: Value of field '%s' must be a JSON dict.\" % field\n if self.fields_defination[field]['key_type'] == str:\n for key in self.parameters[field]:\n if not isinstance(key, str):\n return \"ERROR: Key '%s' of dict field '%s' must be a string.\" % (key, field)\n if not re.search(self.fields_defination[field]['key_regex'], key):\n return \"ERROR: Key '%s' of dict field '%s' contains illegal characters.\" % (item,field)\n\n if self.fields_defination[field]['value_type'] == 'text':\n legal_data[field] = str(self.parameters[field])\n has_collected = 1\n\n if has_collected == 0:\n legal_data[field] = 
self.parameters[field]\n return legal_data", "title": "" }, { "docid": "efb5274a59e0699b2e5eef5f73622828", "score": "0.4815294", "text": "def __init__(self, data):\n for key in data:\n self.__setitem__(key, data[key])", "title": "" }, { "docid": "8c389288cd17314e107722a57c4de0a4", "score": "0.481486", "text": "def apply(self, problem):\n data = {}\n inv_data = {self.VAR_ID: problem.x.id}\n\n if not problem.formatted:\n problem = self.format_constraints(problem, None)\n data[s.PARAM_PROB] = problem\n data[self.DIMS] = problem.cone_dims\n inv_data[self.DIMS] = problem.cone_dims\n\n constr_map = problem.constr_map\n inv_data[self.EQ_CONSTR] = constr_map[Zero]\n inv_data[self.NEQ_CONSTR] = constr_map[NonNeg]\n len_eq = problem.cone_dims.zero\n\n c, d, A, b = problem.apply_parameters()\n data[s.C] = c\n inv_data[s.OFFSET] = d\n data[s.A] = -A[:len_eq]\n if data[s.A].shape[0] == 0:\n data[s.A] = None\n data[s.B] = b[:len_eq].flatten()\n if data[s.B].shape[0] == 0:\n data[s.B] = None\n data[s.G] = -A[len_eq:]\n if 0 in data[s.G].shape:\n data[s.G] = None\n data[s.H] = b[len_eq:].flatten()\n if 0 in data[s.H].shape:\n data[s.H] = None\n return data, inv_data", "title": "" }, { "docid": "2d85d87ec5546d8f198b10e8a521957e", "score": "0.48032743", "text": "def insert_datas_to_database(self, data_dict):", "title": "" }, { "docid": "09003b435e2224e3f7ba5ccb234bfdd6", "score": "0.47925553", "text": "def fill_in_parameters(\n edatas: List[amici.ExpData],\n problem_parameters: Dict[str, numbers.Number],\n scaled_parameters: bool,\n parameter_mapping: ParameterMapping,\n amici_model: AmiciModel,\n) -> None:\n if unused_parameters := (\n set(problem_parameters.keys()) - parameter_mapping.free_symbols\n ):\n warnings.warn(\n \"The following problem parameters were not used: \" + str(unused_parameters),\n RuntimeWarning,\n )\n\n for edata, mapping_for_condition in zip(edatas, parameter_mapping):\n fill_in_parameters_for_condition(\n edata,\n problem_parameters,\n scaled_parameters,\n mapping_for_condition,\n amici_model,\n )", "title": "" }, { "docid": "d76a8c854fd5b7cc8964c972c133f2ca", "score": "0.47874823", "text": "def __errors_in_incoming_data(self, dict_data_to_check, bool_full_unit):\n working_dict = copy.deepcopy(self.__dct_template)\n errors_found = False\n lst_of_fieldnames = working_dict.keys()\n for a_key in dict_data_to_check:\n if a_key in lst_of_fieldnames:\n if bool_full_unit:\n sub_key_dict = dict_data_to_check[a_key]\n if len(sub_key_dict) != 2:\n errors_found = True\n logging.error(\n 'When examining key -> ' + a_key + ' an unexpected number of entries were found. Two'\n ' entries expected. 
One for pushed values, and one'\n ' for current values.')\n if self.key_for_values_pushed in sub_key_dict:\n type_incoming = type(sub_key_dict[self.key_for_values_pushed])\n type_should_be = type(working_dict[a_key][self.key_for_values_pushed])\n if type_incoming != type_should_be:\n errors_found = True\n logging.error(\n 'When examining key -> ' + a_key + ' there was a Type mis-match between incoming'\n ' data labeled as \"previously pushed\",'\n ' and expected data.')\n else:\n errors_found = True\n logging.error(\n 'When examining key -> ' + a_key + ' there was no entry for pushed values in a full unit.')\n if self.key_for_values_current in sub_key_dict:\n type_incoming = type(sub_key_dict[self.key_for_values_current])\n type_should_be = type(working_dict[a_key][self.key_for_values_current])\n if type_incoming != type_should_be:\n errors_found = True\n logging.error(\n 'When examining key -> ' + a_key + ' there was a Type mis-match between incoming'\n ' data labeled as \"updated\", and expected data.')\n else:\n errors_found = True\n logging.error(\n 'When examining key -> ' + a_key + ' there was no entry for current values in a full unit.')\n else:\n # we are here if the 'full-unit' parameter was passed as False.\n type_incoming = type(dict_data_to_check[a_key])\n type_should_be = type(working_dict[a_key][self.key_for_values_current])\n if type_should_be != type_incoming:\n errors_found = True\n logging.error(\n 'The value for key ' + a_key + ' passed in a dictionary to create an Algolia Unit object'\n ' was not the expected type.')\n else:\n errors_found = True\n logging.error('The key -> ' + a_key + ' passed in a dictionary to create an Algolia Unit object'\n ' was not found in the list of Algolia fields.')\n return errors_found", "title": "" }, { "docid": "b7c6727d9b3420bf4070ccc7f959ca5b", "score": "0.4762842", "text": "def load_from_dict(question_dict):\n return question_factory(**question_dict)", "title": "" }, { "docid": "e24033853476e34f1d793e0313d5191c", "score": "0.4754326", "text": "def datatype(in_dict, join=None, parentclass=None):\n\tcontent={}\n\tallarrays={}\n\tsubditems = {}\n\tfor key,value in in_dict.items():\n\t\ta = key.split(sep)\n\t\ta0 = a[0]\n\t\tif a0 == 'ARRAY':\n\t\t\tname = a[1]\n\t\t\tif not allarrays.has_key(name):\n\t\t\t\tallarrays[name]=None\n\t\telif a0 == 'SEQ':\n\t\t\tif value is None:\n\t\t\t\tcontent[a[1]] = None\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tcontent[a[1]] = eval(value)\n\t\t\t\texcept SyntaxError:\n\t\t\t\t\tcontent[a[1]] = None\n\t\telif a0 == 'PICKLE':\n\t\t\t## contains a python pickle string,\n\t\t\t## convert it to newdict.AnyObject\n\t\t\ttry:\n\t\t\t\tvalue = value.tostring()\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\ttry:\n\t\t\t\tob = cPickle.loads(value)\n\t\t\texcept:\n\t\t\t\tob = None\n\t\t\tcontent[a[1]] = newdict.AnyObject(ob)\n\t\telif a0 == 'MRC':\n\t\t\t## set up a FileReference, to be used later\n\t\t\t## when we know the full path\n\t\t\tif value is None:\n\t\t\t\tcontent[a[1]] = None\n\t\t\telse:\n\t\t\t\tcontent[a[1]] = newdict.FileReference(value, pyami.mrc.read)\n\t\telif a0 == 'REF':\n\t\t\tfieldname = a[-1]\n\t\t\ttablename = a[-2]\n\t\t\t# By default, references are to the current database.\n\t\t\t# An extra parameter can indicate a different database.\n\t\t\tif len(a) == 4:\n\t\t\t\tmodulename = a[-3]\n\t\t\telse:\n\t\t\t\tmodulename = parentclass.__module__\n\t\t\tif value == 0 or value is None:\n\t\t\t\t### NULL reference\n\t\t\t\tcontent[fieldname] = None\n\t\t\telif fieldname in join:\n\t\t\t\t## 
referenced data is part of result\n\t\t\t\tjqikey = join[fieldname]\n\t\t\t\tcontent[fieldname] = data.UnknownData(jqikey)\n\t\t\telse:\n\t\t\t\t## not in result, but create reference\n\t\t\t\tdclassname = tablename\n\t\t\t\tdclass = findDataClass(modulename, dclassname)\n\t\t\t\t## If the data class does not exist, then this column should be ignored\n\t\t\t\tif dclass is None:\n\t\t\t\t\tcontinue\n\t\t\t\t# host and name should come from parent object\n\t\t\t\tcontent[fieldname] = data.DataReference(dataclass=dclass, dbid=value)\n\t\telif a0 == 'SUBD':\n\t\t\tsubditems[key] = value\n\t\telse:\n\t\t\tcontent[key]=value\n\n\t# build dictionaries\n\tallsubdicts=unflatDict(subditems, join)\n\tcontent.update(allsubdicts)\n\n\tfor matrix in allarrays:\n\t\tdm={}\n\t\tfor key,value in in_dict.items():\n\t\t\tl = re.findall('^ARRAY\\%s%s' %(sep,matrix,),key)\n\t\t\tif l:\n\t\t\t\tdm.update({key:value})\n\t\tallarrays[matrix]=dict2matrix(dm)\n\n\tcontent.update(allarrays)\n\treturn content", "title": "" }, { "docid": "5b7327085b7722d70d7fb2518e5ffb9e", "score": "0.47523037", "text": "def provide_pushed2algolia_data(self, dict_with_pushed_data):\n if self.__unit_loaded_or_created:\n errors_found = self.__errors_in_incoming_data(dict_with_pushed_data, bool_full_unit=False)\n if not errors_found:\n for a_key in dict_with_pushed_data:\n self.dict_algolia_unit[a_key][self.key_for_values_pushed] = dict_with_pushed_data[a_key]\n else:\n logging.error('Unable to update the Algolia unit with PUSHED data. Errors found.')\n else:\n logging.error('Unable to update the Algolia unit with PUSHED data. Object was not previously'\n ' created or updated.')", "title": "" }, { "docid": "b9117d0f23673393c94f5fcc579ab25f", "score": "0.47458377", "text": "def __init__(self, number_file_read=2):\n\t\tdata = DataCollection()\n\t\tdict_ = data.read_file(number_file_read)\n\t\t\n\t\t# Convert the data from a dict to pandas df\n\t\tself.df = self.convert_to_data_frame(dict_)\n\t\t\n\t\t# Encoding string info to numeric for model building\n\t\tself.country_code_dict = {}\n\t\tself.town_dict = {}\n\t\tself.sector_dict = {}\n\t\tself.theme_dict = {}\n\t\tself.geo_level_dict = {}\n\t\tself.activity_dict = {}\n\t\tself.repayment_interval_dict = {}\n\t\tself.status_dic = {}\n\n\t\t# Decoding numeric values to string values (i.e. 
country, activity, etc)\n\t\tself.country_code_list = list(self.df.country_code.unique())\n\t\tself.town_list= list(self.df.town.unique())\n\t\tself.sector_list= list(self.df.sector.unique())\n\t\tself.theme_list= list(self.df.theme.unique())\n\t\tself.geo_level_list= list(self.df.geo_level.unique())\n\t\tself.activity_list = list(self.df.activity.unique())\n\t\tself.repayment_interval_list = list(self.df.repayment_interval.unique())\n\t\tself.status_list=(self.df.status.unique())\n\n\t\t# This will fill the dictinary to encode string values\t\t\n\t\tself.fill_dictionarys()\n\t\tself.change_all_variable()", "title": "" }, { "docid": "c8aba80396f2e80c5a1ec0d9b4344268", "score": "0.47450984", "text": "def init_data(self):\n for _ in AREA_UNITS:\n _area_units.get_or_create(id=_[0], name=_[1])\n for _ in LINEAR_UNITS:\n _linear_units.get_or_create(id=_[0], name=_[1])\n for _ in DOCUMENT_SIZES:\n _document_sizes.get_or_create(id=_[0], name=_[1])\n for _ in DOCUMENT_TYPES:\n _document_types.get_or_create(id=_[0], name=_[1])\n for _ in ROLES:\n _roles.get_or_create(id=_[0], name=_[1])\n for _ in WORKS_LOCATIONS:\n _works_locations.get_or_create(id=_[0], name=_[1])\n for _ in BASEMENT_WORKS_TYPES:\n _basement_works_types.get_or_create(id=_[0], name=_[1])\n for _ in ROOF_WORKS_TYPES:\n _roof_works_types.get_or_create(id=_[0], name=_[1])\n for _ in BORDER_WORKS_TYPES:\n _border_works_types.get_or_create(id=_[0], name=_[1])\n for _ in ACCESS_WORKS_TYPES:\n _access_works_types.get_or_create(id=_[0], name=_[1])\n for _ in ACCESS_WORKS_SCOPES:\n _access_works_scopes.get_or_create(id=_[0], name=_[1])\n for _ in PARKING_WORKS_SCOPES:\n _parking_works_scopes.get_or_create(id=_[0], name=_[1])\n for _ in EQUIPMENT_WORKS_TYPES:\n _equipment_works_types.get_or_create(id=_[0], name=_[1])\n for _ in EQUIPMENT_WORKS_CONSERVATION_TYPES:\n _equipment_works_conservation_types.get_or_create(id=_[0], name=_[1])\n for _ in GATES_FENCES_WALLS_TYPES:\n _gates_fences_walls_types.get_or_create(id=_[0], name=_[1])\n for _ in DECLARATIONS:\n _declarations.get_or_create(id=_[0], name=_[1])\n for _ in OWNERSHIP_TYPES:\n _ownership_types.get_or_create(id=_[0], name=_[1])\n for _ in APPLICATION_STATUSES:\n _application_statuses.get_or_create(id=_[0], name=_[1])\n\n self._add_materials()\n\n try:\n self._add_users()\n except Exception as e:\n console.error(e)\n\n from smpa.app import config\n if config.base == 'development' or config.base == 'test':\n try:\n self._dummy_data()\n except Exception as e:\n console.error(e)\n raise\n else:\n console.success('Created dummy data')\n\n console.success('Created default data')", "title": "" }, { "docid": "3d9f858b8ee4d59f84613af6d7586bc8", "score": "0.47409302", "text": "def read_problems_from_github(\n dataset: str, _id: str = None, base_url: str = None\n) -> Union[Problem, Mapping[str, Problem]]:\n\n if base_url is None:\n base_url = (\n \"https://raw.githubusercontent.com/asi-uniovi/malloovia\"\n \"/units/tests/test_data/problems/\"\n )\n\n url = \"{}/{}.yaml\".format(base_url, dataset)\n with urllib.request.urlopen(url) as stream:\n data = yaml.safe_load(stream)\n\n problems = problems_from_dict(data, dataset)\n\n if _id is None:\n return problems\n\n return problems[_id]", "title": "" }, { "docid": "e28b256436b62818308b26972365bbdb", "score": "0.47369805", "text": "def test_datafile_set_and_get_valid(self):\n data = [\n ((dict_flat_set_min(), object_data_min()), dict_flat_get_min()),\n ((dict_flat_set_full(), object_data_full()), dict_flat_get_full()),\n (({}, {}), {}),\n ]\n\n pdv = 
data_object()\n pdv.set(dict_flat_set_min())\n assert isinstance(pdv.get(), dict)\n\n for input, data_eval in data:\n pdv = data_object()\n pdv.set(input[0])\n data = pdv.get()\n for key, val in data_eval.items():\n assert data[key] == input[1][key] == data_eval[key]\n assert len(data) == len(input[1]) == len(data_eval)", "title": "" }, { "docid": "6ad549d59aca10f13275f28f8804c7ba", "score": "0.47329757", "text": "def __init__(self):\n self.data = {}\n self.lookup = []", "title": "" }, { "docid": "0c901def9d229d6d88b88ae7167faebe", "score": "0.47314242", "text": "def get_structure_from_data(self, TrainedData):", "title": "" }, { "docid": "b8bc552ed2fe3d8b16419ae3619f98c6", "score": "0.47311023", "text": "def __init__(self, data=DataStore()):\n self.data = data\n self.inbound_trucks = OrderedDict()\n self.outbound_trucks = OrderedDict()\n self.compound_trucks = OrderedDict()\n self.inbound_data = {}\n self.outbound_data = {}\n self.compound_data = {}\n self.truck_data = {}\n self.number_of_goods = self.data.number_of_goods\n self.number_of_inbound_trucks = self.data.number_of_inbound_trucks\n self.number_of_outbound_trucks = self.data.number_of_outbound_trucks\n self.number_of_compound_trucks = self.data.number_of_compound_trucks\n self.number_of_trucks = self.data.number_of_inbound_trucks + self.data.number_of_outbound_trucks + self.data.number_of_compound_trucks\n self.number_of_coming_trucks = self.data.number_of_inbound_trucks + self.data.number_of_compound_trucks\n self.number_of_going_trucks = self.data.number_of_outbound_trucks + self.data.number_of_compound_trucks\n self.truck_dictionary = {'inbound': self.inbound_trucks,\n 'outbound': self.outbound_trucks,\n 'compound': self.compound_trucks\n }\n self.all_trucks = None\n self.sequence_bool = False\n self.solution_finish = False\n\n self.number_of_shipping_doors = self.data.number_of_shipping_doors\n self.number_of_receiving_doors = self.data.number_of_receiving_doors\n\n self.alpha = 0\n self.gamma = 0\n self.tightness_factor = 0\n self.inbound_mu = 0\n self.outbound_mu = 0\n self.product_per_inbound_truck = 0\n self.product_per_outbound_truck = 0\n\n # calculate data\n self.calculate_mu()\n self.calculate_product_per_truck()\n\n self.finish = False\n\n # create trucks\n self.inbound_data['arrival_time'] = self.data.inbound_arrival_time\n self.inbound_data['mu'] = self.inbound_mu\n self.inbound_data['product_per_truck'] = self.product_per_inbound_truck\n\n self.outbound_data['arrival_time'] = self.data.outbound_arrival_time\n self.outbound_data['mu'] = self.outbound_mu\n self.outbound_data['product_per_truck'] = self.product_per_outbound_truck\n\n self.compound_data['arrival_time'] = self.data.inbound_arrival_time\n self.compound_data['mu'] = self.inbound_mu\n self.compound_data['transfer_time'] = self.data.transfer_time\n self.compound_data['inbound_product_per_truck'] = self.product_per_inbound_truck\n self.compound_data['outbound_product_per_truck'] = self.product_per_outbound_truck\n\n self.truck_data['loading_time'] = self.data.loading_time\n self.truck_data['changeover_time'] = self.data.changeover_time\n self.truck_data['makespan_factor'] = self.data.makespan_factor # not used anthwere!!\n self.truck_data['alpha'] = self.alpha\n self.truck_data['gamma'] = self.gamma\n self.truck_data['tightness_factor'] = self.tightness_factor\n\n self.station = Station(self.data.good_transfer_time)\n self.create_trucks()\n\n self.current_data_set = 0\n\n # init model solution\n self.current_time = 0\n self.time_step = 1", "title": "" }, { 
"docid": "54e89846d94e3bbca055053b89e22a62", "score": "0.47303486", "text": "def add_solution(self, key, data):\n\t\tassert isinstance(key, str) and sparse.issparse(data) and data.shape == (1, self.num_variables), \"spurious insertion: key = %s, data = %s\" % (key, data)\n\t\tassert key not in self.index, \"duplicate data for key %s\" % key\n\t\tself.index[key] = self.num_operators\n\t\tself.matrix = sparse.vstack([self.matrix, expect_real(data)])", "title": "" }, { "docid": "cc4d2ae77bfcd11baf0a6a79847d3c17", "score": "0.4725835", "text": "def __init__(self,data,geodict):\n m,n = data.shape\n if m != geodict['nrows'] or n != geodict['ncols']:\n raise DataSetException('Input geodict does not match shape of input data.')\n self._data = data\n self._geodict = geodict", "title": "" }, { "docid": "fc3a0c73572aa5ec2c30403715c6c052", "score": "0.47235635", "text": "def prepare_data(\n input_path: str,\n output_dir: str,\n bpe_dir: str,\n dataset: str,\n problem: str,\n model_type: str,\n no_tokenize: bool,\n):\n logger.info(\n f\"Running data preparation with input path {input_path}, output dir {output_dir} and dataset {dataset} \"\n f\"for problem {problem} and model type {model_type}.\"\n )\n\n if problem == \"Requirements_TO_TargetProduct\":\n problem = Problem.Requirements_TO_TargetProduct\n elif problem == \"TargetProduct_TO_Requirements\":\n problem = Problem.TargetProduct_TO_Requirements\n elif problem == \"Requirements_TO_TargetProductAndTasks\":\n problem = Problem.Requirements_TO_TargetProductAndTasks\n elif problem == \"TargetProductAndRequirements_TO_Tasks\":\n problem = Problem.TargetProductAndRequirements_TO_Tasks\n elif problem == \"TargetProductAndRequirementsAndTasks\":\n problem = Problem.TargetProductAndRequirementsAndTasks\n elif problem == \"RequirementsAndTargetProductAndTasks\":\n problem = Problem.RequirementsAndTargetProductAndTasks\n elif problem == \"RequirementsAndTargetProductShuffle\":\n problem = Problem.RequirementsAndTargetProductShuffle\n\n # Get dataset iterable\n # + example parser to Procedure\n load_data, parse_procedure, tokenizer = LOADER_AND_PARSER_AND_TOKENIZER[dataset]\n dataset_iterable = load_data(input_path)\n\n # Create output directory\n # (e.g.: output_dir/Requirements_TO_TargetProduct/Recipe1M/fairseq)\n output_dir = Path(output_dir) / problem.name / dataset / model_type\n if output_dir.exists():\n raise FileExistsError(f\"Directory {str(output_dir)} already exists.\")\n output_dir.mkdir(parents=True, exist_ok=True)\n logger.info(f\"Output directory: {output_dir}.\")\n\n if model_type == \"onmt\":\n raise NotImplementedError(\"OpenNMT models are not yet supported.\")\n elif model_type == \"huggingface\":\n raise NotImplementedError(\"HuggingFace models are not yet supported.\")\n elif model_type == \"fairseq\":\n # Prepare data\n if problem in TASK_TO_PROBLEMS[\"language_modeling\"]:\n langs = [problem.name]\n else:\n langs = problem.name.replace(\"_\", \"\").split(\"TO\")\n\n with contextlib.ExitStack() as stack:\n partition_to_files = {\n part: [\n stack.enter_context(open(output_dir / f\"{part}.{lang}\", \"wt\"))\n for lang in langs\n ]\n for part in PARTITIONS\n }\n\n for i, entry in enumerate(tqdm(dataset_iterable)):\n # Parse dataset entry to Procedure\n if \"Shuffle\" in problem.name:\n proc, partition, curr_problem = parse_procedure(entry)\n curr_problem = Problem[curr_problem]\n else:\n curr_problem = problem\n proc, partition = parse_procedure(entry)\n\n # Convert Procedure to translation example\n example = 
data.procedure_to_example(proc, curr_problem)\n\n # Tokenize example\n if not no_tokenize:\n example = data.tokenize_example(example, tokenizer)\n\n # Write to files\n partition_to_files[partition][0].write(f\"{example.src}\\n\")\n if problem in TASK_TO_PROBLEMS[\"translation\"]:\n partition_to_files[partition][1].write(f\"{example.tgt}\\n\")\n\n # BPE encode\n for part in PARTITIONS:\n inputs = [output_dir / f\"{part}.{lang}\" for lang in langs]\n outputs = [\n output_dir\n / f'{part}.bpe{\".\" + lang if problem is not Problem.TargetProductAndRequirementsAndTasks else \"\"}'\n for lang in langs\n ]\n logger.info(f\"Encoding {inputs}, {outputs}\")\n # encode\n tok_args = Namespace(\n encoder_json=f\"{bpe_dir}/encoder.json\",\n vocab_bpe=f\"{bpe_dir}/vocab.bpe\",\n inputs=inputs,\n outputs=outputs,\n keep_empty=True,\n workers=60,\n )\n fairseq_encode(tok_args)\n # store decoded for reference\n tok_args.inputs = outputs\n tok_args.outputs = [f\"{o}.decoded\" for o in outputs]\n fairseq_encode(tok_args, decode=True)\n\n # Preprocess/binarize\n from fairseq_cli import preprocess\n from fairseq.options import get_preprocessing_parser\n\n parser = get_preprocessing_parser()\n\n preprocess_args = parser.parse_args([]) # get default args\n if problem in TASK_TO_PROBLEMS[\"language_modeling\"]:\n preprocess_args.task = \"language_modeling\"\n preprocess_args.only_source = True\n else:\n preprocess_args.task = \"translation\"\n preprocess_args.source_lang = langs[0]\n preprocess_args.target_lang = langs[1]\n preprocess_args.joined_dictionary = True\n\n # Pretrained BART:\n # preprocess_args.srcdict = '.../ckpts/procgen/v1/processed/Requirements_TO_TargetProductAndTasks/Recipe1M/fairseq/bart.large.cnn/dict.source.txt'\n # preprocess_args.tgtdict = '.../ckpts/procgen/v1/processed/Requirements_TO_TargetProductAndTasks/Recipe1M/fairseq/bart.large.cnn/dict.target.txt'\n\n # preprocess_args.destdir = output_dir / \"data-bin/tokenized-gpt2\"\n preprocess_args.destdir = output_dir / \"data-bin/tokenized\"\n\n preprocess_args.trainpref = str(output_dir / \"train\") # train.bpe train\n preprocess_args.validpref = str(output_dir / \"valid\") # valid.bpe valid\n preprocess_args.testpref = str(output_dir / \"test\") # test.bpe test\n\n # preprocess_args.workers = 120\n\n preprocess.main(preprocess_args)\n\n logger.info(f\"Wrote output to {output_dir}: {list(output_dir.iterdir())}\")", "title": "" }, { "docid": "24fbf716968bca17ef7b3ff74f9c3819", "score": "0.47217187", "text": "def create_from_dict(self, dicJob: dict):\n self.set_source(dicJob['Source'])\n for i in dicJob['Destinations']:\n self.add_destination(i)\n self.set_options(dicJob['Options'])\n self.add_dirs_to_skip(dicJob['Folders to skip'])\n self.add_filter(dicJob['File types to filter'])\n try:\n self.countFiles = dicJob['Number of files to copy']\n except KeyError:\n pass\n try:\n self.sizeFiles = dicJob['Total file size']\n except KeyError:\n pass", "title": "" }, { "docid": "1044100f8304cd8abd5c7ea9c10f71e3", "score": "0.4711087", "text": "def process_problem_type1(problem, error_param):\r\n prob = problem.copy()\r\n #process the first premise by calling understand premise and model_start\r\n model1 = model_start(understand_premise(prob[0], error_param[0]), error_param[1])\r\n #print(model1)\r\n # the second premise will lead to an insert function\r\n model2 = model_insert(understand_premise(prob[1], error_param[2]), model1, error_param[3])\r\n #print(model2)\r\n # the third premise will also lead to an insert\r\n model3 = 
model_insert(understand_premise(prob[2], error_param[4]), model2, error_param[5])\r\n #print(model3)\r\n if error_param[6]:\r\n return verbal_memory(prob[:3], prob[3])\r\n return answer_question(model3, prob[3])", "title": "" }, { "docid": "ff8fa27c67ce6c24cda0ae30111b7d18", "score": "0.4705569", "text": "def testValidData(self):\n toml_data = {\n \"area1\": {\"hosts\": [\"host1\", \"host2\"]},\n \"area2\": {\"hosts\": [\"host4\", \"host3\"]}\n }\n check_single_assignment(toml_data)", "title": "" }, { "docid": "378303eb5abacfeaa0838b4792449fae", "score": "0.4704829", "text": "def _validate_service_config_problem(problem: configuration_pb2.ProblemSpec,\n error_msgs: List[str]) -> None:\n problem_kind = problem.WhichOneof('problems')\n if problem_kind is None:\n error_msgs.append('problem type is not specified.')\n return\n\n if problem_kind == 'classification':\n classification = problem.classification\n if not classification.class_names:\n error_msgs.append(\n 'classification: class_names must be specified and non-empty.')\n\n if _is_field_present(classification, 'ground_truth_column_spec', error_msgs,\n 'classification: '):\n _verify_field_specified(classification.ground_truth_column_spec, 'name',\n error_msgs,\n 'classification.ground_truth_column_spec: ')\n\n if _is_field_present(classification, 'prediction_score_column_spec',\n error_msgs, 'classification: '):\n _verify_field_specified(classification.prediction_score_column_spec,\n 'name', error_msgs,\n 'classification.prediction_score_column_spec: ')\n if classification.type == configuration_pb2.ClassificationProblemSpec.UNKNOWN:\n error_msgs.append('classification: type must be specified.')\n return\n\n if problem_kind == 'regression':\n regression = problem.regression\n if _is_field_present(regression, 'ground_truth_column_spec', error_msgs,\n 'regression: '):\n _verify_field_specified(regression.ground_truth_column_spec, 'name',\n error_msgs,\n 'regression.ground_truth_column_spec: ')\n\n if _is_field_present(regression, 'prediction_score_column_spec', error_msgs,\n 'regression: '):\n _verify_field_specified(regression.prediction_score_column_spec, 'name',\n error_msgs,\n 'regression.prediction_score_column_spec: ')\n return\n\n if problem_kind == 'forecasting':\n forecasting = problem.forecasting\n if _is_field_present(forecasting, 'ground_truth_column_spec', error_msgs,\n 'forecasting: '):\n _verify_field_specified(forecasting.ground_truth_column_spec, 'name',\n error_msgs,\n 'forecasting.ground_truth_column_spec: ')\n\n if _is_field_present(forecasting, 'prediction_score_column_spec',\n error_msgs, 'forecasting: '):\n _verify_field_specified(forecasting.prediction_score_column_spec, 'name',\n error_msgs,\n 'forecasting.prediction_score_column_spec: ')\n\n if forecasting.type == configuration_pb2.ForecastingProblemSpec.UNKNOWN:\n error_msgs.append('forecasting: type must be specified.')\n\n if forecasting.type == configuration_pb2.ForecastingProblemSpec.QUANTILE:\n if not forecasting.quantiles:\n error_msgs.append(\n 'quantile forecasting: quantiles must be specified and non-empty.')\n if not all((v >= 0 and v <= 1) for v in forecasting.quantiles):\n raise TypeError(\n '{}: all values are expected to be between 0 and 1 but are not.'\n .format(forecasting.quantiles))\n if forecasting.HasField(\n 'options') and forecasting.options.enable_point_evaluation:\n index = forecasting.options.point_evaluation_quantile_index\n if index < 0 or index >= len(forecasting.quantiles):\n error_msgs.append('quantile forecasting: invalid quantile 
index.')", "title": "" }, { "docid": "1bf6ae2878be6ebe5faf6b786878e2ec", "score": "0.4701491", "text": "def read_problem(filename):\n with open(filename, 'r') as problem_file:\n problem = [['rb' if c == '.' else c for c in line if c in '.rb']\n for line in problem_file]\n size = len(problem)\n assert all(len(v) == size for v in problem)\n cells = {(r, c): problem[r][c] for r in range(size) for c in range(size)}\n problem_dict = {'size': size, 'variables': cells, 'state': 'unsolved'}\n return problem_dict", "title": "" }, { "docid": "d9628ab783d085136103136a99f64187", "score": "0.46996585", "text": "def __init__(self, inInputLines):\n\t\tpeople = []\n\t\tfor peopPos in range(0, len(inInputLines[0])):\n\t\t\tpeople.append(inInputLines[0][peopPos])\n\n\t\tself.__problem = {\n\t\t\t'people': people,\n\t\t\t'transforms': []}\n\n\t\tfor line in inInputLines[1:]:\n\t\t\ttransformsParsed = {}\n\t\t\ttransforms = line.split(',')\n\t\t\tfor transform in transforms:\n\t\t\t\tinfo = transform.split('=>')\n\t\t\t\ttargets = []\n\t\t\t\tfor targetPos in range(0, len(info[1])):\n\t\t\t\t\ttargets.append(info[1][targetPos])\n\n\t\t\t\ttransformsParsed[info[0]] = targets\n\n\t\t\tself.__problem['transforms'].append(transformsParsed)\n\n\t\tif self.__debug:\n\t\t\tprint(self.__problem)", "title": "" }, { "docid": "40049b1a397b1e3512d7de92f2a6f152", "score": "0.46995118", "text": "def __init__(self, data):\n self._units = {u.unit_id: u.name for u in data.units}\n self._unit_stats = {u.unit_id: u for u in data.units}\n self._upgrades = {a.upgrade_id: a for a in data.upgrades}\n self._abilities = {a.ability_id: a for a in data.abilities}\n self._general_abilities = {a.remaps_to_ability_id\n for a in data.abilities\n if a.remaps_to_ability_id}\n\n for a in six.itervalues(self._abilities):\n # for a in itervalues(self._abilities):\n a.hotkey = a.hotkey.lower()", "title": "" }, { "docid": "a40aab11b13dadc975835c9527661186", "score": "0.4699181", "text": "def process(self, *args, **kwargs):\n data = self.load(as_dict=True, *args, **kwargs)\n num_exchanges = sum([\n len(obj.get(\"exchanges\", []))\n for obj in data.values()\n if obj.get(\"type\", \"process\") == \"process\"\n ])\n\n gl = config.global_location\n\n # Create geomapping array\n count = 0\n arr = np.zeros((len(data), ), dtype=self.dtype_fields_geomapping + self.base_uncertainty_fields)\n for key in sorted(data.keys(), key=lambda x: x[1]):\n if data[key].get('type', 'process') == 'process':\n arr[count] = (\n mapping[key],\n geomapping[data[key].get(\"location\", gl) or gl],\n MAX_INT_32, MAX_INT_32,\n 0, 1, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, False\n )\n count += 1\n\n arr.sort(order=self.dtype_field_order(\n self.dtype_fields_geomapping + self.base_uncertainty_fields\n ))\n\n with open(self.filepath_geomapping(), \"wb\") as f:\n pickle.dump(arr[:count], f, protocol=pickle.HIGHEST_PROTOCOL)\n\n arr = np.zeros((num_exchanges + len(data), ), dtype=self.dtype)\n count = 0\n\n for key in data:\n production_found = False\n if data[key].get('type', 'process') != \"process\":\n continue\n for exc in data[key].get(\"exchanges\", []):\n\n if \"amount\" not in exc or \"input\" not in exc:\n raise InvalidExchange\n if \"type\" not in exc:\n raise UntypedExchange\n if np.isnan(exc['amount']) or np.isinf(exc['amount']):\n raise ValueError(\"Invalid amount in exchange {}\".format(data))\n\n if exc['type'] == 'production':\n production_found = True\n try:\n arr[count] = (\n mapping[exc[\"input\"]],\n mapping[key],\n MAX_INT_32,\n MAX_INT_32,\n 
TYPE_DICTIONARY[exc[\"type\"]],\n exc.get(\"uncertainty type\", 0),\n exc[\"amount\"],\n exc[\"amount\"] \\\n if exc.get(\"uncertainty type\", 0) in (0,1) \\\n else exc.get(\"loc\", np.NaN),\n exc.get(\"scale\", np.NaN),\n exc.get(\"shape\", np.NaN),\n exc.get(\"minimum\", np.NaN),\n exc.get(\"maximum\", np.NaN),\n exc[\"amount\"] < 0\n )\n\n except KeyError:\n raise UnknownObject((\"Exchange between {} and {} is invalid \"\n \"- {} is unknown (i.e. doesn't exist as a process dataset)\"\n ).format(exc[\"input\"], key, exc[\"input\"])\n )\n\n count += 1\n if not production_found:\n # Add amount produced for each process (default 1)\n arr[count] = (\n mapping[key], mapping[key],\n MAX_INT_32, MAX_INT_32, TYPE_DICTIONARY[\"production\"],\n 0, 1, 1, np.NaN, np.NaN, np.NaN, np.NaN, False\n )\n count += 1\n\n # Automatically set 'depends'\n self.metadata['depends'] = self.find_dependents()\n self._metadata.flush()\n\n # The array is too big, because it can include a default production\n # amount for each activity. Trim to actual size.\n arr = arr[:count]\n arr.sort(order=self.dtype_field_order())\n np.save(self.filepath_processed(), arr, allow_pickle=False)", "title": "" }, { "docid": "adae933f9a277d560ac370f46a6d16e5", "score": "0.46864626", "text": "def map_data(data, path):\n tb_Worker = dict()\n tb_WorkerRace = dict()\n # mapping data from patient resources\n if data['resourceType'] == 'Patient':\n # map the Worker ID\n tb_Worker['WorkerID'] = data['id']\n tb_WorkerRace['WorkerID'] = data['id']\n\n # map key fields for table Worker\n # StudyCode fixes as 'NFR'\n tb_Worker['StudyCode'] = 'NFR'\n # GenderCode read from gender field in patient FHIR standard\n tb_Worker['GenderCode'] = data['gender']\n # Source file is the path the file was read in from \n tb_Worker['SourceFile'] = path\n tb_Worker['ImportCode'] = 'NFR_Script'\n\n # map key fields for table WorkerRace\n tb_WorkerRace['StudyCode'] = 'NFR'\n tb_WorkerRace['RaceCode'] = '0000'\n tb_WorkerRace['SourceFile'] = path\n tb_WorkerRace['ImportCode'] = 'NFR_Script'\n\n # For address we read in the most recent address in the patient FHIR standard\n if 'address' in data:\n tb_Worker['CurrentResidentialStreet'] = data['address'][-1]['line'][0]\n tb_Worker['CurrentResidentialCity'] = data['address'][-1]['city']\n tb_Worker['CurrentResidentialStateProv'] = data['address'][-1]['state']\n if 'postalCode' in data['address'][-1]:\n tb_Worker['CurrentResidentialPostalCode'] = data['address'][-1]['postalCode']\n tb_Worker['CurrentResidentialCountry'] = data['address'][-1]['country']\n # Mapping logic for name:\n # If only one name provided, use it as their primary name (not alias)\n # else >1 name provided check to see if use exists\n # If it does, look for official (primary) and nickname (alias)\n # If primary name wasn't found, set it to the last value in name array\n if len(data['name']) == 1:\n tb_Worker['LastName'] = data['name'][-1]['family']\n if len(data['name'][-1]['given']) > 1:\n tb_Worker['FirstName'] = data['name'][-1]['given'][0]\n tb_Worker['MiddleName'] = data['name'][-1]['given'][1]\n else:\n tb_Worker['FirstName'] = data['name'][-1]['given'][0]\n else:\n for name in data['name']:\n if 'use' in name:\n if name['use'] == \"official\":\n tb_Worker['LastName'] = name['family']\n if len(name['given']) > 1:\n tb_Worker['FirstName'] = name['given'][0]\n tb_Worker['MiddleName'] = name['given'][1]\n else:\n tb_Worker['FirstName'] = name['given'][0]\n elif name['use'] == \"nickname\":\n tb_Worker['LastNameAlias'] = name['family']\n if 
len(name['given']) > 1:\n tb_Worker['FirstNameAlias'] = name['given'][0]\n tb_Worker['MiddleNameAlias'] = name['given'][1]\n else:\n tb_Worker['FirstNameAlias'] = name['given'][0]\n if 'FirstName' not in tb_Worker:\n tb_Worker['LastName'] = data['name'][-1]['family']\n if len(data['name'][-1]['given']) > 1:\n tb_Worker['FirstName'] = data['name'][-1]['given'][0]\n tb_Worker['MiddleName'] = data['name'][-1]['given'][1]\n else:\n tb_Worker['FirstName'] = data['name'][-1]['given'][0]\n # Phone number and email are both found in telecom section of patient FHIR data standard\n if 'telecom' in data:\n for telecom in data['telecom']:\n if telecom['system'] == 'phone' and telecom['use'] == 'mobile':\n tb_Worker['MobilePhoneNumber'] = telecom['value']\n elif telecom['system'] == 'email':\n tb_Worker['PrimaryEmailAddress'] = telecom['value']\n # We read in Birth Data and break it into Month, Day, Year\n birthDate = datetime.datetime.strptime(data['birthDate'], '%Y-%m-%d')\n # Database table will convert them into birthDate automatically\n tb_Worker['BirthMonth'] = birthDate.strftime(\"%m\")\n tb_Worker['BirthDay'] = birthDate.strftime(\"%d\")\n tb_Worker['Birthyear'] = birthDate.strftime(\"%Y\")\n # We read SSN from identifier section of FHIR Patient standard\n for x in range(len(data['identifier'])):\n if data['identifier'][x]['system'] == \"http://hl7.org/fhir/sid/us-ssn\":\n tb_Worker['SSN'] = data['identifier'][x]['value']\n if 'extension' in data:\n for x in range(len(data['extension'])):\n # We read in us-core-ethinicy extention for paitent ethnicity\n if data['extension'][x]['url'] == \"http://hl7.org/fhir/us/core/StructureDefinition/us-core-ethnicity\":\n for i in range(len(data['extension'][x]['extension']) - 1, -1, -1):\n if 'valueCoding' in data['extension'][x]['extension'][i].keys():\n tb_Worker['EthnicityCode'] = data['extension'][x]['extension'][i]['valueCoding']['code']\n break\n # We read in patient-birthPlace extention for paitent birth place\n elif data['extension'][x]['url'] == \"http://hl7.org/fhir/StructureDefinition/patient-birthPlace\":\n tb_Worker['BirthPlaceCountry'] = data['extension'][x]['valueAddress']['country']\n tb_Worker['BirthPlaceCity'] = data['extension'][x]['valueAddress']['city']\n tb_Worker['BirthPlaceStateProv'] = data['extension'][x]['valueAddress']['state']\n # We read in us-core-race extention for paitent race\n elif data['extension'][x]['url'] == \"http://hl7.org/fhir/us/core/StructureDefinition/us-core-race\":\n for i in range(len(data['extension'][x]['extension']) - 1, -1, -1):\n if 'valueCoding' in data['extension'][x]['extension'][i].keys():\n tb_WorkerRace['RaceCode'] = data['extension'][x]['extension'][i]['valueCoding']['code']\n break\n\n # search for cancer observation of the patient\n observation_found, observation_data = search_observation(data['id'])\n tb_Worker['DiagnosedWithCancer'] = observation_data['DiagnosedWithCancer']\n if observation_found:\n # We update last patient observation date if cancer observation found\n tb_Worker['LastObservedMonth'] = observation_data['LastObservedMonth']\n tb_Worker['LastObservedDay'] = observation_data['LastObservedDay']\n tb_Worker['LastObservedyear'] = observation_data['LastObservedyear']\n # determine if this is an update or an insert,\n tb_Worker['isUpdate'] = False\n\n elif data['resourceType'] == 'Observation':\n # find the patient attached to the observation\n subjectId = data['subject']['reference'].split('/')[1]\n observation_data = create_observation_dict(data)\n tb_Worker['WorkerID'] = subjectId\n 
tb_Worker['DiagnosedWithCancer'] = observation_data['DiagnosedWithCancer']\n tb_Worker['LastObservedMonth'] = observation_data['LastObservedMonth']\n tb_Worker['LastObservedDay'] = observation_data['LastObservedDay']\n tb_Worker['LastObservedyear'] = observation_data['LastObservedyear']\n tb_Worker['isUpdate'] = True\n else:\n print('Cannot handle this resource type yet')\n\n return tb_Worker, tb_WorkerRace", "title": "" }, { "docid": "a15b3c827583d740128a21b3e051e67b", "score": "0.46835592", "text": "def infotodict(seqinfo):\n t1 = create_key('sub-{subject}/anat/sub-{subject}_T1w')\n #t2 = create_key('anat/sub-{subject}_T2w')\n rest = create_key('sub-{subject}/func/sub-{subject}_task-rest_acq-{acq}_run-{item:02d}_bold')\n dwi_PA = create_key('sub-{subject}/dwi/sub-{subject}_acq-PA_run-{item:02d}_dwi')\n dwi_AP = create_key('sub-{subject}/dwi/sub-{subject}_acq-AP_run-{item:02d}_dwi')\n fmap_diff = create_key('sub-{subject}/fmap/sub-{subject}_run-{item:02d}_phasediff')\n fmap_magnitude = create_key('sub-{subject}/fmap/sub-{subject}_run-{item:02d}_magnitude')\n\n #info = {t1:[], t2:[], rest:[], face:[], gamble:[], conflict:[], dwi:[], fmap_rest:[], fmap_dwi:[]}\n info = {t1:[],rest:[],dwi_PA:[],dwi_AP:[],fmap_diff:[],fmap_magnitude:[]}\n for idx, s in enumerate(seqinfo):\n #anat\n if ('mp2rage' in s.protocol_name):\n if (s.dim4 == 1) and ('UNI-DEN' in (s.series_description).strip()):\n info[t1] = [s.series_id]\n\n #func \n if ('bold' in s.protocol_name):\n if (s.dim4 == 360) and ('mbep2d_bold_mb3_p3_AP_rs' in (s.series_description).strip()):\n info[rest].append({'item': s.series_id, 'acq': 'AP'})\n #if (s.dim4 == 1) and ('mbep2d_bold_mb3_p3_PA' == s.series_description):\n # info[rest].append({'item': s.series_id, 'dir': 'PA'})\n\n #dwi\n if ('diff' in s.protocol_name):\n #if (s.dim4 == 66) and ('mbep2d_diff_b1000_AP' == s.series_description):\n if ( s.dim4 > 1 and ('mbep2d_diff_b1000_AP' == (s.series_description).strip()) ) :\n info[dwi_AP].append({'item': s.series_id})\n #if (s.dim4 == 66) and ('mbep2d_diff_b1000_PA' == s.series_description):\n if ( s.dim4 > 1 and ('mbep2d_diff_b1000_PA' == (s.series_description).strip()) ):\n info[dwi_PA].append({'item': s.series_id})\n\n #field map \n if ('field_mapping' in s.protocol_name): \n if (s.dim4 == 1) and ('gre_field_mapping' == (s.series_description).strip()):\n if(s.dim3 == 64):\n info[fmap_diff].append({'item': s.series_id})\n if(s.dim3 == 128):\n info[fmap_magnitude].append({'item': s.series_id})\n \n return info", "title": "" }, { "docid": "0763f2959e764659f5998c1364a9efe1", "score": "0.46822757", "text": "def direct_data():\r\n try:\r\n file = open(\"SemanticMapping_Results.txt\", \"r\")\r\n for line in file:\r\n line = line.strip(\"\\n\")\r\n line = line.replace('[', '').replace(']', '')\r\n line = line.replace(\"'\", \"\")\r\n line = line.split(\",\")\r\n person_id = line[0].split(\"-\")\r\n person_id = person_id[1]\r\n year_of_birth = line[1]\r\n month_of_birth = line[2].strip()\r\n gender_concept_id = line[3]\r\n gender_source_value = line[4]\r\n race_concept_id = line[5]\r\n race_source_value = line[6]\r\n fill_person(person_id=person_id,\r\n gender_concept_id=gender_concept_id,\r\n year_of_birth=year_of_birth,\r\n month_of_birth=month_of_birth,\r\n race_concept_id=race_concept_id,\r\n gender_source_value=gender_source_value,\r\n race_source_value=race_source_value)\r\n for i in range(7,len(line),2):\r\n if line[i].count(' not_found') == 0:\r\n condition_id = line[i]\r\n condition = line[i + 1]\r\n 
conditions(person_id=person_id,condition_id=condition_id, condition=condition)\r\n file.close()\r\n except FileNotFoundError:\r\n print(\"Check if the correct files are present\")", "title": "" }, { "docid": "6ddaa1f132a5c9600703d073a5348fc6", "score": "0.4677301", "text": "def assign(ppl_data,skills_data,tasks_data):\r\n ppl,tasks,skill_num=takeInput(ppl_data,skills_data,tasks_data)\r\n data=makeDict(ppl,skill_num) # makes a dictionary of skills vs personID\r\n\r\n main_assign={}\r\n main_check=[0]*len(tasks)\r\n day=1\r\n while (sum(main_check)<len(tasks)):\r\n person_assign=[0]*len(ppl)\r\n i=0\r\n while (sum(person_assign)<len(ppl)) and (i<len(tasks)):\r\n if main_check[i]==0:\r\n task_id=tasks[i].taskID\r\n skill_req=tasks[i].skillID\r\n for x in data[skill_req]:\r\n if person_assign[x-1]==0:\r\n person_assign[x-1]=1\r\n main_assign[task_id]=[x,day]\r\n main_check[i]=1\r\n break\r\n i+=1\r\n day+=1\r\n\r\n return collections.OrderedDict(sorted(main_assign.items()))", "title": "" }, { "docid": "0801cc5a24413b24a345796396842bd1", "score": "0.46741042", "text": "def initResults(results_dict):\n\n results_dict['H1'] = {}\n results_dict['H2'] = {}\n results_dict['H12'] = {}\n results_dict['H1toH2'] = {}", "title": "" }, { "docid": "0e92587ca3e4abeb9a56e3db63128b33", "score": "0.46689984", "text": "def ReadData(self, fname):\n self.datafile=path.join(path.realpath('.'),fname)\n self.logLikelihood=0\n self.instanceMap = {}\n datatype=np.dtype([('ps_id', int),('trial_no', int),\\\n ('session', int),('condition', int),('length',float),\\\n ('actualCat',int),('idealCat',int),('responseCat',int),\\\n ('modelledCat', int)])\n data=[] # A temporary data structure which we are using before we\n # transfer all to numpy.\n self.testData = {} # initialise it here\n self.usedTestData = {} # The data structure we use in the code, with\n # selected instances\n self.catA=set() # Boolean saying that we don't have members in\n # category A\n self.catB=set() # same for cat B\n self.reEstimated=0\n self.changedCats = 0\n self.presentedOrder=[] # This will remember the order of presentation\n # of stimuli, for modelling of recency and forgetting. \n # We don't put the data in categories yet - we do that in the modelling\n # phase, where we build the model incrementally.\n if path.exists(fname):\n with open(fname) as f:\n for line in f:\n try:\n a=line.split(',')\n if a[5]=='D': # this is how it is represented in the\n # data from Texas\n actualcat=1\n #self.catB.append(instNo)\n else:\n actualcat=-1\n #self.catA.append(instNo)\n # ^ this is the category which is given to the Ps as\n # feedback. This is what we model as what they\n # remember / forget / use for category inference.\n pscat=-1\n if a[6]=='D':\n pscat=1\n # ^ this is the category the Ps responded. 
Most relevant in\n # test set.\n idealcat=-1\n if int(a[4])>30:\n idealcat=1\n # ^ this is the category which the ideal classifier\n # would put the stimulus in.\n data.append((int(a[0]), int(a[3]), int(a[1]), int(a[2]),\\\n self.AddNoise(int(a[4])), actualcat, idealcat,\\\n pscat, actualcat))\n # Here we are using actualCat as if it was modelledCat,\n # to simplify choosing instances which are presented in\n # category A or B.\n # ps_id, trial_no, session, condition, length,\n # actualCat, idealCat, responseCat, modelledCat\n except Exception, e:\n continue # say, if first line or something wrong\n self.trainingData = np.array(data,dtype=datatype)\n self.usedTrainingData = self.trainingData.copy()\n # Populate instanceMap\n for i, instance in enumerate(data):\n self.instanceMap[instance[0:4]]=i\n else:\n if self.verbose > 0:\n print \"The filename \"+fname+\" is invalid!\"", "title": "" }, { "docid": "c741f0503b33409b06ed27a90922a0f4", "score": "0.46638238", "text": "def load_from_dict(question_dict) -> Question:\n return question_factory(**question_dict)", "title": "" }, { "docid": "3b06d1802c95b162857d5e168e152a37", "score": "0.46602592", "text": "def deserialize(cls, problem_as_json: str, name: str):\n result = json.loads(problem_as_json)\n problem = Problem(\n name=name,\n terms=[Term.from_dict(t) for t in result['cost_function']['terms']],\n problem_type=ProblemType[result['cost_function']['type']]\n )\n \n if 'initial_configuration' in result['cost_function']:\n problem.init_config = result['cost_function']['initial_configuration']\n\n return problem", "title": "" }, { "docid": "3e8bc5e7867f6d468af91ca3b00231b1", "score": "0.46534365", "text": "def process_input(data):\n distributions = {}\n beliefs = {}\n query = []\n edges = []\n tables_data = []\n pos_temp = []\n neg_temp = []\n for node in data:\n table = []\n pos = []\n neg = []\n for key in node.keys():\n if key == 'BELIEF':\n append = 0\n new_key = node.get(key).replace('-', '')\n beliefs[new_key] = node.get(key)\n elif key == 'QUERY':\n append = 0\n query.append(node.get(key))\n else:\n if key.find('|') == -1:\n append = 0\n distributions[key] = DiscreteDistribution({key: node.get(key), '-' + key: 1 - node.get(key)})\n else:\n append = 1\n info = key.split('|')\n if info[1].find('-') == -1:\n for i in range(len(info[1])):\n edges.append((info[1][i], info[0]))\n pos_row = []\n neg_row = []\n flag = 0\n for i in range(len(info[1])):\n if flag == 1:\n flag = 0\n continue\n elif info[1][i] == '-':\n pos_row.append(info[1][i]+ info[1][i + 1])\n neg_row.append(info[1][i]+ info[1][i + 1])\n flag = 1\n else:\n pos_row.append(info[1][i])\n neg_row.append(info[1][i])\n pos_row.append(info[0])\n pos_row.append(node.get(key))\n pos_temp = pos_row\n neg_row.append('-' + info[0])\n neg_row.append(1 - node.get(key))\n neg_temp = neg_row\n if append == 1:\n pos.append(pos_temp)\n neg.append(neg_temp)\n table = pos + neg\n if append == 1:\n tables_data.append(table)\n return distributions, tables_data, beliefs, query, edges", "title": "" }, { "docid": "6a5a9ddcbe09ee8bab38665f08856d9f", "score": "0.464792", "text": "def __init__(self, **entries):\n\n # convert dictionary parameter into nested objects and attributes\n for key, value in entries.items():\n\n # checking whether key contains a dictionary\n if isinstance(value, Mapping):\n\n # iterate recursively through sub dictionary\n # and store resulting object into structure\n self.__dict__[key] = ConfStruct(**value)\n\n else:\n\n # other datatypes are stored as provided\n # into the 
structure\n self.__dict__[key] = value", "title": "" }, { "docid": "3381a67c3ca9dcbce51ff3c033b69170", "score": "0.46474317", "text": "def __init__(self):\n self.population = {}\n self.parents = {}\n self.internal_nrj = {}", "title": "" }, { "docid": "fc06ec82ce0ee580f6981e4b494976f1", "score": "0.46439058", "text": "def from_manual(self, data, **kwargs): # pylint: disable=arguments-differ\n super().from_manual(**kwargs)\n if not isinstance(data, dict):\n raise ValueError(\"Data needs to be a dict of dictionaries\")\n\n try:\n graph = nx.from_dict_of_dicts(data)\n except AttributeError as exc:\n raise ValueError(\"Unable to generate a graph\") from exc\n\n self.from_graph(graph)", "title": "" }, { "docid": "4f61cc38e2d9e03591a12a23a08d88ea", "score": "0.46435106", "text": "def __init__(self):\n self.parents = {}\n self.children = {}\n self.weights = {}\n self.population = {}\n self.descriptor = {}\n self.slides = {}", "title": "" }, { "docid": "ba06c756c936fe330be1b7adfd96e5ff", "score": "0.4641115", "text": "def __init__(self, input_dict):\n if isinstance(input_dict, dict):\n if {\"matrix_a\", \"matrix_b\"} <= input_dict.keys():\n self.A = convert(input_dict[\"matrix_a\"])\n self.B = convert(input_dict[\"matrix_b\"])\n else:\n raise Exception(\"'matrix_a' and 'matrix_b' must be defined.\")\n else:\n raise Exception(\"input must be a json object.\")\n if self.A.shape[-1] != self.B.shape[0]:\n raise Exception(\n \"inner dimensions between A and B must be the same.\\n A: {} B: {}\".format(\n self.A.shape[-1], self.B.shape[0]\n )\n )", "title": "" }, { "docid": "2530a11e0d56968112ff6fa65f63c400", "score": "0.46394745", "text": "def _create_local_problems(self):\n B_dict = self._config_dict['B_dict']\n t0 = self._config_dict['t0']\n q0 = self._config_dict['q0_dict']\n dq0 = self._config_dict['dq0_dict']\n ddq0 = self._config_dict['ddq0_dict']\n for problem_id, integrator in self._config_dict['integrator_dict'].items():\n inital_solutions_dict = {'t0': t0, 'q0': q0[problem_id], 'dq0': dq0[problem_id], 'ddq0': ddq0[problem_id]}\n if problem_id not in self._local_problems:\n self._local_problems[problem_id] = LinearDynamicLocalProblem(problem_id, integrator, B_dict[problem_id],\n inital_solutions_dict)\n self._local_problems[problem_id].set_config({'preconditioner': copy(self._config_dict['preconditioner']),\n 'scaling': copy(self._config_dict['scaling'])})\n self._local_problems[problem_id].update_preconditioner_and_scaling()", "title": "" }, { "docid": "a879b5b90d08fdb829b785ad2fe31cca", "score": "0.46327314", "text": "def from_dict(cls, idx: int, value: typing.Mapping[str, typing.Any]):\n for field in dataclasses.fields(cls):\n field_type = typing.get_origin(field.type)\n field_args = typing.get_args(field.type)\n field_value = value.get(field.name)\n optional = field_type is typing.Union and type(None) in field_args\n if not optional and field.name not in value:\n raise ValidationError(\"registry #{} missing required field '{}'\".format(idx, field.name))\n allowed_types = field_args or (field.type,)\n if not isinstance(field_value, allowed_types):\n allowed_types = [_.__name__ for _ in allowed_types]\n type_hint = \",\".join([_ for _ in allowed_types if _ != \"NoneType\"])\n raise ValidationError(\n \"registry #{} field {}={} is type {}, not type {}\".format(\n idx, field.name, field_value, type(field_value).__name__, type_hint\n )\n )\n allowed_field_names = {_.name for _ in dataclasses.fields(cls)}\n for field in value.keys() - allowed_field_names:\n raise ValidationError(\"registry #{} 
field {} may not be specified\".format(idx, field))\n return cls(**value)", "title": "" }, { "docid": "2bdf7034218de04304681a3b23deba86", "score": "0.46260697", "text": "def competition_values(data, obj):\n value_map = {'A76Action': 'a_76_fair_act_action',\n 'commercialItemAcquisitionProcedures': 'commercial_item_acquisitio',\n 'commercialItemTestProgram': 'commercial_item_test_progr',\n 'evaluatedPreference': 'evaluated_preference',\n 'extentCompeted': 'extent_competed',\n 'fedBizOpps': 'fed_biz_opps',\n 'localAreaSetAside': 'local_area_set_aside',\n 'numberOfOffersReceived': 'number_of_offers_received',\n 'priceEvaluationPercentDifference': 'price_evaluation_adjustmen',\n 'reasonNotCompeted': 'other_than_full_and_open_c',\n 'research': 'research',\n 'smallBusinessCompetitivenessDemonstrationProgram': 'small_business_competitive',\n 'solicitationProcedures': 'solicitation_procedures',\n 'statutoryExceptionToFairOpportunity': 'fair_opportunity_limited_s',\n 'typeOfSetAside': 'type_set_aside'}\n\n for key, value in value_map.items():\n try:\n obj[value] = extract_text(data[key])\n except (KeyError, TypeError):\n obj[value] = None\n\n # get descriptions for things in the value map\n value_map = {'A76Action': 'a_76_fair_act_action_desc',\n 'commercialItemAcquisitionProcedures': 'commercial_item_acqui_desc',\n 'commercialItemTestProgram': 'commercial_item_test_desc',\n 'evaluatedPreference': 'evaluated_preference_desc',\n 'extentCompeted': 'extent_compete_description',\n 'fedBizOpps': 'fed_biz_opps_description',\n 'localAreaSetAside': 'local_area_set_aside_desc',\n 'reasonNotCompeted': 'other_than_full_and_o_desc',\n 'research': 'research_description',\n 'solicitationProcedures': 'solicitation_procedur_desc',\n 'statutoryExceptionToFairOpportunity': 'fair_opportunity_limi_desc',\n 'typeOfSetAside': 'type_set_aside_description'}\n\n for key, value in value_map.items():\n try:\n obj[value] = extract_text(data[key]['@description'])\n except (KeyError, TypeError):\n obj[value] = None\n\n return obj", "title": "" }, { "docid": "8b8ed05644dbe9b16d4ac0bd4b6dbaeb", "score": "0.4624942", "text": "def makeNddata(paramDict, dataDim = 'kSigma', indepDim = 'site', errorDim = 'kSigma Error',selections=False):#{{{\n\n if selections:\n selectionDict = {}\n for key in selections.keys():\n selectionDict.update({key:paramDict.get(key)})\n data = paramDict.get(dataDim)\n indep = paramDict.get(indepDim)\n error = paramDict.get(errorDim)\n trueData = []\n trueIndep = []\n trueError = []\n for count,value in enumerate(data):\n if value != None:\n if selections:\n ### Currently this is only going to work for a single selection.\n for key in selectionDict.keys():\n if selectionDict.get(key)[count] == selections.get(key):\n appendData = True\n else:\n appendData = False\n break\n if appendData:\n print \"This works\"\n trueData.append(data[count])\n if indepDim:\n trueIndep.append(indep[count])\n if errorDim:\n trueError.append(error[count])\n\n else:\n trueData.append(data[count])\n if indepDim:\n trueIndep.append(indep[count])\n if errorDim:\n trueError.append(error[count])\n else:\n break\n if indepDim:\n if errorDim:\n data = pys.nddata(pys.array(trueData)).rename('value',indepDim).labels(indepDim,pys.array(trueIndep)).set_error(pys.array(trueError))\n else:\n data = pys.nddata(pys.array(trueData)).rename('value',indepDim).labels(indepDim,pys.array(trueIndep))\n else:\n if errorDim:\n data = pys.nddata(pys.array(trueData)).set_error(pys.array(trueError))\n else:\n data = pys.nddata(pys.array(trueData))\n return 
data", "title": "" }, { "docid": "6c907252f45b642704facbe6b6e6dee8", "score": "0.46244508", "text": "def setup_problem(self):\n # Create fitting problem\n self.problem.CreateStart(\n self.problem_user_num, self.problem_specification)\n self.problem.CreateFinish()\n\n self.problem.ControlLoopCreateStart()\n self.problem.ControlLoopCreateFinish()\n\n self.solver = iron.Solver()\n self.problem.SolversCreateStart()\n self.problem.SolverGet(\n [iron.ControlLoopIdentifiers.NODE], 1, self.solver)\n # self.solver.OutputTypeSet(iron.SolverOutputTypes.NONE)\n self.solver.OutputTypeSet(iron.SolverOutputTypes.PROGRESS)\n self.solver.LinearTypeSet(iron.LinearSolverTypes.DIRECT)\n # # self.solver.LibraryTypeSet(iron.SolverLibraries.UMFPACK) # UMFPACK/SUPERLU\n # self.solver.LinearTypeSet(iron.LinearSolverTypes.ITERATIVE)\n # self.solver.LinearIterativeMaximumIterationsSet(5000)\n # self.solver.LinearIterativeAbsoluteToleranceSet(1.0E-10)\n # self.solver.LinearIterativeRelativeToleranceSet(1.0E-05)\n self.problem.SolversCreateFinish()\n\n self.solver = iron.Solver()\n self.solver_equations = iron.SolverEquations()\n self.problem.SolverEquationsCreateStart()\n self.problem.SolverGet(\n [iron.ControlLoopIdentifiers.NODE], 1, self.solver)\n self.solver.SolverEquationsGet(self.solver_equations)\n self.solver_equations.SparsityTypeSet(\n iron.SolverEquationsSparsityTypes.SPARSE)\n _ = self.solver_equations.EquationsSetAdd(\n self.equations_set)\n self.problem.SolverEquationsCreateFinish()\n\n self.boundary_conditions = iron.BoundaryConditions()\n self.solver_equations.BoundaryConditionsCreateStart(\n self.boundary_conditions)\n # Mapping constraints\n if self.dependent_field_mappings:\n version = 1\n for mapped_node_idx in range(\n self.num_mapped_dependent_field_nodes):\n for component in range(1, self.num_data_components + 1):\n self.boundary_conditions.ConstrainNodeDofsEqual(\n self.dependent_field, iron.FieldVariableTypes.U,\n version,\n iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV,\n component,\n self.mapped_dependent_field_node_nums[mapped_node_idx, :],\n 1.0)\n self.solver_equations.BoundaryConditionsCreateFinish()", "title": "" }, { "docid": "5bbeffc3317761353bc2e9fe374e2f61", "score": "0.4624434", "text": "def infotodict(seqinfo):\n # print(seqinfo)\n\n # --- ANATOMICAL ---\n t1 = create_key('{session}/anat/sub-{subject}_{session}_T1w')\n t2 = create_key('{session}/anat/sub-{subject}_{session}_T2w')\n # --- TASK ---\n # Template\n # sub-<participant_label>[_ses-<session_label>]_task-<task_label>[_acq-<label>][_rec-<label>][_run-<index>]_bold.nii[.gz]\n rest = create_key('{session}/func/sub-{subject}_{session}_task-rest_acq-{direction}_bold')\n task = create_key('{session}/func/sub-{subject}_{session}_task-{task_name}_run-{item:02d}_bold')\n # --- Fieldmap ---\n spin_echo = create_key('{session}/fmap/sub-{subject}_{session}_dir-{direction}_epi')\n\n info = {t1: [], t2:[], rest:[], task:[], spin_echo:[],}\n last_run = len(seqinfo)\n\n for s in seqinfo:\n series_number = s[2]\n protocol_name = s[12]\n motion_corrected = s[13]\n x, y, sl, nt = (s[6], s[7], s[8], s[9])\n\n # --- ANATOMICAL ---\n # T1-weighted\n if (sl == 176 or sl == 704) and (nt == 1) and ('MEMPRAGE' in protocol_name):\n info[t1] = [series_number]\n # elif (nt == 1) and ('MEMPRAGE' in protocol_name):\n # info[t1] = [series_number]\n # T2-weighted\n elif (sl == 176) and (nt == 1) and ('T2_SPACE' in protocol_name):\n info[t2] = [series_number]\n\n # --- FUNCTIONAL ---\n # Rest\n elif (sl == 65) and (nt == 300):\n if ('rffMRI_AP' in 
protocol_name):\n info[rest].append({'item': series_number, 'direction': 'AP'})\n elif ('rsfMRI_PA' in protocol_name):\n info[rest].append({'item': series_number, 'direction': 'PA'})\n # Tasks\n # QUESTION: are boolean-like strings converted to booleans in seqinfo?\n elif (sl == 32) and (nt == 136) and (motion_corrected == \"True\"):\n if ('fMRI_listen' in protocol_name):\n info[task_listen].append({'item': series_number, 'task_name': 'listen')\n elif ('fMRI_selfref' in protocol_name):\n info[task_listen].append({'item': series_number, 'task_name': 'selfref')\n # --- FIELDMAP ---\n # Spin Echo EPI\n elif (sl == 260) and (nt == 1):\n if ('Spin_Echo_EPI_AP' in protocol_name):\n info[spin_echo].append({'item': series_number, 'direction': 'AP'})\n elif ('Spin_Echo_EPI_PA' in protocol_name):\n info[spin_echo].append({'item': series_number, 'direction': 'PA'})\n else:\n pass\n\n return info", "title": "" }, { "docid": "cdcab6ae0b6a8ae4046f8acfd14adf1a", "score": "0.4623116", "text": "def get_data(self):\n columns = self.str_map.splitlines()\n rows = list(columns[0])\n\n for species in self.names:\n z = list()\n for y in range(len(columns)):\n temp = list()\n for x in range(len(rows)):\n v = self.island[(y + 1, x + 1)].count_species[species]\n temp.append(v)\n z.append(temp)\n self.data[species] = z\n\n self.total_age = {species: [] for species in self.names}\n for names in self.names:\n for coord in self.island:\n for units in self.island[coord].count_age[names]:\n self.total_age[names].append(units)\n\n self.total_weight = {species: [] for species in self.names}\n for names in self.names:\n for coord in self.island:\n for units in self.island[coord].count_weight[names]:\n self.total_weight[names].append(units)\n\n self.total_fitness = {species: [] for species in self.names}\n for names in self.names:\n for coord in self.island:\n for units in self.island[coord].count_fitness[names]:\n self.total_fitness[names].append(units)", "title": "" }, { "docid": "05d54fb983d44b3fc4886abbc76ecfaa", "score": "0.46227047", "text": "def extract_data(self):\n self.data = {}\n for pd in self.pds:\n self.data[pd] = {} \n for year in self.datasets.keys():\n logger.info(\"[DataFetcher : extract_data] Loading histograms for pd '%s' and year '%s' from %d total files.\" % (pd, year, len(self.files[pd][year])))\n for file in tqdm(self.files[pd][year]):\n run_number = DataFetcher.get_run_number(file)\n\n label = -1 # unknown good/bad\n if self.datasets[year][\"bad_runs\"] is not None:\n if str(run_number) in self.datasets[year][\"bad_runs\"]:\n label = 1 # bad/anomalous\n elif self.datasets[year][\"good_runs\"] is None:\n label = 0 # if only bad_runs was specified, mark everything not in bad_runs as good\n\n if self.datasets[year][\"good_runs\"] is not None:\n if str(run_number) in self.datasets[year][\"good_runs\"]:\n label = 0 # good/not anomalous\n elif self.datasets[year][\"bad_runs\"] is None:\n label = 1 # if only good_runs was specified, mark everything not in good_runs as bad\n\n logger.debug(\"[DataFetcher : load_data] Loading histograms from file %s, run %d\" % (file, run_number))\n\n histograms = self.load_data(file, run_number, self.contents) \n if not self.data[pd]:\n self.data[pd] = histograms\n\n if histograms is not None:\n histograms[\"run_number\"] = [run_number]\n histograms[\"year\"] = [year]\n histograms[\"label\"] = [label]\n for k, v in histograms.items():\n self.data[pd][k] += v", "title": "" }, { "docid": "877e9fa8a3ab11f4a0f0cb242494d96d", "score": "0.46204937", "text": "def 
get_leetcode_problems(self):\n # we should look the response data carefully to find law\n # return byte. content type is byte\n content = requests.get('https://leetcode.com/api/problems/algorithms/').content\n # get all problems\n self.questions = json.loads(content)['stat_status_pairs']\n # print(self.questions)\n difficultys = ['Easy', 'Medium', 'Hard']\n for i in range(len(self.questions) - 1, -1, -1):\n question = self.questions[i]\n name = question['stat']['question__title']\n url = question['stat']['question__title_slug']\n id_ = str(question['stat']['frontend_question_id'])\n if int(id_) < 10:\n id_ = '00' + id_\n elif int(id_) < 100:\n id_ = '0' + id_\n lock = question['paid_only']\n if lock:\n self.locked += 1\n difficulty = difficultys[question['difficulty']['level'] - 1]\n url = Config.leetcode_url + url + '/description/'\n q = Question(id_, name, url, lock, difficulty)\n self.table.append(q.id_)\n self.table_item[q.id_] = q\n return self.table, self.table_item", "title": "" }, { "docid": "0b9e94b2d6a1fe14ec6a54d07ce7691e", "score": "0.4616866", "text": "def create_data_tissue_tree():\n data_category, cat_is_new = TissueCategory.objects.get_or_create(cat_name='Data')\n data_names = [\"Blood Ethanol Concentration\", \"Hormone\", \"Daily Ethanol Summary\", \"Ethanol Events\", \"Necropsy Summary\", \"Electrophysiology\", \"Metabolite\", \"Protein\"]\n data_models = [MonkeyBEC, MonkeyHormone, MonkeyToDrinkingExperiment, ExperimentEvent, NecropsySummary, MonkeyEphys, MonkeyMetabolite, MonkeyProtein]\n for _name, _model in zip(data_names, data_models):\n _tst, tst_is_new = TissueType.objects.get_or_create(tst_tissue_name=_name, category=data_category)\n new_tss_count = 0\n for _mky in _model.objects.order_by().values_list('monkey', flat=True).distinct():\n _mky = Monkey.objects.get(pk=_mky)\n _tss, tss_is_new = TissueSample.objects.get_or_create(monkey=_mky, tissue_type=_tst, tss_sample_quantity=1, tss_units='whole')\n if tss_is_new:\n new_tss_count += 1\n print \"%s Data Type %s: %d new data samples created\" % (\"New\" if tst_is_new else \"Old\", _name, new_tss_count)\n\n\n # \"Ethanol Drinks\",\n # ExperimentBout, ExperimentDrink,\n\n ### Experiment Bouts don't have an ebt.monkey field....\n _tst, tst_is_new = TissueType.objects.get_or_create(tst_tissue_name=\"Ethanol Bouts\", category=data_category)\n new_tss_count = 0\n for _mky in ExperimentBout.objects.order_by().values_list('mtd__monkey', flat=True).distinct():\n _mky = Monkey.objects.get(pk=_mky)\n _tss, tss_is_new = TissueSample.objects.get_or_create(monkey=_mky, tissue_type=_tst, tss_sample_quantity=1, tss_units='whole')\n if tss_is_new:\n new_tss_count += 1\n print \"%s Data Type %s: %d new data samples created\" % (\"New\" if tst_is_new else \"Old\", \"Ethanol Bouts\", new_tss_count)\n\n ### Experiment Drinks don't have an edr.monkey field....\n _tst, tst_is_new = TissueType.objects.get_or_create(tst_tissue_name=\"Ethanol Drinks\", category=data_category)\n new_tss_count = 0\n for _mky in ExperimentDrink.objects.order_by().values_list('ebt__mtd__monkey', flat=True).distinct():\n _mky = Monkey.objects.get(pk=_mky)\n _tss, tss_is_new = TissueSample.objects.get_or_create(monkey=_mky, tissue_type=_tst, tss_sample_quantity=1, tss_units='whole')\n if tss_is_new:\n new_tss_count += 1\n print \"%s Data Type %s: %d new data samples created\" % (\"New\" if tst_is_new else \"Old\", \"Ethanol Drinks\", new_tss_count)\n\n print \"Success.\"", "title": "" }, { "docid": "397206c26fe65cccde7a5c656f41cdfe", "score": "0.4613603", "text": 
"def _walk_structure_for_problems(a, b, aname, bname, problem_list):\n if type(a) != type(b) and not ( # pylint: disable=unidiomatic-typecheck\n _are_both_of_integer_type(a, b) or _are_both_of_sequence_type(a, b) or\n _are_both_of_set_type(a, b) or _are_both_of_mapping_type(a, b)):\n # We do not distinguish between int and long types as 99.99% of Python 2\n # code should never care. They collapse into a single type in Python 3.\n problem_list.append('%s is a %r but %s is a %r' %\n (aname, type(a), bname, type(b)))\n # If they have different types there's no point continuing\n return\n\n if isinstance(a, abc.Set):\n for k in a:\n if k not in b:\n problem_list.append(\n '%s has %r but %s does not' % (aname, k, bname))\n for k in b:\n if k not in a:\n problem_list.append('%s lacks %r but %s has it' % (aname, k, bname))\n\n # NOTE: a or b could be a defaultdict, so we must take care that the traversal\n # doesn't modify the data.\n elif isinstance(a, abc.Mapping):\n for k in a:\n if k in b:\n _walk_structure_for_problems(\n a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k),\n problem_list)\n else:\n problem_list.append(\n \"%s has [%r] with value %r but it's missing in %s\" %\n (aname, k, a[k], bname))\n for k in b:\n if k not in a:\n problem_list.append(\n '%s lacks [%r] but %s has it with value %r' %\n (aname, k, bname, b[k]))\n\n # Strings/bytes are Sequences but we'll just do those with regular !=\n elif (isinstance(a, abc.Sequence) and\n not isinstance(a, _TEXT_OR_BINARY_TYPES)):\n minlen = min(len(a), len(b))\n for i in range(minlen):\n _walk_structure_for_problems(\n a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i),\n problem_list)\n for i in range(minlen, len(a)):\n problem_list.append('%s has [%i] with value %r but %s does not' %\n (aname, i, a[i], bname))\n for i in range(minlen, len(b)):\n problem_list.append('%s lacks [%i] but %s has it with value %r' %\n (aname, i, bname, b[i]))\n\n else:\n if a != b:\n problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b))", "title": "" }, { "docid": "b4763be31d79a86b7a63a2cb06f14230", "score": "0.46135858", "text": "def _load_params(self, d):\n\n # It appears the structure gets serialized into a dictionary by the action of writing\n # the specs. 
Let's give it life again!\n self.structure = Structure.from_dict(d['structure'])\n\n self.name = d['name']\n self.job_type = d['job_type']\n self.version = d['version']\n\n if 'nproc' in d:\n self.nproc = int(d['nproc'])\n else:\n self.nproc = 16\n\n if 'maximum_relaxations' in d:\n self.maximum_relaxations = int(d['maximum_relaxations'])\n else:\n self.maximum_relaxations = 9\n\n if 'distress_number_relaxations' in d:\n self.distress_number_relaxations = int(d['distress_number_relaxations'])\n else:\n self.distress_number_relaxations = 6\n\n\n if 'supplementary_incar_dict' in d:\n self.supplementary_incar_dict = d['supplementary_incar_dict']\n else:\n self.supplementary_incar_dict = None\n\n if 'strategy_type' in d:\n if d['strategy_type'] == 'HexaCyanoFerrate':\n self.U_strategy = U_Strategy_HexaCyanoFerrate()\n\n elif d['strategy_type'] == 'RAMP':\n if 'current_U_Fe' not in d:\n print(\"==== ERROR: the variable 'current_U_Fe' must be defined for RAMPING!\")\n sys.exit()\n U_Fe = d['current_U_Fe']\n self.U_strategy = U_Strategy_RAMP(new_U_dict={'Fe':U_Fe})\n\n elif d['strategy_type'] == 'MaterialsProject':\n self.U_strategy = U_Strategy_MaterialsProject(variable_magnetization_dict={'Fe':[5,4]})\n\n elif d['strategy_type'] == 'MaterialsProject_V2':\n if 'variable_magnetization_dict' not in d:\n print(\"==== ERROR: the variable 'variable_magnetization_dict' must be defined!\")\n sys.exit()\n variable_magnetization_dict = d['variable_magnetization_dict']\n self.U_strategy = U_Strategy_MaterialsProject_V2(variable_magnetization_dict)\n elif d['strategy_type'] == 'U_Strategy_Yamada_Nitrogen':\n if 'variable_magnetization_dict' not in d:\n print(\"==== ERROR: the variable 'variable_magnetization_dict' must be defined!\")\n sys.exit()\n variable_magnetization_dict = d['variable_magnetization_dict']\n self.U_strategy = U_Strategy_Yamada_Nitrogen(variable_magnetization_dict)\n else:\n print(\"UNKNOWN STRATEGY! FAIL HARD\")\n sys.exit()\n else:\n self.U_strategy = None", "title": "" } ]
468b816a7ffedcccb4557d5212ec44ff
plugins > print the number of plugins currently loaded.
[ { "docid": "f1c3235b866d5f55ca388bb8ebf47c15", "score": "0.712377", "text": "def show_plugins(bot, nick, chan, arg):\n plugins = len(bot.cmdhandler.loaded_plugins)\n out = bot.hicolor(\"Module Manager\" + box[\"vert\"])\n out += bot.style.color(\" %d plugins loaded\" % (plugins), color=\"silver\")\n bot.msg(chan, out)", "title": "" } ]
[ { "docid": "dcacb8f68b550a3690bca2b827313e38", "score": "0.79757935", "text": "def num_available_plugins():\n return len(conf['plugins'])", "title": "" }, { "docid": "7cbc4fe4ae87850590bc366de633aac8", "score": "0.7857803", "text": "def __get_plugins_count(self):\n # First - check Regius plugins.\n plugins_to_load = 0\n for item in os.listdir(os.path.join(self.config.get_temp_value(\"REGIUS_PATH\"), \"plugins\")):\n plugins_to_load += 1\n\n # Second: check application-related plugins.\n for item in os.listdir(os.path.join(self.config.get_temp_value(\"SCRIPT_PATH\"), \"plugins\")):\n plugins_to_load += 1\n\n return plugins_to_load", "title": "" }, { "docid": "0e2fdfb2020a51faf3cf6cb04578312a", "score": "0.7742657", "text": "def num_enabled_plugins():\n enabled = 0\n for plugin in conf['plugins']:\n if 'module' in conf['plugins'][plugin]:\n enabled += 1\n return enabled", "title": "" }, { "docid": "ed51b6f76a9280d2280fb8e348f637b3", "score": "0.6671052", "text": "def list_plugins(bot, nick, chan, arg):\n bot.msg(chan, col(bot.cmdhandler.loaded_plugins, bot.state.data[\"plug_cols\"]))", "title": "" }, { "docid": "9087a94f0a6040f61dcb3b03291b8384", "score": "0.6521926", "text": "def onPluginsLoaded(self):\r\n print(\"ECHO - on plugins loaded\")", "title": "" }, { "docid": "2ae0438c17ecbd9af913b0c03b38ec4b", "score": "0.6511027", "text": "def test_get_plugin_list(self):\r\n\r\n plugin_list = get_plugins()\r\n msg = ('No plugins were found, not even the built-in ones')\r\n assert len(plugin_list) > 0, msg", "title": "" }, { "docid": "280ce1cab8b27b8362e572c8f9afe896", "score": "0.63500196", "text": "def count_db(self):\n return len(self._plugin_manager.getPluginsOfCategory('MetaDBPlugins'))", "title": "" }, { "docid": "e9c9c5fef5b7fa7d762890bb1f312e63", "score": "0.6144119", "text": "def get_plugin_instances_status_count(self, status):\n return self.plugin_instances.filter(status=status).count()", "title": "" }, { "docid": "b03cbc58e2e20bd7e076616216b20075", "score": "0.61064", "text": "def test_list_plugins(self):\n resp, error = self.execute([fix, 'list_plugins'])\n self.assertTrue(\"List of available plugins:\" in resp, resp)\n self.assertTrue(\"bad: Plugin has a syntax error\" in resp, resp)\n self.assertTrue(\"dummy: Dummy LittleChef plugin\" in resp, resp)", "title": "" }, { "docid": "39639724dbe05e25c5005acfa413b4ea", "score": "0.60920215", "text": "def cmd_listplugins(self, data, client=None, cmd=None):\n plugins = []\n for pname in self.console._pluginOrder:\n plugins.append(\"^2%s ^7%s\" % (pname, getattr(getModule(self.console.getPlugin(pname).__module__), '__version__', '__name__')))\n \n for b in plugins:\n cmd.sayLoudOrPM(client, b)\n return True", "title": "" }, { "docid": "698a6f48d9ff420ddae7c5a98ade6091", "score": "0.5906203", "text": "def enumerate_plugins(plugins_dir):\n\tplugins = {}\n\t_update_plugins_from_dir(plugins, plugins_dir)\n\treturn plugins", "title": "" }, { "docid": "720ef71f59aa132bed816b578b86e359", "score": "0.5891654", "text": "def getInstalledPlugins():\n _, root = loadPluginTree()\n return [(x.find('name').text.strip(), x.find('location').text.strip()) for x in root.findall('./plugin')]", "title": "" }, { "docid": "34f7435cd04131ab86d60949572b0ccc", "score": "0.5860407", "text": "def plugins_enabled():", "title": "" }, { "docid": "934407e35395d60f763ccb80b769bdcf", "score": "0.5813744", "text": "def ListPluginInformation(self):\n plugin_list = self._front_end.GetWindowsRegistryPlugins()\n\n self.PrintHeader(u'Supported Plugins')\n self.PrintHeader(u'Key 
Plugins')\n for plugin_class in plugin_list.GetAllKeyPlugins():\n self.PrintColumnValue(plugin_class.NAME, plugin_class.DESCRIPTION)\n\n self.PrintHeader(u'Value Plugins')\n for plugin_class in plugin_list.GetAllValuePlugins():\n self.PrintColumnValue(plugin_class.NAME, plugin_class.DESCRIPTION)\n\n self._output_writer.Write(u'\\n')", "title": "" }, { "docid": "b48a69dcb806fc42acffd93ac708b986", "score": "0.58119094", "text": "def get_plugins(self):", "title": "" }, { "docid": "ef032f21ff5e229b9a63a8b0815ee8fe", "score": "0.57954305", "text": "def ListPluginNames(self):\n return list(self._plugins_by_name.keys())", "title": "" }, { "docid": "abba8f4a4540a4cbcbbf3368eda82e42", "score": "0.5707614", "text": "def list_plugins(namespace):\n if plugin_instances[namespace] is None:\n load_plugins(namespace)\n\n return list(plugin_instances[namespace].keys())", "title": "" }, { "docid": "0135cdcfb4aa6ada8b50e4a2493537bb", "score": "0.56958145", "text": "def get_plugins():\n return [p['name'] for p in FuelPlugins.get_all_data()]", "title": "" }, { "docid": "fbf6764afd7958288203b8655e98ee00", "score": "0.56935275", "text": "def num_modules():\n return 536", "title": "" }, { "docid": "7cde7cd6bffb4068d6075a2eeaaffadd", "score": "0.56453747", "text": "def plugins_to_export(self):\n return ['cpu', 'load', 'mem', 'memswap', 'network', 'diskio', 'fs', 'processcount']", "title": "" }, { "docid": "380ed6b4d2cb5bca850520b8b2f90164", "score": "0.5626538", "text": "def _CommandGetTotalNumberOfLoadedHives(self):\n return len(self._preg_cache.hive_storage)", "title": "" }, { "docid": "a58f91fd898dfde9da44b1b7e7b56094", "score": "0.5624197", "text": "def main():\n if len(sys.argv) == 0:\n sys.argv.extend([\"ocpn-plugins.xml\"])\n stats = Stats()\n sys.argv = sys.argv[1:]\n for path in sys.argv:\n tree = ET.parse(path)\n if tree.getroot().tag == 'plugin':\n plugins = [tree]\n else:\n plugins = tree.findall('./plugin')\n for plugin in plugins:\n check_plugin(plugin, path, stats)\n print(\"\")\n print(stats.message())\n sys.exit(stats.exitcode())", "title": "" }, { "docid": "2ad8f6b8a1c87c056b9df46904ee9baf", "score": "0.5612863", "text": "def testGetParserPluginsInformation(self):\n plugins_information = manager.ParsersManager.GetParserPluginsInformation()\n\n self.assertGreaterEqual(len(plugins_information), 1)\n\n available_parser_names = [name for name, _ in plugins_information]\n self.assertIn('olecf_default', available_parser_names)", "title": "" }, { "docid": "1285a99b8ac4de3aa68c4969a352c2ae", "score": "0.55664843", "text": "def test_get_plugins(self):\r\n os.environ['LANG'] = 'en'\r\n plugin_list = get_plugins()\r\n self.assertGreater(len(plugin_list), 0)\r\n\r\n # Obtain string representation\r\n string_rep = admissible_plugins_to_str(plugin_list)\r\n\r\n # Check each plugin\r\n for plugin in plugin_list.values():\r\n # Check that it's name appeears in string representation\r\n title = get_function_title(plugin)\r\n msg = ('Expected title %s in string representation: %s'\r\n % (title, string_rep))\r\n assert title in string_rep, msg\r\n\r\n # Check that every plugin has a requires line\r\n requirements = requirements_collect(plugin)\r\n msg = 'There were no requirements in plugin %s' % plugin\r\n assert(len(requirements) > 0), msg\r\n\r\n for req_str in requirements:\r\n msg = 'All plugins should return True or False'\r\n assert(requirement_check({'category': 'hazard',\r\n 'subcategory': 'earthquake',\r\n 'layerType': 'raster'},\r\n req_str) in [True, False]), msg", "title": "" }, { "docid": 
"82096b83c04607d3bf6bac037ffc30b6", "score": "0.55650926", "text": "def CountHuntOutputPluginLogEntries(self,\n hunt_id,\n output_plugin_id,\n with_type=None):\n\n return len(\n self.ReadHuntOutputPluginLogEntries(\n hunt_id, output_plugin_id, 0, sys.maxsize, with_type=with_type))", "title": "" }, { "docid": "34528f2ca3db612e55d911c192420f39", "score": "0.554722", "text": "def testGetPlugins(self):\n TestParserWithPlugins.RegisterPlugin(TestPlugin)\n\n generator = TestParserWithPlugins.GetPlugins()\n plugin_tuples = list(generator)\n self.assertNotEqual(len(plugin_tuples), 0)\n self.assertIsNotNone(plugin_tuples[0])\n\n TestParserWithPlugins.DeregisterPlugin(TestPlugin)", "title": "" }, { "docid": "fb18c87f41acb5c959a3d1528d7c8cd5", "score": "0.5501818", "text": "def show_all_plugins():\n matches = plugin_search_results()\n print \"\\n Help for sub-commands:\\n\"\n for match in matches:\n fpath, plugin = match\n #from IPython import Shell; Shell.IPShellEmbed(argv=['-noconfirm_exit'])()\n print ' ', fpath2namespace(fpath).upper()\n #try:\n plugin_obj = plugin.spawn()\n #except TypeError, t:\n # raise TypeError, str(t)+'\\nExpectedSignature: '+str(signature(plugin_obj.__init__)._parameters)\n plugin_obj.help(indent=3)", "title": "" }, { "docid": "19be8d43d3827e31e650a79be1935c65", "score": "0.54985535", "text": "def num_languages():\n return len(languages())", "title": "" }, { "docid": "f83490d84702dd33d78a840402d5bcc6", "score": "0.5486437", "text": "def num_players(self):\n return lib.NumPlayers(self._game)", "title": "" }, { "docid": "5cc142c8dc083249b0e4fcf292108ca5", "score": "0.54691374", "text": "def number_of_videos(self):\n num_videos = len(self._video_library.get_all_videos())\n print(f\"{num_videos} videos in the library\")", "title": "" }, { "docid": "a3f72b6fe6b7a871709e32d74704ee7d", "score": "0.5464877", "text": "def _load_plugins():\n return load_plugin_by_namespace(I_DISCOVERY_PLUGIN)", "title": "" }, { "docid": "e208b8b9b039c7bb49dc1020e6b8fdef", "score": "0.5463785", "text": "def print_plugin_state( args, counter, plugins, targets, ensembles):\n\n\toutput = []\n\tfor t in targets:\n\t\tfor r in t.restraints:\n\t\t\tpath = os.path.abspath( \"%s%srestraints_%s_%s_%05i.out\" % (args.dir,os.sep,t.name,r.type,counter) )\n\n\t\t\tfor p in plugins:\n\t\t\t\tif(r.type in p.type):\n\t\t\t\t\t# build the ensemble data list from all ensembles\n\t\t\t\t\tall_ensemble_data = []\n\t\t\t\t\tfor e in ensembles:\n\t\t\t\t\t\tall_ensemble_data.append(e.plugin_data[t.name][r.type])\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmessages = p.ensemble_state(r, t.plugin_data[r.type], all_ensemble_data, path)\n\t\t\t\t\texcept MESMERPluginError as e:\n\t\t\t\t\t\tprint_msg(\"Plugin \\\"%s\\\" returned an error: %s\" % e.msg)\n\n\t\t\t\t\tbreak\n\n\treturn True", "title": "" }, { "docid": "8ea44620c733bb8edcbce90821b17551", "score": "0.5438984", "text": "def num_players(self):\n cnt = 0\n for player in self._player_list:\n if player:\n cnt = cnt + 1\n return cnt", "title": "" }, { "docid": "603a6b87836e9d49d8c0e2e394230e2d", "score": "0.54331994", "text": "def test_single_get_plugins(self):\r\n plugin_name = DEFAULT_PLUGINS[0]\r\n plugin_list = get_plugins(plugin_name)\r\n msg = ('No plugins were found matching %s' % plugin_name)\r\n assert len(plugin_list) > 0, msg", "title": "" }, { "docid": "a498bace58353e8dadfbc8c9e89cbf10", "score": "0.5407906", "text": "def num_packages_available(self):\n return sum([package.available_packages for package in self.packages])", "title": "" }, { "docid": 
"212a3f3fee9ba3dee496f88b358fb8ae", "score": "0.540093", "text": "def num_players(self):\n\t\tpass", "title": "" }, { "docid": "d53a376bfe604ce9d395c1f1c88a4666", "score": "0.5375949", "text": "def number_of_sections(self):\r\n return len(self.config.sections())", "title": "" }, { "docid": "e655a4acebb829fa153f7e1b1262b561", "score": "0.53623873", "text": "def load_mFIZ_plugins():\n # Plugin is dependent on the following scripts\n required_plugins = mFIZ_config.REQUIRED_PLUGINS\n\n # Check to see if each plug-in is loaded\n for plugin in required_plugins:\n # If the plug-in is not loaded:\n if not pm.pluginInfo(plugin, query=True, loaded=True):\n try:\n # Try loading it (and turn on autoload)\n pm.loadPlugin(plugin)\n pm.pluginInfo(plugin, autoload=True)\n print('{} Plug-in loaded'.format(plugin))\n except Exception: # Unknown error\n pass", "title": "" }, { "docid": "559b47f0e16279a0b221a96f0542f170", "score": "0.53573257", "text": "def test_count(self):\n self._setup_config(whitelist={'blues', 'rock', 'jazz'},\n count=2)\n self.assertEqual(self.plugin._resolve_genres(\n ['jazz', 'pop', 'rock', 'blues']),\n 'Jazz, Rock')", "title": "" }, { "docid": "7a22abad1b45e745b4abd48915636a93", "score": "0.5343154", "text": "def countPlayers():\n conn = connect()\n c = conn.cursor()\n c.execute(\"SELECT * FROM players;\")\n playerList = c.rowcount\n conn.close()\n return playerList", "title": "" }, { "docid": "6337b2b1255d2127c0f72457c8315c18", "score": "0.5333639", "text": "def __load_plugins(self):\n self.log(0, \"Loading plugins...\")\n\n # Obtain plugins list.\n # First - get regius plugins.\n regius_plugins = os.listdir(os.path.join(self.config.get_temp_value(\"REGIUS_PATH\"), \"plugins\"))\n plugins = os.listdir(os.path.join(self.config.get_temp_value(\"SCRIPT_PATH\"), \"plugins\"))\n self.log(2, \"Found plugins:\")\n self.log(2, \"From regius: {regius_plugins}\", {\"regius_plugins\": regius_plugins})\n self.log(2, \"From {application_name}: {app_plugins}\", {\"app_plugins\": plugins, \"application_name\": self.config.get_temp_value(\"main/application_name\")})\n\n self.log(1, \"Plugins list to load obtained, starting loading procedure...\")\n\n for plugin in regius_plugins:\n self.loader.request_plugin(plugin)\n self.loading_widget.increment_progress()\n self.loading_widget.set_action(\"Plugin '{0}' loaded.\".format(plugin))\n\n for plugin in plugins:\n self.loader.request_plugin(plugin)\n self.loading_widget.increment_progress()\n self.loading_widget.set_action(\"Plugin '{0}' loaded.\".format(plugin))", "title": "" }, { "docid": "86cb0173ef4ff9c1b122d03b96883048", "score": "0.53264374", "text": "def get_plugins() -> Dict[str, 'Plugin']:\n\n return {} # pragma: no cover", "title": "" }, { "docid": "ade1d4aaa65c94913d5026e49011eff5", "score": "0.53209645", "text": "def load_plugins():\n # Get all plugins.\n plugin_dir = get_plugin_directory()\n load_plugin_directory(plugin_dir)\n # Get all user-defined configurations.\n config_dir = conf['eva']['config_directory']\n if '~' in config_dir: config_dir = os.path.expanduser(config_dir)\n load_plugin_configs(config_dir)\n # Enable all necessary plugins.\n enable_plugins()\n gossip.trigger('eva.plugins_loaded')\n log.info('Plugins loaded successfully')", "title": "" }, { "docid": "2fb67e544ade62e035a546a7f89dc44b", "score": "0.5309181", "text": "def renderPlugins(self):\n \n results = []\n for plugin_name in self.analytics_tool.tracking_plugin_names:\n plugin = queryMultiAdapter(\n (self.context, self.request),\n interface=IAnalyticsTrackingPlugin,\n 
name=plugin_name,\n default=None,\n )\n if plugin:\n results.append(plugin())\n return '\\n'.join(results)", "title": "" }, { "docid": "e0432a96f84492f9a37cb880d06de9d9", "score": "0.5307709", "text": "def plugin_loading_check(self):\r\n # check nb of categories\r\n self.assertEqual(len(self.filteredPluginManager.getCategories()),1)\r\n sole_category = self.filteredPluginManager.getCategories()[0]\r\n # check the number of plugins\r\n self.assertEqual(len(self.filteredPluginManager.getPluginsOfCategory(sole_category)),1)\r\n plugins = self.filteredPluginManager.getPluginsOfCategory(sole_category)\r\n for plugin_info in plugins:\r\n TEST_MESSAGE(\"plugin info: %s\" % plugin_info)\r\n self.plugin_info = plugin_info\r\n self.assert_(self.plugin_info)\r\n self.assertEqual(self.plugin_info.name,\"Simple Plugin\")\r\n self.assertEqual(sole_category,self.plugin_info.category)", "title": "" }, { "docid": "478a92a2d4348b2eac9e22d72e7326ce", "score": "0.52811736", "text": "def num_instances(self):\n return javabridge.call(self.jobject, \"numInstances\", \"()D\")", "title": "" }, { "docid": "f16fbee0decff0a1d954cf8e737c70da", "score": "0.5278358", "text": "def import_plugins() -> None:\n for module_name in discover_plugins():\n try:\n importlib.import_module(module_name)\n except ModuleNotFoundError as e:\n logger.error(f\"Plugin {module_name} could not be loaded: {e}\")", "title": "" }, { "docid": "5f43bf58bab15858ea91d1c07a455e9e", "score": "0.5268661", "text": "def instance_count(self):\n return len(self.state.config_files)", "title": "" }, { "docid": "4e6eb98ea2d11744562e16c165f43bdb", "score": "0.5261673", "text": "def load_plugins():\n loader = _PluginLoader()\n for pkg in PLUGIN_PACKAGES:\n loader.load_plugins(pkg)\n return loader.plugins", "title": "" }, { "docid": "68adadca3fdd1701a4e3d531db1f3531", "score": "0.5250389", "text": "def compss_get_number_of_resources() -> int:\n if CONTEXT.in_pycompss():\n return __get_number_of_resources__()\n return __dummy_compss_get_number_of_resources__()", "title": "" }, { "docid": "1bcb9208265461e99cd22edafea7f4f4", "score": "0.5244642", "text": "def count_num_nv(agents):\n\n num_nv = len([agent for agent in agents if agent.strategy == 'NV'])\n\n return num_nv", "title": "" }, { "docid": "a7dfbde8c67810d83cb53e59340a4655", "score": "0.5243712", "text": "def number_of_players(self):\n return \"This is a {} players co-operative game\".format(self.number_players)", "title": "" }, { "docid": "6e336d24862e73189d4839db0e718fe2", "score": "0.52364916", "text": "def show(ctx):\n config_file = ctx.parent.params['config']\n with open(config_file, 'r') as f:\n config = json.load(f)\n\n if 'plugins' not in config:\n click.echo('no enabled plugins')\n return\n\n for key, value in config['plugins'].items():\n click.echo('module: {0} - functions: {1}'.format(\n key, ', '.join(value)))", "title": "" }, { "docid": "c6e90ffa7e239d4f9f9247020d057a67", "score": "0.52304363", "text": "def count_players(self):\n query = \"SELECT COUNT(*) FROM players\"\n return self.count_elements(query)", "title": "" }, { "docid": "f7264a0f56699bd97f8075d43b5373df", "score": "0.52287126", "text": "def active_players(self) -> int:\n return len([player for player in self.players if player.active])", "title": "" }, { "docid": "c23598c91c40e10a393d169d9a15f4c4", "score": "0.52219397", "text": "def testEnablePlugins(self):\n parser = esedb.ESEDBParser()\n\n number_of_plugins = len(parser._plugin_classes)\n\n parser.EnablePlugins([])\n self.assertEqual(len(parser._plugins_per_name), 0)\n\n 
parser.EnablePlugins(parser.ALL_PLUGINS)\n self.assertEqual(len(parser._plugins_per_name), number_of_plugins)\n\n parser.EnablePlugins(['file_history'])\n self.assertEqual(len(parser._plugins_per_name), 1)", "title": "" }, { "docid": "5379c9eb8bc55c6d60786219f47c1e2e", "score": "0.5221704", "text": "def load_count(self):\n\t\treturn dss.Loads.Count()", "title": "" }, { "docid": "29be6090074ff437295f9926bb8afccf", "score": "0.52152365", "text": "def get_token_count(\n plugin_name: str,\n node_config: Config,\n) -> int:\n section_name = \"storageclient.plugins.{}\".format(plugin_name)\n return int(\n node_config.get_config(\n section=section_name,\n option=\"default-token-count\",\n default=NUM_TOKENS,\n )\n )", "title": "" }, { "docid": "4e95158874b5eeed93abda1e1e4cfb5f", "score": "0.52094275", "text": "def plugins(self, bot, event):\n names = []\n for plugin in bot.plugins:\n plugin_info = plugin.__class__.__name__\n func_names = command_name_list(plugin)\n if func_names:\n plugin_info += \" (%s)\" % \", \".join(func_names)\n names.append(plugin_info)\n names = \", \".join(names)\n bot.message(names)", "title": "" }, { "docid": "a3b93db20caabe157b28a94ac2476e67", "score": "0.52049446", "text": "def num_instances(self):\n num = 0\n\n # Get all the components\n components = self.spouts() + self.bolts()\n\n # Get instances for each worker\n for component in components:\n config = component.comp.config\n for kvs in config.kvs:\n if kvs.key == constants.TOPOLOGY_COMPONENT_PARALLELISM:\n num += int(kvs.value)\n break\n\n return num", "title": "" }, { "docid": "c86bd5d644395887f17a3a0f4901318e", "score": "0.520214", "text": "def plugin_list(self):\n return [name for name in self._plugins]", "title": "" }, { "docid": "7b9cc48661e61970e8d24a3984f95d76", "score": "0.518532", "text": "def numPlayers(self):\n return self.nplayers", "title": "" }, { "docid": "cf4a97e4a623661cf7f0393c631728c9", "score": "0.518426", "text": "def info(args):\n print \"\"\"This is TiddlyWeb version %s.\n The current store is: %s.\"\"\" % (VERSION, config['server_store'][0])\n if config['system_plugins']:\n print 'System Plugins:'\n for plugin in config['system_plugins']:\n module = __import__(plugin)\n print '\\t%s (%s)' % (plugin,\n getattr(module, '__version__', 'unknown'))", "title": "" }, { "docid": "7e88101cfc8b0c823b929268a487d294", "score": "0.51812273", "text": "def _player_count(self):\n return self.active_players", "title": "" }, { "docid": "3da99c87f9f16c243d188c4e739d111b", "score": "0.51811373", "text": "def _num_fixtures(self):\n return self.fixtures.count()", "title": "" }, { "docid": "8fbecc8e826a763ada767d9db78b600a", "score": "0.51781136", "text": "def num_examples(self):\n return self.world.num_examples()", "title": "" }, { "docid": "ffea5f4e2590c765c1a3049f43c1a2bc", "score": "0.51740634", "text": "def get_num_players(self):\n return self.__num_players", "title": "" }, { "docid": "772341e1506c1c765535bf27b1e03b3c", "score": "0.5160727", "text": "def GetAllPlugins(self):\n ret = []\n _ = map(ret.extend, self._plugins.values())\n return ret", "title": "" }, { "docid": "5f30d79245fc87bc34b0ab9d82388809", "score": "0.5160231", "text": "def num_players(self):\n return lib.StateNumPlayers(self._state)", "title": "" }, { "docid": "4514f14ac54f8aad56293fbe8b0a39b0", "score": "0.51557875", "text": "def total_count(self):\n return sum(len(v) for v in self.parsed_resources.values())", "title": "" }, { "docid": "fd46e0286ad0294e2a3823a34f3dc47b", "score": "0.51413095", "text": "def numOfRunners(self):\n\t\tcount = 
0\n\t\tfor candidate in self.candidates:\n\t\t\tif candidate.state == RUNNING:\n\t\t\t\tcount += 1\n\t\treturn count", "title": "" }, { "docid": "3f0cf2ea6778cf72efd6297881bd1350", "score": "0.513199", "text": "def num_players(self):\n return Player.objects.filter(game=self).count()", "title": "" }, { "docid": "f306129502024667166d9f1172fcb6f2", "score": "0.5131145", "text": "def get_num_commands(ctx):\n return len(read_tomb_commands(ctx))", "title": "" }, { "docid": "8ee85edef9d1fe3b63321427a882434c", "score": "0.51259464", "text": "def get_num_backends():\n content = urllib.urlopen(\"http://{0}:25000/backends?raw\".format(IMPALAD)).read()\n return len([b for b in content.strip().split('\\n') if '22000' in b])", "title": "" }, { "docid": "32e49254d08f44d415d069836e99d0e4", "score": "0.5121033", "text": "def count( self ):\n try: \n return len(self.__qa_cli)\n except Exception:\n return 0", "title": "" }, { "docid": "bdf2ef0279f0f0335b2c1de546d32179", "score": "0.51143324", "text": "def countPlayers():\n return_value = return_query(connect(), query_count_player)\n return return_value[0][0]", "title": "" }, { "docid": "402f445ffff01281a979033586006ec3", "score": "0.51096016", "text": "def testGetPlugins(self):\n manager.AnalysisPluginManager.RegisterPlugin(TestAnalysisPlugin)\n\n # Use set-comprehension to create a set of the analysis plugin names.\n plugin_set = {name for name, _ in list(\n manager.AnalysisPluginManager.GetPlugins())}\n self.assertTrue('test_plugin' in plugin_set)\n\n manager.AnalysisPluginManager.DeregisterPlugin(TestAnalysisPlugin)", "title": "" }, { "docid": "3bf60dedf3928ace0b2bdfaf0c9f213c", "score": "0.5106814", "text": "def loads_count(self):\n return reduce(\n lambda count, node: count + node.loads_count,\n self.__nodes,\n 0\n )", "title": "" }, { "docid": "6f086b129b7ce88f4ab5226b5443510c", "score": "0.51062006", "text": "def get_items_count(self):\n return self.wait_for_counter_loaded().text.split()[2]", "title": "" }, { "docid": "e274d2e58c105f5c77391d424cefabdf", "score": "0.51055175", "text": "def countPlayers():\n return tournament.count_players()", "title": "" }, { "docid": "cd3723cb50c78b5c99d1468660bb73a4", "score": "0.50994736", "text": "def __unicode__(self):\n return u'%d plugins(s)' % self.number", "title": "" }, { "docid": "f651ae72d038d809c867ed5dffa32228", "score": "0.50994396", "text": "def ProjectCount(self):\n return len(self._projects)", "title": "" }, { "docid": "c9ad1d3a481a3c04946754ab46e6006a", "score": "0.5097202", "text": "def load_plugins(self, main_window):\n\n plugins = self.conf_client.get_string_list('plugins')\n if not plugins:\n plugins = []\n\n for plugin_name in plugins:\n plugin = get_plugin(plugin_name)\n if not plugin:\n continue\n\n if hasattr(plugin, \"enable\"):\n plugin.enable(self, main_window)\n\n if not plugin_name in self.enabled_plugins:\n self.enabled_plugins.append(plugin_name)", "title": "" }, { "docid": "6cc9efe0daec5da78974fdbf1d6846d8", "score": "0.5095139", "text": "def count_robots(cls):\n print(\"Robots in system: \" + str(cls.robots))", "title": "" }, { "docid": "a4ca3d029a5afde8a1032114432b6aef", "score": "0.5088188", "text": "def names(self):\n return list(self._plugins) + self._stevedore_manager.names()", "title": "" }, { "docid": "767f8d98c693cf7c8a3d170e39a63065", "score": "0.50817925", "text": "def get_cur_num_players(self):\n return len(self.users)", "title": "" }, { "docid": "c22afe5504ff0abc1eb02197cedc29e6", "score": "0.50799817", "text": "def get_session_count():\r\n global HOUSE\r\n cnt = 0\r\n if 
len(HOUSE.keys()) > 0:\r\n for key in HOUSE.keys():\r\n for entry in HOUSE[key]:\r\n if HOUSE[key][entry].running:\r\n cnt += 1\r\n return cnt", "title": "" }, { "docid": "cd08b67c9c0dbebb2658b37840b00773", "score": "0.5079039", "text": "def GetNumNodes(self):\n return sum(self.nodeCounts)", "title": "" }, { "docid": "59be923976cbd8dfa82f24d7de9056b1", "score": "0.5078792", "text": "def loadingPlugins(self):\n files=os.listdir(\"%s//Plugins//\" % (QtHelper.dirExec()))\n for x in files:\n \n curpath=os.path.join( \"%s//Plugins//\" % (QtHelper.dirExec()), x)\n if not os.path.isfile( curpath ):\n for y in os.listdir( curpath ):\n fullpath=os.path.join( \"%s//Plugins//%s//%s\" % (QtHelper.dirExec(), x, y) )\n if os.path.isfile(fullpath):\n \n # for dev only\n if y.endswith(\".exe\"):\n pluginId = x\n \n showMessageSplashscreen( self.tr('Loading plugin(s)...') ) \n\n self.trace(\"plugin %s detected\" % pluginId)\n p = PluginProcess(self, cmd='\"%s\"' % fullpath)\n p.DataReceived.connect(self.onPluginData)\n p.startPlugin()\n self.trace(\"plugin started\")", "title": "" }, { "docid": "b07b7aae57bd56218f582f772a261d52", "score": "0.5074652", "text": "def command_plugins():\n entrypoint: EntryPoint\n broken_libs = defaultdict(set)\n for index, entrypoint in enumerate(iter_entry_points(\"aiogram_cli.plugins\"), start=1):\n click.echo(f\"{index:3}.\", nl=False)\n\n dist = entrypoint.dist\n try:\n plugin = entrypoint.load()\n except Exception as e:\n broken_libs[dist.name].add(entrypoint.name)\n click.secho(f\" {entrypoint.name}\", fg=\"red\", err=True, nl=False)\n click.echo(f\" (by {dist.name} v{dist.version})\", err=True, nl=False)\n click.echo(f\" is unusable in due to {type(e).__name__}: {e}\")\n continue\n\n description = plugin.__doc__.split(\"\\n\")[0] if plugin.__doc__ else \"\"\n\n click.secho(f\" {entrypoint.name}\", fg=\"green\", nl=False)\n click.echo(f\" (by {dist.name} v{dist.version})\", nl=False)\n if description:\n click.echo(f\": {description}\")\n else:\n click.echo()\n\n if broken_libs:\n click.echo()\n click.secho(\"Broken libraries:\", fg=\"red\", err=True)\n for lib, plugins in broken_libs.items():\n click.echo(f\" {lib} ({', '.join(plugins)})\")", "title": "" }, { "docid": "9f3c71f4d6357c060e03587205b72d75", "score": "0.50698847", "text": "def _count_tests_known(self):\n return sum(scoreboard.total for scoreboard in self._all_suites)", "title": "" }, { "docid": "a62e68996334fe7a3e2e52544fa8bba4", "score": "0.5057423", "text": "def how_many(cls):\n print \"We have {:d} robots.\".format(cls.population)", "title": "" }, { "docid": "9e10a285a87337c747ff5e0c11714232", "score": "0.5050437", "text": "def get_num_nodes(self):\n return len(self.get_ip_addresses())", "title": "" }, { "docid": "310b10fdfaf4d7e706313a63a1c1ad07", "score": "0.5047634", "text": "def store_init_resources():\n world.count_resources('init')", "title": "" }, { "docid": "dcd1c0a46f2a020642efd5512214dfbf", "score": "0.5046801", "text": "def n(self):\n return len(self.register)", "title": "" }, { "docid": "ea0bfd94a5e2d9cf8d5fec9b294e21cf", "score": "0.5043264", "text": "def count_tests(module_name):\n\n #mod = __import__(module_name, fromlist=['Test'])\n #klass = getattr(mod, 'Test')\n #return inspect.getmembers(klass, predicate=inspect.ismethod)", "title": "" }, { "docid": "03d7f58d1559244faa0c836a5b62a6b4", "score": "0.5042983", "text": "def particlecount(self):\n return len(self.particles)", "title": "" }, { "docid": "75c75eef33681ef5d65de67a38caa96e", "score": "0.5041051", "text": "def show_count():\n 
print(count)", "title": "" }, { "docid": "2fb6a4b314809b648c22333e3d7de746", "score": "0.5037993", "text": "def list_plugins(self, client):\n\n plugins = client.get_client_plugins()\n\n enabled = [\n ' [enabled] {} {} - {}'.format(\n pn.__name__.replace('client_plugins.', ''),\n pn.Plugin.version,\n pn.Plugin.invocation\n )\n for pn in plugins\n if pn.Plugin.enabled\n ]\n\n disabled = [\n ' [disabled] {} {} - {}'.format(\n pn.__name__.replace('client_plugins.', ''),\n pn.Plugin.version,\n pn.Plugin.invocation\n )\n for pn in plugins\n if not pn.Plugin.enabled\n ]\n\n output = ['\\n< Client Plugins >'] + enabled + disabled + ['< /Client Plugins >']\n\n client.send_data('\\n\\n'.join(output) + '\\n\\n')\n return", "title": "" }, { "docid": "c95247e4cc6524c51b7ef8ddee195c0e", "score": "0.50311035", "text": "def discover_namespace_plugins(\n namespace_name: str = \"allennlp_plugins\",\n) -> Iterable[pkgutil.ModuleInfo]:\n try:\n reload = namespace_name in sys.modules\n\n namespace_module = importlib.import_module(namespace_name)\n\n if reload:\n importlib.reload(namespace_module)\n\n return pkgutil.iter_modules(\n namespace_module.__path__, namespace_module.__name__ + \".\" # type: ignore\n )\n except ModuleNotFoundError:\n return []", "title": "" } ]
19a4fb3923a20f6b31e95b0130b064d2
Gets an SQL insert statement suitable for finding the name. The SQL requires a parameter named {getVariableName()}. Returns the SQL as a non-null String.
[ { "docid": "e09e1ef33e7ddc15d8d5a38872a17353", "score": "0.7751664", "text": "def sql_insert(self):\n return 'INSERT INTO ' + self.get_table_name() + ' (id, name) ' + \\\n 'VALUES (:dim_id, :' + self.get_variable_name() + ')'", "title": "" } ]
[ { "docid": "797df8230cf2fca03ac17ee5362befc1", "score": "0.7569555", "text": "def get_insert_statement(self) -> str:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"$${self.name}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "797df8230cf2fca03ac17ee5362befc1", "score": "0.7569555", "text": "def get_insert_statement(self) -> str:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"$${self.name}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "797df8230cf2fca03ac17ee5362befc1", "score": "0.7569555", "text": "def get_insert_statement(self) -> str:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"$${self.name}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "883600c4a1822639fef6c90d86a8f71c", "score": "0.75408953", "text": "def get_insert_statement(self) -> str:\r\n if self.id is None:\r\n return (f\"INSERT INTO {self.table_name}(iso_639_1, name) VALUES($${self.iso_639_1}$$, \"\r\n f\"$${self.name}$$) ON CONFLICT (id) DO NOTHING\")\r\n else:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"$${self.iso_639_1}$$, \"\r\n f\"$${self.name}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "c1f0d194948bf4c69a43745dd12b8d76", "score": "0.7338113", "text": "def get_insert_statement(self) -> str:\r\n if self.id is None:\r\n return (f\"INSERT INTO {self.table_name}(iso_3166_1, name) VALUES($${self.iso_3166_1}$$, \"\r\n f\"$${self.name}$$) ON CONFLICT (id) DO NOTHING\")\r\n else:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"$${self.iso_3166_1}$$, \"\r\n f\"$${self.name}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "d21f2cb4cd34b1c0ccb2870a9af52339", "score": "0.7281517", "text": "def get_insert_statement(self) -> str:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"{self.movie_id}, \"\r\n f\"{self.cast_id}, \"\r\n f\"$${self.credit_id}$$, \"\r\n f\"ARRAY[$${'$$, $$'.join([x.lstrip() for x in self.character.split('/')])}$$], \"\r\n f\"{self.gender}, \"\r\n f\"$${self.name}$$, \"\r\n f\"{self.order}, \"\r\n f\"$${self.profile_path}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "8af0d988e2575e94170995bc64774934", "score": "0.7270971", "text": "def get_insert_statement(self) -> str:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"{self.movie_id}, \"\r\n f\"$${self.credit_id}$$, \"\r\n f\"$${self.department}$$, \"\r\n f\"{self.gender}, \"\r\n f\"$${self.job}$$, \"\r\n f\"$${self.name}$$, \"\r\n f\"$${self.profile_path}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "2dae62baf6ef9c623a91e1f363c2b3ee", "score": "0.72169405", "text": "def generate_insert_statement(self):\n insert_data = self.data.get('insert_data')\n _fields = \", \".join([\n field for field in self.data.get('insert_data').keys()])\n place_holder = \", \".join(['?' 
for value in insert_data.values()])\n return F\"INSERT INTO {self.data.get('table_name')}({_fields})\"\\\n F\"VALUES({place_holder})\"\n # \"insert into <>\"", "title": "" }, { "docid": "828dedb0aecc1a23b291981ce0e4b286", "score": "0.71323586", "text": "def getSQLToInsert(self):\n if self.__variables:\n raise ValueError(self.name + \" contains variables\")\n\n if not self.__rows:\n raise ValueError(\"No rows are specified in this drawn table\")\n\n return 'INSERT INTO {}({}) VALUES'.\\\n format(self.name, ', '.join(self.__columns)) + \\\n \", \".join(map(self.__tuple2str, self.__rows))", "title": "" }, { "docid": "998acf6f7b956ecb337c22b581596960", "score": "0.7114034", "text": "def get_insert_statement(self) -> str:\r\n return (f\"INSERT INTO {self.table_name} VALUES({self.id}, \"\r\n f\"$${self.name}$$, \"\r\n f\"$${self.poster_path}$$, \"\r\n f\"$${self.backdrop_path}$$) ON CONFLICT (id) DO NOTHING\")", "title": "" }, { "docid": "20a1c04eea7006be27def15e83e37895", "score": "0.67553383", "text": "def get_insert_query(table_name):\n return f\"INSERT INTO {table_name} (subject,predicate,object) VALUES (?,?,?) ON CONFLICT (subject,predicate,object) DO NOTHING\"", "title": "" }, { "docid": "a0b93c59a75de660b04b96b77fbf783e", "score": "0.64046633", "text": "def _insertQuery(self):\n values = ['%%(%s)s' % attr for attr in self.attributes\n if hasattr(self, attr)]\n query = 'INSERT INTO %s (%s) VALUES (%s)' % (self.tableName,\n ', '.join(self.boundAttributes()),\n ', '.join(values))\n return query", "title": "" }, { "docid": "ba3e58b406e0410e14c5ba7945bb3f45", "score": "0.63101053", "text": "def get_bulk_insert_statement(self):\n columns = self.table.get_insert_columns()\n column_count = len(self.table.get_insert_columns(False))\n insert_stmt = \"INSERT INTO \" + self.table_name()\n insert_stmt += \" (\" + columns + \")\"\n insert_stmt += \" VALUES (\"\n for _ in range(0, column_count):\n insert_stmt += \"?, \"\n insert_stmt = insert_stmt.rstrip(\", \") + \")\"\n return insert_stmt", "title": "" }, { "docid": "1a5ad9edcf44f9bcd3e9249f5b0ddb13", "score": "0.6163575", "text": "def get_catalog_insert_query():\n return f\"INSERT INTO catalog (value) VALUES (?) 
ON CONFLICT DO NOTHING\"", "title": "" }, { "docid": "9d237ec6af3cd4784c8e21479e921db7", "score": "0.6109515", "text": "def build_insert(table_name, attributes):\n sql = \"INSERT INTO %s\" %(table_name)\n column_str = u\"\"\n value_str = u\"\"\n for index, (key, value) in enumerate(attributes.items()):\n if index > 0:\n column_str += u\",\"\n value_str += u\",\"\n column_str += key\n value_str += value_to_sql_str(value)\n sql = sql + u\"(%s) VALUES(%s)\" %(column_str, value_str)\n return sql", "title": "" }, { "docid": "36cace5680f09ca8de110d999cfb20b5", "score": "0.6036644", "text": "def sql(db_name) -> str:\n return DbExist._sql_template.substitute(\n db_name=PgSql.to_string(db_name))", "title": "" }, { "docid": "576339fb3466ac5472f32542135c714b", "score": "0.60200024", "text": "def sql(self) -> str:\n sql = cast(str, self.args[0])\n\n # Collapse spaces\n sql = \" \".join(sql.split())\n\n return sql", "title": "" }, { "docid": "69ea33157634bd16019fb79b8802651f", "score": "0.59335166", "text": "def generate_sql_insert(tablename, columns, placeholder=\"%s\"):\n sql = \"insert into %s values (%s)\" % (tablename, ','.join([\"%s\"]*len(columns[0].split(','))))\n \n return sql.encode('utf8')", "title": "" }, { "docid": "129baabe22c365defb50dbab1e33f54f", "score": "0.5896588", "text": "def _sql(self, table, column):\n sql = self.select(\"sqlite_master\", \"sql\", type=\"table\").fetchone()[\"sql\"].partition(column)\n comma = sql[2].find(\",\") # index of first comma after column to remove\n if comma < 0: # last column in the create statement\n return sql[0][:sql[0].rfind(\",\")] + \")\" # need to cut off last comma before column\n return sql[0] + sql[2][comma+1:] # jump over column in last part", "title": "" }, { "docid": "8c26a8749408e16243cf23538144efd4", "score": "0.5868305", "text": "def sql_writer_insert(table_name, *args):\n header_list = []\n s_list = []\n for value in args:\n header_list.append(value)\n s_list.append('%s')\n\n # Convert\n header_list = ','.join(map(str, header_list))\n s_list = ','.join(map(str, s_list))\n\n sql = \"INSERT INTO \" + table_name + \" (\" + header_list + \") \" + \"VALUES\" + \" (\" + s_list + \")\"\n\n return sql", "title": "" }, { "docid": "52c679de8d6bd4af5b5ce6ea7bb1b9de", "score": "0.5836736", "text": "def insertTable(self,name,params):\n insert = self.sqls.find(id=\"insertSql\").find(id=name).string\n if insert:\n self._logger.info(\" insert into table \"+name)\n self.db.insert(insert,params)\n else:\n self._logger.error(\"did not find the table \"+name+\" when insert\")", "title": "" }, { "docid": "13d77f29db19c8d962dd04a7e2a29179", "score": "0.5829191", "text": "def createSQL(self):\n\t\tdefn = self._getColumnDefinition()\n\t\treturn cleanStatement('%(name)s %(data_type)s %(precision_def)s %(not_null_def)s %(default_def)s %(primary_key)s' % defn)", "title": "" }, { "docid": "51d148e0625495a3982706c5f4ceea8e", "score": "0.58226645", "text": "def sql(db_name, db_user, db_pass) -> str:\n return CreateDb._sql_template.substitute(\n db_name=PgSql.to_quoted_identifier(db_name),\n db_user=PgSql.to_quoted_identifier(db_user),\n db_pass=PgSql.to_string(db_pass)\n )", "title": "" }, { "docid": "4f5c1490e3321c98b97291684b26f8ab", "score": "0.58204526", "text": "def generate_insert_query(column_definitions: List[Tuple[str, str]],\n dbname: str,\n tablename: str) -> str:\n insert_line = f\"INSERT INTO {dbname}.{tablename}\"\n\n column_names = [col[0] for col in column_definitions]\n column_names = \", \".join(column_names)\n column_line = 
f\"({column_names})\"\n\n value_placeholder = \"%s\"\n number_of_columns = len(column_definitions)\n values = \", \".join([value_placeholder] * number_of_columns)\n values_line = f\"({values})\"\n\n insert_query = f\"{insert_line}\\n{column_line}\\nVALUES\\n{values_line}\"\n\n return insert_query", "title": "" }, { "docid": "3765284236e5ebc52bb74a62f5c97385", "score": "0.5811341", "text": "def get_sql(database_name, table_name, sql_id):\n db = get_xml_dict(database_name, table_name)\n sql = db.get(sql_id)\n return sql", "title": "" }, { "docid": "3765284236e5ebc52bb74a62f5c97385", "score": "0.5811341", "text": "def get_sql(database_name, table_name, sql_id):\n db = get_xml_dict(database_name, table_name)\n sql = db.get(sql_id)\n return sql", "title": "" }, { "docid": "52514320f34f4798a50ff2b3f73187bd", "score": "0.5767643", "text": "def scd2_new_insert(self) -> str:\n all_his_columns = self.get_his_col_names()\n all_stg_columns = self.get_staging_columns()\n all_stg_columns.extend(self.set_metadata_colums())\n exist_stat = self.get_staging_table_pk_col()\n filters = self.def_equal_pk_col()\n\n stmt = (self._history_table.insert().\n from_select(all_his_columns,\n select(all_stg_columns).where(\n ~exists(exist_stat).where(and_(\n *filters)))\n ))\n return str(stmt.compile(bind=self._con,\n compile_kwargs={\"literal_binds\": True}))", "title": "" }, { "docid": "7d0452b9c24970a7a69cf8d7ea929c34", "score": "0.5753541", "text": "def sql(self):\n sql = self.args[0]\n\n # Collapse spaces\n sql = \" \".join(sql.split())\n\n return sql", "title": "" }, { "docid": "1fc3ba13b407b3533a49447b88bd3cd2", "score": "0.572256", "text": "def sql_from_stmt(s):\n return str(s.compile(compile_kwargs={\"literal_binds\": True}))", "title": "" }, { "docid": "694a510cf03c314c5bd2a1da547f1a3d", "score": "0.56841576", "text": "def create_table_sql(self):\n name_part = self.column_name\n dtype_part = self.data_type.sqlite_dtype_name\n if self.nullable == True:\n nullable_part = None\n else:\n nullable_part = \"NOT NULL\"\n if self.default == None:\n default_part = None\n else:\n default_part = \"DEFAULT %s\" % self.__SQL__(self.default)\n return \" \".join([i for i in [name_part, dtype_part, nullable_part, default_part] if i])", "title": "" }, { "docid": "8423581719daffeac2c845e353d0921a", "score": "0.56686586", "text": "def get_sql(self):\n # reset arg index and args\n self.arg_index = 0\n self.args = {}\n\n # build the WHERE sql portion if needed\n if len(self.wheres):\n where = self.build_where_part(self.wheres)\n return 'WHERE {0} '.format(where)\n return ''", "title": "" }, { "docid": "b835664494af58f925876f31288f95ba", "score": "0.5667475", "text": "def get_insert_many_query(table_name):\n return f\"INSERT INTO {table_name} (subject,predicate,object) VALUES ? 
ON CONFLICT (subject,predicate,object) DO NOTHING\"", "title": "" }, { "docid": "3e03eebc38bfd1d63057eb7c2d79c2be", "score": "0.56349796", "text": "def statement_name(self) -> \"str\":\n return self._attrs.get(\"statementName\")", "title": "" }, { "docid": "55557d7e40360af6bcfe74ddaeb715bc", "score": "0.55802536", "text": "def sql_load_statement(self) -> str:\n sql_load_statement = (\n (TEMPLATES_DIR / \"hub_link_dml.sql\")\n .read_text()\n .format(**self.sql_placeholders)\n )\n\n self._logger.info(\"Loading SQL for link (%s) generated.\", self.name)\n self._logger.debug(\"\\n(%s)\", sql_load_statement)\n\n return sql_load_statement", "title": "" }, { "docid": "74e66fb86c14e87345122038d2a7756e", "score": "0.55529034", "text": "def insert(table_name, tuple_str):\n return \"INSERT IGNORE INTO %s VALUES(%s);\\n\" % (table_name, tuple_str)", "title": "" }, { "docid": "19e346df9c2ef012390e1d3f73975689", "score": "0.5536459", "text": "async def prep_single_insert_stmt(self):\n sql = \"\"\"INSERT into object (\n tac_id, session_id, name, color, country, grp, pilot, type,\n alive, coalition, first_seen, last_seen, lat, lon, alt, roll,\n pitch, yaw, u_coord, v_coord, heading, velocity_kts, impacted,\n impacted_dist, parent, parent_dist, updates, can_be_parent\n )\n VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14,\n $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28)\n\n RETURNING id\n \"\"\"\n stmt = await self.con.prepare(sql)\n return stmt", "title": "" }, { "docid": "f8df6ca9dc6d3e0a1d3420ed825d7ba9", "score": "0.5505148", "text": "def sql_select_get(self):\n return 'SELECT dim.id AS dim_id ' + \\\n 'FROM ' + self.get_table_name() + ' dim ' + \\\n 'WHERE dim.name = :' + self.get_variable_name() + ' '", "title": "" }, { "docid": "aa6db4cc06766b36ad784fe53653ef69", "score": "0.55033696", "text": "def insertrecord_sql(self, record):\n kvs = [[field, record[field]] for field, idx, cast, typename in self.getfields()]\n\n def postprocess(v):\n if v == None: return 'NULL'\n else: return \"'%s'\" % str(v)\n\n return \"insert into %s (%s) values (%s)\" % \\\n (self.tablename, ','.join([kv[0] for kv in kvs]), ','.join([postprocess(kv[1]) for kv in kvs]))", "title": "" }, { "docid": "ae43cb46cfa7b74d7e72bb2b65cdfe15", "score": "0.5497713", "text": "def compileInsertQuery(self, query, parameters={}, literal_binds=False):\n compile_kwargs = {\"literal_binds\": literal_binds}\n query = str(query.compile(compile_kwargs=compile_kwargs))\n values = []\n # example of the insert string:\n # INSERT INTO test_table (id, nullable_column, empty) VALUES (:id, :nullable_column, :empty)\n pattern_string = r'(INSERT INTO) (\\S+).*\\((.*?)\\).*(VALUES).*\\((.*?)\\)(.*\\;?)'\n\n res = re.findall(pattern_string, query, re.IGNORECASE | re.DOTALL)\n if len(res) > 0:\n # regexp matching result should look like this:\n # `id, nullable_column, empty`\n intos = str(res[0][2])\n count = 1\n # so we need to split it by comma value to iterate over\n for into_value in intos.split(', '):\n values.append(parameters[into_value])\n query = query.replace(':' + into_value, '$' + str(count))\n count += 1\n return query, tuple(values)", "title": "" }, { "docid": "bf6a874ae291d2eacac645cac0c2593a", "score": "0.5493709", "text": "def _insert(self, table, **kwargs):\n\n # Note MySQL uses \"%s\", whereas SQLite uses \"?\".\n sql = (\"INSERT INTO %s (\" % table) \\\n + \", \".join(kwargs.keys()) + \") VALUES (\" \\\n + \", \".join([\"%s\"] * len(kwargs.keys())) + \")\"\n\n # If there are more than one rows to 
insert, \"unpack\" the keyword\n # argument iterables and zip them up. This effectively rearranges\n # a list of columns into a list of rows.\n try:\n parameters = list(zip(*kwargs.values()))\n except TypeError:\n parameters = [kwargs.values()]\n\n return (sql, parameters)", "title": "" }, { "docid": "ebfb23290ca2f9add1c01195d012a24b", "score": "0.54779017", "text": "def compileInsertQuery(self, query, parameters={}, literal_binds=False):\n compile_kwargs = {\"literal_binds\": literal_binds}\n query = str(query.compile(compile_kwargs=compile_kwargs))\n values = []\n # example of the insert string:\n # INSERT INTO test_table (id, nullable_column, empty) VALUES (:id, :nullable_column, :empty)\n pattern_string = r'(INSERT INTO) (\\S+).*\\((.*?)\\).*(VALUES).*\\((.*?)\\)(.*\\;?)'\n\n res = re.findall(pattern_string, query, re.IGNORECASE | re.DOTALL)\n if len(res) > 0:\n # regexp matching result should look like this:\n # `id, nullable_column, empty`\n intos = str(res[0][2])\n\n # so we need to split it by comma value to iterate over\n for into_value in intos.split(', '):\n values.append(parameters[into_value])\n query = query.replace(':' + into_value, '?')\n return query, tuple(values)", "title": "" }, { "docid": "583d313c769733979daf3e19a13ed7f1", "score": "0.5476616", "text": "def _insert(self, table, **kwargs):\n\n sql = (\"INSERT INTO %s (\" % table) \\\n + \", \".join(kwargs) + \") VALUES (\" \\\n + \", \".join([\"?\"] * len(kwargs)) + \")\"\n\n # If there are more than one rows to insert, \"unpack\" the keyword\n # argument iterables and zip them up. This effectively rearranges\n # a list of columns into a list of rows.\n try:\n parameters = list(zip(*list(kwargs.values()))) # FIXME\n except TypeError:\n parameters = [list(kwargs.values())]\n\n return (sql, parameters)", "title": "" }, { "docid": "b11b62137524529585f99baee58e92c0", "score": "0.54089713", "text": "def db_sql_get(self) -> str:\n return self._db_sql", "title": "" }, { "docid": "246f45a004e91979d1ab068fcb75366c", "score": "0.5378781", "text": "def INSERT_TABLE(name, vars, **kwargs):\n conn = None\n commit = to_commit_or_not_to_commit(False, kwargs) # Default set to False since many insertion may done in a short amount of time\n conn, curs = return_conn_curs_after_kwargs(kwargs)\n\n #try:\n curs.execute(\"INSERT INTO %s VALUES (%s)\" % (name, t2s.table_in2str(vars)))\n #except sqlite3.OperationalError:\n # print(\"INSERT INTO %s VALUES (%s)\" % (name, t2s.table_in2str(vars)))\n # curs.execute(\"INSERT INTO %s VALUES (%s)\" % (name, t2s.table_in2str(vars)))\n if commit and conn:\n conn.commit()\n should_i_close_now(conn, kwargs)", "title": "" }, { "docid": "7397a59e73f5fa02de9621fdb1362955", "score": "0.53753614", "text": "def construct_domain_stmt(data_dict):\n stmt = INSERT_INTO_DOMAIN.format(data_dict[\"HitID\"],\n data_dict[\"DomainID\"],\n data_dict[\"Name\"],\n data_dict[\"Description\"])\n return stmt", "title": "" }, { "docid": "1e7aa442949e54487b7b4d20497e29eb", "score": "0.5337269", "text": "def sqlsafe(name):\n n = name.find(\"'\")\n if n > -1:\n name = name[:n+1] + \"'\" + name[n+1:]\n return name", "title": "" }, { "docid": "61fddd75ddc35d24744fa8d806536592", "score": "0.53321606", "text": "def sql(table, columns) -> str:\n return LoadFromCsv._sql_template.substitute(\n table=table, columns=columns)", "title": "" }, { "docid": "20401ac8879316de0f7412647230622e", "score": "0.53260314", "text": "def CreateStatement(self):\n pass", "title": "" }, { "docid": "523612a99444853f36b4d6064520094e", "score": "0.5274776", 
"text": "def get_id_query_statement(self) -> str:\r\n return f\"SELECT id FROM {self.table_name} WHERE iso_639_1='{self.iso_639_1}'\"", "title": "" }, { "docid": "7400be4d3620247baa2e29a5d1fae041", "score": "0.526111", "text": "def get_id_query_statement(self) -> str:\r\n return f\"SELECT id FROM {self.table_name} WHERE iso_3166_1='{self.iso_3166_1}'\"", "title": "" }, { "docid": "1fce583e56dcab8c81817be1f3f470e1", "score": "0.5250217", "text": "def get_sql_create_table_statement(self):\n db_field_list = []\n\n for field_dict in self.extra_fields:\n db_field_list.append('{field_name} {db_data_type}'.format(\n **field_dict))\n\n for field_dict in self.mapping_list:\n db_field_name = field_dict.get(\n 'db_field_name', field_dict['field_name'])\n db_field_list.append('{0} {1}'.format(\n db_field_name, field_dict['db_data_type']))\n\n return 'CREATE TABLE {schema}.{table_name} ({fields})'.format(\n schema=self._schema_name,\n table_name=self.table_name,\n fields=', '.join(db_field_list)\n )", "title": "" }, { "docid": "00acae6bbe68753f86a7f67df0e85b20", "score": "0.52487254", "text": "def insert_query(**kw):\n with current_app.app_context():\n result = db.execute(text(fetch_query_string('select_query_where_name.sql')), **kw).fetchall()\n if result:\n kw['query_id'] = result[0]['id']\n else:\n result = db.execute(text(fetch_query_string('insert_query.sql')), **kw)\n kw['query_id'] = result.lastrowid\n db.execute(text(fetch_query_string('insert_query_node.sql')), **kw)", "title": "" }, { "docid": "14e2d416c1a85b210265994c558289c7", "score": "0.52419233", "text": "def insert(self, stmt, *args):\n execute = self.db.execute(stmt, args)\n return execute\n # event_id = execute.lastrowid\n # return event_id", "title": "" }, { "docid": "5c36b70e0b7b6a7ae4ab2a4dbda91ba9", "score": "0.5225209", "text": "def block_name(name, gm='', comment=''):\n\n sql = \"\"\"\n INSERT INTO reject_name\n VALUES (?, ?, ?, ?);\n \"\"\"\n\n now = datetime.datetime.now()\n THE_CURSOR.execute(sql, (name.lower(), now, gm, comment))", "title": "" }, { "docid": "e0d239f8ebe843d97ea99a37a7f99769", "score": "0.5223835", "text": "def sql_position_insertion(self):\n string = \"INSERT INTO `positions` (`protocol`, `deviceid`, `servertime`, \"\\\n \"`devicetime`, `valid`, `latitude`, `longitude`, `altitude`, \"\\\n \"`speed`, `course`, `address`, `attributes`, `accuracy`, `network`) \"\\\n \"VALUES ('{}', {}, '{}', '{}', {}, {}, {}, {}, {}, {}, '{}', \"\\\n \"'{}', {}, 'null')\" \\\n .format(self.protocol,\\\n str(self.deviceid),\\\n time.strftime(\"%Y-%m-%d %H:%M:%S\", self.servertime),\\\n time.strftime(\"%Y-%m-%d %H:%M:%S\", self.devicetime),\\\n self.valid,\\\n str(self.latitude),\\\n str(self.longitude),\\\n str(self.altitude),\\\n str(self.speed),\\\n str(self.course),\\\n self.address,\\\n self.attributes,\\\n str(self.accuracy))\n\n return string", "title": "" }, { "docid": "1c9404ab0c7ebde46dd408e3437f5705", "score": "0.52114934", "text": "def linsert(self, name, where, refvalue, value):\r\n return self.execute_command('LINSERT', name, where, refvalue, value)", "title": "" }, { "docid": "3aedcf5ab2ae3efbcc23812603ca1eb0", "score": "0.5191632", "text": "def get_create_sql(self):\n pk = [f.field_name for f in self.key_fields]\n if self.path:\n pk += [\"path\"]\n pk += [\"ts\"]\n r = [\n \"CREATE TABLE IF NOT EXISTS %s (\" % self._get_raw_db_table(),\n \",\\n\".join(\" %s %s\" % (n, t) for n, t in self.iter_fields()),\n \") ENGINE = MergeTree(date, (%s), 8192)\" % \", \".join(pk),\n ]\n return \"\\n\".join(r)", "title": "" }, { 
"docid": "115ddf541a6a1bec68e3f44785880ce7", "score": "0.51881915", "text": "def get_sql(self):\n self.tables = [self.model.table(),]\n where = u''\n order = u''\n limit = u''\n \n # WHERE instructions\n if len(self.where_fields) > 0:\n where_clauses = []\n for key, value in self.where_fields.iteritems():\n where_clauses.append(self.get_where_clause(key, value))\n where = u' WHERE %s' % ' AND '.join(where_clauses)\n \n # ORDER instructions (only will be used for SELECT)\n if len(self.order_fields):\n order_clauses = []\n for key, value in self.order_fields.iteritems():\n order_clauses.append(u'%s %s' % (key, value))\n order = u' ORDER BY %s' % ' AND '.join(order_clauses)\n \n # LIMIT instructions\n if self.slice.stop != None and self.slice.stop > 0:\n limit = u' LIMIT %d' % self.slice.stop\n \n # SELECT statement if operation not set by delete(), insert(), etc.\n if not self.operation:\n fields = [u'%s.%s' % (self.model.table(), f) for f in self.fields]\n self.operation = u\"SELECT %s FROM %s\" % \\\n (u', '.join(fields), u', '.join(self.tables))\n else:\n # No order for DELETE, UPDATE, etc.\n order = u''\n \n return u'%s%s%s%s;' % (self.operation, where, order, limit)", "title": "" }, { "docid": "7fd56f5251fcce08a21c99d15ba72ba9", "score": "0.518464", "text": "def buildQueryFromDictionary(self, tableName, dict):\n queryS = \"INSERT INTO \"+tableName\n queryH = '('\n queryV = 'VALUES ('\n for key, val in dict.items():\n queryH += str(key)+','\n queryV += str(val)+','\n queryH = queryH[:-1]\n queryV = queryV[:-1]\n queryH += ')'\n queryV += ')'\n return queryS+queryH+' '+queryV", "title": "" }, { "docid": "5d69dc332c2f2655e74c886f8be0b08c", "score": "0.5168678", "text": "def compile_insert(self, query, values):\n # Essentially we will force every insert to be treated as a batch insert which\n # simply makes creating the SQL easier for us since we can utilize the same\n # basic routine regardless of an amount of records given to us to insert.\n table = self.wrap_table(query.from__)\n\n if not isinstance(values, list):\n values = [values]\n\n columns = self.columnize(values[0].keys())\n\n # We need to build a list of parameter place-holders of values that are bound\n # to the query. 
Each insert should have the exact same amount of parameter\n # bindings so we can just go off the first list of values in this array.\n parameters = self.parameterize(values[0].values())\n\n value = [\"(%s)\" % parameters] * len(values)\n\n parameters = \", \".join(value)\n\n return \"INSERT INTO %s (%s) VALUES %s\" % (table, columns, parameters)", "title": "" }, { "docid": "b80dd5f0b3c387d5793253cd5c82932b", "score": "0.5153982", "text": "def to_sql_statement_create(self):\n\n return sql_text(f\"CREATE POLICY {self.signature} on {self.on_entity} {self.definition}\")", "title": "" }, { "docid": "110758c82672c4d81b15ec74902e2bf0", "score": "0.51271755", "text": "def sql_select_search(self, name):\n return 'SELECT dim.id AS dim_id ' + \\\n 'FROM ' + self.get_table_name() + ' dim ' + \\\n 'WHERE ' + self.get_dialect().sql_wildcard_query('UPPER(dim.name) ', 'UPPER(:' + self.get_variable_name() + ')', name)", "title": "" }, { "docid": "29ffa3ff70352c01d188369ca59171c4", "score": "0.51101494", "text": "def insert_replace_table_name(stmt, table_name):\r\n name, columns = insert_find_table_info_tokens(stmt)\r\n name.value = table_name", "title": "" }, { "docid": "e3bd571f62b1308d1a6c5451e56763b9", "score": "0.5082314", "text": "def insert(self, **kwargs):\n if kwargs:\n column = kwargs.get('column')\n tableName = kwargs.get('tableName')\n if type(column) is dict:\n \n keys = list(column.keys())\n values = list(column.values())\n \n values = ','.join([i for i in values])\n column=MysqlConnector.addTicks(keys)\n query = \"INSERT INTO \"+ MysqlConnector.addTicks(tableName) + '('+ ','.join(str(i) for i in column) +')' + \" VALUES (\"+values +');'\n \n execute = kwargs.get(\"execute\")\n if execute:\n self.executeQuery(query)\n logging.debug(f\" Inserting successful with query: {query}\")\n logging.info(\n f\" Inserting into {tableName} values :{kwargs['column']}\"\n )\n else:\n logging.debug(\n f' Returning query as \"execute\" field passed as {execute}'\n )\n return query \n else:\n logging.critical(\n f\" Column field,value should be of type dict\")\n \n else:\n logging.debug(f\" No Parameters passed \")", "title": "" }, { "docid": "61fdd572710fb1ab8e2f7fef8606289f", "score": "0.5075374", "text": "def statement_name(self, statement_name: \"str\"):\n self._attrs[\"statementName\"] = statement_name", "title": "" }, { "docid": "376f393faeda6efd239c3945cf8f4d78", "score": "0.50689656", "text": "def getSQL(self):\n if type(self.date) == int and type(self.country) == str:\n return self.sql % (self.country, self.date)\n else:\n raise Exception('Error: You cannot call getSQL() without setting a country as string and date as int.')", "title": "" }, { "docid": "7e4bcad1c1f483e44b31536ba07fdf63", "score": "0.50585675", "text": "def build_query_from_table(name):\n return \"SELECT * FROM {0}\".format(name)", "title": "" }, { "docid": "577fc4341886c0fba82e6844aa912036", "score": "0.504281", "text": "def getSQLToCreate(self):\n sql = 'CREATE TABLE {}('.format(self.name) + ', '.join(\n [n + ' ' + t + c for n, t, c in\n zip(self.__columns, self.__types, self.__localConstraints)])\n\n # Add global constraints\n sql += ')' if not self.__globalConstraints else \\\n self.__globalConstraints\n return sql", "title": "" }, { "docid": "317d53b1c6eaae57506553b830608bfa", "score": "0.50362325", "text": "def get_sql(self):\n sql = ''\n if self.limit and self.limit > 0:\n sql += 'LIMIT {0} '.format(self.limit)\n if self.offset and self.offset > 0:\n sql += 'OFFSET {0} '.format(self.offset)\n return sql", "title": "" }, { 
"docid": "d2027b181cec6efeb11c5c8cd9f00bca", "score": "0.50137883", "text": "def _build_triple_sql_command(self, subject, predicate, obj, context, quoted):\n stmt_table = (\n self.tables[\"quoted_statements\"]\n if quoted\n else self.tables[\"asserted_statements\"]\n )\n\n triple_pattern = statement_to_term_combination(\n subject,\n predicate,\n obj,\n context,\n )\n command = stmt_table.insert()\n\n if quoted:\n params = {\n \"subject\": subject,\n \"predicate\": predicate,\n \"object\": obj,\n \"context\": context.identifier,\n \"termComb\": triple_pattern,\n \"objLanguage\": isinstance(obj, Literal) and obj.language or None,\n \"objDatatype\": isinstance(obj, Literal) and obj.datatype or None\n }\n else:\n params = {\n \"subject\": subject,\n \"predicate\": predicate,\n \"object\": obj,\n \"context\": context.identifier,\n \"termComb\": triple_pattern,\n }\n return command, params", "title": "" }, { "docid": "740a35fa5c6409a163d8ec52f7bb3ac5", "score": "0.501309", "text": "def construct_insert_table_sql(db_table, db_fields):\n\n # logger.debug(F\"DB fields: {db_fields}\")\n db_insert_sql_str = '''INSERT INTO ''' + db_table + ''' ('''\n\n field_cnt = 0\n # logger.debug(F\"Length of passed field list: {len(db_fields)}\")\n for field in db_fields:\n field_cnt += 1\n db_insert_sql_str += field\n # logger.debug(F\"...field count: {field_cnt}, field: {field}\")\n if field_cnt != len(db_fields): # don't add the comma for the last field\n db_insert_sql_str += ', '\n else:\n db_insert_sql_str += ') VALUES ('\n\n # Insert placeholders for all fields with a trailing \",\" until the last field; in this case, remove the\n # trailing \",\" and terminate.\n ph_range = range(0, len(db_fields) - 1)\n for field_ph in ph_range:\n db_insert_sql_str += '''?,'''\n\n db_insert_sql_str += '''?)'''\n\n logger.debug(F\"SQL INSERT statement: '{db_insert_sql_str}'\")\n\n return db_insert_sql_str", "title": "" }, { "docid": "e3ab4006cb7c2bd1e44e0ef74c6a36dd", "score": "0.50115865", "text": "def generate_query(table_name, primary_key, table_content, action='insert',):\r\n query_template_insert = 'INSERT INTO ' + table_name + ' ('\r\n query_template_values = ') VALUES ('\r\n query_template_update = 'UPDATE ' + table_name + ' SET '\r\n query_template_where_primary_key = ' WHERE ' + '`primary_key` = \"' + primary_key + '\"'\r\n\r\n if action is 'insert':\r\n table_content['primary_key'] = primary_key\r\n for table_field, table_value in table_content.iteritems():\r\n if table_value is not None:\r\n query_template_insert += '`{}`, '.format(table_field)\r\n query_template_values += '\"{}\", '.format(table_value)\r\n\r\n return query_template_insert[:-2] + query_template_values[:-2] + ')'\r\n\r\n elif action is 'update':\r\n for table_field, table_value in table_content.iteritems():\r\n if table_value is not None:\r\n query_template_update += '`{}` = \"{}\", '.format(table_field, table_value)\r\n\r\n return query_template_update[:-2] + query_template_where_primary_key", "title": "" }, { "docid": "96b6f5c0bbd6a6585f094bd1e675c6bd", "score": "0.50077736", "text": "def add_statement(self):\r\n self.current_word = self.current_word + 1\r\n var1 = self.split_words[self.current_word]\r\n self.current_word = self.current_word + 1\r\n\r\n var2 = self.split_words[self.current_word]\r\n self.current_word = self.current_word + 1\r\n\r\n final_add_statement = StatementAdd(str(var1), str(var2))\r\n my_print(final_add_statement)\r\n return final_add_statement", "title": "" }, { "docid": "fceaa023d9a4345e66ce1c14cbd6e0d4", "score": 
"0.50055534", "text": "def instruction_create_field(self, field_name, constraint):\n sql_field = type(self).SQL_TYPES[constraint.name_type]\n if constraint.has(\"auto_increment\"):\n sql_field = \"SERIAL\"\n if constraint.has(\"pkey\"):\n sql_field += \" PRIMARY KEY\"\n instruction = field_name + \" \" + sql_field\n return instruction", "title": "" }, { "docid": "17257d530ac42ed7655d3da6f30b9bd5", "score": "0.49991077", "text": "def to_sql(query):\n where = PostgreSQLGateway.to_sql_where(query.get_where())\n action = query.get_action()\n params = query.get_params()\n fields = query.get_select()\n param_fields, param_values = PostgreSQLGateway.to_sql_params_tuple(params)\n sql_params = {\n # all\n \"table_name\": query.get_from(),\n \"fields\": \", \".join(fields),\n # SELECT, INSERT\n \"param_fields\" : param_fields,\n # INSERT\n \"param_values\" : param_values,\n # SELECT, DELETE, UPDATE\n \"where\" : \"WHERE %s\" % where if where else \"\",\n # UPDATE\n \"params\" : PostgreSQLGateway.to_sql_params(params),\n \"returning\" : \"RETURNING %s\" % \", \".join(fields) if fields else \"\",\n }\n\n if action == ACTION_CREATE:\n sql = PostgreSQLGateway.SQL_INSERT_STR % sql_params\n elif action == ACTION_GET:\n sql = PostgreSQLGateway.SQL_SELECT_STR % sql_params\n elif action == ACTION_UPDATE:\n sql = PostgreSQLGateway.SQL_UPDATE_STR % sql_params\n elif action == ACTION_DELETE:\n sql = PostgreSQLGateway.SQL_DELETE_STR % sql_params\n else:\n raise Exception, \"Not implemented\"\n\n return sql", "title": "" }, { "docid": "e6b31c3e37634d86dc5234b4d87c9b36", "score": "0.49963412", "text": "def get_insert_player(player_id, games, date):\n con.execute(f\"insert into players (id ,Games, Created_date) values ('{player_id}', '{games}', '{date}')\")\n con.commit()\n return \"Inserted successfully\"", "title": "" }, { "docid": "60d10c6ff75295f4e89c0bbd3903bb0f", "score": "0.49950036", "text": "def sql(self):\n\n print \"Expression: \", self.expression()\n\n sql = \"SELECT \" + ','.join(self.expression()) + \" FROM ob\"\n return sql", "title": "" }, { "docid": "6f8678a0be84af5a20f78b1c292d4bbd", "score": "0.49947834", "text": "def get_insert_game(title, platform):\n con.execute(f\"insert into games (Title, Platform) values ('{title}', '{platform}')\")\n con.commit()\n return \"Inserted successfully\"", "title": "" }, { "docid": "747f20dc65c02177d31b94d3cc1cafc4", "score": "0.4991973", "text": "def ensure(self, name):\n select = self.sql_select_get()\n args = DbMapSqlParameterSource().add_value(self.get_variable_name(), name)\n result = self.get_db_connector().get_odbc_template().query_for_list(select, args, rch=True)\n if len(result) == 1:\n return result[0].get(\"dim_id\")\n \n id_ = self.next_id()[0]\n args.add_value(\"dim_id\", id_)\n self.get_db_connector().get_odbc_template().update(self.sql_insert(), args)\n return id_", "title": "" }, { "docid": "0dc0869ffadecbf310fac178fb111600", "score": "0.49882898", "text": "def createTable(self,name):\n create = self.sqls.find(id=\"createSql\").find(id=name).string\n if create:\n self._logger.info(\" create table \"+name)\n self.db.execute(create)\n else:\n self._logger.error(\"error occured when create table \"+name)", "title": "" }, { "docid": "766868fc3eabc1e382622c50baefbc11", "score": "0.49764967", "text": "def insert_stmt(self, table_name, val_str):\n ret = True\n self.db_lock.acquire()\n ins_str = \"insert into \" + table_name + \" values \" + val_str\n self.logger.debug(ins_str)\n\n if not self.execute_sql(ins_str):\n self.logger.warning(\"Trouble 
inserting data to %s.\\n\" % table_name)\n self.logger.warning(\"val str %s\\n\" % val_str)\n ret = False\n self.db_lock.release()\n\n return ret", "title": "" }, { "docid": "155cb334ad9a4b06aeb052ced32aa11d", "score": "0.49757108", "text": "def get_statement(self, position: int) -> stmt.Statement:", "title": "" }, { "docid": "b2a01c69517efec38550aeaa51077ef2", "score": "0.4958225", "text": "def insert_find_table_info_tokens(stmt):\r\n query_type_token = stmt.token_next_by_type(0, ptokens.DML)\r\n search_start_index = stmt.token_index(query_type_token) + 1\r\n\r\n # The parser sucks so we have to take care of two cases; grr should've learned\r\n # to write my own parser\r\n function = stmt.token_next_by_instance(search_start_index, psql.Function)\r\n identifier = stmt.token_next_by_instance(search_start_index, psql.Identifier)\r\n\r\n # If there's no function, or the first identifier comes before the first function\r\n if function is None or (identifier is not None\r\n and stmt.token_index(identifier) < stmt.token_index(function)):\r\n parenthesis = function.token_next_by_instance(stmt.token_index(identifier) + 1,\r\n psql.Parenthesis)\r\n else: # We have a function\r\n identifier = function.token_next_by_instance(0, psql.Identifier)\r\n parenthesis = function.token_next_by_instance(0, psql.Parenthesis)\r\n\r\n name = identifier.token_next_by_type(0, ptokens.Name)\r\n columns = find_tokens_by_instance(parenthesis.tokens, psql.Identifier, True)\r\n return name, columns", "title": "" }, { "docid": "cafa2783cc71da76316d55a17457c3a7", "score": "0.49576822", "text": "def sql_select_names(self):\n return 'SELECT name FROM ' + self.get_table_name() + ' ORDER BY name'", "title": "" }, { "docid": "6867f780590e700f569ab6347dfdc69d", "score": "0.4954662", "text": "def __str__(self):\n return create_table_query_generator.generate_query(self)", "title": "" }, { "docid": "6e8e091c4bc62e14e5a74cc88af98999", "score": "0.4953806", "text": "def insert(self, table_name, values, columns=\"\"):\n col_string = \"\"\n if columns != \"\":\n columns = \", \".join(columns)\n col_string = f\"({columns})\"\n values = \", \".join([ \"'\"+str(value)+\"'\" if type(value) is str else str(value) for value in values ])\n val_string = f\"({values})\"\n insert_string = f\"INSERT INTO {table_name} {col_string} VALUES {val_string};\"\n LOGGER.debug(insert_string)\n with self.connection.cursor().execute(insert_string):\n LOGGER.debug(\"Insert Succeeded.\")", "title": "" }, { "docid": "bafa2e5a34ceabf4640aa11a317f4ec2", "score": "0.4952808", "text": "def register(self, name):\n try:\n connection = self.connect()\n c = connection.cursor()\n \n name = self.check_reg_name(name)\n c.execute(\"INSERT INTO student (name) VALUES (?)\", (name,))\n \n connection.commit()\n return name\n except:\n output = \"\"\"\n \\n\\n{0} is already in the database. 
I am going to assume \\n{0} is your name.\n \"\"\".format(name)\n geek_print(output)\n return name", "title": "" }, { "docid": "261b33c472b4118c90d0d260f9f2af72", "score": "0.4950554", "text": "def to_sql(self, *args):\n if self.is_binary_operator:\n prefix = ''\n sep = ' ' + self.name + ' '\n else:\n prefix = self.name\n sep = ', '\n arg_strs = (arg_to_sql(arg) for arg in self.args if arg != NO_VALUE)\n return prefix + '(' + sep.join(arg_strs) + ')'", "title": "" }, { "docid": "4df0faa0c7c7e92fd845052f924f9b83", "score": "0.4949721", "text": "def insert(self, name=None, type=None, path=None):\n self.cursor.execute(self.insert_sql, (name, type, path))", "title": "" }, { "docid": "da27a5e5c267787dc94243b8f1acc79b", "score": "0.49419463", "text": "def registerPlayer(name):\n query = (\"INSERT INTO \" + PLAYER_TABLE + \" (player_name) VALUES (%s)\")\n qry_params = (name, )\n queryExecutor(query, qry_params)", "title": "" }, { "docid": "0e126c456e83441919537f0b7f92a74b", "score": "0.49383527", "text": "def insert ( student_name , father_name , Class , date_of_birth , roll_no , tution_fee , annual_fee , examination_fee ):\n \n with OBJ:\n obj.execute(\"INSERT INTO students VALUES('{}','{}','{}','{}','{}',{},{},{})\".format(student_name ,father_name,Class,date_of_birth ,roll_no,tution_fee,annual_fee,examination_fee))", "title": "" }, { "docid": "71c6279556542c5451e05f6491ab10a5", "score": "0.49367046", "text": "def insert(self, data, table_name=None):", "title": "" }, { "docid": "14cd807433947761bccf1a0b4726f221", "score": "0.49254194", "text": "def _prepare_script_for_sqlplus(self, script_name):\n # getting sqlplus headers and setting output csv filename in it\n try:\n template_file = Path(self.dwh_sys.sql_tmpl_dir) / self.main_template\n output_csv_file = self.data_dir / self.csv_name_format.format(Path(script_name).stem,\n self.date.strftime(\"%Y%m%d\"))\n output_crc_file = self.data_dir / self.crc_name_format.format(Path(script_name).stem,\n self.date.strftime(\"%Y%m%d\"))\n template = template_file.read_text(encoding=\"utf8\")\n\n script_file = Path(self.dwh_sys.sql_tmpl_dir) / script_name\n script_body = script_file.read_text(encoding=\"utf8\").format(**self.params)\n\n script = template.format(output_csv_file, script_body, output_crc_file)\n\n full_script = \"\\n\".join([\"--\" + self.dwh_sys.session_uuid, script])\n\n # saving script in temp directory\n script_path = Path(self.dwh_sys.tmp_dir) / '{}.sql'.format(script_file.stem)\n script_path.write_text(full_script, encoding=\"utf8\")\n return script_path, output_csv_file, output_crc_file\n except Exception as e:\n raise ScriptPrepException(str(e), script_name)", "title": "" }, { "docid": "a59fa5cc349ccc71af26743d033cc251", "score": "0.49136114", "text": "def _get_select_statement(self):\r\n fields = self.model._columns.keys()\r\n if self._defer_fields:\r\n fields = [f for f in fields if f not in self._defer_fields]\r\n elif self._only_fields:\r\n fields = self._only_fields\r\n db_fields = [self.model._columns[f].db_field_name for f in fields]\r\n return 'SELECT {}'.format(', '.join(['\"{}\"'.format(f) for f in db_fields]))", "title": "" }, { "docid": "a1527722f11deb1d9dcc43cf330267a1", "score": "0.49072057", "text": "def statement(self):\n return self._cursor.statement", "title": "" }, { "docid": "c41676451279708b0219274e3e9bb09d", "score": "0.4907143", "text": "def sql(self) -> Optional[pulumi.Input['GcpIntegrationsSqlArgs']]:\n return pulumi.get(self, \"sql\")", "title": "" }, { "docid": "c41676451279708b0219274e3e9bb09d", "score": 
"0.4907143", "text": "def sql(self) -> Optional[pulumi.Input['GcpIntegrationsSqlArgs']]:\n return pulumi.get(self, \"sql\")", "title": "" }, { "docid": "12a395919c04d3d52d3191d18c87c054", "score": "0.48991546", "text": "def get_sql(self, debug=False, use_cache=True):\n # TODO: enable caching\n # if self.sql and use_cache and not debug:\n # return self.sql\n\n # auto alias any naming collisions\n self.check_name_collisions()\n\n # if debugging, return the debug formatted sql\n if debug:\n return self.format_sql()\n\n # build each part of the query\n sql = ''\n sql += self.build_withs()\n sql += self.build_select_fields()\n sql += self.build_from_table()\n sql += self.build_joins()\n sql += self.build_where()\n sql += self.build_groups()\n sql += self.build_order_by()\n sql += self.build_limit()\n\n # remove any whitespace from the beginning and end of the sql\n self.sql = sql.strip()\n\n return self.sql", "title": "" }, { "docid": "28b1c6b59fe3b129f323aba6063234d3", "score": "0.4892272", "text": "def statement():\n return first(head_statement() , chain_statement()).name('statement')", "title": "" }, { "docid": "6255554e3e8ecdd36de4d0659c5ac156", "score": "0.48918658", "text": "def create_table_sql(self):\n cmd_CREATE_TABLE = \"CREATE TABLE %s\" % self.table_name\n cmd_DATATYPE = \",\\n\\t\".join([column.create_table_sql() for column in self.columns.values()])\n \n primary_key_columns = [column.column_name for column in self.columns.values() if column.primary_key]\n if len(primary_key_columns) == 0:\n cmd_PRIMARY_KEY = \"\"\n else:\n cmd_PRIMARY_KEY = \",\\n\\tPRIMARY KEY (%s)\" % \", \".join(primary_key_columns)\n \n template = \"%s (\\n\\t%s%s);\" \n return template % (cmd_CREATE_TABLE,\n cmd_DATATYPE,\n cmd_PRIMARY_KEY,)", "title": "" } ]
0ec645bd7a0883f088fd2421ec337771
Enables TLS on a domain using a certificate managed by Fastly. DNS records need to be modified on the domain being secured, in order to respond to the ACME domain ownership challenge.
[ { "docid": "8ee7609bb5435519e1fcd4dac819054c", "score": "0.0", "text": "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate_authority: Optional[pulumi.Input[str]] = None,\n common_name: Optional[pulumi.Input[str]] = None,\n configuration_id: Optional[pulumi.Input[str]] = None,\n domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n force_destroy: Optional[pulumi.Input[bool]] = None,\n force_update: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "title": "" } ]
[ { "docid": "cd5cd5edb2b9bb7e31879024608cb884", "score": "0.59300214", "text": "def test_tls(self):\n self.setupTrafficDirectorGrpc()\n self.setupSecurityPolicies(server_tls=True,\n server_mtls=False,\n client_tls=True,\n client_mtls=False)\n\n test_server: _XdsTestServer = self.startSecureTestServer()\n self.setupServerBackends()\n test_client: _XdsTestClient = self.startSecureTestClient(test_server)\n\n self.assertTestAppSecurity(_SecurityMode.TLS, test_client, test_server)\n self.assertSuccessfulRpcs(test_client)", "title": "" }, { "docid": "1987d8f5e726f91829cb306bbc5a6780", "score": "0.5784345", "text": "def startTLS(self):\n pass", "title": "" }, { "docid": "d38e98df4747e3f8c9e091034ccce219", "score": "0.57633823", "text": "def tls():\n return c_tls()", "title": "" }, { "docid": "5951bb4b0ecf706ad8a401a9afaee899", "score": "0.54922044", "text": "def add_dnskey(self, record: DnsKey):\n LOGGER.info(\"Adding DNSKEY record to chain of trust %s\", record)\n self.dnskeys[record.key_tag()] = record", "title": "" }, { "docid": "02a0711d3dad7c66be11806601eca299", "score": "0.5491678", "text": "def host_tls(self, host_tls: ConfigNodePropertyBoolean):\n\n self._host_tls = host_tls", "title": "" }, { "docid": "7e05f47f509a75db98df7d85691a24c4", "score": "0.54697895", "text": "def main():\n if len(argv) < 2:\n print 'Usage: %s <hostname> [port]' % (argv[0],)\n return 1\n\n port = 443\n if len(argv) == 3:\n port = int(argv[2])\n\n hostname = argv[1]\n client = socket()\n #client.settimeout(2)\n\n #print 'Connecting...',\n stdout.flush()\n client.connect((hostname, port))\n #print 'connected', client.getpeername()\n\n client_ssl = Connection(Context(TLSv1_METHOD), client)\n client_ssl.set_connect_state()\n client_ssl.set_tlsext_host_name(hostname)\n client_ssl.do_handshake()\n\n host = client_ssl.getpeername()\n servername = client_ssl.get_servername()\n x509 = client_ssl.get_peer_certificate()\n notAfter = datetime.strptime(x509.get_notAfter(), '%Y%m%d%H%M%SZ')\n cert_chain = client_ssl.get_peer_cert_chain()\n\n now = datetime.now()\n timedelta = notAfter - now \n\n DNS=''\n for i in xrange(x509.get_extension_count()):\n ret = str(x509.get_extension(i))\n if re.match('^DNS:', ret):\n DNS = ret.replace('DNS:','')\n\n print \"servername: %s, host: %s, port: %s\" %(servername, host[0], host[1])\n print \"\\tnotAfter: %s, remain: %s days\" %(notAfter, timedelta.days)\n print \"\\tDNS: \",DNS\n print '\\tCert Chain:'\n\n for i,v in enumerate(cert_chain):\n print '\\t%s,i,%s' %(i,v.get_subject())\n print '\\t%s,s,%s' %(i,v.get_issuer())\n\n client_ssl.close()", "title": "" }, { "docid": "ddab615940f63aa90fcde9d4b07118d1", "score": "0.53967077", "text": "async def set_vod_domain_certificate_with_options_async(\n self,\n request: vod_20170321_models.SetVodDomainCertificateRequest,\n runtime: util_models.RuntimeOptions,\n ) -> vod_20170321_models.SetVodDomainCertificateResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.cert_name):\n query['CertName'] = request.cert_name\n if not UtilClient.is_unset(request.domain_name):\n query['DomainName'] = request.domain_name\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.sslpri):\n query['SSLPri'] = request.sslpri\n if not UtilClient.is_unset(request.sslprotocol):\n query['SSLProtocol'] = request.sslprotocol\n if not UtilClient.is_unset(request.sslpub):\n query['SSLPub'] = request.sslpub\n if not UtilClient.is_unset(request.security_token):\n 
query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SetVodDomainCertificate',\n version='2017-03-21',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n vod_20170321_models.SetVodDomainCertificateResponse(),\n await self.call_api_async(params, req, runtime)\n )", "title": "" }, { "docid": "392c7b8a2130959223e4eeaa67d213e1", "score": "0.5388213", "text": "def set_vod_domain_certificate_with_options(\n self,\n request: vod_20170321_models.SetVodDomainCertificateRequest,\n runtime: util_models.RuntimeOptions,\n ) -> vod_20170321_models.SetVodDomainCertificateResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.cert_name):\n query['CertName'] = request.cert_name\n if not UtilClient.is_unset(request.domain_name):\n query['DomainName'] = request.domain_name\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.sslpri):\n query['SSLPri'] = request.sslpri\n if not UtilClient.is_unset(request.sslprotocol):\n query['SSLProtocol'] = request.sslprotocol\n if not UtilClient.is_unset(request.sslpub):\n query['SSLPub'] = request.sslpub\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SetVodDomainCertificate',\n version='2017-03-21',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n vod_20170321_models.SetVodDomainCertificateResponse(),\n self.call_api(params, req, runtime)\n )", "title": "" }, { "docid": "47e88db388ddd4e4cf83a67884a16788", "score": "0.5327237", "text": "def attach_in_kubernetes(self, domain):\n # only create if it exists - We raise an exception when a secret doesn't exist\n try:\n name = '%s-certificate' % self.name\n namespace = domain.app.id\n data = {\n 'tls.crt': self.certificate,\n 'tls.key': self.key\n }\n\n secret = self._scheduler.secret.get(namespace, name).json()['data']\n except KubeException:\n self._scheduler.secret.create(namespace, name, data)\n else:\n # update cert secret to the TLS Ingress format if required\n if secret != data:\n try:\n self._scheduler.secret.update(namespace, name, data)\n except KubeException as e:\n msg = 'There was a problem updating the certificate secret ' \\\n '{} for {}'.format(name, namespace)\n raise ServiceUnavailable(msg) from e", "title": "" }, { "docid": "9e65dac162fb8b32f5ae23d17a4d2dab", "score": "0.53025967", "text": "def create_txt_record(args):\n domain_name, token = args[0], args[2]\n fqdn_tuple = extract(domain_name)\n base_domain_name = \".\".join([fqdn_tuple.domain, fqdn_tuple.suffix])\n\n if fqdn_tuple.subdomain is '':\n txtrecord = u'_acme-challenge'\n else:\n txtrecord = u'_acme-challenge.{0}'.format(fqdn_tuple.subdomain)\n name = \"{0}.{1}\".format(txtrecord, base_domain_name)\n record = {\n 'hostname': txtrecord,\n 'type': u'TXT',\n 'content': token,\n 'ttl': u'300',\n 'priority': u'10'\n }\n\n b = requests.session()\n b.verify = False\n b.headers.update({u'Content-Type': u'application/json',\n u'Api-Username': api_acct,\n u'Api-Token': api_token})\n url = 
u'https://{0}/api/dns/create/{1}'.format(api_host, base_domain_name)\n create_record = b.post(url, json.dumps(record)).json()\n logger.info(\" + (hook) TXT record created: {0}.{1} => {2}\".format(\n txtrecord,\n base_domain_name,\n token))\n logger.info(\" + (hook) Result: {0}\".format(create_record['result']))\n logger.info(\" + (hook) Settling down for 10s...\")\n time.sleep(10)\n\n while not _has_dns_propagated(name, token):\n logger.info(\" + (hook) DNS not propagated, waiting 30s...\")\n time.sleep(30)", "title": "" }, { "docid": "b1818da5a13bae4f55b57f910f45dd5b", "score": "0.52920467", "text": "def tls_enabled(self, tls_enabled):\n\n self._tls_enabled = tls_enabled", "title": "" }, { "docid": "27b1463d437ecb45c34c42a00755f1a3", "score": "0.5287935", "text": "def create_tls(cls, hostname, keyfile, certfile, ca_certs, port=LEAP_PORT):\n ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2\n ssl_context.load_verify_locations(ca_certs)\n ssl_context.load_cert_chain(certfile, keyfile)\n ssl_context.verify_mode = ssl.CERT_REQUIRED\n\n async def _connect():\n res = await open_connection(\n hostname,\n port,\n server_hostname=\"\",\n ssl=ssl_context,\n family=socket.AF_INET,\n )\n return res\n\n return cls(_connect)", "title": "" }, { "docid": "0cbd5f1c54886fe91a569bea7ef05989", "score": "0.52857643", "text": "def you_can_use_https_by_giving_secure_argument_true(self):\n conn = Mock()\n springnote.httplib \\\n .expects(once()).method(\"HTTPSConnection\") \\\n .will(return_value(conn))\n conn.expects(once()).method(\"request\") \\\n .after(\"HTTPSConnection\", springnote.httplib)\n conn.expects(once()).getresponse().after(\"request\")\n\n self.sn.springnote_request(\"GET\", \"http://url.com/data.json\", secure=True)", "title": "" }, { "docid": "e3657e5ddc6bf5fd08932872806b9f4c", "score": "0.5279144", "text": "def publish_fqdns_enabled(\n name, hostname, username, password, verify_ssl=True, cert=None, cert_common_name=None\n):\n\n log.info(\"Getting the manager's config\")\n\n cert_common_name = cert_common_name\n cert = cert\n\n get_current_config = _get_publish_fqdns_revision_from_nsxt(\n hostname,\n username,\n password,\n verify_ssl=verify_ssl,\n cert=cert,\n cert_common_name=cert_common_name,\n )\n if \"error\" in get_current_config:\n return _create_state_response(name, None, None, False, get_current_config[\"error\"])\n\n current_publish_fqdns, current_revision = _get_publish_fqdns_revision_from_response(\n get_current_config\n )\n\n if __opts__.get(\"test\"):\n log.info(\"publish_fqdns_enabled is called with test option\")\n return _create_state_response(\n name,\n None,\n None,\n None,\n \"State publish_fqdns_enabled will execute with params {}, {}, {}, {}, {}\".format(\n name, hostname, username, password, verify_ssl\n ),\n )\n\n if current_publish_fqdns:\n return _create_state_response(\n name, None, None, True, \"publish_fqdns is already set to True\"\n )\n\n publish_fqdns = True\n\n log.info(\"Updating the NSX-T manager's config\")\n updated_config_response = _set_publish_fqdns_in_nsxt(\n publish_fqdns,\n current_revision,\n hostname,\n username,\n password,\n verify_ssl=verify_ssl,\n cert=cert,\n cert_common_name=cert_common_name,\n )\n\n if \"error\" in updated_config_response:\n return _create_state_response(name, None, None, False, updated_config_response[\"error\"])\n\n return _create_state_response(\n name,\n get_current_config,\n updated_config_response,\n True,\n \"publish_fqdns has been set to True\",\n )", "title": 
"" }, { "docid": "88613fd8e57e57f1eedd554dfbd5d89a", "score": "0.52542114", "text": "def require_tls(self) -> bool:\n return pulumi.get(self, \"require_tls\")", "title": "" }, { "docid": "91279a86951b25fc64334cd0ffb27002", "score": "0.51716524", "text": "def cloudflare_fallback_publish(prog, api, tlsa, hash):\n prog.log.info2(\n \" + publishing TLSA record for {} (fallback)\".format(tlsa.pstr()))\n\n import requests\n\n headers = { \"X-Auth-Email\": api.email,\n \"X-Auth-Key\": api.key,\n \"Content-Type\": \"application/json\" }\n\n data = '{{ \"type\": \"TLSA\", \"name\": \"_{}._{}.{}\", \"data\": {{ \"usage\": {}, \"selector\": {}, \"matching_type\": {}, \"certificate\": \"{}\" }} }}'.format(\n tlsa.port, tlsa.protocol, tlsa.domain, tlsa.usage,\n tlsa.selector, tlsa.matching, hash)\n\n try:\n r = requests.post(\"https://api.cloudflare.com/client/v4/zones/{}/dns_records\".format(api.zone), data=data, headers=headers)\n except ConnectionError:\n raise Except.DNSProcessingError(\"connection error encountered\")\n except requests.exceptions.Timeout:\n raise Except.DNSProcessingError(\"request timed out\")\n except requests.exceptions.TooManyRedirects:\n raise Except.DNSProcessingError(\"too many redirects\")\n except requests.exceptions.RequestException as ex:\n raise Except.DNSProcessingError(\"{}\".format(ex))\n\n prog.log.info3(\" + HTTP response: {}\".format(r.status_code))\n\n response = r.json()\n prog.log.info3(\" + JSON response: {}\".format(\n str(response).replace(api.key, '<redacted>')) )\n\n errors = get_errors(response)\n\n # record is already up\n if len(errors) == 1 and errors[0][0] == 81057:\n # we will only accept this code if it is the only error\n # encountered\n raise Except.DNSSkipProcessing(errors[0][1])\n\n if errors:\n raise Except.DNSProcessingError(errors)\n\n if r.status_code >= 400 and r.status_code < 600:\n raise Except.DNSProcessingError(\n \"Cloudflare4 HTTP response was {}\".format(r.status_code))\n\n if not response['success']:\n raise Except.DNSProcessingError(\"Cloudflare4 JSON response failure\")", "title": "" }, { "docid": "3f2318c3174e0ba4e3008d65d08d0d10", "score": "0.5170376", "text": "def manual_dns(self, email=None, domains=None, expand=True, on_domain_challenge=None):\n if email is not None:\n self.email = email\n if domains is not None:\n self.domains = domains\n\n email = self.email\n if (self.email is None or len(self.email) == 0) \\\n and self.FALLBACK_EMAIL is not None and len(self.FALLBACK_EMAIL) > 0:\n email = self.FALLBACK_EMAIL\n\n cmd = self.get_manual_dns(self.domains, email=email, expand=expand, staging=self.staging)\n cmd_exec = 'sudo -E -H %s %s' % (self.CERTBOT_PATH, cmd)\n log_obj = self.CERTBOT_LOG\n\n mdns = LetsEncryptManualDns(email=email, domains=self.domains, on_domain_challenge=on_domain_challenge,\n cmd=cmd, cmd_exec=cmd_exec, log_obj=log_obj,\n audit=self.audit, sysconfig=self.sysconfig)\n return mdns", "title": "" }, { "docid": "c4c24c5b28cfb54dc44ebe1a8460328d", "score": "0.51511526", "text": "async def test_async_start_tls_server(self):\n with patch.object(ssl.SSLContext, \"load_cert_chain\"):\n await self.start_server(do_tls=True, do_ident=True)\n self.assertEqual(self.server.control.Identity.VendorName, \"VendorName\")\n self.assertIsNotNone(self.server.sslctx)", "title": "" }, { "docid": "1e7e094f173710afc05ec080ce4e9c66", "score": "0.5147838", "text": "def start_tls(self):\r\n log.info(\"Negotiating TLS\")\r\n ssl_versions = {3: 'TLS 1.0', 1: 'SSL 3', 2: 'SSL 2/3'}\r\n log.info(\"Using SSL version: %s\", 
ssl_versions[self.ssl_version])\r\n if self.ca_certs is None:\r\n cert_policy = ssl.CERT_NONE\r\n else:\r\n cert_policy = ssl.CERT_REQUIRED\r\n\r\n ssl_args = {\r\n 'certfile': self.certfile,\r\n 'keyfile': self.keyfile,\r\n 'ca_certs': self.ca_certs,\r\n 'cert_reqs': cert_policy,\r\n 'do_handshake_on_connect': False,\r\n }\r\n\r\n if sys.version_info >= (2, 7):\r\n ssl_args['ciphers'] = self.ciphers\r\n\r\n ssl_socket = ssl.wrap_socket(self.socket, **ssl_args);\r\n\r\n if hasattr(self.socket, 'socket'):\r\n # We are using a testing socket, so preserve the top\r\n # layer of wrapping.\r\n self.socket.socket = ssl_socket\r\n else:\r\n self.socket = ssl_socket\r\n\r\n try:\r\n self.socket.do_handshake()\r\n except (Socket.error, ssl.SSLError):\r\n log.error('CERT: Invalid certificate trust chain.')\r\n if not self.event_handled('ssl_invalid_chain'):\r\n self.disconnect(self.auto_reconnect, send_close=False)\r\n else:\r\n self._der_cert = self.socket.getpeercert(binary_form=True)\r\n self.event('ssl_invalid_chain', direct=True)\r\n return False\r\n\r\n self._der_cert = self.socket.getpeercert(binary_form=True)\r\n pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)\r\n log.debug('CERT: %s', pem_cert)\r\n self.event('ssl_cert', pem_cert, direct=True)\r\n\r\n try:\r\n cert.verify(self._expected_server_name, self._der_cert)\r\n except cert.CertificateError as err:\r\n if not self.event_handled('ssl_invalid_cert'):\r\n log.error(err)\r\n self.disconnect(self.auto_reconnect, send_close=False)\r\n else:\r\n self.event('ssl_invalid_cert', pem_cert, direct=True)\r\n\r\n self.set_socket(self.socket)\r\n return True", "title": "" }, { "docid": "631a3d056989f8f7f68752990190fc59", "score": "0.50779516", "text": "def hack_ssl():\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n return ctx", "title": "" }, { "docid": "98be0544f90ea9a05a8f09c2ac5d32ed", "score": "0.5058791", "text": "def domains_dns_setHosts(self, domain, host_records):\n\n extra_payload = self._list_of_dictionaries_to_numbered_payload(host_records)\n sld, tld = domain.split(\".\", 1)\n extra_payload.update({\n 'SLD': sld,\n 'TLD': tld\n })\n return self.get_element_dict(self._call(\"namecheap.domains.dns.setHosts\", extra_payload), 'DomainDNSSetHostsResult')", "title": "" }, { "docid": "879cb975b179cadff05bf804bb987628", "score": "0.5051515", "text": "def starttls():\n\t\traise SMTPSSLException, \"Cannot perform StartTLS within SSL session.\"", "title": "" }, { "docid": "70cea51b5d2ac89718d24c21c0cfac67", "score": "0.5026635", "text": "def migrate_tls_settings(content):\n state = \"out\"\n indent = 0\n comments = []\n\n keep_settings = [\n 'certificate_authorities',\n 'certificate',\n 'cipher_suites',\n 'curve_types'\n ]\n\n rename_settings = {\n \"certificate_key\": \"key\",\n }\n\n regex_replace_settings = {\n \"insecure\": [\n [re.compile(\".*insecure.*false\"), \"verification_mode: full\"],\n [re.compile(\".*insecure.*true\"), \"verification_mode: none\"],\n ]\n }\n\n version_indent = [None]\n min_version = [None]\n max_version = [None]\n min_version_used = max_version_used = False\n\n ssl_versions_old = {\"SSL-3.0\": 0, \"1.0\": 1, \"1.1\": 2, \"1.2\": 3}\n ssl_versions_new = [\"SSLv3\", \"TLSv1.0\", \"TLSv1.1\", \"TLSv1.2\"]\n\n def get_old_tls_version(v, default):\n if v not in ssl_versions_old:\n return ssl_versions_old[default]\n return ssl_versions_old[v]\n\n def make_version_info():\n if version_indent[0] is None:\n return\n\n indent = version_indent[0]\n commented_out = not 
(min_version_used or max_version_used)\n v_start = min_version[0]\n v_end = max_version[0]\n\n if min_version_used != max_version_used:\n if not min_version_used:\n v_start = \"1.0\"\n if not max_version_used:\n v_end = \"1.2\"\n\n v_start = get_old_tls_version(v_start, \"1.0\")\n v_end = get_old_tls_version(v_end, \"1.2\")\n versions = (ssl_versions_new[i] for i in xrange(v_start, v_end + 1))\n\n line = indent * ' ' + ('#' if commented_out else '')\n line += \"supported_protocols:\"\n line += \"[\" + \", \".join(versions) + \"]\"\n\n yield \"\"\n yield line\n\n version_indent[0] = None\n min_version[0] = None\n max_version[0] = None\n\n for line in content.splitlines():\n tmp = line.expandtabs()\n line_indent = len(tmp) - len(tmp.lstrip())\n line_start = tmp.lstrip()\n tmp = line_start.split(':', 1)\n setting = None\n value = None\n commented_out = len(line_start) > 0 and line_start[0] == '#'\n if len(tmp) > 1:\n setting = tmp[0]\n value = tmp[1].strip()\n if setting[0] == '#':\n setting = setting[1:]\n\n def create_setting(l):\n return (line_indent * \" \") + ('#' if commented_out else '') + l\n\n if state == \"out\":\n if setting == \"tls\":\n state = \"in\"\n indent = line_indent\n yield create_setting(\"ssl:\")\n else:\n yield line\n elif state == \"in\":\n if setting is not None and line_indent <= indent:\n for l in make_version_info():\n yield l\n # last few comments have been part of next line -> print\n for l in comments:\n yield l\n yield line\n comments = []\n state = \"out\"\n elif setting is None:\n comments.append(line)\n elif setting in keep_settings:\n for c in comments:\n yield c\n comments = []\n yield line\n elif setting in rename_settings:\n new_name = rename_settings[setting]\n for c in comments:\n yield c\n comments = []\n yield line.replace(setting, new_name, 1)\n elif setting in regex_replace_settings:\n # drop comments and add empty line before new setting\n comments = []\n yield \"\"\n\n for pattern in regex_replace_settings[setting]:\n regex, val = pattern\n if regex.match(line):\n yield create_setting(regex.sub(line, val, 1))\n break\n elif setting == 'min_version':\n comments = []\n min_version[0] = value\n min_version_used = not commented_out\n version_indent[0] = line_indent\n elif setting == 'max_version':\n comments = []\n max_version[0] = value\n max_version_used = not commented_out\n version_indent[0] = line_indent\n else:\n yield line\n else:\n yield line\n\n # add version info in case end of output is SSL/TLS section\n if state == 'in':\n for l in make_version_info():\n yield l", "title": "" }, { "docid": "c42532b24cea0c94aed78b5da7a6619c", "score": "0.50252885", "text": "def post_enable_certificate_authority(\n self, response: operations_pb2.Operation\n ) -> operations_pb2.Operation:\n return response", "title": "" }, { "docid": "45253a59a7af23edf004d97b97e536e7", "score": "0.5003005", "text": "def set_ssl_config(self, ssl_config, env_name):\n\n # verify machine which to use\n encrypt_position = ssl_config.get('encrypt-position', 0)\n try:\n use_host = env.hosts[encrypt_position]\n if use_host != env.host_string:\n return\n except IndexError:\n self.error(\"`ssl.encrypt-position' value is invalid.\")\n\n # verify domains\n domains = ssl_config.get('domains')\n if not domains and not isinstance(domains, list):\n self.error(\"`ssl.domains' must be config.\")\n\n # set dh_param\n dh_param = ssl_config.get('dhparam')\n if dh_param:\n dh_param_file = dh_param['path']\n dh_param_length = dh_param.get('length', 4096)\n\n run(\"test -f {0} || openssl dhparam 
-out {0} {1}\".format(\n dh_param_file, dh_param_length))\n pass\n\n def create_certicifate():\n try:\n self.letsencrypt_server(domains)\n except ValueError as e:\n self.warn(e)\n\n # get certificate\n certificate_remote_dir = \"/etc/letsencrypt/live/\" + domains[0]\n fullchain = run('cat %s' % os.path.join(certificate_remote_dir,\n 'fullchain.pem'))\n private_key = run(\n 'cat %s' % os.path.join(certificate_remote_dir, 'privkey.pem'))\n\n print(fullchain)\n print(private_key)\n pass\n\n # try to manage load balancer\n load_balancer = ssl_config.get('load-balancer')\n if load_balancer:\n lb_isp = load_balancer.get('isp')\n upstream_mode = load_balancer.get('upstream-mode')\n\n if lb_isp.lower() == 'qingcloud.com':\n from cabric.cloud.qingcloud import QingCloud\n client = QingCloud()\n client.connect(load_balancer['zone'])\n client.connector.debug = self.options.debug\n\n if upstream_mode:\n create_certicifate()\n return\n\n # try to set forward policy\n policy_name = 'letsencrypt-' + env_name\n policy = client.get_or_create_loadbalancer_policy(policy_name)\n\n # try to set forward rule\n rules = [{\n 'loadbalancer_policy_rule_name': domain,\n 'rule_type': 'url',\n 'val': '^/.well-known'\n } for domain in ssl_config['domains']]\n\n for rule in rules:\n client.get_or_add_loadbalancer_policy_rules(\n policy['loadbalancer_policy_id'], rule)\n\n client.apply_loadbalancer_policy(\n policy['loadbalancer_policy_id'])\n\n http_listener = load_balancer.get('http-listener')\n\n # try to set backend\n # ..note::\n # please make sure you backend works right.\n backend = load_balancer.get('backend')\n backend.update({\n 'loadbalancer_backend_name': policy[\n 'loadbalancer_policy_name'],\n 'loadbalancer_policy_id': policy['loadbalancer_policy_id']\n })\n if http_listener and backend:\n client.get_or_add_load_balancer_backends(http_listener,\n backend)\n pass\n\n create_certicifate()\n pass\n elif lb_isp is None:\n self.warn(\"load balancer isp not specified.\"\n \"skip config load balancer\")\n pass\n else:\n self.warn(\"unknown isp for load balancer %s,\"\n \"skip config load balancer\" % lb_isp)\n pass\n pass\n else:\n create_certicifate()\n pass\n pass", "title": "" }, { "docid": "fb9a033e1907af32dba321656f9a91a9", "score": "0.49898055", "text": "def update_dns(subdomain, auth, ipv4_address):\n # Extract the domain\n domain = get_fld(subdomain, fix_protocol=True)\n\n # Find the zone ID corresponding to the domain\n cur_page = 1\n zone_names_to_ids = {}\n while True:\n zone_resp = requests.get(CLOUDFLARE_ZONE_QUERY_API, headers=auth, timeout=6, params={'per_page': 50, 'page': cur_page})\n if zone_resp.status_code != 200:\n print('Authentication error: make sure your email and API key are correct. To set new values, run cloudflare-ddns --configure')\n return\n data = zone_resp.json()\n total_pages = data['result_info']['total_pages']\n for zone in data['result']:\n zone_names_to_ids[zone['name']] = zone['id']\n if (cur_page < total_pages):\n cur_page += 1\n else:\n break\n\n if domain not in zone_names_to_ids:\n print('The domain {domain} doesn\\'t appear to be one of your CloudFlare domains. 
We only found {domain_list}.'.format(domain=domain, domain_list=map(str, zone_names_to_ids.keys())))\n return\n zone_id = zone_names_to_ids[domain]\n\n # Find DNS records\n record_a = None\n record_aaaa = None\n for dns_record in requests.get(\n CLOUDFLARE_ZONE_DNS_RECORDS_QUERY_API.format(zone_id=zone_id),\n headers=auth,\n params={'name': subdomain},\n timeout=6,\n ).json()['result']:\n if dns_record['type'] == 'A':\n record_a = dns_record\n elif dns_record['type'] == 'AAAA':\n record_aaaa = dns_record\n\n # Update the record as necessary\n update_dns_record(auth, zone_id, record_a, ipv4_address)", "title": "" }, { "docid": "be5677420a2adb629013ea7da9dc9893", "score": "0.49647927", "text": "def starttls(self, keyfile = None, certfile = None):\r\n self.ehlo_or_helo_if_needed()\r\n if not self.has_extn(\"starttls\"):\r\n raise SMTPException(\"STARTTLS extension not supported by server.\")\r\n (resp, reply) = self.docmd(\"STARTTLS\")\r\n if resp == 220:\r\n if not _have_ssl:\r\n raise RuntimeError(\"No SSL support included in this Python\")\r\n self.sock = ssl.wrap_socket(self.sock, keyfile, certfile)\r\n self.file = SSLFakeFile(self.sock)\r\n # RFC 3207:\r\n # The client MUST discard any knowledge obtained from\r\n # the server, such as the list of SMTP service extensions,\r\n # which was not obtained from the TLS negotiation itself.\r\n self.helo_resp = None\r\n self.ehlo_resp = None\r\n self.esmtp_features = {}\r\n self.does_esmtp = 0\r\n return (resp, reply)", "title": "" }, { "docid": "0c697da8a6f6cd4c6b0d619c6da875bc", "score": "0.49458942", "text": "def _handle_starttls_proceed(self, proceed):\r\n log.debug(\"Starting TLS\")\r\n if self.xmpp.start_tls():\r\n self.xmpp.features.add('starttls')\r\n raise RestartStream()", "title": "" }, { "docid": "c3dbad144dfe6e401248293e18166cb5", "score": "0.49347243", "text": "def cloudflare_native_publish(prog, api, tlsa, hash):\n prog.log.info2(\n \" + publishing TLSA record for {} (native)\".format(tlsa.pstr()))\n\n from CloudFlare.exceptions import CloudFlareAPIError\n\n try:\n api.cloudflare.zones.dns_records.post(api.zone,\n data={\n \"type\": \"TLSA\",\n \"name\": \"_{}._{}.{}\".format(tlsa.port, tlsa.protocol,\n tlsa.domain),\n \"data\": {\n \"usage\": int(tlsa.usage),\n \"selector\": int(tlsa.selector),\n \"matching_type\": int(tlsa.matching),\n \"certificate\": hash\n }\n })\n prog.log.info2(\" + publishing record: success\")\n except CloudFlareAPIError as exc:\n if len(exc) > 0:\n errs = []\n for e in exc:\n errs += [ \"Cloudflare error {}: {}\".format(int(e), str(e)) ]\n raise Except.DNSProcessingError(errs)\n elif int(exc) == 81057:\n raise Except.DNSSkipProcessing(str(exc))\n else:\n raise Except.DNSProcessingError(\n \"Cloudflare error {}: {}\".format(int(exc), str(exc)) )", "title": "" }, { "docid": "c91ce1e356b319fde2543bf6fd6fd157", "score": "0.49275577", "text": "async def test_async_start_tls_server_no_loop(self):\n with patch.object(ssl.SSLContext, \"load_cert_chain\"):\n await self.start_server(do_tls=True, do_forever=False, do_ident=True)\n self.assertEqual(self.server.control.Identity.VendorName, \"VendorName\")\n self.assertIsNotNone(self.server.sslctx)", "title": "" }, { "docid": "085c43db0868582c49092fe659fdda8e", "score": "0.49210992", "text": "def ControlDomainPolicy(self) -> bool:", "title": "" }, { "docid": "ec4420babe901d21e18cb0862846681f", "score": "0.49139607", "text": "def check_connecting_endpoint_tls(tls):\n if not isinstance(tls, dict):\n raise InvalidConfigException(\"'tls' in endpoint must be dictionary ({} 
encountered)\".format(type(tls)))\n\n for k in tls:\n if k not in ['ca_certificates', 'hostname', 'certificate', 'key']:\n raise InvalidConfigException(\"encountered unknown attribute '{}' in listening endpoint TLS configuration\".format(k))\n\n for k in ['certificate', 'key']:\n if k in tls and not os.path.exists(tls[k]):\n raise InvalidConfigException(\n \"File '{}' for '{}' in TLS configuration \"\n \"not found\".format(tls[k], k)\n )\n\n if 'ca_certificates' in tls:\n if not isinstance(tls['ca_certificates'], list):\n raise InvalidConfigException(\"'ca_certificates' must be a list\")\n for fname in tls['ca_certificates']:\n if not os.path.exists(fname):\n raise InvalidConfigException(\"'ca_certificates' contains non-existant path '{}'\".format(fname))\n\n for req_k in ['hostname']:\n if req_k not in tls:\n raise InvalidConfigException(\"listening endpoint TLS configuration requires '{}'\".format(req_k))", "title": "" }, { "docid": "0d50848a60b90c2b009abb63a3bf790f", "score": "0.4908038", "text": "def ddns(ip, sub_domain=SUB_DOMAIN):\n print \"try setting %s.%s to %s\" % (SUB_DOMAIN, DOMAIN, ip)\n params.update(dict(value=ip, sub_domain=sub_domain, record_id=RECORD_ID[sub_domain]))\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/json\"}\n conn = httplib.HTTPSConnection(\"dnsapi.cn\")\n conn.request(\"POST\", \"/Record.Ddns\", urllib.urlencode(params), headers)\n \n response = conn.getresponse()\n print response.status, response.reason\n data = response.read()\n print data\n conn.close()\n return response.status == 200", "title": "" }, { "docid": "185992cf7bc515d4d5d716c9361a5941", "score": "0.49056885", "text": "def configure_ssl():\n OpenstackLoadbalancerCharm.singleton.configure_ssl()", "title": "" }, { "docid": "50a0eb490bfa37f2239ebd3c3982cea5", "score": "0.48963815", "text": "async def cert_extend(self, cert):\n if cert['cert_type'] in (\n CA_TYPE_EXISTING, CA_TYPE_INTERNAL, CA_TYPE_INTERMEDIATE\n ):\n root_path = CERT_CA_ROOT_PATH\n else:\n root_path = CERT_ROOT_PATH\n cert['cert_root_path'] = root_path\n cert['cert_certificate_path'] = os.path.join(\n root_path, '{0}.crt'.format(cert['cert_name'])\n )\n cert['cert_privatekey_path'] = os.path.join(\n root_path, '{0}.key'.format(cert['cert_name'])\n )\n cert['cert_csr_path'] = os.path.join(\n root_path, '{0}.csr'.format(cert['cert_name'])\n )\n\n def cert_issuer(cert):\n issuer = None\n if cert['cert_type'] in (CA_TYPE_EXISTING, CERT_TYPE_EXISTING):\n issuer = \"external\"\n elif cert['cert_type'] == CA_TYPE_INTERNAL:\n issuer = \"self-signed\"\n elif cert['cert_type'] in (CERT_TYPE_INTERNAL, CA_TYPE_INTERMEDIATE):\n issuer = cert['cert_signedby']\n elif cert['cert_type'] == CERT_TYPE_CSR:\n issuer = \"external - signature pending\"\n return issuer\n cert['cert_issuer'] = cert_issuer(cert)\n\n cert['cert_chain_list'] = []\n if cert['cert_chain']:\n certs = RE_CERTIFICATE.findall(cert['cert_certificate'])\n else:\n certs = [cert['cert_certificate']]\n signing_CA = cert['cert_issuer']\n # Recursively get all internal/intermediate certificates\n while signing_CA not in [\"external\", \"self-signed\", \"external - signature pending\"]:\n certs.append(signing_CA['cert_certificate'])\n signing_CA['cert_issuer'] = cert_issuer(signing_CA)\n signing_CA = signing_CA['cert_issuer']\n\n cert_obj = None\n try:\n for c in certs:\n # XXX Why load certificate if we are going to dump it right after?\n # Maybe just to verify its integrity?\n # Logic copied from freenasUI\n cert_obj = 
crypto.load_certificate(crypto.FILETYPE_PEM, c)\n cert['cert_chain_list'].append(\n crypto.dump_certificate(crypto.FILETYPE_PEM, cert_obj).decode()\n )\n except:\n self.logger.debug('Failed to load certificate {0}'.format(cert['cert_name']), exc_info=True)\n\n try:\n if cert['cert_privatekey']:\n key_obj = crypto.load_privatekey(crypto.FILETYPE_PEM, cert['cert_privatekey'])\n cert['cert_privatekey'] = crypto.dump_privatekey(crypto.FILETYPE_PEM, key_obj).decode()\n except:\n self.logger.debug('Failed to load privatekey {0}'.format(cert['cert_name']), exc_info=True)\n\n try:\n if cert['cert_CSR']:\n csr_obj = crypto.load_certificate_request(crypto.FILETYPE_PEM, cert['cert_CSR'])\n cert['cert_CSR'] = crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr_obj).decode()\n except:\n self.logger.debug('Failed to load csr {0}'.format(cert['cert_name']), exc_info=True)\n\n cert['cert_internal'] = 'NO' if cert['cert_type'] in (CA_TYPE_EXISTING, CERT_TYPE_EXISTING) else 'YES'\n\n obj = None\n # date not applicable for CSR\n cert['cert_from'] = None\n cert['cert_until'] = None\n if cert['cert_type'] == CERT_TYPE_CSR:\n obj = csr_obj\n elif cert_obj:\n obj = cert_obj\n notBefore = obj.get_notBefore()\n t1 = dateutil.parser.parse(notBefore)\n t2 = t1.astimezone(dateutil.tz.tzutc())\n cert['cert_from'] = t2.ctime()\n\n notAfter = obj.get_notAfter()\n t1 = dateutil.parser.parse(notAfter)\n t2 = t1.astimezone(dateutil.tz.tzutc())\n cert['cert_until'] = t2.ctime()\n\n if obj:\n cert['cert_DN'] = '/' + '/'.join([\n '%s=%s' % (c[0].decode(), c[1].decode())\n for c in obj.get_subject().get_components()\n ])\n\n return cert", "title": "" }, { "docid": "4e2c4ec1a7dd46ba86a23cc1472812ed", "score": "0.4892445", "text": "def setup_domain(fqdn):\n # zone is the Route53 zone we find or the one we create if it doesn't exist\n # reminder, for s3 bucket websites, the fqdn and the bucket must be the same name\n bucket = bucket_manager.get_bucket(fqdn)\n\n zone = domain_manager.find_hosted_zone(fqdn) \\\n or domain_manager.create_hosted_zone(fqdn)\n endpoint = util.get_endpoint(bucket_manager.get_region_name(bucket))\n\n a_record = domain_manager.create_s3_domain_record(zone, fqdn, endpoint)\n print(f\"Domain configured http://{fqdn}\")\n print(f\"A Record is {a_record}\")", "title": "" }, { "docid": "5d4a016830a15abebf12f585fa833880", "score": "0.4888427", "text": "def StartSSL( self,\n keyfile = None,\n certfile = None,\n server_side = False,\n cert_reqs = 0,\n ca_certs = None ) :\n ...", "title": "" }, { "docid": "4fe468eb340bb32e85fb0b2b2444a205", "score": "0.48768646", "text": "def enable_certificates(self) -> bool:\n return pulumi.get(self, \"enable_certificates\")", "title": "" }, { "docid": "2d437c0ab023690e9e5f91d8c209ce52", "score": "0.4858633", "text": "def handle(self, *args, **options):\n\n # TLS1.2-only ciphers not identifiable by a single certain algorithm\n # TODO: find a better way to map these to TLS1.2\n misc_tls12 = [\n 'TLS_RSA_WITH_NULL_SHA256',\n 'TLS_RSA_WITH_AES_128_CBC_SHA256',\n 'TLS_RSA_WITH_AES_256_CBC_SHA256',\n 'TLS_DH_RSA_WITH_AES_128_CBC_SHA256',\n 'TLS_DH_RSA_WITH_AES_256_CBC_SHA256',\n 'TLS_DH_DSS_WITH_AES_128_CBC_SHA256',\n 'TLS_DH_DSS_WITH_AES_256_CBC_SHA256',\n 'TLS_DHE_RSA_WITH_AES_128_CBC_SHA256',\n 'TLS_DHE_RSA_WITH_AES_256_CBC_SHA256',\n 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA256',\n 'TLS_DHE_DSS_WITH_AES_256_CBC_SHA256',\n 'TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256',\n 'TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384',\n 'TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256',\n 
'TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384',\n 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256',\n 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384',\n 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256',\n 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384',\n 'TLS_DH_anon_WITH_AES_128_CBC_SHA256',\n 'TLS_DH_anon_WITH_AES_256_CBC_SHA256',\n 'TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256',\n 'TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384',\n 'TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256',\n 'TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384',\n 'TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256',\n 'TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384',\n 'TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256',\n 'TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384',\n ]\n\n tls10, _ = TlsVersion.objects.get_or_create(major=1, minor=0)\n tls11, _ = TlsVersion.objects.get_or_create(major=1, minor=1)\n tls12, _ = TlsVersion.objects.get_or_create(major=1, minor=2)\n tls13, _ = TlsVersion.objects.get_or_create(major=1, minor=3)\n\n for cipher_suite in CipherSuite.objects.all():\n if not 'WITH' in cipher_suite.name:\n # TLS1.3 IANA names don't include WITH\n cipher_suite.tls_version.add(tls13)\n elif 'IDEA' in cipher_suite.name or 'DES' in cipher_suite.name:\n # IDEA and DES are deprecated with TLS1.2\n cipher_suite.tls_version.add(tls13)\n cipher_suite.tls_version.add(tls12)\n cipher_suite.tls_version.add(tls11)\n cipher_suite.tls_version.add(tls10)\n elif 'MGM' in cipher_suite.name:\n # GOST MGM are supported by TLS1.3 onwards\n cipher_suite.tls_version.add(tls13)\n elif 'POLY1305' in cipher_suite.name or\\\n 'GCM' in cipher_suite.name or\\\n 'CCM' in cipher_suite.name or\\\n 'GOST' in cipher_suite.name:\n # ChaCha/Poly, GCM, CCM, and GOST are supported from TLS1.2 onwards\n cipher_suite.tls_version.add(tls13)\n cipher_suite.tls_version.add(tls12)\n elif cipher_suite.name in misc_tls12:\n # catch some others TLS1.2+ ciphers by name\n cipher_suite.tls_version.add(tls13)\n cipher_suite.tls_version.add(tls12)\n else:\n # default: supported by all TLS versions\n cipher_suite.tls_version.add(tls13)\n cipher_suite.tls_version.add(tls12)\n cipher_suite.tls_version.add(tls11)\n cipher_suite.tls_version.add(tls10)\n cipher_suite.save()", "title": "" }, { "docid": "082884b4008ef30351c733db3d552c6e", "score": "0.48558894", "text": "def secure_server_socket(httpd: HTTPServer,\n cert_path:str,\n key_path:str = None,\n ca_path:str = None,\n ) -> HTTPServer:\n\n httpd.socket = ssl.wrap_socket(\n httpd.socket, \n certfile=cert_path, \n keyfile=key_path,\n ca_certs=ca_path,\n cert_reqs= ssl.CERT_REQUIRED,\n server_side=True\n )\n return httpd", "title": "" }, { "docid": "343b37dce082758478e874d2389c4d7a", "score": "0.48553225", "text": "def require_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_tls\")", "title": "" }, { "docid": "379d0a0e903d98efd788759bf03aac21", "score": "0.485364", "text": "async def test_async_tls_server_serve_forever_twice(self):\n with patch.object(ssl.SSLContext, \"load_cert_chain\"):\n await self.start_server(do_tls=True)\n with pytest.raises(RuntimeError):\n await self.server.serve_forever()", "title": "" }, { "docid": "853bd5089ee896751958da0ef1c62c20", "score": "0.4848976", "text": "def domains_dns_addHost(self, domain, host_record):\n host_records_remote = self.domains_dns_getHosts(domain)\n\n host_records_remote.append(host_record)\n host_records_remote = [self._elements_names_fix(x) for x in host_records_remote]\n\n extra_payload = self._list_of_dictionaries_to_numbered_payload(host_records_remote)\n sld, tld = domain.split(\".\", 1)\n 
extra_payload.update({\n 'SLD': sld,\n 'TLD': tld\n })\n return self.get_element_dict(self._call(\"namecheap.domains.dns.setHosts\", extra_payload), 'DomainDNSSetHostsResult')", "title": "" }, { "docid": "232588ce817c508af0b05c1545ba85bd", "score": "0.48461455", "text": "def ensure_tls_cert(self):\n print(\"To connect to your keyspace, you must have a TLS certificate.\")\n print(\"Checking for TLS certificate...\")\n cert_path = os.path.join(os.path.dirname(__file__), QueryManager.DEFAULT_CERT_FILE)\n if not os.path.exists(cert_path):\n cert_choice = q.ask(\n f\"Press enter to download a certificate from {QueryManager.CERT_URL} \"\n f\"or enter the full path to the certificate you want to use: \")\n if cert_choice:\n cert_path = cert_choice\n else:\n cert = requests.get(QueryManager.CERT_URL).text\n with open(cert_path, 'w') as cert_file:\n cert_file.write(cert)\n else:\n q.ask(f\"Certificate {cert_path} found. Press Enter to continue.\")\n print(f\"Certificate {cert_path} will be used to secure the connection to your keyspace.\")\n return cert_path", "title": "" }, { "docid": "0f0c4cfb6ca23446099de3103953ffc0", "score": "0.48360634", "text": "def set_trusted_update_mode(self, host_id: int, duration:int = 0, enabled: bool = True) -> str:\n modify_trusted_updatemode_request = ModifyTrustedUpdateModeRequest(duration, enabled)\n url = \"https://{}:{}/rest/hosts/{}/trusted-update-mode\".format(self.host, self.port, host_id)\n headers = {'Content-Type': 'application/json'}\n cookies = dict(sID=self.session_id)\n r = requests.post(url, data=modify_trusted_updatemode_request.to_json(), verify=self.verify_ssl, cookies=cookies, headers=headers, proxies=self.proxy)\n return json.dumps(dict(status_code=r.status_code))", "title": "" }, { "docid": "6b1a9320b740ee3030eba83718c129c6", "score": "0.4836026", "text": "def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )", "title": "" }, { "docid": "0f65fe5dc472b03744f4529fe888343d", "score": "0.48339292", "text": "def add_setup(self, domain: str, handler: SetupHandler):\n if \".\" in domain:\n _, domain = domain.rsplit(\".\", 1)\n self.setups[domain] = handler", "title": "" }, { "docid": "0773f5cb9d209171df86f7e54e491fe1", "score": "0.4825939", "text": "def ldap_over_tls(self) -> Optional[bool]:\n return pulumi.get(self, \"ldap_over_tls\")", "title": "" }, { "docid": "668037b4b3a3497d28cf4452d2514c9e", "score": "0.48255065", "text": "async def smtp_STARTTLS(self, arg: str) -> None:\n assert self.transport is not None\n\n self.event_handler.record_command(\"STARTTLS\", arg)\n\n log.info(\"%s STARTTLS\", self.session.peer)\n if arg:\n await self.push(\"501 Syntax: STARTTLS\")\n return\n if not self.tls_context:\n await self.push(\"454 TLS not available\")\n return\n await self.push(\"220 Ready to start TLS\")\n\n # Create SSL layer.\n self._tls_protocol = asyncio.sslproto.SSLProtocol( # type: ignore\n self.loop, self, self.tls_context, None, server_side=True\n )\n self._original_transport = self.transport\n self._original_transport.set_protocol(self._tls_protocol)\n\n self.transport = self._tls_protocol._app_transport\n self._tls_protocol.connection_made(self._original_transport)", "title": "" }, { "docid": "e733e134b29182c707719f1495c72cd3", "score": "0.4824015", "text": "def require_tls(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"require_tls\")", "title": "" }, { "docid": "8b5d068fd6c19da71490d1debaeb150a", "score": "0.48055193", "text": "def 
enable_dns_support(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enable_dns_support\")", "title": "" }, { "docid": "4155f055a142fc654b8fa8e4a8d9c639", "score": "0.48005188", "text": "async def set_vod_domain_certificate_async(\n self,\n request: vod_20170321_models.SetVodDomainCertificateRequest,\n ) -> vod_20170321_models.SetVodDomainCertificateResponse:\n runtime = util_models.RuntimeOptions()\n return await self.set_vod_domain_certificate_with_options_async(request, runtime)", "title": "" }, { "docid": "59afa2743c3b4c739d0b50fd86369434", "score": "0.47934142", "text": "def DNS(backend):\n _LOGGER.debug('Using deprecated admin interface.')\n return _ldap.DNS(backend.conn)", "title": "" }, { "docid": "4a3447a8a4b99dc50a2975fc2e7ab415", "score": "0.4787656", "text": "def starttls(self, keyfile=None, certfile=None, **kw):\n\n name = 'STARTTLS'\n\n if name not in self.capabilities:\n raise self.abort('TLS not supported by server')\n\n if hasattr(self, '_tls_established') and self._tls_established:\n raise self.abort('TLS session already established')\n\n try:\n typ, dat = self._simple_command(name)\n finally:\n self.state_change_pending.release()\n\n if typ == 'OK':\n import ssl\n self.sock = ssl.wrap_socket(self.sock, keyfile, certfile)\n self.read_fd = self.sock.fileno()\n\n typ, dat = self.capability()\n if dat == [None]:\n raise self.error('no CAPABILITY response from server')\n self.capabilities = tuple(dat[-1].upper().split())\n self._tls_established = True\n else:\n raise self.error(\"Couldn't establish TLS session: %s\" % dat)\n\n typ, dat = self._untagged_response(typ, dat, name)\n return self._deliver_dat(typ, dat, kw)", "title": "" }, { "docid": "1b1ae88922d1dafaa5fdad71fdf3347c", "score": "0.4786549", "text": "def SetDomain(self, domain):", "title": "" }, { "docid": "24ba513639ee38a48b7539d55f6694a4", "score": "0.476643", "text": "def enable_dns_support(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_dns_support\")", "title": "" }, { "docid": "24ba513639ee38a48b7539d55f6694a4", "score": "0.476643", "text": "def enable_dns_support(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_dns_support\")", "title": "" }, { "docid": "6297e13b491c5ccb2f3529a831550e32", "score": "0.475964", "text": "def setup_cdn(fqdn, domain_name):\n # searching to see if we already have a CF Distribution setup for this domain\n dist = dist_manager.find_matching_dist(fqdn)\n\n # if we don't find a CF distribution for this domain, create it\n if not dist:\n cert = cert_manager.find_matching_cert(fqdn)\n if not cert: # SSL is not optional at this time\n print(\"Error: No matching cert found.\")\n return\n\n # create a CF distribution\n dist = dist_manager.create_dist(fqdn, cert)\n print(\"Waiting for distribution deployment...\")\n dist_manager.await_deploy(dist)\n\n # create route53 record for the CF distribution\n zone = domain_manager.find_hosted_zone(domain_name) \\\n or domain_manager.create_hosted_zone(domain_name)\n\n domain_manager.create_cf_domain_record(zone, fqdn, dist['DomainName'])\n print(f\"Domain configured: https://{fqdn}\")\n\n return", "title": "" }, { "docid": "146b7841fa6706a25cfd794850facda3", "score": "0.47561592", "text": "def set_tld_list(self, tlds):\n self.tlds_list = tlds", "title": "" }, { "docid": "c8377980e37abdfbcde36935c46cdd9d", "score": "0.475091", "text": "def _add_server_to_backend(reversep, backend_name, domain, ip, kind='http'):\n _backends = reversep.setdefault('{0}_backends'.format(kind), {})\n bck = 
_backends.setdefault(backend_name,\n {'name': backend_name,\n 'raw_opts': [\n 'balance roundrobin',\n ],\n 'servers': []})\n # for now rely on settings xforwardedfor header\n if reversep['{0}_proxy'.format(kind)].get(\n 'http_proxy_mode', 'xforwardedfor'\n ) == 'xforwardedfor':\n bck['raw_opts'].append('option http-server-close')\n bck['raw_opts'].append('option forwardfor')\n else:\n # in not much time we ll switch to the haproxy proxy protocol which\n # leverage the xforwardedfor hack\n bck['raw_opts'].append('source 0.0.0.0 usesrc clientip')\n srv = {'name': 'srv_{0}{1}'.format(domain, len(bck['servers']) + 1),\n 'bind': '{0}:80'.format(ip),\n 'opts': 'check'}\n if not srv['bind'] in [a.get('bind') for a in bck['servers']]:\n bck['servers'].append(srv)", "title": "" }, { "docid": "4d953368bf5740753cb82f6e1b9c9ea4", "score": "0.47500953", "text": "def accept_ssl_certificate(self, path, on_failure=None):\n raise NotImplementedError", "title": "" }, { "docid": "2d8fe8e73dfeb071714f10fdd6c7fef5", "score": "0.47205502", "text": "def update_subdomain_ex(\n self,\n request: pds_models.UpdateSubdomainRequest,\n runtime: pds_models.RuntimeOptions,\n ) -> pds_models.UpdateSubdomainModel:\n request.validate()\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': runtime.read_timeout,\n 'connectTimeout': runtime.connect_timeout,\n 'localAddr': runtime.local_addr,\n 'httpProxy': runtime.http_proxy,\n 'httpsProxy': runtime.https_proxy,\n 'noProxy': runtime.no_proxy,\n 'maxIdleConns': runtime.max_idle_conns,\n 'socks5Proxy': runtime.socks_5proxy,\n 'socks5NetWork': runtime.socks_5net_work,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n accesskey_id = self.get_access_key_id()\n access_key_secret = self.get_access_key_secret()\n security_token = self.get_security_token()\n access_token = self.get_access_token()\n real_req = UtilClient.to_map(request)\n _request.protocol = UtilClient.default_string(self._protocol, 'https')\n _request.method = 'POST'\n _request.pathname = self.get_pathname(self._nickname, f'/v2/subdomain/update')\n _request.headers = TeaCore.merge({\n 'user-agent': self.get_user_agent(),\n 'host': UtilClient.default_string(self._endpoint, f'{self._domain_id}.api.aliyunpds.com'),\n 'content-type': 'application/json; charset=utf-8'\n }, request.httpheaders)\n real_req['httpheaders'] = None\n if not UtilClient.empty(access_token):\n _request.headers['authorization'] = f'Bearer {access_token}'\n elif not UtilClient.empty(accesskey_id) and not UtilClient.empty(access_key_secret):\n if not UtilClient.empty(security_token):\n _request.headers['x-acs-security-token'] = security_token\n _request.headers['date'] = UtilClient.get_date_utcstring()\n _request.headers['accept'] = 'application/json'\n _request.headers['x-acs-signature-method'] = 'HMAC-SHA1'\n _request.headers['x-acs-signature-version'] = '1.0'\n string_to_sign = 
ROAUtilClient.get_string_to_sign(_request)\n _request.headers['authorization'] = f'acs {accesskey_id}:{ROAUtilClient.get_signature(string_to_sign, access_key_secret)}'\n _request.body = UtilClient.to_jsonstring(real_req)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n resp_map = None\n obj = None\n if UtilClient.equal_number(_response.status_code, 200):\n obj = UtilClient.read_as_json(_response.body)\n resp_map = UtilClient.assert_as_map(obj)\n return TeaCore.from_map(\n pds_models.UpdateSubdomainModel(),\n {\n 'body': resp_map,\n 'headers': _response.headers\n }\n )\n if not UtilClient.empty(_response.headers.get('x-ca-error-message')):\n raise TeaException({\n 'data': {\n 'requestId': _response.headers.get('x-ca-request-id'),\n 'statusCode': _response.status_code,\n 'statusMessage': _response.status_message\n },\n 'message': _response.headers.get('x-ca-error-message')\n })\n obj = UtilClient.read_as_json(_response.body)\n resp_map = UtilClient.assert_as_map(obj)\n raise TeaException(TeaCore.merge({\n 'data': {\n 'requestId': _response.headers.get('x-ca-request-id'),\n 'statusCode': _response.status_code,\n 'statusMessage': _response.status_message\n }\n }, resp_map))\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "title": "" }, { "docid": "e42ecc844b606b7baa4c1a74af025f06", "score": "0.47139207", "text": "def fetch_certificate_chain(domain, timeout=300):\n conn_details = {\"date\": get_scan_date()}\n certificates = []\n\n hostname = idna.encode(domain)\n # SSL.TLSv1_2_METHOD, SSL.SSLv23_METHOD\n context = SSL.Context(SSL.SSLv23_METHOD)\n context.set_timeout(timeout)\n context.verify_mode = SSL.VERIFY_NONE\n context.set_ocsp_client_callback(ocsp_callback, data=conn_details)\n\n print(f'Connecting to {domain} to get certificate chain...')\n\n try:\n s = socket()\n conn = SSL.Connection(context, s)\n conn.set_connect_state()\n # Server name indicator support (SNI)\n conn.set_tlsext_host_name(hostname)\n conn.connect((hostname, 443))\n ocsp_staple_data = conn.request_ocsp()\n conn.do_handshake()\n certificates = conn.get_peer_cert_chain()\n\n # Connection details\n conn_details[\"server_name\"] = conn.get_servername().decode('utf-8')\n conn_details[\"ip\"] = s.getpeername()[0]\n conn_details[\"protocol\"] = conn.get_protocol_version_name()\n conn_details[\"cipher\"] = conn.get_cipher_name()\n\n if len(certificates) == 0:\n raise vis_ex.NoCertificatesError(\n f\"No certificates found for domain: {domain}\")\n\n cert_chain = [Cert_repr(cert) for cert in certificates]\n\n if len(cert_chain) == 1:\n inter_cert = fetch_intermediate_cert(cert_chain[0])\n cert_chain.append(inter_cert)\n\n try:\n cert_chain = sort_chain(cert_chain, domain)\n except vis_ex.InvalidCertificateChain as icc:\n print(str(icc))\n\n return cert_chain, conn_details\n\n except vis_ex.IntermediateFetchingError as ife:\n ife_string = f\"Failed to fetch intermediate certificate: {str(ife)}\"\n raise vis_ex.IntermediateFetchingError(\n ife_string, cert_chain, conn_details)\n except Exception as e:\n raise vis_ex.CertificateFetchingError(textwrap.fill(\n f\"Error occured while getting certificates for {domain}: {type(e)}: {e}\", 50))\n\n finally:\n s.close()", "title": "" }, { "docid": "093073cfdfa6e46531616cdb567d80c4", "score": "0.47126892", "text": "def add_ds(self, record: DS):\n LOGGER.info(\"Adding DS record to chain of trust %s\", record)\n 
self.ds_records[record.key_tag].append(record)", "title": "" }, { "docid": "5bd308a308c81a7c0e9b877c80b92c7f", "score": "0.4703797", "text": "def create_ssl_certificate(self, workdir):\n if os.path.exists(workdir + '/cert'):\n shutil.rmtree(workdir + '/cert')\n os.mkdir(workdir + '/cert')\n else:\n os.mkdir(workdir + '/cert')\n cwd = os.getcwd()\n os.chdir(workdir + '/cert')\n key_query = \"openssl genrsa 2048 > ca-key.pem \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n key_query = \"openssl req -new -x509 -nodes -days 3600 \" \\\n \"-key ca-key.pem -out ca.pem -subj\" \\\n \" '/CN=www.percona.com/O=Database Performance./C=US' \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n key_query = \"openssl req -newkey rsa:2048 -days 3600 \" \\\n \"-nodes -keyout server-key.pem -out server-req.pem -subj \" \\\n \"'/CN=www.fb.com/O=Database Performance./C=AU' \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n key_query = \"openssl rsa -in server-key.pem -out server-key.pem \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n key_query = \"openssl x509 -req -in server-req.pem \" \\\n \"-days 3600 -CA ca.pem -CAkey ca-key.pem \" \\\n \"-set_serial 01 -out server-cert.pem \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n key_query = \"openssl req -newkey rsa:2048 -days 3600 -nodes -keyout \" \\\n \"client-key.pem -out client-req.pem -subj \" \\\n \"'/CN=www.percona.com/O=Database Performance./C=IN' \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n key_query = \"openssl rsa -in client-key.pem -out client-key.pem \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n key_query = \"openssl x509 -req -in client-req.pem -days \" \\\n \"3600 -CA ca.pem -CAkey ca-key.pem \" \\\n \"-set_serial 01 -out client-cert.pem \"\n subprocess.call(key_query, shell=True, stderr=subprocess.DEVNULL)\n if os.path.isfile(workdir + '/conf/ssl.cnf'):\n os.remove(workdir + '/conf/ssl.cnf')\n cnf_name = open(workdir + '/conf/ssl.cnf', 'a+')\n cnf_name.write('\\n')\n cnf_name.write('[mysqld]\\n')\n cnf_name.write('ssl-ca = ' + workdir + '/cert/ca.pem\\n')\n cnf_name.write('ssl-cert = ' + workdir + '/cert/server-cert.pem\\n')\n cnf_name.write('ssl-key = ' + workdir + '/cert/server-key.pem\\n')\n cnf_name.write('[client]\\n')\n cnf_name.write('ssl-ca = ' + workdir + '/cert/ca.pem\\n')\n cnf_name.write('ssl-cert = ' + workdir + '/cert/client-cert.pem\\n')\n cnf_name.write('ssl-key = ' + workdir + '/cert/client-key.pem\\n')\n cnf_name.write('[sst]\\n')\n cnf_name.write('encrypt = 4\\n')\n cnf_name.write('ssl-ca = ' + workdir + '/cert/ca.pem\\n')\n cnf_name.write('ssl-cert = ' + workdir + '/cert/server-cert.pem\\n')\n cnf_name.write('ssl-key = ' + workdir + '/cert/server-key.pem\\n')\n cnf_name.close()\n os.chdir(cwd)\n return 0", "title": "" }, { "docid": "c02134eaef73f29300a45842c8542df1", "score": "0.47026166", "text": "def ddns(domain=None, host=None, username=None, token=None):\n ip = requests.get('https://api.ipify.org').text\n name = Name(username, token)\n\n data = name.list_records(domain)\n existing_records = [i for i in data[\"records\"] if i.get(\"host\", None) == host]\n\n if len(existing_records) == 1:\n record = existing_records[0]\n # keep existing record\n if record[\"type\"] == \"A\" and record[\"answer\"] == ip:\n click.echo(\"We have found same record there, quit\")\n return\n else:\n # update existing record\n click.echo(\"update existing record\")\n 
name.update_record(domain, record[\"id\"], host, \"A\", ip)\n return\n\n click.echo(\"create record \" + host + \".\" + domain + \" to \" + ip)\n name.create_record(domain, host, \"A\", ip)", "title": "" }, { "docid": "f1c4163fce94b735a53aebf9ee828b5a", "score": "0.46953368", "text": "def host_tls(self) -> ConfigNodePropertyBoolean:\n return self._host_tls", "title": "" }, { "docid": "3b8ac4ec97ec44d6e75085b1f12e6704", "score": "0.46520513", "text": "def check_listening_endpoint_tls(tls):\n if not isinstance(tls, dict):\n raise InvalidConfigException(\"'tls' in endpoint must be dictionary ({} encountered)\".format(type(tls)))\n\n for k in tls:\n if k not in ['key', 'certificate', 'dhparam', 'ciphers', 'ca_certificates']:\n raise InvalidConfigException(\"encountered unknown attribute '{}' in listening endpoint TLS configuration\".format(k))\n\n for k in [('key', True), ('certificate', True), ('dhparam', False), ('ciphers', False)]:\n\n if k[1] and not k[0] in tls:\n raise InvalidConfigException(\"missing mandatory attribute '{}' in listening endpoint TLS configuration\".format(k[0]))\n\n if k[0] in tls:\n if not isinstance(tls[k[0]], six.text_type):\n raise InvalidConfigException(\"'{}' in listening endpoint TLS configuration must be string ({} encountered)\".format(k[0], type(tls[k[0]])))\n # all options except \"ciphers\" are filenames\n if k[0] not in ['ciphers', 'ca_certificates'] and not exists(tls[k[0]]):\n raise InvalidConfigException(\n \"Path '{}' doesn't exist for '{}' in TLS config\".format(tls[k[0]], k[0])\n )", "title": "" }, { "docid": "1cac557a658a8b924ff3022b95c71c71", "score": "0.46475768", "text": "def verify_certificates(self, option=True):\n self.verify = option", "title": "" }, { "docid": "5854d7d5c4704f2a5b254c5a1ee7e04a", "score": "0.46354812", "text": "def IsDnsEnabled(self) -> bool:", "title": "" }, { "docid": "804fac7539e4bda23754f7f32235798d", "score": "0.4632509", "text": "def publish_fqdns_disabled(\n name, hostname, username, password, verify_ssl=True, cert=None, cert_common_name=None\n):\n\n cert_common_name = cert_common_name\n cert = cert\n\n log.info(\"Getting the manager's config\")\n get_current_config = _get_publish_fqdns_revision_from_nsxt(\n hostname,\n username,\n password,\n verify_ssl=verify_ssl,\n cert=cert,\n cert_common_name=cert_common_name,\n )\n if \"error\" in get_current_config:\n return _create_state_response(name, None, None, False, get_current_config[\"error\"])\n\n current_publish_fqdns, current_revision = _get_publish_fqdns_revision_from_response(\n get_current_config\n )\n\n if __opts__.get(\"test\"):\n log.info(\"publish_fqdns_disabled is called with test option\")\n return _create_state_response(\n name,\n None,\n None,\n None,\n \"State publish_fqdns_disabled will execute with params {}, {}, {}, {}, {}\".format(\n name, hostname, username, password, verify_ssl\n ),\n )\n\n if not current_publish_fqdns:\n return _create_state_response(\n name, None, None, True, \"publish_fqdns is already set to False\"\n )\n\n publish_fqdns = False\n\n log.info(\"Updating the manager's config\")\n updated_config_response = _set_publish_fqdns_in_nsxt(\n publish_fqdns,\n current_revision,\n hostname,\n username,\n password,\n verify_ssl=verify_ssl,\n cert=cert,\n cert_common_name=cert_common_name,\n )\n\n if \"error\" in updated_config_response:\n return _create_state_response(name, None, None, False, updated_config_response[\"error\"])\n\n return _create_state_response(\n name,\n get_current_config,\n updated_config_response,\n True,\n \"publish_fqdns 
has been set to False\",\n )", "title": "" }, { "docid": "aa91e3173bc48f3787659496415a6aed", "score": "0.46321034", "text": "def set_vod_domain_certificate(\n self,\n request: vod_20170321_models.SetVodDomainCertificateRequest,\n ) -> vod_20170321_models.SetVodDomainCertificateResponse:\n runtime = util_models.RuntimeOptions()\n return self.set_vod_domain_certificate_with_options(request, runtime)", "title": "" }, { "docid": "49d74ef6645c21d45962c99c23cb7725", "score": "0.46302924", "text": "def enable_for_urllib3(instrumentation_key, telemetry_channel=None, always_flush=False):\n from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool\n __enable_for_urllib3(HTTPConnectionPool, HTTPSConnectionPool, instrumentation_key, telemetry_channel, always_flush)", "title": "" }, { "docid": "cf75efc63e022655a859ab536b3428cc", "score": "0.462953", "text": "def main(**opts):\n\n dest_fqdn = opts[\"dest\"] + \".netlify.com.\"\n account_slug = opts[\"netlify_account_slug\"]\n if account_slug:\n account_slug += \"/\"\n\n def check_cname():\n try:\n cname_answer = dns.resolver.query(opts[\"custom_domain\"], \"CNAME\")\n except dns.resolver.NoAnswer:\n cname_answer = []\n\n if len(cname_answer) != 1 or cname_answer[0].target.to_text() != dest_fqdn:\n raise click.ClickException(\n \"{} must be a CNAME pointing to {}\".format(\n opts[\"custom_domain\"], dest_fqdn\n )\n )\n\n if opts[\"custom_domain\"] and opts[\"checks\"]:\n check_cname()\n\n def nf_req(method, path, **kw):\n if kw.pop(\"absolute\", False):\n url = path\n else:\n url = NETLIFY_ENDPOINT + path\n\n success_codes = kw.pop(\"success\", {200, 201, 204})\n\n h = kw.setdefault(\"headers\", {})\n h.setdefault(\"Authorization\", \"Bearer \" + opts[\"netlify_token\"])\n logger.debug(\"request %s %s %s\", method, url, kw)\n response = requests.request(method, url, **kw)\n logger.debug(\"response %s %s\", response, response.headers)\n\n if response.status_code not in success_codes:\n raise click.ClickException(\n \"netlify api {} {} returned http code {}: {}\".format(\n url, method.upper(), response.status_code, response.content.decode()\n )\n )\n\n return response\n\n def set_site_cert():\n path = pathlib.Path(opts[\"cert\"])\n nf_req(\n \"post\",\n f\"sites/{opts['dest']}.netlify.com/ssl\",\n json={\n \"certificate\": path.joinpath(\"cert.pem\").read_text(),\n \"key\": path.joinpath(\"privkey.pem\").read_text(),\n \"ca_certificates\": path.joinpath(\"chain.pem\").read_text(),\n }\n )\n\n if not opts[\"src\"]:\n if opts[\"cert\"]:\n set_site_cert()\n\n sys.exit(0)\n\n src_site_path = \"sites/\" + opts[\"src\"][0]\n\n def find_deploy():\n absolute = False\n path = src_site_path + \"/deploys\"\n\n while path:\n resp = nf_req(\"get\", path, absolute=absolute)\n\n for i in resp.json():\n if (i[\"commit_ref\"] or \"\").startswith(opts[\"src\"][1]):\n return i[\"id\"]\n\n path = resp.links.get(\"next\", {\"url\": None})[\"url\"]\n absolute = True\n\n src_deploy_id = find_deploy()\n if not src_deploy_id:\n raise click.ClickException(\"No deploy matching specified commit\")\n\n def get_deploy_files(deploy_id):\n result = {}\n\n absolute = False\n path = \"deploys/{}/files\".format(deploy_id)\n\n while path:\n resp = nf_req(\"get\", path, absolute=absolute)\n\n for i in resp.json():\n result[i[\"id\"]] = i[\"sha\"]\n\n path = resp.links.get(\"next\", {\"url\": None})[\"url\"]\n absolute = True\n\n return result\n\n deploy_files = get_deploy_files(src_deploy_id)\n\n def patch():\n patched_files = {}\n\n for path, old, new in opts[\"patch\"]:\n 
content = patched_files.get(path, None)\n\n if content is None:\n if path not in deploy_files:\n raise click.ClickException(\n 'deploy does not contain \"{}\" file'.format(path)\n )\n\n resp = nf_req(\n \"get\",\n \"deploys/{}/files{}\".format(src_deploy_id, path),\n headers={\"Content-Type\": \"application/vnd.bitballoon.v1.raw\"},\n )\n content = resp.content\n\n content = content.split(old.encode())\n if len(content) != 2:\n raise click.ClickException(\n '\"{}\" must occur exactly once in \"{}\" but {} occurrences found'.format(\n old, path, len(content) - 1\n )\n )\n content = new.encode().join(content)\n\n patched_files[path] = content\n\n return patched_files\n\n changed_files = {}\n changed_files.update(patch())\n\n def create_site():\n data = {\"name\": opts[\"dest\"]}\n\n if opts[\"custom_domain\"]:\n data[\"custom_domain\"] = opts[\"custom_domain\"]\n\n resp = nf_req(\"post\", f\"{account_slug}sites\", json=data)\n return resp.json()[\"id\"]\n\n dest_site = None\n if opts[\"update\"]:\n dest_site = \"{}.netlify.com\".format(opts[\"dest\"])\n metadata_resp = nf_req(\n \"head\", \"sites/{}/metadata\".format(dest_site), success=[200, 404]\n )\n if metadata_resp.status_code != 200:\n dest_site = None\n\n site_created = False\n if dest_site is None:\n dest_site = create_site()\n site_created = True\n\n dest_site_path = \"sites/\" + dest_site\n\n try:\n if opts[\"custom_domain\"] and site_created and not opts[\"cert\"]:\n nf_req(\"post\", dest_site_path + \"/ssl\")\n\n if opts[\"cert\"]:\n set_site_cert()\n\n nf_req(\"patch\", dest_site_path, json={\n \"force_ssl\": True,\n \"prerender\": \"netlify\" if opts[\"prerendering\"] else None,\n })\n\n # note: apparently, Netlify needs at least one file to be uploaded\n # for a deploy to be considered complete, so, we add a file containing a UUID.\n # We also use this for verifying the deploy by getting the UUID file\n # via the destination url\n\n clone_uuid = str(uuid.uuid4()).encode()\n changed_files[opts[\"clone_id_path\"]] = clone_uuid\n\n for changed_path, changed_content in changed_files.items():\n deploy_files[changed_path] = hashlib.sha1(changed_content).hexdigest()\n\n def deploy():\n deploy_resp = nf_req(\n \"post\", dest_site_path + \"/deploys\", json={\"files\": deploy_files}\n )\n deploy_resp_json = deploy_resp.json()\n required = deploy_resp_json[\"required\"]\n\n if not set(required).issubset(set(deploy_files[i] for i in changed_files)):\n raise click.ClickException(\n 'unexpected \"required\" list returned by deploy'\n )\n\n for changed_path, changed_content in changed_files.items():\n nf_req(\n \"put\",\n \"deploys/{}/files{}\".format(deploy_resp_json[\"id\"], changed_path),\n headers={\"Content-Type\": \"application/octet-stream\"},\n data=changed_content,\n )\n\n deploy()\n\n def check_get_uuid():\n fqdns = [dest_fqdn]\n if opts[\"custom_domain\"]:\n fqdns.append(opts[\"custom_domain\"])\n\n for fqdn in fqdns:\n url = \"https://{}{}\".format(fqdn, opts[\"clone_id_path\"])\n for attempts in range(opts[\"up_check_attempts\"], 0, -1):\n time.sleep(1)\n\n try:\n response = requests.get(url)\n except requests.exceptions.RequestException as e:\n if attempts > 1:\n logger.debug(\"uuid check failed {}, retrying\".format(e))\n continue\n\n raise click.ClickException(\n \"failed to get {}: {}\".format(url, e)\n )\n\n if response.status_code != 200:\n if attempts > 1:\n logger.debug(\n \"uuid check returned http code {}, retrying\".format(\n response.status_code\n )\n )\n continue\n\n raise click.ClickException(\n \"status {} getting 
{}\".format(response.status_code, url)\n )\n\n if response.content != clone_uuid:\n if attempts > 1:\n logger.debug(\n 'uuid check returned wrong uuid \"{}\", retrying'.format(\n clone_uuid.decode()\n )\n )\n continue\n\n raise click.ClickException(\n 'uuid (\"{}\") obtained from {} does not match uploaded one (\"{}\")'.format(\n response.content.decode(), url, clone_uuid.decode()\n )\n )\n\n break\n\n if opts[\"checks\"]:\n check_get_uuid()\n except Exception:\n if site_created:\n nf_req(\"delete\", dest_site_path)\n\n raise", "title": "" }, { "docid": "995b921f57f54a202f9a98b05a4ec43a", "score": "0.4623086", "text": "def secure(self, SNI = None, verify = False):\n self.report.debug(\"Wrapping socket for TLS\")\n if SNI:\n self.report.debug(\"Using SNI extension for %s\" % SNI)\n context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) # SSL, TLS1, TLS1.1 is largely deprecated now.\n context.verify_mode = ssl.CERT_OPTIONAL\n # Are we going to test the certificate for validity?\n if verify == True:\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n \n self.socket = context.wrap_socket(self.socket, server_hostname = SNI)\n \n while True:\n try:\n self.socket.do_handshake()\n break\n except ssl.SSLWantReadError:\n select.select([self.socket], [], [])\n except ssl.SSLWantWriteError:\n select.select([], [self.socket], [])\n self.report.debug(\"Shook hands, TLS ready\")\n \n return context\n else:\n self.socket = ssl.wrap_socket(self.socket)", "title": "" }, { "docid": "b679e5e751537dece617ef8eaa8dc296", "score": "0.46178213", "text": "def set_dns_cname(instance, host_name):\n\n r53 = adns.connect()\n rrs = adns.current_rrs(r53)\n rrs.sync_instance(rrs, instance)\n rrs.commit()", "title": "" }, { "docid": "e17c33e727c7a205781649b48db9a8d3", "score": "0.46166646", "text": "def setup_domain(domain):\n # get the entire bucket when passing just bucket name to this function\n bucket = bucket_manager.get_bucket(domain)\n\n zone = domain_manager.find_hosted_zone(domain) \\\n or domain_manager.create_hosted_zone(domain)\n # print(zone)\n print(\"zone = \")\n pprint(zone)\n # endpoint is the domain endpoint for the bucket we're building record for\n endpoint = util.get_endpoint(bucket_manager.get_region_name(bucket))\n a_record = domain_manager.create_s3_domain_record(zone, domain, endpoint)\n # Note - there's no need to do an or-check during domain record creation\n # as upsert either uploads a record if not present or updates if it is\n print(\"Domain configured: http://{}\".format(domain))\n print(\"a_record = \")\n pprint(a_record)", "title": "" }, { "docid": "9dec320d65ec3f2f615acc06892e2db0", "score": "0.46045572", "text": "def host_ssl(self, host_ssl: ConfigNodePropertyBoolean):\n\n self._host_ssl = host_ssl", "title": "" }, { "docid": "05744dc04e9587664d5aa37e18ffb4a5", "score": "0.45999497", "text": "def can_https(tls_ver):\n output = True\n\n # check python version\n if sys.version_info < (3, 7):\n _LOGGER.error(\"PyISY cannot use HTTPS: Invalid Python version. See docs.\")\n output = False\n\n # check that Python was compiled against correct OpenSSL lib\n if \"PROTOCOL_TLSv1_1\" not in dir(ssl):\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Compiled against old OpenSSL library. 
See docs.\"\n )\n output = False\n\n # check the requested TLS version\n if tls_ver not in [1.1, 1.2]:\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Only TLS 1.1 and 1.2 are supported by the ISY controller.\"\n )\n output = False\n\n return output", "title": "" }, { "docid": "3edb919cc9fe125afe581de96b912ada", "score": "0.45989674", "text": "def _ldap_connect(self):\n try:\n connection = ldap.initialize(self.ldap_uri)\n connection.set_option(ldap.OPT_PROTOCOL_VERSION, 3)\n connection.set_option(ldap.OPT_REFERRALS, int(self.chase_referrals))\n\n if self.ldap_uri.startswith(\"ldaps://\"):\n # Require server certificate but ignore it's validity. (allow self-signed)\n connection.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, self.cert_policy)\n\n if self.use_tls:\n # Require TLS connection.\n ldap.set_option(ldap.OPT_X_TLS, self.cert_policy)\n # Require server certificate but ignore it's validity. (allow self-signed)\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)\n connection.start_tls_s()\n LOG.debug(\"Connection now using TLS\")\n return connection\n except ldap.LDAPError as e:\n LOG.debug(f\"(_ldap_connect) LDAP Error: {str(e)} : Type {type(e)}\")\n return False", "title": "" }, { "docid": "1d4adef6fb53b487a34676f1af2ff62b", "score": "0.45972762", "text": "def setup_cert():\n sudo(\"certbot --nginx -d {} certonly\".format(env.host_string))", "title": "" }, { "docid": "40474536a4ba09381d04b2d2e33c8ef9", "score": "0.45949692", "text": "def starttls(socket, success=None, failure=None, io=None, **options):\n\n ## Default Options\n\n options.setdefault('do_handshake_on_connect', False)\n options.setdefault('ssl_version', ssl.PROTOCOL_SSLv23)\n\n ## Handlers\n\n def done():\n \"\"\"Handshake finished successfully.\"\"\"\n\n io.remove_handler(wrapped.fileno())\n success and success(wrapped)\n\n def error():\n \"\"\"The handshake failed.\"\"\"\n\n if failure:\n return failure(wrapped)\n ## By default, just close the socket.\n io.remove_handler(wrapped.fileno())\n wrapped.close()\n\n def handshake(fd, events):\n \"\"\"Handler for SSL handshake negotiation. 
See Python docs for\n ssl.do_handshake().\"\"\"\n\n if events & io.ERROR:\n error()\n return\n\n try:\n new_state = io.ERROR\n wrapped.do_handshake()\n return done()\n except ssl.SSLError as exc:\n if exc.args[0] == ssl.SSL_ERROR_WANT_READ:\n new_state |= io.READ\n elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:\n new_state |= io.WRITE\n else:\n logging.exception('starttls: caught exception during handshake')\n error()\n\n if new_state != state[0]:\n state[0] = new_state\n io.update_handler(fd, new_state)\n\n ## set up handshake state; use a list as a mutable cell.\n io = io or loop()\n state = [io.ERROR]\n\n ## Wrap the socket; swap out handlers.\n io.remove_handler(socket.fileno())\n wrapped = SSLSocket(socket, **options)\n wrapped.setblocking(0)\n io.add_handler(wrapped.fileno(), handshake, state[0])\n\n ## Begin the handshake.\n handshake(wrapped.fileno(), 0)\n return wrapped", "title": "" }, { "docid": "c24ee32b8cedc8135c94cd2c0ea565bd", "score": "0.4594548", "text": "def cloudflare_fallback_read(prog, api, tlsa):\n prog.log.info2(\" + getting TLSA records for _{}._{}.{} (fallback)\".format(\n tlsa.port, tlsa.protocol, tlsa.domain))\n\n import requests\n\n headers = { \"X-Auth-Email\": api.email,\n \"X-Auth-Key\": api.key,\n \"Content-Type\": \"application/json\" }\n\n params = { \"type\": \"TLSA\",\n \"name\":\"_{}._{}.{}\".format(\n tlsa.port, tlsa.protocol, tlsa.domain) }\n\n try:\n r = requests.get(\"https://api.cloudflare.com/client/v4/zones/{}/dns_records\".format(api.zone), params=params, headers=headers)\n except ConnectionError:\n raise Except.DNSProcessingError(\"connection error encountered\")\n except requests.exceptions.Timeout:\n raise Except.DNSProcessingError(\"request timed out\")\n except requests.exceptions.TooManyRedirects:\n raise Except.DNSProcessingError(\"too many redirects\")\n except requests.exceptions.RequestException as ex:\n raise Except.DNSProcessingError(\"{}\".format(ex))\n\n prog.log.info3(\" + HTTP response: {}\".format(r.status_code))\n\n response = r.json()\n prog.log.info3(\" + JSON response: {}\".format(\n str(response).replace(api.key, '<redacted>')) )\n\n errors = get_errors(response)\n if errors:\n raise Except.DNSProcessingError(errors)\n\n if r.status_code >= 400 and r.status_code < 600:\n raise Except.DNSProcessingError(\n \"Cloudflare4 HTTP response was {}\".format(r.status_code))\n\n if not response['success']:\n raise Except.DNSProcessingError(\"Cloudflare4 JSON response failure\")\n\n ret = { r['data']['certificate'].lower(): r['id']\n for r in response['result']\n if str(r['data']['usage']) == str(tlsa.usage)\n and str(r['data']['selector']) == str(tlsa.selector)\n and str(r['data']['matching_type']) == str(tlsa.matching) }\n if ret:\n return ret\n\n raise Except.DNSNotLive(\"no TLSA records found\")", "title": "" }, { "docid": "66d9232f3b3e888598cd5d5dc6a657d9", "score": "0.45897397", "text": "def postConnectionCheck(cert, target):\n print \"Server DN:\", str(cert.get_subject())\n return True", "title": "" }, { "docid": "4d1c3f9bb91445ff44e8442f51754849", "score": "0.45817274", "text": "def linode_domain():\n module_args = {\n 'domain': {\n 'type': 'str'\n },\n 'name': {\n 'type': 'str',\n 'required': True,\n },\n 'state': {\n 'type': 'str',\n 'required': True,\n 'choices': ['absent', 'present']\n },\n 'target': {\n 'type': 'str'\n }\n }\n\n execute = {\n 'absent': del_subdomain,\n 'present': add_subdomain\n }\n\n result = {'changed': False,\n 'subdomain': '',\n 'domain': '',\n 'target': ''}\n\n module = AnsibleModule(\n 
argument_spec=module_args,\n supports_check_mode=True\n )\n\n # state with no modifications\n if module.check_mode:\n module.exit_json(**result)\n\n client = linode_api4.LinodeClient(os.environ.get('LINODE_TOKEN'))\n\n result = execute.get(module.params.get('state'))(module, client)\n\n # use whatever logic you need to determine whether or not this module\n # made any modifications to your target\n # if module.params['new']:\n # result['changed'] = True\n\n # during the execution of the module, if there is an exception or a\n # conditional state that effectively causes a failure, run\n # AnsibleModule.fail_json() to pass in the message and the result\n if module.params['name'] == 'fail me':\n module.fail_json(msg='You requested this to fail', **result)\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n module.exit_json(**result)", "title": "" }, { "docid": "283b003ce7177e86a8ac8211a981c133", "score": "0.4579807", "text": "def make_ss_cert(key_file, domains):\n assert domains, \"Must provide one or more hostnames for the CSR.\"\n rsa_key = M2Crypto.RSA.load_key(key_file)\n pubkey = M2Crypto.EVP.PKey()\n pubkey.assign_rsa(rsa_key)\n\n cert = M2Crypto.X509.X509()\n cert.set_pubkey(pubkey)\n cert.set_serial_number(1337)\n cert.set_version(2)\n\n current_ts = long(time.time())\n current = M2Crypto.ASN1.ASN1_UTCTIME()\n current.set_time(current_ts)\n expire = M2Crypto.ASN1.ASN1_UTCTIME()\n expire.set_time((7 * 24 * 60 * 60) + current_ts)\n cert.set_not_before(current)\n cert.set_not_after(expire)\n\n name = cert.get_subject()\n name.C = \"US\"\n name.ST = \"Michigan\"\n name.L = \"Ann Arbor\"\n name.O = \"University of Michigan and the EFF\"\n name.CN = domains[0]\n cert.set_issuer(cert.get_subject())\n\n cert.add_ext(M2Crypto.X509.new_extension('basicConstraints', 'CA:FALSE'))\n #cert.add_ext(M2Crypto.X509.new_extension(\n # 'extendedKeyUsage', 'TLS Web Server Authentication'))\n cert.add_ext(M2Crypto.X509.new_extension(\n 'subjectAltName', \", \".join([\"DNS:%s\" % d for d in domains])))\n\n cert.sign(pubkey, 'sha256')\n assert cert.verify(pubkey)\n assert cert.verify()\n #print check_purpose(,0\n return cert.as_pem()", "title": "" }, { "docid": "eb3512f38b91d27a56c365652a9f65fb", "score": "0.45711163", "text": "def tls_client_auth_subject_dn(self, tls_client_auth_subject_dn):\n\n self._tls_client_auth_subject_dn = tls_client_auth_subject_dn", "title": "" }, { "docid": "072efef67c5ae2b9122c3c11e0d953a7", "score": "0.45662457", "text": "def create_subdomain_ex(\n self,\n request: pds_models.CreateSubdomainRequest,\n runtime: pds_models.RuntimeOptions,\n ) -> pds_models.CreateSubdomainModel:\n request.validate()\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': runtime.read_timeout,\n 'connectTimeout': runtime.connect_timeout,\n 'localAddr': runtime.local_addr,\n 'httpProxy': runtime.http_proxy,\n 'httpsProxy': runtime.https_proxy,\n 'noProxy': runtime.no_proxy,\n 'maxIdleConns': runtime.max_idle_conns,\n 'socks5Proxy': runtime.socks_5proxy,\n 'socks5NetWork': runtime.socks_5net_work,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n 
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n accesskey_id = self.get_access_key_id()\n access_key_secret = self.get_access_key_secret()\n security_token = self.get_security_token()\n access_token = self.get_access_token()\n real_req = UtilClient.to_map(request)\n _request.protocol = UtilClient.default_string(self._protocol, 'https')\n _request.method = 'POST'\n _request.pathname = self.get_pathname(self._nickname, f'/v2/subdomain/create')\n _request.headers = TeaCore.merge({\n 'user-agent': self.get_user_agent(),\n 'host': UtilClient.default_string(self._endpoint, f'{self._domain_id}.api.aliyunpds.com'),\n 'content-type': 'application/json; charset=utf-8'\n }, request.httpheaders)\n real_req['httpheaders'] = None\n if not UtilClient.empty(access_token):\n _request.headers['authorization'] = f'Bearer {access_token}'\n elif not UtilClient.empty(accesskey_id) and not UtilClient.empty(access_key_secret):\n if not UtilClient.empty(security_token):\n _request.headers['x-acs-security-token'] = security_token\n _request.headers['date'] = UtilClient.get_date_utcstring()\n _request.headers['accept'] = 'application/json'\n _request.headers['x-acs-signature-method'] = 'HMAC-SHA1'\n _request.headers['x-acs-signature-version'] = '1.0'\n string_to_sign = ROAUtilClient.get_string_to_sign(_request)\n _request.headers['authorization'] = f'acs {accesskey_id}:{ROAUtilClient.get_signature(string_to_sign, access_key_secret)}'\n _request.body = UtilClient.to_jsonstring(real_req)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n resp_map = None\n obj = None\n if UtilClient.equal_number(_response.status_code, 200):\n obj = UtilClient.read_as_json(_response.body)\n resp_map = UtilClient.assert_as_map(obj)\n return TeaCore.from_map(\n pds_models.CreateSubdomainModel(),\n {\n 'body': resp_map,\n 'headers': _response.headers\n }\n )\n if not UtilClient.empty(_response.headers.get('x-ca-error-message')):\n raise TeaException({\n 'data': {\n 'requestId': _response.headers.get('x-ca-request-id'),\n 'statusCode': _response.status_code,\n 'statusMessage': _response.status_message\n },\n 'message': _response.headers.get('x-ca-error-message')\n })\n obj = UtilClient.read_as_json(_response.body)\n resp_map = UtilClient.assert_as_map(obj)\n raise TeaException(TeaCore.merge({\n 'data': {\n 'requestId': _response.headers.get('x-ca-request-id'),\n 'statusCode': _response.status_code,\n 'statusMessage': _response.status_message\n }\n }, resp_map))\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "title": "" }, { "docid": "8cb1b0d3755d1ebaba19ab4958a29c71", "score": "0.45639908", "text": "def __init__(__self__, *,\n domain_name: pulumi.Input[str],\n certificate_authority_arn: Optional[pulumi.Input[str]] = None,\n certificate_transparency_logging_preference: Optional[pulumi.Input[str]] = None,\n domain_validation_options: Optional[pulumi.Input[Sequence[pulumi.Input['CertificateDomainValidationOptionArgs']]]] = None,\n subject_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['CertificateTagArgs']]]] = None,\n validation_method: Optional[pulumi.Input[str]] = 
None):\n pulumi.set(__self__, \"domain_name\", domain_name)\n if certificate_authority_arn is not None:\n pulumi.set(__self__, \"certificate_authority_arn\", certificate_authority_arn)\n if certificate_transparency_logging_preference is not None:\n pulumi.set(__self__, \"certificate_transparency_logging_preference\", certificate_transparency_logging_preference)\n if domain_validation_options is not None:\n pulumi.set(__self__, \"domain_validation_options\", domain_validation_options)\n if subject_alternative_names is not None:\n pulumi.set(__self__, \"subject_alternative_names\", subject_alternative_names)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if validation_method is not None:\n pulumi.set(__self__, \"validation_method\", validation_method)", "title": "" }, { "docid": "a279e84c1a953836a7b25d163833ecb7", "score": "0.45606703", "text": "def install_ldap_server():\n if 'nongluuldapinfo' in session:\n del session['nongluuldapinfo']\n form = InstallServerForm()\n\n data = {'title': 'Install Symas Open-Ldap Server',\n 'button': 'Install',\n }\n\n if request.method == 'POST':\n if form.validate_on_submit():\n ldp = Server.query.filter(\n Server.hostname == form.hostname.data).first()\n if ldp:\n flash(\"{0} is already in LDAP servers List\".format(\n form.hostname.data), \"warning\")\n return render_template('new_server.html', form=form,\n data=data)\n\n session['nongluuldapinfo'] = {\n 'fqn_hostname': form.hostname.data.strip(),\n 'ip_address': form.ip_address.data.strip(),\n 'ldap_password': form.ldap_password.data.strip(),\n 'ldap_user': 'ldap',\n 'ldap_group': 'ldap',\n 'countryCode': form.countryCode.data.strip(),\n 'state': form.state.data.strip(),\n 'city': form.city.data.strip(),\n 'orgName': form.orgName.data.strip(),\n 'admin_email': form.admin_email.data.strip(),\n }\n\n return redirect(url_for('cluster.install_ldap_server'))\n\n return render_template('new_server.html', form=form, data=data)", "title": "" }, { "docid": "f62b0f88376bd9e1b1e6c7c32db82bd4", "score": "0.45569125", "text": "def lets_encrypt(contact_mail):\n return FMHTTPSStrategy({\"contactMail\": contact_mail}, \"LETSENCRYPT\", True)", "title": "" }, { "docid": "35091d56d796b48165c1754d9a0420e9", "score": "0.4551751", "text": "def test_modify_response_descriptor_certificate_certificate_resource(self):\n pass", "title": "" }, { "docid": "26359ea8e1680dc7585bbbc466029722", "score": "0.45402324", "text": "def check_domain(tc, domain):\n _fetch_from_domain(tc, domain)\n domain.refresh()\n _fetch_from_domain(tc, domain)", "title": "" }, { "docid": "841a86983a1fcfa724364d492752073d", "score": "0.4533951", "text": "def set_dns(self, config):\n settings = xmlobjects.LanSettings()\n settings.parseXML(self.settings)\n settings.setDnsManual(config)\n return self.api('dhcp/settings', settings)", "title": "" }, { "docid": "ff224395f98fbb3f0a2b9dcf571d1eb6", "score": "0.45327842", "text": "def _update_protection_enabled(enabled, url, cookie):\n cookies = {\n 'agh_session': cookie\n }\n\n data = {\n 'protection_enabled': enabled\n }\n \n if enabled:\n print(\" - Enabling global protection\")\n else:\n print(\" - Disabling global protection\")\n \n response = requests.post('{}/control/dns_config'.format(url), data=json.dumps(data), cookies=cookies)\n\n if response.status_code == 403:\n raise UnauthenticatedError\n elif response.status_code != 200:\n raise SystemError", "title": "" }, { "docid": "6d86650c0e03b16db5634f92843fd795", "score": "0.45301774", "text": "async def update_subdomain_ex_async(\n self,\n 
request: pds_models.UpdateSubdomainRequest,\n runtime: pds_models.RuntimeOptions,\n ) -> pds_models.UpdateSubdomainModel:\n request.validate()\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': runtime.read_timeout,\n 'connectTimeout': runtime.connect_timeout,\n 'localAddr': runtime.local_addr,\n 'httpProxy': runtime.http_proxy,\n 'httpsProxy': runtime.https_proxy,\n 'noProxy': runtime.no_proxy,\n 'maxIdleConns': runtime.max_idle_conns,\n 'socks5Proxy': runtime.socks_5proxy,\n 'socks5NetWork': runtime.socks_5net_work,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n accesskey_id = await self.get_access_key_id_async()\n access_key_secret = await self.get_access_key_secret_async()\n security_token = await self.get_security_token_async()\n access_token = await self.get_access_token_async()\n real_req = UtilClient.to_map(request)\n _request.protocol = UtilClient.default_string(self._protocol, 'https')\n _request.method = 'POST'\n _request.pathname = self.get_pathname(self._nickname, f'/v2/subdomain/update')\n _request.headers = TeaCore.merge({\n 'user-agent': self.get_user_agent(),\n 'host': UtilClient.default_string(self._endpoint, f'{self._domain_id}.api.aliyunpds.com'),\n 'content-type': 'application/json; charset=utf-8'\n }, request.httpheaders)\n real_req['httpheaders'] = None\n if not UtilClient.empty(access_token):\n _request.headers['authorization'] = f'Bearer {access_token}'\n elif not UtilClient.empty(accesskey_id) and not UtilClient.empty(access_key_secret):\n if not UtilClient.empty(security_token):\n _request.headers['x-acs-security-token'] = security_token\n _request.headers['date'] = UtilClient.get_date_utcstring()\n _request.headers['accept'] = 'application/json'\n _request.headers['x-acs-signature-method'] = 'HMAC-SHA1'\n _request.headers['x-acs-signature-version'] = '1.0'\n string_to_sign = ROAUtilClient.get_string_to_sign(_request)\n _request.headers['authorization'] = f'acs {accesskey_id}:{ROAUtilClient.get_signature(string_to_sign, access_key_secret)}'\n _request.body = UtilClient.to_jsonstring(real_req)\n _last_request = _request\n _response = await TeaCore.async_do_action(_request, _runtime)\n resp_map = None\n obj = None\n if UtilClient.equal_number(_response.status_code, 200):\n obj = await UtilClient.read_as_json_async(_response.body)\n resp_map = UtilClient.assert_as_map(obj)\n return TeaCore.from_map(\n pds_models.UpdateSubdomainModel(),\n {\n 'body': resp_map,\n 'headers': _response.headers\n }\n )\n if not UtilClient.empty(_response.headers.get('x-ca-error-message')):\n raise TeaException({\n 'data': {\n 'requestId': _response.headers.get('x-ca-request-id'),\n 'statusCode': _response.status_code,\n 'statusMessage': _response.status_message\n },\n 'message': _response.headers.get('x-ca-error-message')\n })\n obj = await UtilClient.read_as_json_async(_response.body)\n resp_map = 
UtilClient.assert_as_map(obj)\n raise TeaException(TeaCore.merge({\n 'data': {\n 'requestId': _response.headers.get('x-ca-request-id'),\n 'statusCode': _response.status_code,\n 'statusMessage': _response.status_message\n }\n }, resp_map))\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "title": "" }, { "docid": "68788eb30537c3d4d3eef9f5521d207d", "score": "0.45299643", "text": "def AddDomainAndListen(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "e952cf8042bd7b4156b5b4e3e2f7c3f4", "score": "0.45262837", "text": "def set_ssl_certificates(self, cert_names, stack_name, max_retries=1, retry_delay=10):\n # List of all certificates replaced\n replaced_certificates = []\n for cert_name in cert_names:\n # Get the cert id and also its arn\n cert_id = \"{0}-{1}\".format(cert_name, stack_name)\n cert_arn = self.iam.get_arn_for_cert(cert_id)\n\n # Get all stack load balancers\n load_balancer_resources = self.cfn.get_stack_load_balancers(stack_name)\n found_load_balancer_names = [lb[\"PhysicalResourceId\"] for lb in load_balancer_resources]\n # Use load balancer names to filter getting load balancer details\n load_balancers = []\n if len(found_load_balancer_names) > 0:\n load_balancers = self.conn_elb.get_all_load_balancers(load_balancer_names=found_load_balancer_names)\n\n # Look for https listeners on load balancers and update the cert\n # using the arn\n if len(load_balancers) > 0:\n for load_balancer in load_balancers:\n for listener in load_balancer.listeners:\n # Get protocol, if https, update cert\n in_port = listener[0]\n protocol = listener[2]\n # If the protocol is HTTPS then set the cert on the listener\n if protocol == \"HTTPS\":\n logging.info(\"ELB::set_ssl_certificates: \"\n \"Found HTTPS protocol on '%s', \"\n \"updating SSL certificate with '%s'\"\n % (load_balancer.name, cert_arn))\n # Get current listener certificate arn\n previous_cert_arn = None\n lb = self.conn_elb.get_all_load_balancers(load_balancer.name)[0]\n for listener in lb.listeners:\n # We're looking for a tuple of the form (443, 80, 'HTTPS', 'HTTP', <cert_arn>)\n if 'HTTPS' in listener.get_tuple():\n previous_cert_arn = listener[4]\n # Set the current certificate on the listener to the new one\n retries = 0\n while retries < max_retries:\n retries += 1\n try:\n self.conn_elb.set_lb_listener_SSL_certificate(load_balancer.name,\n in_port,\n cert_arn)\n if previous_cert_arn:\n previous_cert_name = previous_cert_arn.split('/')[1].split(\"-%s\" % stack_name)[0]\n replaced_certificates.append(previous_cert_name)\n\n logging.info(\"update_certs:Successfully set ssl cert to '%s', \"\n \" replacing cert '%s'\"\n % (cert_arn, previous_cert_name))\n\n break\n except BotoServerError as e:\n logging.warning(\"update_certs: Cannot set ssl certs, reason '%s', \"\n \"waiting %s seconds on retry %s/%s\"\n % (e.error_message, retry_delay, retries, max_retries))\n # Only sleep if we're going to try again\n if retries < max_retries:\n time.sleep(retry_delay)\n else:\n # Throw key error. 
There being no load balancers to update is not\n # necessarily a problem but since the caller expected there to be let\n # it handle this situation\n raise CloudResourceNotFoundError(\"ELB::set_ssl_certificates: \"\n \"No load balancers found in stack,\")\n\n return replaced_certificates", "title": "" }, { "docid": "66f00d86d3b948d8fb9bcebae2abcf3c", "score": "0.45217693", "text": "def enable_dns_hostnames(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"enable_dns_hostnames\")", "title": "" }, { "docid": "3378c02e3fe8ac2ea5cacfa7dd0c156c", "score": "0.45175916", "text": "def tlsReady(self, context):\n self.transport.startTLS(context, self.factory)", "title": "" }, { "docid": "23ef68e30f905a857ccbe250dba9d01e", "score": "0.45120403", "text": "def guess_and_set_verify_certificate(self) -> \"Requester\":\n\n if PyFunceble.facility.ConfigLoader.is_already_loaded() and bool(\n PyFunceble.storage.CONFIGURATION.verify_ssl_certificate\n ):\n self.set_verify_certificate(\n bool(PyFunceble.storage.CONFIGURATION.verify_ssl_certificate)\n )\n else:\n self.set_verify_certificate(self.STD_VERIFY_CERTIFICATE)\n\n return self", "title": "" } ]
1a4b322d5b57367bcf7b5797e458b438
Convert an arbitrary document to a given output format.
[ { "docid": "c2f43e81af0deb2eb6f73c379e1c1b2e", "score": "0.0", "text": "def parse_document(self, state):\n # preprocess document\n d = self.parse_block(state)\n d += self.close_auto_break(state)\n return d", "title": "" } ]
[ { "docid": "901e4f5b03e58a84e897711711bdfa76", "score": "0.665919", "text": "def convert(record: cfg.OpenAPI, outformat: cfg.Format = None) -> NoReturn:\n content = record.oas\n # Output\n out(content, outformat \\\n or cfg.Format.JSON if record.oastype == cfg.Format.YAML \\\n else cfg.Format.YAML)", "title": "" }, { "docid": "f082f76b1c73faa14809a2ddde4f48c7", "score": "0.6049381", "text": "def convert_docx(\n document : Document, \n use_num_convert : bool, \n use_eng_convert : bool,\n use_highlight : bool\n ):\n diff_originals, diff_covnerts, diff_indices = [], [], []\n for i, paragraph in enumerate(document.paragraphs):\n original_text = paragraph.text\n if use_num_convert:\n paragraph.text = convert_character(paragraph.text)\n if use_eng_convert:\n paragraph.text = convert_eng_character(paragraph.text)\n if original_text != paragraph.text:\n if use_highlight:\n paragraph.runs[0].font.highlight_color = WD_COLOR_INDEX.YELLOW\n diff_originals.append(original_text)\n diff_covnerts.append(paragraph.text)\n diff_indices.append(i)\n\n df = pd.DataFrame([diff_indices, diff_originals, diff_covnerts]).T\n df.columns = ['index', 'original', 'converted']\n\n return document, df", "title": "" }, { "docid": "1ea5ab88a694d7230990820efc868366", "score": "0.5951679", "text": "def save_document(filepath_output, document):\r\n document.save(filepath_output)", "title": "" }, { "docid": "22bb863b75c1f70ad159226347674f02", "score": "0.58499795", "text": "def convert(in_file, in_format, out_file, out_format, parse_args=None, **kwargs):\n if parse_args is None:\n parse_args = {}\n trees = parse(in_file, in_format, **parse_args)\n return write(trees, out_file, out_format, **kwargs)", "title": "" }, { "docid": "523821ede9f8ba0714d25c73c36ea938", "score": "0.5772332", "text": "def alpino(doc, output=\"raw\"):\n from ._alpino import tokenize, parse_raw, interpret_parse\n\n try:\n transf = {\"raw\": identity, \"saf\": interpret_parse}[output]\n except KeyError:\n raise ValueError(\"Unknown output format %r\" % output)\n\n return pipe(doc, fetch, tokenize, parse_raw, transf)", "title": "" }, { "docid": "cbabc0224ba10a8d0e9cc28352398ea9", "score": "0.5724264", "text": "def doc_to_string(doc):\n\n if isinstance(doc, str):\n return doc\n elif isinstance(doc, np.ndarray):\n doc = \" \".join(list(map(str, doc.flatten())))\n elif isinstance(doc, (list, tuple)):\n doc = \" \".join(sum(doc, [])) # reduces [[\"1\", \"2], [\"3\"]] to [\"1\", \"2\", \"3\"]\n elif isinstance(doc, (pd.DataFrame, pd.Series)):\n doc = \" \".join(list(map(str, doc.values.flatten())))\n else:\n raise ValueError(\"Can't convert file type of document to string.\")\n return doc", "title": "" }, { "docid": "68f623caf59cbe64e0aee9f05992c5d0", "score": "0.5714349", "text": "def apply_to_document(self, document):\n\n txt = self.template.replace('<MACROS>', \"\") # document._macros)\n if document._identifier is not None:\n txt = txt.replace('<IDENTIFIER>',\n r'\\hl{{{0:s}}}'.format(document._identifier) or 'Abstract ')\n else:\n txt = txt.replace('<IDENTIFIER>', 'Abstract ')\n txt = txt.replace('<TITLE>', document.title)\n txt = txt.replace('<AUTHORS>', self.short_authors(document))\n txt = txt.replace('<ABSTRACT>', document.abstract.replace(r'\\n', ' '))\n figures = ''.join([self.figure_to_latex(fig) for fig in\n self.select_figures(document) ])\n txt = txt.replace('<FIGURES>', figures)\n txt = txt.replace('<COMMENTS>', document.comment or '')\n txt = txt.replace('<DATE>', document.date)\n\n return txt", "title": "" }, { "docid": 
"8e0474e69055060095521fd3e8586ead", "score": "0.5619136", "text": "def convert(self, filetype: str) -> None:\n text = pypandoc.convert_file(\n self.filepath,\n filetype,\n format=self.filetype,\n # extra_args='--extract-media ./' + output\n )\n return text", "title": "" }, { "docid": "997c40315e2204253f086bc1168c53b9", "score": "0.5595928", "text": "def test_transform_document_docx_replace(self):\n pass", "title": "" }, { "docid": "e2ef14e0a41eb39b152aeda191910543", "score": "0.55698484", "text": "def convert_document(self, doc: etree._Element) -> DOCUMENT:\n \n text = ''\n entities = []\n \n if doc.tag != 'DOC':\n raise ValueError(\"`convert_document` expects a DOC tag.\")\n \n if doc.text is not None:\n # Initial text before any tag\n text += doc.text\n \n for tag in doc:\n tag_text, tag_entities = self._convert_tag(tag)\n text = self._avoid_word_agglutination(text, tag_text)\n\n # Entity start and end offsets are relative to begin of `tag`.\n # Shift tag_entities by current doc text length.\n for entity in tag_entities:\n self._shift_offset(entity, len(text))\n\n # If last character was not a whitespace or punctuation, add space\n # to prevent that an entity contains a word only partially\n if tag_text:\n text = self.append_text_safe(text, tag_text)\n \n entities.extend(tag_entities)\n \n return {\n 'doc_id': doc.attrib['DOCID'],\n 'doc_text': ''.join(text),\n 'entities': entities,\n }", "title": "" }, { "docid": "81106757937bd058a7fe0b4122372129", "score": "0.54446113", "text": "def transform(self, document):\n\n return self.batchtransform([document])[0]", "title": "" }, { "docid": "7f94ec1807655814948000635f994acb", "score": "0.53771496", "text": "def create_document(self, args):\n collection = args[\"collection\"] if \"collection\" in args else \"\"\n template = args[\"template\"] if \"template\" in args else \"\"\n\n doc, msg = datastore.create_document(template, collection)\n if doc:\n if args.get(\"output_format\") == \"json\":\n return R(response=doc.AsJSON())\n elif args.get(\"output_format\") == \"text\":\n return R(response=doc.AsText())\n else:\n return R(response=doc.AsXML())\n else:\n return R(err_code=R.Codes.GetError, err_detail=msg)", "title": "" }, { "docid": "5b9cc87fbafa88c0a555daed403e66a1", "score": "0.5350113", "text": "def format(self):\n\n if not hasattr(self, \"_format\"):\n self._format = self.__opts.get(\"format\")\n if not self._format and self.id:\n query = Query(\"format f\", \"f.name\", \"f.id\")\n query.join(\"doc_type t\", \"t.format = f.id\")\n query.where(query.Condition(\"t.id\", self.id))\n rows = query.execute(self.cursor).fetchall()\n if rows:\n self._format, self._format_id = rows[0]\n if not self._format:\n self._format = \"xml\"\n return self._format", "title": "" }, { "docid": "cca0764723396c0adcf4aa06d1522cb0", "score": "0.532944", "text": "def generate(self, html, **kwargs):\n from_format = kwargs.get('from_format', 'html')\n to_format = kwargs.get('to_format', 'tex')\n # create temp file\n self.file_object = NamedTemporaryFile(suffix='.pdf')\n\n extra_args = (\n '--smart',\n '--standalone',\n '-o', self.file_object.name\n )\n # generate it using pandoc\n self.service.convert(html, to_format, format=from_format, extra_args=extra_args)\n # return the file which is now populated with the docx forms\n return File(self.file_object)", "title": "" }, { "docid": "8adcc552a25ce3af79daf6073b9d0130", "score": "0.5329171", "text": "def convert():\n pass", "title": "" }, { "docid": "9314db732a52153138272244726e93d9", "score": "0.52948475", "text": "def 
save(self, output: str) -> None:\n # Handle file types here docx etc.\n filetype = output.split('.')[-1]\n if filetype == self.filetype:\n if self.filetype == 'docx':\n zip_file = ZipFile(output, \"w\")\n for key, value in self.raw.items():\n zip_file.writestr(key, value)\n zip_file.close()\n\n else:\n pass", "title": "" }, { "docid": "673acb4a412ebaacd220bb2eff1a2775", "score": "0.5290371", "text": "def vectorize_doc(document):\n return bc.encode([document])[0]", "title": "" }, { "docid": "a00b759e45d888a1644ec24cefb69a21", "score": "0.520206", "text": "def convert(ifilename, handler, iformat=None):\n if iformat is None:\n iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])\n # XXX: Backwards-compat\n if hasattr(handler, \"_ofilename\"):\n ofilename = handler._ofilename\n # Choose conversion\n if iformat == \"mesh\":\n # Convert from mesh to xml format\n mesh2xml(ifilename, ofilename)\n elif iformat == \"gmsh\":\n # Convert from gmsh to xml format\n gmsh2xml(ifilename, handler)\n elif iformat == \"Triangle\":\n # Convert from Triangle to xml format\n triangle2xml(ifilename, ofilename)\n elif iformat == \"xml-old\":\n # Convert from old to new xml format\n xml_old2xml(ifilename, ofilename)\n elif iformat == \"metis\":\n # Convert from metis graph to dolfin graph xml format\n metis_graph2graph_xml(ifilename, ofilename)\n elif iformat == \"scotch\":\n # Convert from scotch graph to dolfin graph xml format\n scotch_graph2graph_xml(ifilename, ofilename)\n elif iformat == \"diffpack\":\n # Convert from Diffpack tetrahedral grid format to xml format\n diffpack2xml(ifilename, ofilename)\n elif iformat == \"abaqus\":\n # Convert from abaqus to xml format\n abaqus.convert(ifilename, handler)\n elif iformat == \"NetCDF\":\n # Convert from NetCDF generated from ExodusII format to xml format\n netcdf2xml(ifilename, ofilename)\n elif iformat == \"ExodusII\":\n # Convert from ExodusII format to xml format via NetCDF\n exodus2xml(ifilename, ofilename)\n elif iformat == \"StarCD\":\n # Convert from Star-CD tetrahedral grid format to xml format\n starcd2xml(ifilename, ofilename)\n else:\n _error(\"Sorry, cannot convert between %s and DOLFIN xml file formats.\" % iformat)\n\n # XXX: handler.close messes things for other input formats than abaqus or gmsh\n if iformat in (\"abaqus\", \"gmsh\"):\n handler.close()", "title": "" }, { "docid": "42dcfecc6e38589ffaceae6727487ad2", "score": "0.5169634", "text": "def _convert_data_format(self, file_path, index):\n if(self.preprocess_type in ['frame']) :\n return self._frame_parser(file_path, index)\n elif(self.preprocess_type in ['mecab', 'kkma', 'twitter']) :\n return self._nlp_parser(file_path, index)", "title": "" }, { "docid": "2700235472cae19ea655d612c10dc517", "score": "0.5166065", "text": "def convert_original_to_txt(self):\n\n doc_lst = self.da.create_content_list(self.original_doc_dir)\n\n for i in range(len(doc_lst)):\n file_name = doc_lst[i]\n try:\n main(self.original_doc_dir, file_name, self.original_txt_dir)\n except:\n continue", "title": "" }, { "docid": "c2f056b367e6eb374808fd6c550d1239", "score": "0.51613605", "text": "def Marshall(self, doc, obj):", "title": "" }, { "docid": "f3a650bf31f925602ced1e03e8aa6de6", "score": "0.51230574", "text": "def _parse_document_type(self, doc):\n try:\n doi = self._parse_doi(doc)\n\n works = Works()\n doc_type = works.doi(doi)['type']\n\n if doc_type == 'book-chapter':\n return 'chapter'\n else:\n return 'paper'\n except:\n return 'paper'", "title": "" }, { "docid": "614a0195dc1d0b1aaa452056d0cb6945", 
"score": "0.50970495", "text": "def to_json(doc):\n return {\n \"@type\": \"doc\",\n \"id_path\": doc.id_path,\n \"output_path\": doc.output_path,\n \"input_path\": doc.input_path,\n \"created\": doc.created.timestamp(),\n \"modified\": doc.modified.timestamp(),\n \"title\": doc.title,\n \"section\": doc.section,\n \"content\": doc.content,\n # TODO manually serialize meta?\n \"meta\": doc.meta,\n \"templates\": doc.templates\n }", "title": "" }, { "docid": "3d34101836a233c30af94709c1f10eea", "score": "0.5055919", "text": "def build(\n documentPath,\n outputUFOFormatVersion=3,\n roundGeometry=True,\n verbose=True, # not supported\n logPath=None, # not supported\n progressFunc=None, # not supported\n processRules=True,\n logger=None,\n useVarlib=False,\n ):\n import os, glob\n if os.path.isdir(documentPath):\n # process all *.designspace documents in this folder\n todo = glob.glob(os.path.join(documentPath, \"*.designspace\"))\n else:\n # process the \n todo = [documentPath]\n results = []\n for path in todo:\n document = DesignSpaceProcessor(ufoVersion=outputUFOFormatVersion)\n document.useVarlib = useVarlib\n document.roundGeometry = roundGeometry\n document.read(path)\n try:\n r = document.generateUFO(processRules=processRules)\n results.append(r)\n except:\n if logger:\n logger.exception(\"ufoProcessor error\")\n #results += document.generateUFO(processRules=processRules)\n reader = None\n return results", "title": "" }, { "docid": "cbbd6cfa1276ef991164b84c907a36d8", "score": "0.5029552", "text": "def _convert_figure(self, cell_out, resources, figure_name, data_type, data):\n\n if not self.to_format in cell_out:\n if data_type == self.from_format:\n filename = figure_name + '.' + self.to_format\n if filename not in resources['figures']:\n\n #On the cell, make the figure available via \n # cell.outputs[i].pdf_filename ... etc (PDF in example)\n cell_out[self.to_format + '_filename'] = filename\n\n #In the resources, make the figure available via\n # resources['figures']['filename'] = data\n resources['figures'][filename] = self.convert_figure(data_type, data)", "title": "" }, { "docid": "e4a375c881195305ec7b245a76080255", "score": "0.5028239", "text": "def json_to_output(obj, to='markdown', meta={}):\n extra_args = pdoc_args\n source = {'blocks': obj, \"pandoc-api-version\":[1,17,0,5], 'meta': meta}\n return pd.convert_text(source=json.dumps(source),\n to=to,\n format='json',\n extra_args=extra_args,\n filters=filters)", "title": "" }, { "docid": "2ef6105bd679defc20fbcb49a76446b5", "score": "0.5027762", "text": "def docx():\n print(\"Generating docx output\")\n outputFlags = ['--to=docx', '-o', './bin/thesis.docx']\n runPandocCommand(outputFlags)", "title": "" }, { "docid": "b44f2fdeac73dcfdc7b90147071c6148", "score": "0.50031245", "text": "def translate_file(self, fname, outfname, format=\"conll\"):\n\n if format == \"conll\":\n lines = readconll(fname)\n elif format == \"plaintext\":\n lines = readplaintext(fname)\n else:\n print(\"Format not known: \" + format)\n exit()\n\n # everything is done in conll format. That is... one word per line. 
\n outlines = self.translate(lines)\n\n print(\"Writing to:\", outfname)\n if format == \"conll\":\n writeconll(outfname, outlines)\n elif format == \"plaintext\":\n writeplaintext(outfname, outlines)\n else:\n print(\"Unknown format: \" + format)", "title": "" }, { "docid": "9ffccdeacf32e4befdbc56f78fd0f879", "score": "0.49957997", "text": "def document(str_doc, doc_type=\"html\"):\n if len(str_doc) == 0:\n print_msg(\"invalid document string !\")\n return None\n if not isinstance(str_doc, str):\n str_doc = str_doc.decode(\"utf8\")\n doc_type = doc_type.lower().strip()\n if doc_type == \"html\":\n parser = etree.HTMLParser()\n doc = html.document_fromstring(str_doc, parser)\n elif doc_type == \"xml\":\n parser = etree.XMLParser()\n doc = html.document_fromstring(str_doc, parser)\n elif doc_type == \"json\":\n json.load(str_doc)\n else:\n raise Exception(\"unsupported document type: %s\" % doc_type)\n return doc", "title": "" }, { "docid": "b2ace4456eacf8cd3f9fba7b82f00596", "score": "0.49595502", "text": "def _build_final_response(out, request, template):\n converter = get_converter()\n if converter and converter.is_available():\n out.seek(0)\n out = converter.convert(out.read(), \"pdf\")\n result = _build_response(out, request, template, \"pdf\")\n else:\n result = _build_response(out.getvalue(), request, template, \"odt\")\n return result", "title": "" }, { "docid": "57c065733e218bc5ee603c1e6898bcd6", "score": "0.49565306", "text": "def saveDocument(doc,filename):\n file_object = open(filename,\"w\");\n xml.dom.ext.PrettyPrint(doc,file_object);\n file_object.close();", "title": "" }, { "docid": "37b7a660448310b1adb6886533a5c196", "score": "0.49542993", "text": "def main():\n\n # pylint: disable=global-statement\n global PANDOCVERSION\n global Image\n\n # Get the output format and document\n fmt = args.fmt\n doc = json.loads(STDIN.read())\n\n # Initialize pandocxnos\n # pylint: disable=too-many-function-args\n PANDOCVERSION = pandocxnos.init(args.pandocversion, doc)\n\n # Element primitives\n if PANDOCVERSION < '1.16':\n Image = elt('Image', 2)\n\n # Chop up the doc\n meta = doc['meta'] if PANDOCVERSION >= '1.18' else doc[0]['unMeta']\n blocks = doc['blocks'] if PANDOCVERSION >= '1.18' else doc[1:]\n\n # Process the metadata variables\n process(meta)\n\n # First pass\n attach_attrs_image = attach_attrs_factory(Image,\n extract_attrs=_extract_attrs)\n detach_attrs_image = detach_attrs_factory(Image)\n insert_secnos = insert_secnos_factory(Image)\n delete_secnos = delete_secnos_factory(Image)\n filters = [insert_secnos, process_figures, delete_secnos] \\\n if PANDOCVERSION >= '1.16' else \\\n [attach_attrs_image, insert_secnos, process_figures,\n delete_secnos, detach_attrs_image]\n altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),\n filters, blocks)\n\n # Second pass\n process_refs = process_refs_factory(references.keys())\n replace_refs = replace_refs_factory(references,\n use_cleveref_default, False,\n plusname if not capitalize else\n [name.title() for name in plusname],\n starname, 'figure')\n altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),\n [repair_refs, process_refs, replace_refs],\n altered)\n\n # Insert supporting TeX\n if fmt == 'latex':\n\n rawblocks = []\n\n if has_unnumbered_figures:\n rawblocks += [RawBlock('tex', TEX0),\n RawBlock('tex', TEX1),\n RawBlock('tex', TEX2)]\n\n if captionname != 'Figure':\n rawblocks += [RawBlock('tex', TEX3 % captionname)]\n\n insert_rawblocks = insert_rawblocks_factory(rawblocks)\n\n altered = 
functools.reduce(lambda x, action: walk(x, action, fmt, meta),\n [insert_rawblocks], altered)\n\n # Update the doc\n if PANDOCVERSION >= '1.18':\n doc['blocks'] = altered\n else:\n doc = doc[:1] + altered\n\n # Dump the results\n json.dump(doc, STDOUT)\n\n # Flush stdout\n STDOUT.flush()", "title": "" }, { "docid": "86075d7578b9b9ad66171376409041be", "score": "0.49494043", "text": "def create_document(\n self, codefn, annfn,\n title='', tpl=None, out_file=None, **kwargs):\n\n processed = self.process_files(codefn, annfn, **kwargs)\n\n # Add title and join annotations\n processed['title'] = title\n processed['annotations'] = '\\n'.join(processed['annotations'])\n processed['styles'] = '\\n'.join(processed['styles'])\n\n if tpl is None:\n tpl = default_tpl\n document = tpl.format(**processed)\n\n # Warning: might raise IO exceptions\n if out_file is not None:\n with open(out_file, 'w') as of:\n of.write(document)\n\n return document", "title": "" }, { "docid": "c807c907243aa1fe3f84457f6f16f77e", "score": "0.4942886", "text": "def create_document(self, document: Dict[str, Any]):\n document = create(self, document)\n\n # Action based on type\n method = getattr(self, f\"handle_{document.doc_type}\", None)\n if method:\n method(document)\n\n return document", "title": "" }, { "docid": "d59b2a98139700cf40c5ce6967bdc598", "score": "0.49422213", "text": "def convert_input(self, corpus):\n logger.info(\"serializing temporary corpus to %s\", self.fcorpustxt())\n with smart_open(self.fcorpustxt(), 'wb') as fout:\n self.corpus2sttm(corpus, fout)", "title": "" }, { "docid": "d7481672881a3f5c3636585f520340b7", "score": "0.494086", "text": "def output(self, fout=None, formato=None, verbose=True):\n def write_full(fout):\n fi = helper.openfile(fout, 'w')\n fi.write(str(self))\n helper.closefile(fi)\n\n def write_short(fout):\n # fi = helper.openfile(fout, 'w'); fi.write(self.preview().encode(self.encoding))\n fi = helper.openfile(fout, 'w')\n fi.write(self.preview())\n helper.closefile(fi)\n\n exp_meth = {'b': self.export_bibtex,\n 'l': self.export_latex, 't': self.export_latex,\n 'h': self.export_html, 'x': self.export_xml,\n 's': write_short, 'f': write_full\n }\n if verbose:\n print(('# %d items to output' % (len(self.ListItems))))\n\n if formato is not None:\n fform = formato[0].lower()\n else:\n if (fout is not None) and (fout != '-'):\n fform = os.path.splitext(fout)[1][1].lower()\n else:\n fform = 's'\n exp_meth[fform](fout)", "title": "" }, { "docid": "b7061d8c9f08574218b82335da54b52f", "score": "0.4938", "text": "def toTALYSFormat(self):\n raise NotImplementedError()", "title": "" }, { "docid": "b7061d8c9f08574218b82335da54b52f", "score": "0.4938", "text": "def toTALYSFormat(self):\n raise NotImplementedError()", "title": "" }, { "docid": "b7061d8c9f08574218b82335da54b52f", "score": "0.4938", "text": "def toTALYSFormat(self):\n raise NotImplementedError()", "title": "" }, { "docid": "988ff892f3d16c68b269d9ef8348c973", "score": "0.4933133", "text": "def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc", "title": "" }, { "docid": "2a86290dc15bd839381b6f436506529f", "score": "0.49270606", "text": "def convert(input_filename, 
output_filename):\n c_file = pkg_resources.resource_filename('ShapelyChipDesigns', 'convert.rb')\n os.system('klayout -rd input='+input_filename+' -rd output='+output_filename+' -r '+c_file)", "title": "" }, { "docid": "61c5af4ce3f769664238cf8e0709dfbf", "score": "0.4926635", "text": "def corenlp(doc, output='raw'):\n from ._corenlp import parse, stanford_to_saf\n\n try:\n transf = {\"raw\": identity, \"saf\": stanford_to_saf}[output]\n except KeyError:\n raise ValueError(\"Unknown output format %r\" % output)\n\n return pipe(doc, fetch, parse, transf)", "title": "" }, { "docid": "c84ade00730ae6d9e3c378aed5da8751", "score": "0.4923184", "text": "def main():\n # pylint: disable=global-statement\n global PANDOCVERSION\n global AttrMath\n\n # Get the output format and document\n fmt = args.fmt\n doc = json.loads(STDIN.read())\n\n # Initialize pandocxnos\n # pylint: disable=too-many-function-args\n PANDOCVERSION = pandocxnos.init(args.pandocversion, doc)\n\n # Element primitives\n AttrMath = elt('Math', 2)\n\n # Chop up the doc\n meta = doc['meta'] if PANDOCVERSION >= '1.18' else doc[0]['unMeta']\n blocks = doc['blocks'] if PANDOCVERSION >= '1.18' else doc[1:]\n\n # First pass\n attach_attrs_math = attach_attrs_factory(Math, allow_space=True)\n detach_attrs_math = detach_attrs_factory(Math)\n insert_secnos = insert_secnos_factory(Math)\n delete_secnos = delete_secnos_factory(Math)\n altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),\n [attach_attrs_math, insert_secnos,\n process_tables, delete_secnos,\n detach_attrs_math], blocks)\n # Second pass\n altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),\n [replace_table_references], altered)\n\n # Update the doc\n if PANDOCVERSION >= '1.18':\n doc['blocks'] = altered\n else:\n doc = doc[:1] + altered\n\n # Dump the results\n json.dump(doc, STDOUT)\n\n # Flush stdout\n STDOUT.flush()", "title": "" }, { "docid": "bcd81e1d378b0b75c9159b4b6ae732ef", "score": "0.49127468", "text": "def convert(file_path, annotation_fmt_from, annotation_fmt_to,\n working_dir=None, remove_source=True):\n converter = get_converter(annotation_fmt_from, annotation_fmt_to)\n if converter is None:\n raise GenestackException('Unsupported conversion: from %s to %s' % (\n annotation_fmt_from, annotation_fmt_to))\n return converter(file_path, working_dir, remove_source=remove_source)", "title": "" }, { "docid": "930f5eb16d94bd505cc4ffcb802a09b1", "score": "0.49003428", "text": "def convert_outputs(self):\n if 'output_structure' in self.ctx.workchain.outputs:\n self.out('relaxed_structure', self.ctx.workchain.outputs.output_structure)\n self.out('total_energy', get_total_energy(self.ctx.workchain.outputs.output_parameters))\n forces = get_forces_output_folder(self.ctx.workchain.outputs.retrieved, self.inputs.cp2k.structure)\n if forces:\n self.out('forces', forces)\n stress = get_stress_output_folder(self.ctx.workchain.outputs.retrieved)\n if stress:\n self.out('stress', stress)", "title": "" }, { "docid": "e931dc56c66b1808e9853e1919373fd2", "score": "0.4896408", "text": "def _format_out(data, token_info=None, _format=None):\n accept = cherrypy.request.headers.get(\"Accept\")\n if data is None:\n if accept and \"text/html\" in accept:\n return html.format(data, cherrypy.request, cherrypy.response, token_info)\n # cherrypy.response.status = HTTPStatus.NO_CONTENT.value\n return\n elif hasattr(data, \"read\"): # file object\n if _format:\n cherrypy.response.headers[\"Content-Type\"] = _format\n elif \"b\" in data.mode: # binariy asssumig 
zip\n cherrypy.response.headers[\"Content-Type\"] = 'application/zip'\n else:\n cherrypy.response.headers[\"Content-Type\"] = 'text/plain'\n # TODO check that cherrypy close file. If not implement pending things to close per thread next\n return data\n if accept:\n if \"application/json\" in accept:\n cherrypy.response.headers[\"Content-Type\"] = 'application/json; charset=utf-8'\n a = json.dumps(data, indent=4) + \"\\n\"\n return a.encode(\"utf8\")\n elif \"text/html\" in accept:\n return html.format(data, cherrypy.request, cherrypy.response, token_info)\n\n elif \"application/yaml\" in accept or \"*/*\" in accept or \"text/plain\" in accept:\n pass\n # if there is not any valid accept, raise an error. But if response is already an error, format in yaml\n elif cherrypy.response.status >= 400:\n raise cherrypy.HTTPError(HTTPStatus.NOT_ACCEPTABLE.value,\n \"Only 'Accept' of type 'application/json' or 'application/yaml' \"\n \"for output format are available\")\n cherrypy.response.headers[\"Content-Type\"] = 'application/yaml'\n return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False,\n encoding='utf-8', allow_unicode=True) # , canonical=True, default_style='\"'", "title": "" }, { "docid": "610dcdba43b78bc7e7749749bac4b5f5", "score": "0.48963147", "text": "def _convert_format(format, reverse=False):\n\n if reverse:\n return _convert_record2fits(format)\n else:\n return _convert_fits2record(format)", "title": "" }, { "docid": "4b627fb14b8f5144b4db37b92ef831a1", "score": "0.48921055", "text": "def frog(doc, output='raw'):\n from ._frog import call_frog, parse_frog, frog_to_saf\n if output not in ('raw', 'tokens', 'saf'):\n raise ValueError(\"Uknown output: {output}, \"\n \"please choose either raw, tokens, or saf\"\n .format(**locals()))\n text = fetch(doc)\n result = call_frog(text)\n if output == 'raw':\n return list(result)\n if output in ('tokens', 'saf'):\n result = parse_frog(result)\n if output == 'tokens':\n return list(result)\n return frog_to_saf(result)", "title": "" }, { "docid": "1302ce53ee7ceb41036ead9297db7f1b", "score": "0.48836058", "text": "def convert(self, context):\n title_tag = self.find(tag=h1)\n if not title_tag:\n title_tag = self.find(tag=h2)\n if not title_tag:\n title = context.title\n document = self.find()\n else:\n title_tag = title_tag[0]\n title = title_tag.extract_text()\n document = self.find(ignore=title_tag.__eq__)\n\n # add <p> nodes around elements that aren't allowed top-level\n document = fix_structure(document, context, allow_tables=1)\n\n return silva.silva_document(\n silva.title(title),\n silva.doc(document))", "title": "" }, { "docid": "8dbbf2f245de0d58695c7f78485ddd32", "score": "0.48826376", "text": "def format_doc(self, doc, path, locale=None, parameterize=False):\n path = '' if path is None else path\n\n # Most params should always be replaced.\n params = self.params_pod()\n params.update(self.params_doc(path, doc))\n params = self.params_lower(path, params)\n\n path = utils.safe_format(path, **params)\n\n if parameterize:\n path = self.parameterize(path)\n\n params = {}\n\n if locale is None:\n locale = doc.locale\n params['locale'] = self._locale_or_alias(locale)\n\n path = utils.safe_format(path, **params)\n path = self.strip_double_slash(path)\n return self.trailing_slash(doc, path)", "title": "" }, { "docid": "ccc993d3e22f6b8c60a5315705cdb665", "score": "0.487097", "text": "def write(doc, output_dir):\n write_file_deep(PurePath(output_dir).joinpath(doc.output_path), doc.content)", "title": "" }, { 
"docid": "b97590c764281d240a681fcaf708d1c6", "score": "0.48617533", "text": "def _primative_to_document(data, base_url=None):\n if isinstance(data, dict) and data.get('_type') == 'document':\n meta = data.get('_meta', {})\n if not isinstance(meta, dict):\n meta = {}\n\n url = meta.get('url', '')\n if not isinstance(url, string_types):\n url = ''\n url = urlparse.urljoin(base_url, url)\n\n title = meta.get('title', '')\n if not isinstance(title, string_types):\n title = ''\n\n return Document(url=url, title=title, content={\n _unescape_key(key): _primative_to_document(value, url)\n for key, value in data.items()\n if key not in ('_type', '_meta')\n })\n\n elif isinstance(data, dict) and data.get('_type') == 'link':\n url = data.get('url', '')\n if not isinstance(url, string_types):\n url = ''\n url = urlparse.urljoin(base_url, url)\n\n trans = data.get('trans')\n if not isinstance(trans, string_types) or (trans not in _transition_types):\n trans = None\n\n fields = data.get('fields', [])\n if not isinstance(fields, list):\n fields = []\n else:\n # Ignore any field items that don't match the required structure.\n fields = [\n item for item in fields\n if isinstance(item, string_types) or (\n isinstance(item, dict) and\n isinstance(item.get('name'), string_types)\n )\n ]\n # Transform the strings or dicts into strings or Field instances.\n fields = [\n item if isinstance(item, string_types) else\n Field(item['name'], required=bool(item.get('required', False)))\n for item in fields\n ]\n\n return Link(url=url, trans=trans, fields=fields)\n\n elif isinstance(data, dict) and data.get('_type') == 'error':\n messages = data.get('messages', [])\n if not isinstance(messages, list):\n messages = []\n\n # Ignore any messages which are have incorrect type.\n messages = [\n message for message in messages\n if isinstance(message, string_types)\n ]\n\n return Error(messages)\n\n elif isinstance(data, dict):\n return Object({\n _unescape_key(key): _primative_to_document(value, base_url)\n for key, value in data.items()\n if key not in ('_type', '_meta')\n })\n\n elif isinstance(data, list):\n return Array([\n _primative_to_document(item, base_url) for item in data\n ])\n\n return data", "title": "" }, { "docid": "d6262b19b8fef442dccf9d3fd2f37e7e", "score": "0.4854445", "text": "def convert(self, context):\n context.title = ''\n body = self.find('body')[0]\n return body.convert(context)", "title": "" }, { "docid": "9b7e428801e9befe412d6d16e8926574", "score": "0.48487213", "text": "def convert(self, animeDocuments):\n return json.dumps(animeDocuments, cls=AnimeDocumentEncoder)", "title": "" }, { "docid": "a86079196efe06f720c51dc45f99d480", "score": "0.48436618", "text": "def render_doc(document):\n if not document:\n return None\n\n # Pull over document id as it isn't included in fields.\n document_dict = {'id': document.doc_id}\n for field in document.fields:\n if isinstance(field, search.DateField):\n value = str(field.value)\n else:\n value = field.value\n document_dict[field.name] = value\n\n if isinstance(document, ScoredDocument):\n document_dict[\"_sort_scores\"] = document.sort_scores\n return document_dict", "title": "" }, { "docid": "e373f19f97d51b6d643663ff0fd0db99", "score": "0.48249713", "text": "def convert_to_xml(self):\n document = Element('document', attrib={'global-id': self.document_name.split(\".\")[0]})\n # Url\n url = SubElement(document, 'url')\n url.text = self._url\n # Source ID\n source_id = SubElement(document, 'source-id')\n source_id.text = self.source_id\n # Local ID\n local_id 
= SubElement(document, 'local-id')\n local_id.text = self.local_id\n # Source name\n source_name = SubElement(document, 'source-name')\n source_name.text = self.source_name\n\n # Article\n article = SubElement(document, 'article')\n # Article title\n article_title = SubElement(article, 'article-title')\n article_title.text = self._title\n # Article title transliterated\n article_title_transliterated = SubElement(article, 'article-title-transliterated')\n article_title_transliterated.text = self.title_transliterated\n # Article time\n article_time = SubElement(article, 'article-time')\n article_time.text = self._time\n # Article author\n article_author = SubElement(article, 'article-author')\n article_author.text = self.author\n # Article text\n article_text = SubElement(article, 'article-text')\n article_text.text = self.text\n # Article text transliterated\n article_text_transliterated = SubElement(article, 'article-text-transliterated')\n article_text_transliterated.text = self.text_transliterated\n\n # Comments\n comments = SubElement(document, 'comments')\n # Comments count\n comments_count = SubElement(comments, 'comments-count')\n comments_count.text = str(len(self.comments))\n # Comments list\n comments_list = SubElement(comments, 'comment-list')\n for c in self.comments:\n # Comment\n comment = SubElement(comments_list, 'comment', attrib={'comment-id': c.id})\n # Comment parent ID\n comment_parent_id = SubElement(comment, 'comment-parent-id')\n comment_parent_id.text = c.parent_id\n # Comment text\n comment_text = SubElement(comment, 'comment-text')\n comment_text.text = c.text\n # Comment text transliterated\n comment_text_transliterated = SubElement(comment, 'comment-text-transliterated')\n comment_text_transliterated.text = c.text_transliterated\n\n xml_string = tostring(document, encoding='utf-8', method='xml')\n return unescape(xml_string.decode('utf-8'))", "title": "" }, { "docid": "ca98af7b6802542f9576a4677de808bd", "score": "0.4817154", "text": "def format(self, outputs):\n raise NotImplementedError()", "title": "" }, { "docid": "33410e1de5187346e2590af9604e836d", "score": "0.48145938", "text": "def shred_document(stream, output_fn=print_revision, revisions=0):\n assert stream, \"Need a valid input stream to parse\"\n for rev in revision_generator(stream, revisions):\n output_fn(rev)", "title": "" }, { "docid": "6585ee8f883251331f972ea375890470", "score": "0.48081967", "text": "def _coerce_line(parsed_line, out_form):\n if out_form == 'parsed_line':\n return parsed_line\n timestamp = parsed_line.timestamp.isoformat()\n if out_form == 'log':\n tags = '|'.join(parsed_line.tags)\n return MESSAGE_STR_TEMPLATE.format(\n timestamp=timestamp,\n tags=tags,\n message=parsed_line.message\n ).strip()\n\n elif out_form == 'json':\n # Translate from a named tuple to a dict, and then dump as a json string\n return json.dumps({\n 'timestamp': timestamp,\n 'tags': parsed_line.tags,\n 'message': parsed_line.message\n }).strip()\n\n elif out_form == 'json-pretty':\n # Translate from a named tuple to a dict, and then dump as a json string\n return json.dumps({\n 'timestamp': timestamp,\n 'tags': parsed_line.tags,\n 'message': parsed_line.message\n }, indent=2).strip()\n else:\n raise TypeError('form must be one of parsed_line, log, json, or json-pretty. 
'\n 'Instead got {form}'.format(form=out_form))", "title": "" }, { "docid": "161847374fcd370860d951b701b65037", "score": "0.48073113", "text": "def convert():\n generated_content_path = Path(\"content\", \"generated\")\n\n generated_date = datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n\n with open(\"tech/article_template.html\") as readable:\n template = readable.read()\n\n authors = load_authors()\n\n base_lingos = {ling.id: ling.term for ling in load_base_lingos()}\n\n for lingo in load_lingos():\n term_slug = slugify(lingo.term)\n path = Path(lingo.category, term_slug, lingo.language)\n author = authors[lingo.author]\n end_content = str(template)\n replacements = [\n (\"NON_LOCALISED_TITLE\", base_lingos[lingo.id]),\n (\"TITLE\", lingo.term),\n (\"INITIAL\", lingo.id[0]),\n (\"ID\", lingo.id),\n (\"CONTENT\", lingo.text),\n (\"AUTHOR_URL\", author.main_link),\n (\"AUTHOR\", author.display_name),\n (\"TAGS\", \", \".join(lingo.tags)),\n (\"CATEGORY\", lingo.category),\n (\"LANGUAGE\", languages[lingo.language]),\n (\"SLUG\", str(path)),\n (\"DATE\", generated_date),\n ]\n for tag, value in replacements:\n end_content = end_content.replace(tag, value)\n\n (generated_content_path / path).mkdir(parents=True, exist_ok=True)\n with open(generated_content_path / path / \"post.html\", \"w\") as writeable:\n writeable.write(end_content)", "title": "" }, { "docid": "62c0c12ab62af0f441853595b73336a8", "score": "0.48029518", "text": "def create_document_from_object(self, obj: Any):\n document = create_from_object(self, obj)\n\n # Action based on type\n method = getattr(self, f\"handle_{document.doc_type}\", None)\n if method:\n method(document)\n\n return document", "title": "" }, { "docid": "db4f2eefcf6497a45752f024656336d6", "score": "0.47953203", "text": "def parsed_document(self, document, operation_name):\n pass", "title": "" }, { "docid": "769be160a46f8d8ec8aa43f3f50f68b9", "score": "0.47937933", "text": "def render_output_schema_from_output(output):\n return _dict_to_schema(output)", "title": "" }, { "docid": "3034502d61ed6041debee07ef5e27401", "score": "0.47868425", "text": "def prepare_document(self, doc, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "7486f870bc896a06a198ad31a78b0378", "score": "0.4786826", "text": "def convert(source, schema: Schema = None, output='micheline', inline=False):\n if isinstance(source, str):\n try:\n source = michelson_to_micheline(source)\n except ValueError:\n assert schema\n source = encode_micheline(source, schema)\n elif not is_micheline(source):\n assert schema\n source = encode_micheline(source, schema)\n\n if output == 'michelson':\n return micheline_to_michelson(source, inline)\n elif output == 'object':\n assert schema\n return decode_micheline(source, schema)\n elif output == 'micheline':\n return source\n else:\n assert False, output", "title": "" }, { "docid": "0f6ab77e45feb75189a6efe6d99adb28", "score": "0.47839436", "text": "def dump(self, document, verbose=False):\n if verbose:\n options = {\n 'ensure_ascii': False,\n 'indent': 4,\n 'separators': VERBOSE_SEPARATORS\n }\n else:\n options = {\n 'ensure_ascii': False,\n 'indent': None,\n 'separators': COMPACT_SEPARATORS\n }\n\n data = _document_to_primative(document)\n return force_bytes(json.dumps(data, **options))", "title": "" }, { "docid": "2ef62b02211f28a08e1024e455142897", "score": "0.4773916", "text": "def __convert_content__(self, parse_object: ParsedObject):\n return f\"{parse_object.content}{self.__convert__(parse_object.children)}\\n\"", "title": "" }, { "docid": 
"309c7caadc2d3e10753b9b7af99c841e", "score": "0.47665867", "text": "def test_convert(self):\n inputLines = ['.TH BAR \"1\" \"Oct 2007\" \"\" \"\"', \"Foo\\n\"]\n output = (\"<html><head>\\n<title>BAR.1</title></head>\\n<body>\\n\\n\"\n \"<h1>BAR.1</h1>\\n\\n<p>Foo\\n\\n</p>\\n\\n</body>\\n</html>\\n\")\n self.assertConvert(inputLines, output)", "title": "" }, { "docid": "272f7ef245b4f23715fd90d342b5084c", "score": "0.47541624", "text": "def tex2str(tex):\n\n from tempfile import NamedTemporaryFile\n\n # perform some minor tweaks to the given tex document to get\n # around compilation problems that frequently arise with PMC\n # NXML embedded tex:\n\n # remove \"\\usepackage{pmc}\". It's not clear what the contents\n # of this package are (I have not been able to find it), but\n # compilation more often succeeds without it than with it.\n tex = tex.replace('\\\\usepackage{pmc}', '')\n\n # replace \"\\documentclass{minimal}\" with \"\\documentclass{slides}\".\n # It's not clear why, but some font commands (e.g. \"\\tt\") appear\n # to fail with the former.\n tex = re.sub(r'(\\\\documentclass(?:\\[[^\\[\\]]*\\])?\\{)minimal(\\})',\n r'\\1slides\\2', tex)\n\n # now ready to try conversion.\n\n # create a temporary file for the tex content\n try:\n with NamedTemporaryFile('w', suffix='.tex') as tex_tmp:\n tex_tmp.write(tex.encode(OUTPUT_ENCODING))\n tex_tmp.flush()\n\n tex_out_fn = tex_compile(tex_tmp.name)\n\n if tex_out_fn is None:\n # failed to compile\n print >> sys.stderr, 'rewritetex: failed to compile tex document:\\n\"\"\"\\n%s\\n\"\"\"' % tex.encode(OUTPUT_ENCODING)\n return None\n\n # if no output file name could be found in tex output\n # in the expected format, back off to an expected default\n if tex_out_fn == \"\":\n expected_out_fn = tex_tmp.name.replace(\".tex\", \".dvi\")\n tex_out_fn = os.path.join(TEX_OUTPUTDIR,\n os.path.basename(expected_out_fn))\n\n dvistr = run_catdvi(tex_out_fn)\n\n try:\n dvistr = dvistr.decode(INPUT_ENCODING)\n except UnicodeDecodeError:\n print >> sys.stderr, 'rewritetex: error decoding catdvi output as %s (adjust INPUT_ENCODING?)' % INPUT_ENCODING\n\n if dvistr is None or dvistr == \"\":\n print >> sys.stderr, 'rewritetex: likely error invoking catdvi (empty output)'\n return None\n\n # perform minor whitespace cleanup\n dvistr = re.sub(r'\\s+', ' ', dvistr)\n dvistr = re.sub(r'^\\s+', '', dvistr)\n dvistr = re.sub(r'\\s+$', '', dvistr)\n\n return dvistr\n except IOError:\n print >> sys.stderr, \"rewritetex: failed to create temporary file\"\n raise", "title": "" }, { "docid": "2fea376deafee808176b4cf5a7b7f698", "score": "0.47539192", "text": "def datasets_to_doc(output_datasets):\n def dataset_to_yaml(index, dataset):\n return yaml.dump(dataset.metadata_doc, Dumper=SafeDumper, encoding='utf-8')\n\n return xr_apply(output_datasets, dataset_to_yaml, dtype='O').astype('S')", "title": "" }, { "docid": "57ce94875a76447fd5d61ca39ba334d9", "score": "0.47434884", "text": "def fix_structure(nodes, context, allow_tables=0):\n result = [] # Contain the converted document\n paragraph_buffer = [] # Contain nodes to create a new paragraph\n\n def get_paragraph_type(node):\n ptype = str(node.getattr('class', 'normal'))\n if ptype not in ['normal', 'lead', 'annotation']:\n ptype = 'normal'\n return ptype\n\n def flush_paragraph(ptype='normal'):\n # This finish the current paragraph, and add it to the result\n if paragraph_buffer:\n result.append(build_paragraph(paragraph_buffer, context, ptype))\n del paragraph_buffer[:]\n\n def convert_node(node, paragraph_type):\n 
# This convert the given node to silva format. Tables and top\n # level elements are extracted from the node and added\n # afterward.\n tables = []\n if allow_tables:\n tables = retrieve_tables_and_sources(node, context)\n toplevel = retrieve_toplevel_elements(node, context)\n if not hasattr(node, 'should_be_removed'):\n # Node have not been marked to be removed, so it is no table\n # or top level element.\n paragraph_buffer.append(node.convert(context))\n else:\n # It is a table or a top level element\n flush_paragraph(paragraph_type)\n result.extend(tables)\n result.extend(toplevel)\n\n ptype = 'normal'\n for node in nodes:\n # flatten p's by ignoring the element itself and walking through it as\n # if it's contents are part of the current element's contents\n if node.name() == 'p':\n ptype = get_paragraph_type(node)\n for child in node.find():\n convert_node(child, ptype)\n flush_paragraph(ptype)\n node.should_be_removed = 1\n else:\n convert_node(node, ptype)\n flush_paragraph(ptype)\n return result", "title": "" }, { "docid": "2a46adbbd8d343bdc262852e48494b73", "score": "0.47419456", "text": "def bson_encode(self, doc):\n return bson.encode(doc)", "title": "" }, { "docid": "48334024dd2f7741c38365a805e13a65", "score": "0.47405845", "text": "def showDocument(doc):\n xml.dom.ext.PrettyPrint(doc);", "title": "" }, { "docid": "a254549593a08512b46f77271ba3732f", "score": "0.47279647", "text": "def translate_index(doc, use_tyson):\n return {\n 'root': doc.attrib['root'],\n 'products': [translate_product(x) for x in doc.iter('product')],\n 'shapes': [translate_shape(x) for x in doc.iter('shape')],\n 'shells': [translate_shell(x) for x in doc.iter('shell')],\n 'annotations': [translate_annotation(x) for x in doc.iter('annotation')],\n 'useTyson': use_tyson\n }", "title": "" }, { "docid": "6cf9968bd22e5b023c4d6b415e921d60", "score": "0.47188497", "text": "def output_content(content: Dict, outformat: Format) -> NoReturn:\n if outformat is Format.YAML:\n dump_yaml(content)\n elif outformat is Format.JSON:\n dump_json(content)", "title": "" }, { "docid": "9ea966a097c834627babea8b7f4ca7cc", "score": "0.47165266", "text": "def convert(voc_path, voc_name_path, txt_output_path, use_difficult=False):\n\n def _read_voc_txt(path):\n with open(path, 'r') as f:\n txt = f.readlines()\n\n return [line.strip() for line in txt]\n\n img_idx_path = os.path.join(voc_path, 'VOC2007', 'ImageSets', 'Main', 'train.txt')\n img_idx = _read_voc_txt(img_idx_path)\n voc2007_train_img_path = [os.path.join(voc_path, 'VOC2007', 'JPEGImages', idx + '.jpg') for idx in img_idx]\n voc2007_train_ann_path = [os.path.join(voc_path, 'VOC2007', 'Annotations', idx + '.xml') for idx in img_idx]\n\n img_idx_path = os.path.join(voc_path, 'VOC2007', 'ImageSets', 'Main', 'val.txt')\n img_idx = _read_voc_txt(img_idx_path)\n voc2007_val_img_path = [os.path.join(voc_path, 'VOC2007', 'JPEGImages', idx + '.jpg') for idx in img_idx]\n voc2007_val_ann_path = [os.path.join(voc_path, 'VOC2007', 'Annotations', idx + '.xml') for idx in img_idx]\n\n img_idx_path = os.path.join(voc_path, 'VOC2007', 'ImageSets', 'Main', 'test.txt')\n img_idx = _read_voc_txt(img_idx_path)\n voc2007_test_img_path = [os.path.join(voc_path, 'VOC2007', 'JPEGImages', idx + '.jpg') for idx in img_idx]\n voc2007_test_ann_path = [os.path.join(voc_path, 'VOC2007', 'Annotations', idx + '.xml') for idx in img_idx]\n\n img_idx_path = os.path.join(voc_path, 'VOC2012', 'ImageSets', 'Main', 'train.txt')\n img_idx = _read_voc_txt(img_idx_path)\n voc2012_train_img_path = 
[os.path.join(voc_path, 'VOC2012', 'JPEGImages', idx + '.jpg') for idx in img_idx]\n voc2012_train_ann_path = [os.path.join(voc_path, 'VOC2012', 'Annotations', idx + '.xml') for idx in img_idx]\n\n img_idx_path = os.path.join(voc_path, 'VOC2012', 'ImageSets', 'Main', 'val.txt')\n img_idx = _read_voc_txt(img_idx_path)\n voc2012_val_img_path = [os.path.join(voc_path, 'VOC2012', 'JPEGImages', idx + '.jpg') for idx in img_idx]\n voc2012_val_ann_path = [os.path.join(voc_path, 'VOC2012', 'Annotations', idx + '.xml') for idx in img_idx]\n\n # we don't have test dataset of VOC2012\n # img_idx_path = os.path.join(voc_path, 'VOC2012', 'ImageSets', 'Main', 'test.txt')\n # img_idx = _read_voc_txt(img_idx_path)\n # test_img_path.extend([os.path.join(voc_path, 'VOC2012', 'JPEGImages', idx + '.jpg') for idx in img_idx])\n # test_ann_path.extend([os.path.join(voc_path, 'VOC2012', 'Annotations', idx + '.xml') for idx in img_idx])\n\n # voc_name_path = os.path.join('.', 'data', 'classes', 'voc.name')\n voc_name = _read_voc_txt(voc_name_path)\n\n def _check_bbox(sx1, sy1, sx2, sy2, sw, sh):\n x1, y1, x2, y2, w, h = int(sx1), int(sy1), int(sx2), int(sy2), int(sw), int(sh)\n\n if x1 < 1 or x2 < 1 or x1 > w or x2 > w or y1 < 1 or y2 < 1 or y1 > h or y2 > h:\n logging.warning('cross boundary (' + str(w) + ',' + str(h) + '),(' + ','.join([str(x1), str(y1), str(x2), str(y2)]) + ')')\n\n return str(min(max(x1, 0), w)), str(min(max(y1, 0), h)), str(min(max(x2, 0), w)), str(min(max(y2, 0), h))\n\n return str(x1-1), str(y1-1), str(x2-1), str(y2-1)\n\n def _write_to_text(img_paths, ann_paths, txt_path):\n with open(txt_path, 'w') as f:\n for img_path, ann_path in zip(img_paths, ann_paths):\n root = ET.parse(ann_path).getroot()\n objects = root.findall('object')\n line = img_path\n\n size = root.find('size')\n width = size.find('width').text.strip()\n height = size.find('height').text.strip()\n\n for obj in objects:\n difficult = obj.find('difficult').text.strip()\n if (not use_difficult) and difficult == '1':\n continue\n\n bbox = obj.find('bndbox')\n class_idx = voc_name.index(obj.find('name').text.lower().strip())\n xmin = bbox.find('xmin').text.strip()\n xmax = bbox.find('xmax').text.strip()\n ymin = bbox.find('ymin').text.strip()\n ymax = bbox.find('ymax').text.strip()\n\n xmin, ymin, xmax, ymax = _check_bbox(xmin, ymin, xmax, ymax, width, height)\n\n line += ' ' + ','.join([xmin, ymin, xmax, ymax, str(class_idx)])\n\n logging.info(line)\n f.write(line + '\\n')\n\n _write_to_text(voc2007_train_img_path, voc2007_train_ann_path, os.path.join(txt_output_path, 'voc2007_train.txt'))\n _write_to_text(voc2007_val_img_path, voc2007_val_ann_path, os.path.join(txt_output_path, 'voc2007_val.txt'))\n _write_to_text(voc2007_test_img_path, voc2007_test_ann_path, os.path.join(txt_output_path, 'voc2007_test.txt'))\n _write_to_text(voc2012_train_img_path, voc2012_train_ann_path, os.path.join(txt_output_path, 'voc2012_train.txt'))\n _write_to_text(voc2012_val_img_path, voc2012_val_ann_path, os.path.join(txt_output_path, 'voc2012_val.txt'))\n\n return len(voc2007_train_img_path) + len(voc2007_val_img_path) + len(voc2012_train_img_path) + len(\n voc2012_val_img_path), len(voc2007_test_img_path)", "title": "" }, { "docid": "9a243b72ce18b229a7cda44a3e8c858f", "score": "0.47067568", "text": "def cli(input, output, schema):\n from spellbook.data_formatting import translator\n\n args = SimpleNamespace(**{\"input\": input, \"output\": output, \"schema\": schema})\n translator.process_args(args)", "title": "" }, { "docid": 
"393bb4ded3a9f7e2537d6ba69c6d1e88", "score": "0.47062725", "text": "def convert(data, in_format, out_format, name=None, pretty=False):\n # Decide on a json formatter depending on desired prettiness\n dumps = json.dumps if pretty else json.compress\n\n # Shortcut for avoiding pybel dependency\n if not HAS_OB and in_format == 'json' and out_format == 'json':\n return dumps(json.loads(data) if isinstance(data, str) else data)\n elif not HAS_OB:\n raise ImportError(\"Chemical file format conversion requires pybel.\")\n\n # Bring up with open babel dev: mmcif seems to be a better parser than cif\n if in_format == 'cif':\n in_format = 'mmcif'\n\n # These use the open babel library to interconvert, with additions for json\n if in_format == 'json':\n mol = json_to_pybel(json.loads(data) if isinstance(data, str)\n else data)\n elif in_format == 'pybel':\n mol = data\n else:\n mol = pybel.readstring(in_format, data)\n\n # Infer structure in cases where the input format has no specification\n if not mol.OBMol.HasNonZeroCoords():\n mol.make3D()\n\n # Make P1 if that's a thing, recalculating bonds in process\n if in_format == 'mmcif' and hasattr(mol, 'unitcell'):\n mol.unitcell.FillUnitCell(mol.OBMol)\n mol.OBMol.ConnectTheDots()\n mol.OBMol.PerceiveBondOrders()\n\n mol.OBMol.Center()\n\n if out_format == 'pybel':\n return mol\n elif out_format == 'object':\n return pybel_to_json(mol, name)\n elif out_format == 'json':\n return dumps(pybel_to_json(mol, name))\n else:\n return mol.write(out_format)", "title": "" }, { "docid": "b169237a3907ef92af9b24ad28b8908b", "score": "0.4704999", "text": "def json_to_document(self, resp):\n document = Document()\n if 'id' in resp:\n document.set_id(resp['id'])\n if 'file_name' in resp:\n document.set_file_name(resp['file_name'])\n if 'content_type' in resp:\n document.set_content_type(resp['content_type'])\n if 'versions' in resp:\n for value in resp['versions']:\n version = Version()\n if 'id' in value:\n version.set_id(value['id'])\n if 'uploaded_by' in value:\n version.set_uploaded_by(value['uploaded_by'])\n if 'description' in value:\n version.set_description(value['description'])\n if 'file_size' in value:\n version.set_file_size(value['file_size'])\n if 'version' in value:\n version.set_version(value['version'])\n if 'uploaded_date' in value:\n version.set_uploaded_date(value['uploaded_date'])\n if 'uploaded_date_long in value':\n version.set_uploaded_date_long(value['uploaded_date_long'])\n document.set_versions(version)\n if 'folder' in resp:\n folder = Folder()\n folder.set_id(resp['folder']['id'])\n folder.set_name(resp['folder']['name'])\n folder.set_is_discussion(resp['folder']['is_discussion'])\n document.set_folder(folder)\n if 'link' in resp: \n if 'self' in resp['link']:\n if 'url' in resp['link']['self']:\n document.set_url(resp['link']['self']['url'])\n return document", "title": "" }, { "docid": "a578268675a209e7440f20201b021260", "score": "0.4692783", "text": "def convert(self):\n raise NotImplementedError", "title": "" }, { "docid": "0e8bf8de0d8fa04f77ebec6d16e26082", "score": "0.4692599", "text": "def output_to_desired(doc_topic_location=None, topic_word_location=None, total_topic_location=None):\n doc_topic = numpy.loadtxt(doc_topic_location, delimiter=\"\\n\", dtype=str)\n topic_word = numpy.loadtxt(topic_word_location, delimiter=\"\\n\", dtype=str)\n total_topic = numpy.loadtxt(total_topic_location, delimiter=\" \", dtype=str)[1:]\n no_of_topics = len(total_topic)\n no_of_docs = len(doc_topic)\n no_of_words = len(topic_word)\n doc_topic_numpy 
= numpy.zeros((no_of_docs, no_of_topics))\n topic_word_numpy = numpy.zeros((no_of_topics, no_of_words))\n for doc_number, i_chunk in enumerate(doc_topic):\n i_chunk = i_chunk.split(\" \")[2:]\n for i_i_chunk in i_chunk:\n topic, weight = i_i_chunk.split(\":\")\n doc_topic_numpy[doc_number, topic] = int(weight)\n for word_number, i_word in enumerate(topic_word):\n i_word = i_word.split(\" \")[1:]\n for i_i_word in i_word:\n topic, weight = i_i_word.split(\":\")\n topic_word_numpy[topic, word_number] = int(weight)\n\n # normalize\n # doc_topic_numpy_norm = normalize(doc_topic_numpy, norm='l1', axis=1)\n # topic_word_numpy_norm = normalize(topic_word_numpy, norm='l1', axis=0)\n\n # dont normalize\n doc_topic_numpy_norm = doc_topic_numpy\n topic_word_numpy_norm = topic_word_numpy\n\n # replace zero value with minimum value\n # doc_topic_numpy_norm[doc_topic_numpy_norm == 0] = 1 ** -15\n # topic_word_numpy_norm[topic_word_numpy_norm == 0] = 1 ** -15\n\n return doc_topic_numpy_norm, topic_word_numpy_norm", "title": "" }, { "docid": "d251414406307a3d19e516bd3f25d0d7", "score": "0.46895579", "text": "def transformDocuments2(D, L=[], T=[], **kargs): # this is not the same as seqTransform.transform()\n def get_cohort(): # only needed if save_ <- True\n try: \n return kargs['cohort']\n except: \n pass \n raise ValueError, \"cohort info is mandatory for saving a new copy of labeled document.\"\n def determined_doctype(): \n pass \n def show_stats(): \n print('transformDocuments2> nD: %d, nT: %d, nL: %d' % (len(D), len(T), len(L)))\n cohort_name = kargs.get('cohort', '?')\n if len(L) > 0: \n n_classes = len(set(L)) # seqparams.arg(['n_classes', ], default=1, **kargs) \n print(' + Stats: n_docs: %d, n_classes:%d | cohort: %s' % (len(D), n_classes, cohort_name))\n else: \n print(' + Stats: n_docs: %d, n_classes:? (no labeling info) | cohort: %s' % (len(D), cohort_name)) \n return\n def do_slice(): \n if not kargs.has_key('slice_policy'): return False\n if kargs['slice_policy'].startswith(('noop', 'reg', 'compl', )): # effective operations \n return False \n return True\n\n from tdoc import TDoc # base class defined in seqparams\n import seqReader as sr\n\n seq_ptype = kargs.pop('seq_ptype', 'regular')\n D, L, T = transformDocuments(D, L=L, T=T, \n \n # document-wise filter\n policy=kargs.pop('policy', 'empty'), # 'empty': remove empty document, 'unknown'\n min_ncodes=kargs.pop('min_ncodes', 3), \n \n # content filter\n seq_ptype=seq_ptype, \n predicate=kargs.pop('predicate', None), \n simplify_code=kargs.pop('simplify_code', False)) # T contains typically just timestamps\n # ret = {}\n # ret['sequence'] = docs \n # if kargs.has_key('labels') and len(labels) > 0: ret['label'] = labels\n # if kargs.has_key('items') and len(items) > 0: ret['item'] = items \n \n ### save the transformed documents but this would require extra parameters not relevant to transformation operaiton itself (e.g. 
corhot)\n # becuase we need it to name the new file\n\n # cut transform (predignosis, postdiagnosis, up-until a set of code, regular, etc.)\n # if do_slice(): \n\n # # inplace operation by default\n # # [note] slice_predicate is not the same as the predicate for transformDocuments\n # nD0 = len(D)\n # D, T = sliceDocuments(D, T=T, \n # policy=kargs.get('slice_policy', 'noop'), \n # cohort=get_cohort(), \n # predicate=kargs.get('slice_predicate', None), # infer predicate from cohort if possible\n # cutpoint=kargs.get('cutpoint', None), \n # n_active=1, \n # inclusive=kargs.get('inclusive', True))\n # assert len(D) == len(T)\n # assert len(D) == nD0, \"size of document set should not be different after splicing nD: %d -> %d\" % (len(D), nD0)\n\n if kargs.get('save_', False): \n overwrite = True\n fpath = TDoc.getPath(cohort=get_cohort(), seq_ptype=seq_ptype, doc_type='labeled', ext='csv') # usually there is one file per cohort \n if not os.path.exists(fpath) and overwrite: \n # path params: cohort=get_cohort(), seq_ptype=seq_ptype, doctype=doctype, ext='csv', baesdir=basedir\n header = ['sequence', 'timestamp', 'label', ]\n adict = {h: [] for h in header}\n adict['sequence'], adict['timestamp'], adict['label'] = D, T, L\n df.to_csv(fpath, sep='|', index=False, header=True)\n print('transformDocuments2> Saving the transformed document (seq_ptype=%s) to:\\n%s\\n' % (seq_ptype, fpath))\n # sr.readDocToCSV(cohort=get_cohort(), sequences=D, timestamps=T, labels=L, seq_ptype=seq_ptype)\n show_stats()\n return (D, L, T)", "title": "" }, { "docid": "4e843978638bfc38768c1515c3c1342f", "score": "0.46863934", "text": "def convert_all(only_unconverted=True):\n site = getSite()\n\n qi = getToolByName(site, 'portal_quickinstaller', None)\n if not qi:\n return\n if not qi.isProductInstalled('collective.documentviewer'):\n return\n if getRequest().get('plone.app.contenttypes_migration_running', False):\n \"\"\"Don't migrate while running a plone.app.contenttypes migration.\n \"\"\"\n return\n\n cat = getToolByName(site, 'portal_catalog')\n res = cat(portal_type='File')\n length = len(res)\n\n async_enabled = celeryInstalled()\n\n for cnt, item in enumerate(res, 1):\n\n logger.info('processing %s/%s', cnt, length)\n\n obj = item.getObject()\n\n settings = Settings(obj)\n if only_unconverted and settings.successfully_converted:\n continue\n\n gsettings = GlobalSettings(site)\n\n if not allowedDocumentType(obj, gsettings.auto_layout_file_types):\n continue\n\n auto_layout = gsettings.auto_select_layout\n if auto_layout and obj.getLayout() != 'documentviewer':\n obj.setLayout('documentviewer')\n\n if obj.getLayout() == 'documentviewer' and gsettings.auto_convert:\n queueJob(obj)\n if not async_enabled:\n # conversion lasts an eternity. commit the results immediately.\n transaction.commit()", "title": "" }, { "docid": "69d52a637a84577724c025eba40e217b", "score": "0.46833774", "text": "def processDoc(fname, options={}):\n # add script arguments to a comment in the generated metadata\n tnow = d1_common.date_time.local_now_iso()\n comment = lxml.etree.Comment(\n \"Warning: This file was generated by an automated process. 
Manual edits may \"\n \"be overwritten without further warning.\\n\"\n \"timestamp:: %s\\n\"\n \"created_with:: generate_sysmeta.py\\n\"\n \"arguments:: %s\\n\"\n \"command:: generate_sysmeta.py %s\\n\"\n % (tnow, repr(sys.argv[1:]), \" \".join(sys.argv[1:]))\n )\n sysm = d1_test.instance_generator.system_metadata.generate_from_file_path(\n fname, options\n )\n root = lxml.etree.fromstring(sysm.toxml(\"utf-8\"))\n root.insert(0, comment)\n pxml = lxml.etree.tostring(\n root, pretty_print=True, encoding=\"utf-8\", xml_declaration=True\n )\n return pxml", "title": "" }, { "docid": "96bdf7e968f1de9f016487b58681c7b9", "score": "0.46760288", "text": "def parse_doc(self, document):\r\n for passage in document.passages:\r\n for sentence in passage.sentences:\r\n text = sentence.text\r\n tree = self.parse(text)\r\n if tree:\r\n sentence.infons['parse tree'] = str(tree)\r\n else:\r\n sentence.infons['parse tree'] = None\r\n logging.exception(\r\n 'No parse tree for sentence: %s', sentence.offset)\r\n return document", "title": "" }, { "docid": "735b5a13677bc61be9443781c4b73a2c", "score": "0.46744812", "text": "def transform(self, doc, y=None):\n check_data_format(doc)\n negative, positive, neutral = [], [], []\n NOUN_pos, NOUN_neg, VERB_pos, VERB_neg, ADJ_pos, ADJ_neg, ADV_pos, ADV_neg = [], [], [], [], [], [], [], []\n for line in doc:\n pos, neg, neu = 0, 0, 0\n pos_counts = Counter()\n tokens = preprocess(line, lowercase=True)\n gen = (tup for tup in pos_tag(tokens) if tup[0].strip('_NEG') in self.words)\n for token, tag in gen:\n wntag = get_wordnet_pos(tag)\n if token.endswith('_NEG'):\n if self.words[token.strip('_NEG')] == 'positive':\n neg += 1\n if wntag:\n pos_counts[wntag + '_neg'] += 1\n elif self.words[token.strip('_NEG')] == 'negative':\n pos += 1\n if wntag:\n pos_counts[wntag + '_pos'] += 1\n else:\n neu += 1\n else:\n if self.words[token.strip('_NEG')] == 'positive':\n pos += 1\n if wntag:\n pos_counts[wntag + '_pos'] += 1\n elif self.words[token.strip('_NEG')] == 'negative':\n neg += 1\n if wntag:\n pos_counts[wntag + '_neg'] += 1\n else:\n neu += 1\n negative.append(neg)\n positive.append(pos)\n neutral.append(neu)\n NOUN_pos.append(pos_counts['n_pos'])\n NOUN_neg.append(pos_counts['n_neg'])\n VERB_pos.append(pos_counts['v_pos'])\n VERB_neg.append(pos_counts['v_neg'])\n ADJ_pos.append(pos_counts['a_pos'])\n ADJ_neg.append(pos_counts['a_neg'])\n ADV_pos.append(pos_counts['r_pos'])\n ADV_neg.append(pos_counts['r_neg'])\n features = list_to_numpy_vector([positive, neutral, negative, NOUN_pos, NOUN_neg, VERB_pos, VERB_neg, ADJ_pos,\n ADJ_neg, ADV_pos, ADV_neg])\n return csr_matrix(np.hstack(features))", "title": "" }, { "docid": "3487596048784e8a19dab7011dd9878c", "score": "0.46687654", "text": "def pythonise_(self, **options):\n document = options.get('document', False)\n comment = options.get('comment', False)\n prefix = options.get('prefix', None)\n\n if prefix is not None:\n myName = \"%s.%s\" % (prefix, self._internal_name)\n else:\n myName = self._internal_name\n\n result = []\n if document:\n result.append(\"%s.document_(\\\"\\\"\\\"%s\\\"\\\"\\\")\" % (\n myName,\n self._internal_documentation)\n )\n if comment:\n result.append(\"# %s: %s\" % (\n myName, self._internal_documentation.replace(\n \"\\n\", \"\\n# \"),\n ))\n for attr in self._internal_settings:\n if attr in self._internal_children:\n result.append(\"%s.section_(\\'%s\\')\" % (myName, attr))\n result.extend(getattr(self, attr).pythonise_(\n document=document, comment=comment, prefix=myName))\n continue\n if attr 
in self._internal_docstrings:\n if comment:\n result.append(\"# %s.%s: %s\" % (\n myName, attr,\n self._internal_docstrings[attr].replace(\"\\n\", \"\\n# \")\n ))\n result.append(\"%s.%s = %s\" % (\n myName,\n attr, formatAsString(getattr(self, attr))\n ))\n\n if attr in self._internal_docstrings:\n if document:\n result.append(\n \"%s.document_(\\\"\\\"\\\"%s\\\"\\\"\\\", \\'%s\\')\" % (\n myName,\n self._internal_docstrings[attr], attr))\n return result", "title": "" }, { "docid": "43dd91237cadfb5d244fad464bc2f63f", "score": "0.46669444", "text": "def latex2rst(input, output, encoding=\"utf-8\", fLOG=noLOG, temp_file=None):\n if encoding not in (\"utf-8\", \"utf8\"):\n with open(input, \"r\", encoding=encoding) as f:\n content = f.read()\n if temp_file is None:\n raise ValueError(\"temp_file cannot be None, encoding is not utf-8 and a temporary \" +\n \"file will be used to do the conversion.\")\n with open(temp_file, \"w\", encoding=\"utf-8\") as f:\n f.write(content)\n input = temp_file\n else:\n temp_file = None\n cmd = f'-s -t rst --toc \"{input}\" -o \"{output}\"'\n out, err = call_pandoc(cmd, fLOG=fLOG)\n if temp_file is not None:\n os.remove(temp_file)\n return out, err", "title": "" }, { "docid": "4cac623fad8d9b41adf1d8521533f30d", "score": "0.46626896", "text": "def domxml_to_native(info_format, xml_file, options, **dargs):\r\n cmd = \"domxml-to-native %s %s %s\" % (info_format, xml_file, options)\r\n return command(cmd, **dargs)", "title": "" }, { "docid": "e10840a206d3e35cdff16ffd86942fea", "score": "0.4652952", "text": "def transform(self, doc, y=None):\n check_data_format(doc)\n positive, negative, neutral = [], [], []\n lines = [preprocess(line, word_transformation='', handle_negation=False, lowercase=True) for line in doc]\n for line in tqdm(lines):\n pos, neg, neu = 0, 0, 0\n tagged = pos_tag(line)\n for word, tag in tagged:\n wntag = get_wordnet_pos(tag)\n synsets = wn.synsets(word, pos=wntag)\n if not synsets:\n continue\n synset = synsets[0]\n swn_synset = swn.senti_synset(synset.name())\n pos_score = swn_synset.pos_score()\n neu_score = swn_synset.obj_score()\n neg_score = swn_synset.neg_score()\n pos += pos_score\n neg += neg_score\n neu += neu_score\n positive.append(pos)\n negative.append(neg)\n neutral.append(neu)\n features = list_to_numpy_vector([positive, neutral, negative])\n return csr_matrix(np.hstack(features))", "title": "" }, { "docid": "bc3425f7cc6898666d2bad7898805900", "score": "0.4651358", "text": "def convert_outputs(self):\n self.out('relaxed_structure', self.ctx.workchain.outputs.relax__structure)\n self.out('total_energy', get_total_energy(self.ctx.workchain.outputs.misc))\n self.out('forces', get_forces(self.ctx.workchain.outputs.forces))\n self.out('stress', get_stress(self.ctx.workchain.outputs.stress))", "title": "" }, { "docid": "98d3179858338309af396d4553abc7df", "score": "0.46504486", "text": "def _save(self):\n data = json.dumps(self._doc)\n f = File(self._outfile_name)\n f.write(data)", "title": "" }, { "docid": "ef5d0e5c8a274e31947b72edc8145fea", "score": "0.464871", "text": "def handle_raw_output(ctx, data):\n if ctx.obj['format'] == 'json':\n print(json_dump(data))\n exit(0)\n if ctx.obj['format'] == 'yaml':\n print(yaml_dump(data), end='')\n exit(0)", "title": "" }, { "docid": "9fa5f8722da2df65653d218b78411a0b", "score": "0.46457294", "text": "def convert(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "97bb9a0553691bf5b92a379abda38d7d", "score": "0.46423066", "text": "def cmd_save_to(ooo_info,args):\n dest_file = 
args.split(',')[0]\n format = args.split(',')[1]\n doc = ooo_info['doc']\n dest_url = systemPathToFileUrl(dest_file)\n logger.info( \"save to %s from %s\"%(format,dest_file))\n pp_filter = PropertyValue()\n pp_url = PropertyValue()\n pp_overwrite = PropertyValue()\n pp_filter.Name = \"FilterName\"\n pp_filter.Value = format\n pp_url.Name = \"URL\"\n pp_url.Value = dest_url\n pp_overwrite.Name = \"Overwrite\"\n pp_overwrite.Value = True\n outprop = (pp_filter, pp_url, pp_overwrite)\n doc.storeToURL(dest_url, outprop)", "title": "" }, { "docid": "74d8f94af1ad24851ca023d2997700de", "score": "0.46412227", "text": "def process(self, doc):\n\n # only return a list if given a list\n singleton_input = not isinstance(doc, list)\n if singleton_input:\n docs = [doc]\n else:\n docs = doc\n\n if docs and isinstance(docs[0], str):\n docs = [Document([], text=text) for text in docs]\n\n # run language identification\n docs_w_langid = self.lang_id_pipeline.process(docs)\n\n # create language specific batches, store global idx with each doc\n lang_batches = {}\n for doc in docs_w_langid:\n if doc.lang not in lang_batches:\n lang_batches[doc.lang] = []\n lang_batches[doc.lang].append(doc)\n\n # run through each language, submit a batch to the language specific pipeline\n for lang in lang_batches.keys():\n self._update_pipeline_cache(lang)\n self.pipeline_cache[lang](lang_batches[lang])\n\n # only return a list if given a list\n if singleton_input:\n return docs_w_langid[0]\n else:\n return docs_w_langid", "title": "" }, { "docid": "02f4993947f8f917163ff156accee51f", "score": "0.46357006", "text": "def format_view(self, doc, path, parameterize=False):\n path = '' if path is None else path\n\n # Most params should always be replaced.\n params = self.params_pod()\n params.update(self.params_doc(path, doc))\n params = self.params_lower(path, params)\n\n path = utils.safe_format(path, **params)\n\n if parameterize:\n path = self.parameterize(path)\n\n return self.strip_double_slash(path)", "title": "" }, { "docid": "9688cad880e17b63182c3bd74daec6dc", "score": "0.4633948", "text": "def parse_pdf(self, documents_path: Path, output_path: Path):\n output_string = StringIO()\n\n with open(documents_path, 'rb') as in_file:\n parser = PDFParser(in_file)\n doc = PDFDocument(parser)\n rsrcmgr = PDFResourceManager()\n device = TextConverter(rsrcmgr, output_string, laparams=LAParams())\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n\n for page in PDFPage.create_pages(doc):\n interpreter.process_page(page)\n\n read_text = output_string.getvalue()\n\n # write input to txt file\n with open(output_path, 'w', encoding='utf-8') as outfile:\n outfile.write(read_text)", "title": "" }, { "docid": "16e07df3b7570d8bf92c49d7763ee442", "score": "0.46286482", "text": "def convert(self):\n log.debug('enhanced SR not support yet, sorry!')", "title": "" }, { "docid": "cc1ecf6282d16a048a824bf4fea4b944", "score": "0.46247196", "text": "def doc_to_tensor(document):\n return to_var(torch.tensor([token_to_id(token) for token in document.tokens]))", "title": "" }, { "docid": "e0ea9800acb9193da33bde4fc5c2afc6", "score": "0.4620172", "text": "def transform(self, doc, y=None):\n check_data_format(doc)\n pos_positives, pos_negatives, neg_negatives, neg_positives = [], [], [], []\n lines = [preprocess(line, word_transformation='', lowercase=True) for line in doc]\n for line in tqdm(lines):\n pos_pos, pos_neg, neg_neg, neg_pos = 0, 0, 0, 0\n for word in line:\n if word.endswith(\"_NEG\"):\n w = word.strip('_NEG')\n if w in self.words_uni:\n 
score = self.words_uni[w]\n if score > 0:\n pos_neg += 1\n else:\n neg_neg += 1\n elif word in self.words_uni:\n score = self.words_uni[word]\n if score >0:\n pos_pos += 1\n else:\n neg_pos += 1\n\n pos_positives.append(pos_pos)\n neg_positives.append(neg_pos)\n pos_negatives.append(pos_neg)\n neg_negatives.append(neg_neg)\n\n features = list_to_numpy_vector([pos_positives, neg_positives, pos_negatives, neg_negatives])\n return csr_matrix(np.hstack(features))", "title": "" } ]
617a52f77ef670d3ce933cddbcc6dee0
Generate a proof that the ith leaf is in the tree.
[ { "docid": "47ae9b279773cebefee87327b4deffc7", "score": "0.6056217", "text": "def proof_of_inclusion(self, node_number):\n proof = str(self.root.data)\n node = self.leaves[node_number]\n while node.data != self.root.data:\n proof += \" \" + node.get_prefix_of_brother() + node.get_brother_data()\n node = node.parent\n return proof", "title": "" } ]
[ { "docid": "ce657d99ef16ca127dd9ea0934ed7499", "score": "0.61243963", "text": "def test_proofs(self):\n\n for i in range(0, self.num_iters):\n elems = [random.randint(0, 100000000) for i in range(0, 100)]\n elems.sort()\n mht = MHT.new(elems)\n\n for elem in elems:\n proof = mht.contains(elem)\n self.assertTrue(MHTUtils.verify(mht.root.hval, elem, proof),\n 'Returned proof does not verify that %d is in \\\n the tree' %elem)", "title": "" }, { "docid": "6ba4ce6c0cfea6134f7d028de47c28e2", "score": "0.5940333", "text": "def binary_tree():\n \n yield 1\n yield 1\n \n s = 1 # sum of previous terms\n c = 1 # current value\n p = 1 # previous value\n \n while True:\n c = 2*c*s+(c*c)\n s += p\n p = c\n yield c", "title": "" }, { "docid": "4e4821e56e301116ae8fb32710c675ea", "score": "0.5814657", "text": "def generate_proof_of_inclusion(self, digest):\n # initialize\n proof = [self.root.data]\n route_to_digest = self._get_route_to_leaf(digest)\n\n # skip nodes that can be computed using only the digest value\n while len(route_to_digest) > 1 and route_to_digest[-2].left is route_to_digest[-2].right:\n route_to_digest.pop(-1)\n # if at least two nodes were removed, append the last hash of node that its subtree is actually a linked list\n if len(route_to_digest) < self.depth:\n proof.append(route_to_digest[-1].data)\n # if only one node was removed, return it to the route - it's smaller than a hash\n elif len(route_to_digest) == self.depth:\n proof.append(route_to_digest[-1].left.data)\n\n # create the rest of the proof\n for i in range(len(route_to_digest) - 2, -1, -1):\n # if the left son is on the path to digest, add the other sibling to the proof\n if route_to_digest[i].left is route_to_digest[i + 1]:\n proof.append(route_to_digest[i].right.data)\n else:\n proof.append(route_to_digest[i].left.data)\n\n # return proof\n return proof", "title": "" }, { "docid": "86ea376b26b68753427a1a4abe037521", "score": "0.5622951", "text": "async def test_proof_of_inclusion_by_hash_no_ancestors(data_store: DataStore, tree_id: bytes32) -> None:\n await data_store.autoinsert(key=b\"\\x04\", value=b\"\\x03\", tree_id=tree_id, status=Status.COMMITTED)\n root = await data_store.get_tree_root(tree_id=tree_id)\n assert root.node_hash is not None\n node = await data_store.get_node_by_key(key=b\"\\x04\", tree_id=tree_id)\n\n proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)\n\n assert proof == ProofOfInclusion(node_hash=node.hash, layers=[])", "title": "" }, { "docid": "31364fddac6e47f7cdc1a3e3b88b1d7d", "score": "0.556975", "text": "def find_leaf(yes_list, no_list, records):\r\n illnesses_dict = Counter()\r\n for record in records:\r\n if symptom_check(no_list, record, yes_list):\r\n illnesses_dict[record.get_illness()] += 1\r\n if len(illnesses_dict) == 0:\r\n # if no match to record was found\r\n return records[0].get_illness()\r\n return max(illnesses_dict, key=illnesses_dict.get)", "title": "" }, { "docid": "426a2fa1ce1e935b1ae21efeb089f903", "score": "0.5549291", "text": "def square_tree(t):\r\n return tree(label(t)**2, [square_tree(branch) for branch in branches(t)])", "title": "" }, { "docid": "85c7afb63d08611a7e533458616da4a6", "score": "0.5467426", "text": "def inorder_iterative_binary_tree(self, root, p):\n stack = list()\n result = list()\n temp = root\n\n while temp or stack:\n if temp:\n stack.append(temp)\n temp = temp.left\n else:\n temp = stack.pop()\n result.append(temp)\n temp = temp.right\n\n k = self.find_next(result, p)\n\n return k", "title": "" }, { 
"docid": "49e37f51b209591d98041997aaf5b821", "score": "0.54661494", "text": "def test_root(): \n root = simple_forest().isroot()\n assert root[4] == True\n assert root.sum() == 1", "title": "" }, { "docid": "2ee3848976b9512c9a14eb11bd9fe0d8", "score": "0.54637444", "text": "def proof_of_work(self, last_proof:int) -> int:\n \n # if other node found a valid proof, this would be set to False \n global stop_mining \n proof = random.randint(1,1000)\n\n # until find a valid proof or other node has find one\n while (not self.valid_proof(last_proof, proof)) and (not stop_mining):\n proof += 1\n # print(proof)\n return proof", "title": "" }, { "docid": "b3773c7041c27af0148b32172e1de466", "score": "0.54631054", "text": "async def test_proof_of_inclusion_by_hash_program(data_store: DataStore, tree_id: bytes32) -> None:\n\n await add_01234567_example(data_store=data_store, tree_id=tree_id)\n node = await data_store.get_node_by_key(key=b\"\\x04\", tree_id=tree_id)\n\n proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)\n\n assert proof.as_program() == [\n b\"\\x04\",\n [\n bytes32.fromhex(\"fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1\"),\n bytes32.fromhex(\"6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd\"),\n bytes32.fromhex(\"c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2\"),\n ],\n ]", "title": "" }, { "docid": "154f9edb588f8ed677dbbff8c124012d", "score": "0.5440042", "text": "def leafGenerator(self, root): \n if root.left:\n return leafGenerator(root.left)\n if root.right:\n return leafGenerator(root.right)\n else:\n yield root.val", "title": "" }, { "docid": "583b6f095bb5e366dd905003114ed88f", "score": "0.5417039", "text": "def proof_of_work():\n last_block = blockchain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not valid_proof(open_transactions, last_hash, proof):\n proof += 1\n #print(open_transactions)\n print(\"proof \", proof)\n return proof", "title": "" }, { "docid": "51ec812abf00480f36bed5ba024d5969", "score": "0.5413761", "text": "def test_biophys_table_parent_of01(self):\n result = objectundertest.biophys_table_parent_of(111)\n self.assertEqual(result, 110)", "title": "" }, { "docid": "3c0ff43155383f67e7ed00acdc46f1c0", "score": "0.5379388", "text": "async def test_proof_of_inclusion_by_hash(data_store: DataStore, tree_id: bytes32) -> None:\n await add_01234567_example(data_store=data_store, tree_id=tree_id)\n root = await data_store.get_tree_root(tree_id=tree_id)\n assert root.node_hash is not None\n node = await data_store.get_node_by_key(key=b\"\\x04\", tree_id=tree_id)\n\n proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)\n\n print(node)\n await _debug_dump(db=data_store.db_wrapper)\n\n expected_layers = [\n ProofOfInclusionLayer(\n other_hash_side=Side.RIGHT,\n other_hash=bytes32.fromhex(\"fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1\"),\n combined_hash=bytes32.fromhex(\"36cb1fc56017944213055da8cb0178fb0938c32df3ec4472f5edf0dff85ba4a3\"),\n ),\n ProofOfInclusionLayer(\n other_hash_side=Side.RIGHT,\n other_hash=bytes32.fromhex(\"6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd\"),\n combined_hash=bytes32.fromhex(\"5f67a0ab1976e090b834bf70e5ce2a0f0a9cd474e19a905348c44ae12274d30b\"),\n ),\n ProofOfInclusionLayer(\n other_hash_side=Side.LEFT,\n 
other_hash=bytes32.fromhex(\"c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2\"),\n combined_hash=bytes32.fromhex(\"7a5193a4e31a0a72f6623dfeb2876022ab74a48abb5966088a1c6f5451cc5d81\"),\n ),\n ]\n\n assert proof == ProofOfInclusion(node_hash=node.hash, layers=expected_layers)", "title": "" }, { "docid": "f004dc177f5122ab7fa4405e92a393fb", "score": "0.5376612", "text": "def isleaf(self):\n return self.preceeding == 0", "title": "" }, { "docid": "95aa744bbeb7e14d64108ed8cd14c8e3", "score": "0.5369018", "text": "def test_insert_branching():\n test_tree = trie.TrieTree()\n test_tree.insert('fir')\n test_tree.insert('fae')\n assert len(test_tree.root.next_list) == 1\n assert len(test_tree.root.next_list[0].next_list) == 2", "title": "" }, { "docid": "aafa0e0f747680731e2069651fec1796", "score": "0.5365063", "text": "def square_tree(t):\n t.entry = t.entry ** 2\n for branch in t.branches:\n square_tree(branch)", "title": "" }, { "docid": "690d9e5d9281b4d93d496112c2ab2bd4", "score": "0.536333", "text": "def square_tree(t):\n return tree(label(t) **2, [square_tree(branch) for branch in branches(t)])", "title": "" }, { "docid": "26a472697afd1df23db49c7851213d2a", "score": "0.5317311", "text": "def virtice_match(tree, number):\n count = 0\n if tree.is_leaf():\n if tree.label == number:\n count+=1\n return count\n else:\n if tree.label == number:\n count+=1\n for branch in tree.branches:\n count += virtice_match(branch, number)\n return count", "title": "" }, { "docid": "cf661f1302d2c5cdf9bf4a524035dcb0", "score": "0.5311621", "text": "def test_binary_tree():", "title": "" }, { "docid": "4c95a94cf7880bf09dfa091ee351f9db", "score": "0.53066725", "text": "def proof_of_work(self,last_proof):\n proof = 0\n while not self.valid_proof(proof,last_proof):\n proof += 1\n return proof", "title": "" }, { "docid": "2e04ac0273083b5b9a3dd2384024e900", "score": "0.52912134", "text": "def verify(hashed_leaves, auth_set, height, hashfun):\n stack = []\n i_auth = 0\n i_leaf = 0\n hash_stage = [None, None]\n while i_auth < len(auth_set) or i_leaf < len(hashed_leaves):\n # Pick the next given leaf\n height = 0\n index, current_hash = hashed_leaves[i_leaf]\n i_leaf += 1\n while True:\n hash_stage[index % 2] = current_hash\n needed_node = (height, index ^ 1)\n # Consume as many nodes from the stack and\n # the auth set as possible\n if len(stack) > 0 and needed_node == stack[-1][0]:\n _, hash_stage[(index % 2) ^ 1] = stack.pop()\n elif i_auth < len(auth_set) and \\\n needed_node == auth_set[i_auth][0]:\n _, hash_stage[(index % 2) ^ 1] = auth_set[i_auth]\n i_auth += 1\n else: break\n current_hash = hashfun(hash_stage[0], hash_stage[1])\n height += 1\n index >>= 1\n stack.append(((height, index), current_hash))\n print(\"New stack: {}\".format(stack))\n print(\"Remaining auth nodes: {}\".format(auth_set[i_auth:]))\n\n assert(len(stack) == 1 and (height, 0) == stack[0][0])\n # Return the root's hash\n return stack[0][1]", "title": "" }, { "docid": "bd869dbe94d58e3e3a9a92d075d6d2f3", "score": "0.5257572", "text": "def find_leaf(self, x, y):\n if not self.qt.sq.containing((x, y)):\n return\n self.qt.find_leaf(x, y)", "title": "" }, { "docid": "f407d7d6f1c55ac74b74e22044253417", "score": "0.5256717", "text": "def leaf_hash(D):\r\n return D", "title": "" }, { "docid": "c187b4a91abdc8b808ce56f25de2d676", "score": "0.5249078", "text": "async def test_proof_of_inclusion_by_hash_bytes(data_store: DataStore, tree_id: bytes32) -> None:\n await add_01234567_example(data_store=data_store, tree_id=tree_id)\n node = await 
data_store.get_node_by_key(key=b\"\\x04\", tree_id=tree_id)\n\n proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)\n\n expected = (\n b\"\\xff\\x04\\xff\\xff\\xa0\\xfbf\\xfeS\\x9b>\\xb2\\x02\\r\\xfb\\xfa\\xdf\\xd6\\x01\\xfa1\\x85!)\"\n b\"+A\\xf0L W\\xc1o\\xcak\\x94|\\xa1\\xff\\xa0m:\\xf8\\xd9=\\xb9H\\xe8\\xb6\\xaaC\\x86\\x95\"\n b\"\\x8e\\x13|k\\xe8\\xba\\xb7&\\xdb\\x86x\\x95\\x94\\xb3X\\x8b5\\xad\\xcd\\xff\\xa0\\xc8R\\xec\"\n b\"\\xd8\\xfbaT\\x9a\\nB\\xf9\\xeb\\x9d\\xdee\\xe6\\xc9J\\x01\\x93M\\xbd\\x9c\\x1d5\\xab\\x94\"\n b\"\\xe2\\xa0\\xaeX\\xe2\\x80\\x80\"\n )\n\n assert bytes(proof.as_program()) == expected", "title": "" }, { "docid": "51db11ed3cec06afe2bca749aa568a93", "score": "0.52330416", "text": "def test_leaves():\n f = simple_forest()\n assert f.leaves_of_a_subtree([0, 1]) == True\n assert f.leaves_of_a_subtree([0, 3]) == False\n assert f.leaves_of_a_subtree([1, 3]) == False\n assert f.leaves_of_a_subtree([0, 1, 3]) == True\n assert f.leaves_of_a_subtree([1]) == True", "title": "" }, { "docid": "3e5a9fea543239277c7dc694cbbe3564", "score": "0.52209544", "text": "def test_leaf_nodes_single_level(self):\n root = self.sample.process_tree\n pieces = 'abcdefg'\n for piece in pieces:\n node = self.sample._insert_node(None, piece, 1, root)\n leaf_nodes = list(self.sample.leaf_nodes)\n self.assertEqual(len(leaf_nodes), len(pieces))\n for node in leaf_nodes:\n self.assertIn(node.piece, pieces)", "title": "" }, { "docid": "0447cc1dd1e886af30bca13d7d1d0714", "score": "0.5219343", "text": "def is_leaf(self,p):\n return self.num_children == 0", "title": "" }, { "docid": "4c5d3f1a2a55a6783ceb174fa51eb985", "score": "0.5217075", "text": "def construct_leaf_test(leaves):\n if leaves:\n return lambda node: node in leaves\n\n return lambda node: all([ch in string.digits for ch in node])", "title": "" }, { "docid": "5e56283531df48c2aada7910cf0f207c", "score": "0.5205885", "text": "def test_find_match_single(small_tree):\n tree = FM(small_tree, 1)\n assert len(tree) == 1\n for node in tree:\n assert node.val == 1", "title": "" }, { "docid": "55845bc906cfa59246934c436b5678e0", "score": "0.51916033", "text": "def test_biophys_table_parent_of03(self):\n result = objectundertest.biophys_table_parent_of(93)\n self.assertEqual(result, 90)", "title": "" }, { "docid": "4cf5697109a0db5028cc697ff91ef8eb", "score": "0.5190632", "text": "def square_tree(t):\n sq_branched = [square_tree(branch) for branch in branches(t)]\n return tree(label(t) ** 2, sq_branched)", "title": "" }, { "docid": "1cb7893c549875564a5d23f6e29b24da", "score": "0.5179883", "text": "def test_select_parent():\n pop = create_a_population()\n pop.dots[3].fitness = 1\n pop.total_fitness = 1\n dot = pop.select_parent()\n assert dot.instructions == pop.dots[3].instructions", "title": "" }, { "docid": "34d531976fd2cee88e53d6fd5ebeffba", "score": "0.5176065", "text": "def test_contains_is_in_tree():\n test_tree = trie.TrieTree()\n test_tree.insert('fir')\n test_tree.insert('fae')\n test_tree.insert('fox')\n assert test_tree.contains('fox')", "title": "" }, { "docid": "83a53dec9a50863163d0d991ff0a18c6", "score": "0.51670474", "text": "def leaf_procedure(self):\n return self._leaf_procedure", "title": "" }, { "docid": "29d712d9fa1bd2673e5c623d39b4128e", "score": "0.51333916", "text": "def inorder(tree: BinaryTree):\n if tree:\n inorder(tree.get_left_child())\n print(tree.get_root_value())\n inorder(tree.get_right_child())", "title": "" }, { "docid": "12a0bf5d275ffcdd71749df32dbbb859", "score": "0.5129729", 
"text": "def proof_of_work(self):\n last_block = self.__chain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not Verification.valid_proof(self.__open_transactions, last_hash, proof):\n proof += 1\n return proof", "title": "" }, { "docid": "502ff324680755cf2fa509bb4efe5d41", "score": "0.5129303", "text": "def _get_leaf(self, values) -> int:\n return self.id", "title": "" }, { "docid": "df8ee5f6b032b1e340e00a150d39578d", "score": "0.5122926", "text": "def test_biophys_table_parent_of02(self):\n result = objectundertest.biophys_table_parent_of(110)\n self.assertEqual(result, 110)", "title": "" }, { "docid": "604bd1232319f665fd6aa81423a806cf", "score": "0.5116977", "text": "def merkle_proof(tx, merkle_tree):\n #### YOUR CODE HERE\n treeLs = merkle_tree.leaves\n # txIndex = merkle_tree.leaves.index(tx)\n # if len(tx) == 1:\n # \treturn []\n # else:\n # \tfor i in range(0, len(lves), 2):\n # \t\tprint(lves[i], lves[i+1])\n\n def rProof(txs, tx, nodes):\n \tif len(txs) == 1:\n \t\treturn nodes\n \thashed = []\n \tH = 0\n \tfor i in range(0, len(txs), 2):\n \t\thashed.append(hash_data(txs[i] + txs[i+1]))\n \t\tif (txs[i] == tx):\n \t\t\tnodes.insert(0, Node('r', txs[i+1]))\n \t\t\tH = hash_data(tx + txs[i+1])\n \t\telif (txs[i+1] == tx):\n \t\t\tnodes.insert(0, Node('l', txs[i]))\n \t\t\tH = hash_data(txs[i] + tx)\n \treturn rProof(hashed, H, nodes)\n\n return rProof(treeLs, tx, [])", "title": "" }, { "docid": "3861d12c87f8d6b138784c53d669834c", "score": "0.51119906", "text": "def is_leaf(self):\r\n return self.positive_child is None", "title": "" }, { "docid": "930bf7355441088ad9e019042a2dfc8c", "score": "0.50974417", "text": "def get_leaf(self, values) -> int:\n return self._get_leaf(values)", "title": "" }, { "docid": "64f64f965ac49042af7eba5abdc76813", "score": "0.5092851", "text": "def height(t):\r\n if is_leaf(t):\r\n return 1\r\n return 1+ max(height(branch) for branch in branches(t))", "title": "" }, { "docid": "2a94995749ce76010a9e03c2165818b1", "score": "0.5079147", "text": "def test_fixture_left(left_heavy):\n assert left_heavy.root.val == 10\n assert left_heavy.root.left.val == 8\n assert left_heavy.root.left.left.val == 6\n assert left_heavy.root.left.left.left.val == 4", "title": "" }, { "docid": "db742e91122b67971e4a46930828fc84", "score": "0.507097", "text": "def is_leaf(tree):\n return not branches(tree)", "title": "" }, { "docid": "5c83e17ba67801c5c799d1e467f96f20", "score": "0.5059171", "text": "async def test_proof_of_inclusion_by_hash_equals_by_key(data_store: DataStore, tree_id: bytes32) -> None:\n\n await add_01234567_example(data_store=data_store, tree_id=tree_id)\n node = await data_store.get_node_by_key(key=b\"\\x04\", tree_id=tree_id)\n\n proof_by_hash = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)\n proof_by_key = await data_store.get_proof_of_inclusion_by_key(key=b\"\\x04\", tree_id=tree_id)\n\n assert proof_by_hash == proof_by_key", "title": "" }, { "docid": "38fc38cf7468dd04e1309bf49795d39c", "score": "0.50577486", "text": "def is_leaf(self):\r\n return len(self.children)==0 #si le noeud n'a pas d'enfant\r", "title": "" }, { "docid": "7789a81be8408e6e1546444dae517ca8", "score": "0.5055764", "text": "def proof_of_work(self, last_block):\n\n last_proof = last_block['proof']\n last_hash = self.hash(last_block)\n proof = 0\n while self.valid_proof(last_proof, proof, last_hash) is False:\n proof += 1\n return proof", "title": "" }, { "docid": 
"44096e50a2cf7ba583993e4d01b80a21", "score": "0.505135", "text": "def height(t):\n if is_leaf(t):\n return 0\n return 1 + max([height(branch) for branch in branches(t)])", "title": "" }, { "docid": "32c12a3c1a5f5667c1c3f7af83c87e3d", "score": "0.5027467", "text": "def test_walk_none_node_in_order():\n empty_bst = BST()\n assert empty_bst.in_order(lambda n: print(n)) is None", "title": "" }, { "docid": "b0aed6abadf07e6644ebb2f66b40d80b", "score": "0.5021815", "text": "def in_order(t):\n \"*** YOUR CODE HERE ***\"\n if t.is_leaf():\n yield t.root\n else:\n yield from in_order(t.left)\n yield t.root\n yield from in_order(t.right)", "title": "" }, { "docid": "fb053cadd9eacdd450af1438ea704220", "score": "0.50190103", "text": "def proof_or_counterexample(formula):\n proof_list = []\n # create list of proofs by model\n all_models_list = list(all_models(sorted(list(formula.variables()))))\n for model in all_models_list:\n\n if not evaluate(formula, model):\n return model\n else:\n proof_list.append(prove_in_model(formula, model))\n # run on each level of the tree\n tree_level_size = int(math.log(len(proof_list), 2))\n temp_proof_list = []\n for tree_level in range(0, tree_level_size):\n # run on each\n for proof_index in range(0, len(proof_list), 2):\n temp_proof_list.append(reduce_assumption(proof_list[proof_index + 1], proof_list[proof_index]))\n\n proof_list = temp_proof_list\n temp_proof_list = []\n assert len(proof_list) == 1\n\n return proof_list[0]\n # Task 6.5", "title": "" }, { "docid": "cbde7af7691bfddd8bf87e3546494d97", "score": "0.5005975", "text": "def verify_proof(tx, merkle_proof):\n #### YOUR CODE HERE\n mRev = merkle_proof[::-1]\n ret = tx\n for txn in mRev:\n if txn.direction == 'r':\n ret = hash_data(ret + txn.tx)\n else:\n ret = hash_data(txn.tx + ret)\n return ret", "title": "" }, { "docid": "f8f22e25a1caf38cac3b3c4ffcf8aaf1", "score": "0.50021034", "text": "def height(tree):\n if is_leaf(tree):\n return 0\n else: \n return 1 + max([height(branch) for branch in branches(tree)])", "title": "" }, { "docid": "4371e0f9ea414494c26f0ca094949ea9", "score": "0.49947542", "text": "def proof_of_work(block):\n block_string = json.dumps(block, sort_keys=True).encode()\n proof = 0\n while valid_proof(block_string, proof) is False:\n proof += 1\n return proof", "title": "" }, { "docid": "30dbb51aab42493e24542657e71d5663", "score": "0.49890673", "text": "def tree_policy(self, node):\n while not node.state.terminal_test():\n if not node.fully_explored():\n return self.expand(node)\n node = self.best_child(node)\n return node", "title": "" }, { "docid": "2b303785905c55f34c787e40421901c3", "score": "0.49889982", "text": "def _compute_upper_bound_loose(self, tree, n_parts, n_examples, n_features):\n c, m, l = n_parts, n_examples, n_features\n\n if c > m or c > tree.n_leaves:\n return 0\n elif c == m or c == 1 or m == 1:\n return 1\n elif m <= tree.n_leaves:\n return stirling(m, c)\n if tree not in self.pfub_table:\n self.pfub_table[tree] = {}\n if (c, m, l) not in self.pfub_table[tree]:\n N = 0\n k_left = m - tree.right_subtree.n_leaves\n k_right = m - tree.left_subtree.n_leaves\n N = 0\n if c == 2:\n N += 2*l * (1\n + 2 * self._compute_upper_bound_loose(tree.left_subtree, 2, k_left, l)\n + 2 * self._compute_upper_bound_loose(tree.right_subtree, 2, k_right, l)\n + 2 * self._compute_upper_bound_loose(tree.left_subtree, 2, k_left, l) * self._compute_upper_bound_loose(tree.right_subtree, 2, k_right, l)\n )\n else:\n N += 2*l * sum(\n sum(\n binom(a, c - b) * binom(b, c - a) * factorial(a + b - c) 
*\n self._compute_upper_bound_loose(tree.left_subtree, a, k_left, l) *\n self._compute_upper_bound_loose(tree.right_subtree, b, k_right, l)\n for b in range(max(1,c-a), c+1)\n )\n for a in range(1, c+1)\n )\n N *= m - tree.n_leaves\n\n if tree.left_subtree == tree.right_subtree:\n N /= 2\n\n self.pfub_table[tree][c, m, l] = min(N, stirling(n_examples, n_parts))\n return self.pfub_table[tree][c, m, l]", "title": "" }, { "docid": "4817d00c11d42552d0c79473fc7f2b82", "score": "0.49873397", "text": "def test_single_node_tree():\n root = Node(3)\n assert checkBST(root) is True", "title": "" }, { "docid": "7e854e8dcacf8ce9260587acafb3ef3e", "score": "0.49862826", "text": "def rooted_tree():\n \n A = [0,1]\n \n yield from A\n \n for n in naturals(1):\n out = 0\n for k in range(1,n+1):\n d_sum = 0\n for d in range(1,k+1):\n if k%d == 0:\n d_sum += d*A[d]\n d_sum *= A[n-k+1]\n out += d_sum\n \n A.append(out//n)\n yield out//n", "title": "" }, { "docid": "e1dd9376a84a59e75d188a272eee85bc", "score": "0.4978711", "text": "def test_bst_created_with_iterable_four():\n bintree = BST([1, 3, 2, 4])\n assert bintree.root.val == 1\n assert bintree.root.right.left.val == 2\n assert bintree.root.right.right.val == 4", "title": "" }, { "docid": "15d13685d33842441faa6ee6881e4f7e", "score": "0.49750555", "text": "def is_leaf(self):\n return not (self.has_left_child() or self.has_right_child())", "title": "" }, { "docid": "aec56bc0fc5d7e0b35636d2b62f1f8fa", "score": "0.49660045", "text": "def simple_pow(self, last_proof: int) -> int:\n this_proof = 0\n while not self.validate_proof(last_proof=last_proof, this_proof=this_proof):\n\n this_proof += 1\n return this_proof", "title": "" }, { "docid": "fe813e1932dd0bc9ed9838610cc1420c", "score": "0.49646083", "text": "def test_biophys_table_parent_of05(self):\n result = objectundertest.biophys_table_parent_of(0)\n self.assertEqual(result, 0)", "title": "" }, { "docid": "33126238287b203c9e927b4ab423a29b", "score": "0.49582124", "text": "def _compute_upper_bound_tight(self, tree, n_parts, n_examples, n_features):\n c, m, l = n_parts, n_examples, n_features\n\n if c > m or c > tree.n_leaves:\n return 0\n elif c == m or c == 1 or m == 1:\n return 1\n elif m <= tree.n_leaves:\n return stirling(m, c)\n # Modification 1: Check first in the table if value is already computed.\n if tree not in self.pfub_table:\n self.pfub_table[tree] = {}\n if (c, m, l) not in self.pfub_table[tree]:\n N = 0\n min_k = tree.left_subtree.n_leaves\n max_k = m - tree.right_subtree.n_leaves\n for k in range(min_k, max_k+1):\n # Modification 2: Since c = 2 is the most common use case, we give an optimized version, writing explicitely the sum over a and b.\n if c == 2:\n N += min(2*l, binom(m, k)) * (1\n + 2 * self._compute_upper_bound_tight(tree.left_subtree, 2, k, l)\n + 2 * self._compute_upper_bound_tight(tree.right_subtree, 2, m-k, l)\n + 2 * self._compute_upper_bound_tight(tree.left_subtree, 2, k, l) * self._compute_upper_bound_tight(tree.right_subtree, 2, m-k, l)\n )\n else:\n N += min(2*l, binom(m, k)) * sum(\n sum(\n binom(a, c - b) * binom(b, c - a) * factorial(a + b - c) *\n self._compute_upper_bound_tight(tree.left_subtree, a, k, l) *\n self._compute_upper_bound_tight(tree.right_subtree, b, m-k, l)\n for b in range(max(1,c-a), c+1)\n )\n for a in range(1, c+1)\n )\n\n if tree.left_subtree == tree.right_subtree:\n N /= 2\n\n # Modification 3: Add value to look up table.\n self.pfub_table[tree][n_parts, n_examples, n_features] = min(N, stirling(n_examples, n_parts))\n\n return 
self.pfub_table[tree][n_parts, n_examples, n_features]", "title": "" }, { "docid": "9c8ff2712e8fb1ed50a7aafbf3a14b3e", "score": "0.4955379", "text": "def istree(cls, sequence, proj=False, multiroot=False):\n\n from src.utils.alg import tarjan\n if proj and not cls.isprojective(sequence):\n return False\n n_roots = sum(head == 0 for head in sequence)\n if n_roots == 0:\n return False\n if not multiroot and n_roots > 1:\n return False\n if any(i == head for i, head in enumerate(sequence, 1)):\n return False\n return next(tarjan(sequence), None) is None", "title": "" }, { "docid": "8e9a31f1f0af195e4738d7ae7b7adf08", "score": "0.49527258", "text": "def leaf_node(self):\n return struct.unpack(\"<B\", self._buffer[36:37])[0]", "title": "" }, { "docid": "ec2b0e9e10108c65f34f786da41f2814", "score": "0.49474722", "text": "def testLeaf(self, curr):\r\n if (curr.n_depth == Params.maxHeight) or \\\r\n (curr.n_budget <= 0) or \\\r\n (curr.n_data is None or curr.n_data.shape[1] == 0) or \\\r\n (curr.n_count <= self.param.minPartSize):\r\n return True\r\n return False", "title": "" }, { "docid": "ab71e9f6c511f19eb765a15747c6ee37", "score": "0.4939341", "text": "def test_biophys_table_parent_of04(self):\n result = objectundertest.biophys_table_parent_of(9)\n self.assertEqual(result, 0)", "title": "" }, { "docid": "2461e91b107786d05d229f9f89c1c9e0", "score": "0.49249792", "text": "def get_root(phrase):\n parsed = PARSER(phrase)\n for token in parsed:\n if token.dep_ == \"ROOT\":\n if token.lemma_ == \"be\":\n head = token.head.lemma_\n else:\n head = token.lemma_\n if head in [\"be\", \"get\", \"do\", \"have\"]:\n return False\n return head\n return False", "title": "" }, { "docid": "ed6b04e6f118ef51c313b48ffcaed721", "score": "0.49207643", "text": "def height(t):\n if is_leaf(t):\n return 0\n else:\n return 1 + max([height(b) for b in branches(t)])\n # return 1 + max([0] + [height(branch) for branch in branches(t)])", "title": "" }, { "docid": "675637834245da6fd2a1208e6a50d28d", "score": "0.4910223", "text": "def proof_of_work(chain: List[blockchainConstants.Block], open_tx: List[blockchainTx.Transaction]) -> int:\n last_block = blockchainHelpers.get_last_block(chain)\n last_hash = createHashedBlock(last_block)\n proof = 0\n\n while not valid_proof(open_tx, last_hash, proof):\n proof += 1\n\n return proof", "title": "" }, { "docid": "f73ed328fc2b03a382de1181f1669e84", "score": "0.49001804", "text": "def height(t):\n\n # if is_leaf(t):\n # return 0\n # else:\n # heights = []\n # for b in branches(t):\n # heights.append(height(b))\n # return 1 + max(heights)\n\n # write in one line\n return 1 + max([-1]+[height(b) for b in branches(t)])", "title": "" }, { "docid": "87a846c28c2e41f46e175921573a1c9f", "score": "0.48996893", "text": "def test_delete_leaf():\n tree = make_random_tree()\n for trial in range(trials):\n to_delete = B.breadthfirst_traverse(tree)[-1]\n tree = B.delete(tree, to_delete)\n assert B.search(tree, to_delete) == None", "title": "" }, { "docid": "b8f87bdc10ec29b6673c57b956e62ca9", "score": "0.48970497", "text": "def is_leaf(self, p):\n return self.num_children(p) == 0", "title": "" }, { "docid": "9c5ff1ca366730981981bbb1a7fb6924", "score": "0.4868217", "text": "def proof_of_work(previous_hash: str) -> int:\n proof = 0\n while validate_proof(previous_hash, proof) is False:\n proof += 1\n\n return proof", "title": "" }, { "docid": "d338474bf91145a1268598bf6d61d4f5", "score": "0.48662788", "text": "def _get_fuzzy_leaf(self, means, stdevs, hard_edges=False, hard_leaves=False) -> int:\n 
membership = np.zeros((23,) + means.shape[1:])\n membership[self.id] = 1\n return membership", "title": "" }, { "docid": "7cc8812e9bbaef9c59f84f869a48494f", "score": "0.48575324", "text": "def compute_root(poly, x_0, epsilon):\n## assert len(poly) > 1\n## assert x_0 != 0\n## assert epsilon > 0\n\n count = 0\n poly = [-13.39, 0.0, 17.5, 3.0, 1.0]\n\n # while answer > epsilon:\n for i, val in enumerate(poly):\n print i, val", "title": "" }, { "docid": "f78cad8608770aeb790758c4e03bd835", "score": "0.48530474", "text": "def fib_tree(n):\n\tif n == 0 or n== 1:\n\t\treturn tree(n)\n\telse:\n\t\tleft = fib_tree(n - 2)\n\t\tright = fib_tree(n - 1)\n\t\tfib_n = label(left) + label(right)\n\t\treturn tree(fib_n, [left, right])", "title": "" }, { "docid": "e670801c2f1d79be5e5a339984ec8da9", "score": "0.4851631", "text": "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "title": "" }, { "docid": "f3024546f3f577ceee9a991e5124ee3a", "score": "0.4842801", "text": "def check_complexity(self):\r\n node_all = self.simple_graph_traverse()\r\n num_internal, num_leaf = 0, 0\r\n for i in range(len(node_all)):\r\n n = node_all[i]\r\n if n.substitute is not None:\r\n print(\"substitute not done.\")\r\n\r\n if n.label is None:\r\n num_internal += 1\r\n else:\r\n num_leaf += 1\r\n return num_internal, num_leaf", "title": "" }, { "docid": "9e1c4c120c8cbc81191c85611fe9994b", "score": "0.4836654", "text": "def bstcheckinorder(tree, prev=None):\n\n if not tree:\n return True\n else:\n\n bstcheckinorder(tree.left, prev)\n\n if prev is not None and prev>=tree.data:\n return False\n prev = tree.data\n\n bstcheckinorder(tree.right, prev)\n\n return True", "title": "" }, { "docid": "c8813d5376e5b892b7976ed9a73aecb1", "score": "0.4830377", "text": "def check_proof_of_inclusion(self, digest, classification_bit, proof):\n # parse digest and proof\n path_int = int(digest, 16)\n correct_final_result = proof.pop(0)\n\n # Compute self-hash-shortcut\n current_hash_val = classification_bit\n # there are depth+1 nodes in the route from root to leaf. 
We poped the root, so a full proof is depth long.\n if len(proof) < self.depth:\n correct_last_self_hash_val = proof.pop(0)\n self_hashes_count = self.depth - len(proof) # num of missing hashes from the proof\n while self_hashes_count > 0:\n self_hashes_count -= 1\n path_int = path_int >> 1\n current_hash_val = hash_function(current_hash_val + current_hash_val)\n # check we reached to the correct last self hash result\n if current_hash_val != correct_last_self_hash_val:\n return False\n\n # compute rest of the hashes using the proof\n for sibling_hash in proof:\n if path_int & 1 == SparseBinaryNode.RIGHT_DIRECTION:\n current_hash_val = hash_function(sibling_hash + current_hash_val)\n else:\n current_hash_val = hash_function(current_hash_val + sibling_hash)\n path_int = path_int >> 1\n\n # return validation result\n return current_hash_val == correct_final_result", "title": "" }, { "docid": "10ac6a66925c46d97f04d33e3495199c", "score": "0.4828936", "text": "def isLeaf(self, p):\n return self.numOfChildren(p) == 0", "title": "" }, { "docid": "115588e6d9c4bc8d7854aa7d72632c11", "score": "0.48236212", "text": "def roots(rev=None):", "title": "" }, { "docid": "cf38a65b0be0c3afa11e643a52b0365f", "score": "0.48185816", "text": "def is_leaf(self):\n return self.left is None and self.right is None", "title": "" }, { "docid": "1d6b8952bf55153bfdac454d9d312c1d", "score": "0.48132658", "text": "def test_lowest_common_ancestor(self):\n t1 = TreeNode.read(StringIO(u\"((a,(b,c)d)e,f,(g,h)i)j;\"))\n t2 = t1.copy()\n t3 = t1.copy()\n t4 = t1.copy()\n input1 = ['a'] # return self\n input2 = ['a', 'b'] # return e\n input3 = ['b', 'c'] # return d\n input4 = ['a', 'h', 'g'] # return j\n exp1 = t1.find('a')\n exp2 = t2.find('e')\n exp3 = t3.find('d')\n exp4 = t4\n obs1 = t1.lowest_common_ancestor(input1)\n obs2 = t2.lowest_common_ancestor(input2)\n obs3 = t3.lowest_common_ancestor(input3)\n obs4 = t4.lowest_common_ancestor(input4)\n self.assertEqual(obs1, exp1)\n self.assertEqual(obs2, exp2)\n self.assertEqual(obs3, exp3)\n self.assertEqual(obs4, exp4)\n\n # verify multiple calls work\n t_mul = t1.copy()\n exp_1 = t_mul.find('d')\n exp_2 = t_mul.find('i')\n obs_1 = t_mul.lowest_common_ancestor(['b', 'c'])\n obs_2 = t_mul.lowest_common_ancestor(['g', 'h'])\n self.assertEqual(obs_1, exp_1)\n self.assertEqual(obs_2, exp_2)\n\n # empty case\n with self.assertRaises(ValueError):\n t1.lowest_common_ancestor([])", "title": "" }, { "docid": "aba0b596c93707de5965f1bbc42c585f", "score": "0.4809677", "text": "def count_trees(num):\n if num < 2:\n return 1\n count = 0\n for i in range(1,num+1):\n left = count_trees(i-1)\n right = count_trees(num-i)\n count += left*right\n return count", "title": "" }, { "docid": "79e2686a132c144d8f45519fc1c511ff", "score": "0.4809397", "text": "def build_tree(self, leaf=40):\n\n print(\"- building tree: haversine\")\n balltree = BallTree(self.angular_distr[:, :2],\n leaf_size=leaf,\n metric='haversine')\n return balltree", "title": "" }, { "docid": "515266b0a01a66e7c223244891b31b7d", "score": "0.48067024", "text": "def validate(proof:dict) -> dict:\n return merkle_tree.validate(proof)", "title": "" }, { "docid": "e34054a0f6804d5731530565ade592da", "score": "0.4805752", "text": "def branch_factor(cost, depth):\n\n\t# Create an array of coefficients for a polynomial equation.\n\tcoeffs = []\n\n\tfor i in range(depth):\n\t\tcoeffs.append(1)\n\n\tcoeffs.append( -1 * cost)\n\n\t# Solve for the roots of the equation.\n\troots = numpy.roots(coeffs)\n\n\t# Choose the valid root and return it, 
rounded to \n\tfor comp in roots:\n\t\tif comp.imag == 0.0:\n\t\t\treturn -1.0*round(comp.real, 2)", "title": "" }, { "docid": "a4d8accc523b698a47a781dc7ccbc363", "score": "0.48036015", "text": "def clover_leaf(game: Board, player: Player) -> float:\n loc = game.get_player_location(player)\n opp = game.get_player_location(game.get_opponent(player))\n for leaf in CLOVER:\n if (opp[0] + leaf[0], opp[1] + leaf[1]) == loc:\n return 1.\n return 0.", "title": "" }, { "docid": "c015f5fbb1c890af967a6bf6dbb1beae", "score": "0.48005182", "text": "def test_tree_with_height_of_2():\n root = Node(5, Node(3, Node(1), Node(4)), Node(8, Node(7), Node(10)))\n assert checkBST(root) is True", "title": "" }, { "docid": "4a850833aad8c378cafb8b707d582add", "score": "0.4797511", "text": "def challenge2(self):\n value = compute_value(self.tree)\n print(f\"Root node value: {value}\")", "title": "" }, { "docid": "46d74b0e158678c2bc945923371c213f", "score": "0.47927275", "text": "def is_leaf(self):\n return self.left is None or self.right is None", "title": "" }, { "docid": "32d091ac5fb39a748f1dbad18a2d890b", "score": "0.47864258", "text": "def prufer(self):\n try:\n removed = set()\n if self.is_tree() == False:\n return False\n i = 0\n seq = list()\n max_itors = self.verts - 2\n leaf = None\n leaf_neighbors = None\n while i < max_itors:\n leaf = self.__get_smallest_leaf()\n if leaf is None:\n print(\"No more leaves left\")\n return False\n leaf_neighbors = list(self.__neighbors_of(leaf))\n if len(leaf_neighbors) > 1:\n raise GraphException(\"Prufer leaf has > 1 neighbor!\")\n seq.append(leaf_neighbors[0])\n # print(\"seq at\", i, seq)\n self.__remove_vert(leaf)\n removed.add(leaf)\n i += 1\n return seq\n\n except Exception as ex:\n print(\"prufer error: \", ex.message)\n raise GraphException(ex)", "title": "" }, { "docid": "7817364708d4d5bec4d94483da831eb7", "score": "0.47663158", "text": "def search_node_with_generator(self, generator, tree):\n # TODO: implement double hash with diffset\n\n frozen_generator = frozenset(generator)\n for index in reversed(range(len(generator)-1, len(tree))):\n for node in tree[index]:\n #if frozen_generator.issubset(node.closure):\n for gen in node.generators:\n if frozen_generator == frozenset(gen):\n return node\n return None", "title": "" }, { "docid": "a13507d0919a979b7eaa5bbd11a8c3ff", "score": "0.47657606", "text": "def _root(self, i: int):\n while i != self.alist[i]:\n i = self.alist[i]\n return i", "title": "" }, { "docid": "a611b5fe3c8a3e292f06316b76e9e4be", "score": "0.47640717", "text": "def test_aa_tree():", "title": "" }, { "docid": "b4613a9b529de593b6e51855a5d2896b", "score": "0.47638527", "text": "def test_repr_small_bst(small_bst):\n assert small_bst.root.val == 4\n assert small_bst.root.right.val == 5", "title": "" }, { "docid": "9cf5ac61f9c49945b53a41bc228f7da7", "score": "0.47626057", "text": "def test_inorder_traverse(small_bst):\n a = []\n small_bst.in_order(a.append)\n assert a == [2, 3, 3.5, 4, 5, 6, 7]", "title": "" } ]
6b168d7bc2fb1b6ba301b47a626ba2d1
itkImageFunctionIRGBAUC3RGBAUCD_cast(itkLightObject obj) -> itkImageFunctionIRGBAUC3RGBAUCD
[ { "docid": "27974120579307c54033c0a2f2e79c83", "score": "0.9448457", "text": "def itkImageFunctionIRGBAUC3RGBAUCD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC3RGBAUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC3RGBAUCD_cast(obj)", "title": "" } ]
[ { "docid": "452631e89920cd61ceb0e3baa7f6b392", "score": "0.93457943", "text": "def itkImageFunctionIRGBAUC2RGBAUCD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC2RGBAUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC2RGBAUCD_cast(obj)", "title": "" }, { "docid": "f750f299021dc4450613416d6c8d3ab8", "score": "0.9202988", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC3RGBAUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC3RGBAUCD_cast(obj)", "title": "" }, { "docid": "3bb5ad5df4e9aaf6f95e1ce718a702c9", "score": "0.9118124", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC2RGBAUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC2RGBAUCD_cast(obj)", "title": "" }, { "docid": "59eec871ce59a011bf3262e294a6e90d", "score": "0.8675929", "text": "def itkImageFunctionIRGBAUC2RGBADD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC2RGBADD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC2RGBADD_cast(obj)", "title": "" }, { "docid": "fe9deb45d9c077c063343a4cbbc1afc0", "score": "0.86662114", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC3RGBADD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC3RGBADD_cast(obj)", "title": "" }, { "docid": "7f322d9e17cf36ecbc9a2773e27c3e41", "score": "0.8642761", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC2RGBADD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC2RGBADD_cast(obj)", "title": "" }, { "docid": "55828f7cb80de9852702c27aeaa1a91d", "score": "0.86062956", "text": "def itkImageFunctionIRGBAUC3RGBADD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBAUC3RGBADD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBAUC3RGBADD_cast(obj)", "title": "" }, { "docid": "d5198ba517d4da2707f183c16de1f104", "score": "0.8466477", "text": "def itkInPlaceImageFilterIUC3IRGBAUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "d0ac9347119b49401bbb38bfa03f72a2", "score": "0.84355646", "text": "def itkInPlaceImageFilterIRGBAUC3IUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC3IUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC3IUC3_cast(obj)", "title": "" }, { "docid": "e0087c6435865b894b73a4fee23e24ad", "score": "0.84113866", "text": "def itkInPlaceImageFilterIRGBAUC3IRGBAUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "255afc8bbafbfa84a446f6abaf907b38", "score": "0.83847046", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "ac4be1ec296f3efee28037df635dc88f", "score": "0.8366549", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "d090864d5adedaa412927b0ec2222bfa", "score": "0.83553845", "text": "def itkInPlaceImageFilterIUL3IRGBAUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL3IRGBAUC3_cast(obj)", "title": "" }, { "docid": 
"1b3d0e151ef0ff80010c63aca35b7322", "score": "0.8343427", "text": "def itkCheckerBoardImageFilterIRGBAUC3_cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIRGBAUC3 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIRGBAUC3_cast(obj)", "title": "" }, { "docid": "8bf507c2323e6e80ddb56254837e8872", "score": "0.83268917", "text": "def itkImageFunctionIRGBUC3RGBUCD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC3RGBUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC3RGBUCD_cast(obj)", "title": "" }, { "docid": "0fe40b042fa839c4bb44cd890605a147", "score": "0.8313289", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "a0281a63f19d89dd4b4bf27d6e09f88b", "score": "0.8304701", "text": "def itkImageFunctionIRGBUC2RGBUCD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC2RGBUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC2RGBUCD_cast(obj)", "title": "" }, { "docid": "4f05f7cf56092bfb1eee3b3c9389fc73", "score": "0.830104", "text": "def itkInPlaceImageFilterIF3IRGBAUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIF3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIF3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "f80497da11fce255b6dd9e5458fc71f3", "score": "0.8288648", "text": "def cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIRGBAUC3 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIRGBAUC3_cast(obj)", "title": "" }, { "docid": "ff9f4c1ee689624d499ed78b5fb9acbc", "score": "0.8285438", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC3IUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC3IUC3_cast(obj)", "title": "" }, { "docid": "badd3b015c5b55ea2b4237efb7e3ae17", "score": "0.82613355", "text": "def itkImageFunctionIRGBUC3RGBDD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC3RGBDD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC3RGBDD_cast(obj)", "title": "" }, { "docid": "08a6d93c73a43abd06cc11dffcba3f5d", "score": "0.8250718", "text": "def cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC3IRGBAUC3 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "d822aa77b1b01250056b398132519d51", "score": "0.8249764", "text": "def itkInPlaceImageFilterIUC2IRGBAUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "a172d99bf0f49f6c987ffbe0c67e99ae", "score": "0.824599", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUS3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUS3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "6aa06c658b609affdc2ce62e7a73ea3f", "score": "0.82382226", "text": "def itkInPlaceImageFilterIUS3IRGBAUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUS3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUS3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "cbd3bf799e30dd92ab91a49160476c3e", "score": "0.8212988", "text": "def itkInPlaceImageFilterIUL2IRGBAUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL2IRGBAUC2_cast(obj)", "title": "" }, { "docid": 
"401671f708647797e3ea5ba1717f87eb", "score": "0.82057804", "text": "def itkComposeImageFilterIUC3IRGBAUC3_cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC3IRGBAUC3 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "9d0fd3442048ac1f20baf850049b792c", "score": "0.82009673", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIF3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIF3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "2d3e6d97aa1788e8542164d029a85c80", "score": "0.81921506", "text": "def itkInPlaceImageFilterIRGBAUC2IUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC2IUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC2IUC2_cast(obj)", "title": "" }, { "docid": "c8526c05067635e4a13415d5ba4df6f9", "score": "0.8172437", "text": "def itkImageFunctionIRGBUC2RGBDD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC2RGBDD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC2RGBDD_cast(obj)", "title": "" }, { "docid": "c31b7c76ce96daeb000f14b1557aac46", "score": "0.81663156", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC2RGBUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC2RGBUCD_cast(obj)", "title": "" }, { "docid": "8a8e1b06aa99b2990cfbdffb00f3516d", "score": "0.8164018", "text": "def itkInPlaceImageFilterIRGBAUC2IRGBAUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "c8112edbae5ea51610953b28debd056a", "score": "0.81458193", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "f975f28cf2b8ec03976ca20ae8d005d9", "score": "0.81437737", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC3RGBUCD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC3RGBUCD_cast(obj)", "title": "" }, { "docid": "8d615f5e0c03ec65720d8418c18cdc5e", "score": "0.8127759", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "78dfda5d54d29fd14f5ec62b28f9f045", "score": "0.8118124", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "0436ddf8ccb10764a391339a724b598d", "score": "0.8096839", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterISS3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterISS3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "32a4cdb895927ce0155ed353d400aca7", "score": "0.807932", "text": "def itkInPlaceImageFilterISS3IRGBAUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterISS3IRGBAUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterISS3IRGBAUC3_cast(obj)", "title": "" }, { "docid": "4effe60acbf9aef08bfb7705667ce820", "score": "0.80454326", "text": "def itkComposeImageFilterIUC2IRGBAUC2_cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC2IRGBAUC2 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "0d4c58ab7b0a1d0a3d13b6909e2e1515", "score": "0.8036508", 
"text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUS2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUS2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "bcc87e7834e66427859e1e6eb73c2046", "score": "0.80336547", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIRGBAUC2IUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIRGBAUC2IUC2_cast(obj)", "title": "" }, { "docid": "6d2bdd2147fc521e25fb528152554e2f", "score": "0.80321777", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC3RGBDD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC3RGBDD_cast(obj)", "title": "" }, { "docid": "b5a70dd1b152f8a5a69f5a78fa254455", "score": "0.8016177", "text": "def itkCheckerBoardImageFilterIRGBAUC2_cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIRGBAUC2 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIRGBAUC2_cast(obj)", "title": "" }, { "docid": "17bc147b880a3e43ba74f79b0116b710", "score": "0.80000615", "text": "def cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIRGBAUC2 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIRGBAUC2_cast(obj)", "title": "" }, { "docid": "8e81ef6726855c1ebd55b8cd38d5eb30", "score": "0.7999979", "text": "def cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC2IRGBAUC2 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "c09ea1387f591ed56e4d051c824224e2", "score": "0.79979825", "text": "def itkInPlaceImageFilterIF2IRGBAUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIF2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIF2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "42c89db628d6c448357aa858bdf9cc9b", "score": "0.79965025", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIRGBUC2RGBDD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIRGBUC2RGBDD_cast(obj)", "title": "" }, { "docid": "bdd479ac1fd9237c76fac886b7daea5b", "score": "0.7977508", "text": "def itkInPlaceImageFilterIUS2IRGBAUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUS2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUS2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "212c9f4abfebff4e94918c5d213b670d", "score": "0.79711366", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIF2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIF2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "4d57935b92b0274247790360e9e8d355", "score": "0.7921275", "text": "def itkImageFunctionIUC3DD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIUC3DD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIUC3DD_cast(obj)", "title": "" }, { "docid": "5b82fa3552b8aa868e97b27f05218960", "score": "0.7873333", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterISS2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterISS2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "5120619b173820cf67e0afac96940603", "score": "0.78062254", "text": "def itkInPlaceImageFilterISS2IRGBAUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterISS2IRGBAUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterISS2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "e5df5f9679763b0099ad2aa449afb97f", "score": "0.7790869", "text": "def itkImageFunctionIUC3DF_cast(obj: 'itkLightObject') -> \"itkImageFunctionIUC3DF *\":\n return 
_itkImageFunctionBasePython.itkImageFunctionIUC3DF_cast(obj)", "title": "" }, { "docid": "d1ab041c8ec0061aeec9cffbceac5f7c", "score": "0.77399635", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIUC3DD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIUC3DD_cast(obj)", "title": "" }, { "docid": "e56bcf75a8010d73418544b2250a8227", "score": "0.77346796", "text": "def itkInPlaceImageFilterIUC2IRGBUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC2IRGBUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC2IRGBUC2_cast(obj)", "title": "" }, { "docid": "aa3a789dc11bcd5e5beef298340d61f5", "score": "0.7730059", "text": "def itkInPlaceImageFilterIUC3IRGBUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC3IRGBUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC3IRGBUC3_cast(obj)", "title": "" }, { "docid": "1ed2f2ebf379b37013d17a957c2a036d", "score": "0.7724644", "text": "def itkImageFunctionIUC2DD_cast(obj: 'itkLightObject') -> \"itkImageFunctionIUC2DD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIUC2DD_cast(obj)", "title": "" }, { "docid": "934a456421ad281984909ff5b58ac4e7", "score": "0.7620201", "text": "def itkInPlaceImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC3IUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "62811e89287e3535c45d011e5c311fc2", "score": "0.756884", "text": "def itkValuedRegionalMaximaImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkValuedRegionalMaximaImageFilterIUC3IUC3 *\":\n return _itkValuedRegionalMaximaImageFilterPython.itkValuedRegionalMaximaImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "2159d638bb84773b1c119d8476a1f808", "score": "0.75637466", "text": "def itkHConcaveImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkHConcaveImageFilterIUC3IUC3 *\":\n return _itkHConcaveImageFilterPython.itkHConcaveImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "bbcf34e2c3c5f79daa2f043bec371255", "score": "0.75558007", "text": "def itkComposeImageFilterIUC3IRGBUC3_cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC3IRGBUC3 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC3IRGBUC3_cast(obj)", "title": "" }, { "docid": "eeaab804fdc8eb69248007e27e712f64", "score": "0.7548392", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIUC2DD *\":\n return _itkImageFunctionBasePython.itkImageFunctionIUC2DD_cast(obj)", "title": "" }, { "docid": "09bb8ef67ad4ec58ea69166430b91d62", "score": "0.75474834", "text": "def itkComposeImageFilterIUC2IRGBUC2_cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC2IRGBUC2 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC2IRGBUC2_cast(obj)", "title": "" }, { "docid": "cc80291ebc810583888dbb658b24a447", "score": "0.75428265", "text": "def itkCheckerBoardImageFilterIRGBUC3_cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIRGBUC3 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIRGBUC3_cast(obj)", "title": "" }, { "docid": "1938afebcbe46a77fab0ab4fd01fcdd7", "score": "0.7541549", "text": "def itkImageFunctionIUC2DF_cast(obj: 'itkLightObject') -> \"itkImageFunctionIUC2DF *\":\n return _itkImageFunctionBasePython.itkImageFunctionIUC2DF_cast(obj)", "title": "" }, { "docid": "cf8c9b43f42d8874d4f78402a2eadad8", "score": "0.753417", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC2IRGBUC2 *\":\n return 
_itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC2IRGBUC2_cast(obj)", "title": "" }, { "docid": "4dbae784c11883209d9723814e9f841e", "score": "0.75324285", "text": "def itkCheckerBoardImageFilterIUC3_cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIUC3 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIUC3_cast(obj)", "title": "" }, { "docid": "25df2c9a728120a3d2d6626ea9ac9e1b", "score": "0.7524783", "text": "def cast(obj: 'itkLightObject') -> \"itkImageFunctionIUC3DF *\":\n return _itkImageFunctionBasePython.itkImageFunctionIUC3DF_cast(obj)", "title": "" }, { "docid": "97c8c8814acf49a57ff1ddd56bf7002a", "score": "0.7510517", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC3IRGBUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC3IRGBUC3_cast(obj)", "title": "" }, { "docid": "3178ac3fc0eaa5a9d3729fd4554e8af5", "score": "0.7492381", "text": "def itkInPlaceImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC2IUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "a9577a1c2e59854776d49b31d5cfa93c", "score": "0.7464304", "text": "def cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC3IRGBUC3 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC3IRGBUC3_cast(obj)", "title": "" }, { "docid": "de683aa1ba0aa62f9f6ede66c2f057df", "score": "0.7452418", "text": "def itkInPlaceImageFilterIUL3IRGBUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL3IRGBUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL3IRGBUC3_cast(obj)", "title": "" }, { "docid": "73d734ec38718b58fbf0e2d1512a7704", "score": "0.745107", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC3IUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "4b5e87ace7d41639363f765f82bbac31", "score": "0.74509364", "text": "def cast(obj: 'itkLightObject') -> \"itkHConcaveImageFilterIUC3IUC3 *\":\n return _itkHConcaveImageFilterPython.itkHConcaveImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "656849ed30b21935fff1ed4f872ecd2c", "score": "0.7444449", "text": "def itkInPlaceImageFilterIULL3IUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIULL3IUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIULL3IUC3_cast(obj)", "title": "" }, { "docid": "e6a5728a4b318901afd19c981b6c736c", "score": "0.7435667", "text": "def itkInPlaceImageFilterIF3IRGBUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIF3IRGBUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIF3IRGBUC3_cast(obj)", "title": "" }, { "docid": "e01c905a65fc03c17dae19e64532b25f", "score": "0.7434209", "text": "def itkComposeImageFilterIUC3VIUC3_cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC3VIUC3 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC3VIUC3_cast(obj)", "title": "" }, { "docid": "990f3722185d70fd5a489328f34fb7eb", "score": "0.74328923", "text": "def itkInPlaceImageFilterIUL2IRGBUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL2IRGBUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL2IRGBUC2_cast(obj)", "title": "" }, { "docid": "dfe789ba032ad8d8d6137b656b226681", "score": "0.74212724", "text": "def cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC2IRGBUC2 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC2IRGBUC2_cast(obj)", "title": "" }, { "docid": 
"9d839bc432261f2cdd0f2f7174f5cc1a", "score": "0.74185663", "text": "def cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC3VIUC3 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC3VIUC3_cast(obj)", "title": "" }, { "docid": "df57e26d23766e021428b05796fbe7bb", "score": "0.7391455", "text": "def itkRegionOfInterestImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkRegionOfInterestImageFilterIUC3IUC3 *\":\n return _itkRegionOfInterestImageFilterPython.itkRegionOfInterestImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "f3b6e50afd38044d7ac0401a3b3495d4", "score": "0.7388445", "text": "def itkInPlaceImageFilterIF2IRGBUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIF2IRGBUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIF2IRGBUC2_cast(obj)", "title": "" }, { "docid": "2c4bc52aa44a2f3890bd65cf5b47b926", "score": "0.73868436", "text": "def cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIRGBUC3 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIRGBUC3_cast(obj)", "title": "" }, { "docid": "603042768f5f9ecab4dd9396c9bf7be8", "score": "0.7383859", "text": "def cast(obj: 'itkLightObject') -> \"itkValuedRegionalMaximaImageFilterIUC3IUC3 *\":\n return _itkValuedRegionalMaximaImageFilterPython.itkValuedRegionalMaximaImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "df675d460cdd9aa079af434e9b26573d", "score": "0.7375842", "text": "def cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIUC3 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIUC3_cast(obj)", "title": "" }, { "docid": "206973c5d3d76ebde2e6f3b237b51640", "score": "0.73736554", "text": "def cast(obj: 'itkLightObject') -> \"itkRegionOfInterestImageFilterIUC3IUC3 *\":\n return _itkRegionOfInterestImageFilterPython.itkRegionOfInterestImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "ab99dd68329868216944a8317fdd753d", "score": "0.73669755", "text": "def itkInPlaceImageFilterIF3IUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIF3IUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIF3IUC3_cast(obj)", "title": "" }, { "docid": "f87544dc2c75c70a08d1d5e2ee8a0273", "score": "0.7366966", "text": "def itkValuedRegionalMaximaImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkValuedRegionalMaximaImageFilterIUC2IUC2 *\":\n return _itkValuedRegionalMaximaImageFilterPython.itkValuedRegionalMaximaImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "e69d98d257aaa2e67bb827b3c5f23173", "score": "0.7355345", "text": "def cast(obj: 'itkLightObject') -> \"itkRegionOfInterestImageFilterVIUC3VIUC3 *\":\n return _itkRegionOfInterestImageFilterPython.itkRegionOfInterestImageFilterVIUC3VIUC3_cast(obj)", "title": "" }, { "docid": "406d3b2f5a9b84909b39ec10d1c68629", "score": "0.7343732", "text": "def itkInPlaceImageFilterIUS2IRGBUC2_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUS2IRGBUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUS2IRGBUC2_cast(obj)", "title": "" }, { "docid": "e4a49953fa71d5302d35e1f9b7ed1e50", "score": "0.73348725", "text": "def itkInPlaceImageFilterIUS3IRGBUC3_cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUS3IRGBUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUS3IRGBUC3_cast(obj)", "title": "" }, { "docid": "0853ce7ee205cd073aa37dd64a6d18f5", "score": "0.73217374", "text": "def itkCheckerBoardImageFilterIRGBUC2_cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIRGBUC2 *\":\n return 
_itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIRGBUC2_cast(obj)", "title": "" }, { "docid": "b33285e52cec7ca1fb6b7f38cd62c0df", "score": "0.7316399", "text": "def itkHConcaveImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkHConcaveImageFilterIUC2IUC2 *\":\n return _itkHConcaveImageFilterPython.itkHConcaveImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "665dd3cd323af1ded99d1b72300e5146", "score": "0.73141414", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL2IRGBUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL2IRGBUC2_cast(obj)", "title": "" }, { "docid": "e8324ab2ee5db308d8377decd34f9783", "score": "0.7310928", "text": "def itkComposeImageFilterIUC2VIUC2_cast(obj: 'itkLightObject') -> \"itkComposeImageFilterIUC2VIUC2 *\":\n return _itkComposeImageFilterPython.itkComposeImageFilterIUC2VIUC2_cast(obj)", "title": "" }, { "docid": "ca4a774121f4afdb3ae41d5f219d5d91", "score": "0.7308667", "text": "def itkCheckerBoardImageFilterIUC2_cast(obj: 'itkLightObject') -> \"itkCheckerBoardImageFilterIUC2 *\":\n return _itkCheckerBoardImageFilterPython.itkCheckerBoardImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "43534a1ceacbde8f7253d2d58c61e26b", "score": "0.73086417", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUL3IRGBUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUL3IRGBUC3_cast(obj)", "title": "" }, { "docid": "a9e8cda3578c4b551f79e540c0be4c27", "score": "0.7304404", "text": "def itkRegionOfInterestImageFilterVIUC3VIUC3_cast(obj: 'itkLightObject') -> \"itkRegionOfInterestImageFilterVIUC3VIUC3 *\":\n return _itkRegionOfInterestImageFilterPython.itkRegionOfInterestImageFilterVIUC3VIUC3_cast(obj)", "title": "" }, { "docid": "7ba1ab505a813bffae5c3e3bd35e0f61", "score": "0.7295293", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIUC2IUC2 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "d28d1026a275cceae32e39f0e7b6a383", "score": "0.72936153", "text": "def cast(obj: 'itkLightObject') -> \"itkInPlaceImageFilterIULL3IUC3 *\":\n return _itkInPlaceImageFilterAPython.itkInPlaceImageFilterIULL3IUC3_cast(obj)", "title": "" } ]
a315eac5e01b7996f85b0ee3e9037907
this function returns True if every slot is full with 'X' or 'O'
[ { "docid": "d57d5981fa5aba15ebedafea2aed08c3", "score": "0.6083814", "text": "def is_full(self):\r\n for i in range(self.width):\r\n if self.can_add_to(i):\r\n return False\r\n return True", "title": "" } ]
[ { "docid": "174b00346f9041dc36393673ea361c7d", "score": "0.6845584", "text": "def check_full():\n if table[1] in ['x', 'o'] and table[2] in ['x', 'o'] and table[3] in ['x', 'o'] and table[4] in ['x', 'o'] \\\n and table[5] in ['x', 'o'] and table[6] in ['x', 'o'] and table[7] in ['x', 'o'] \\\n and table[8] in ['x', 'o'] and table[9] in ['x', 'o']:\n print(\"Table is full!\\nGAME OVER!\")\n return False\n else:\n return True", "title": "" }, { "docid": "a03cd515ba07d9145c8f97ff56188689", "score": "0.6812814", "text": "def __check_slot(self, slot: int) -> bool:\n col = self.__board[slot - 1]\n return \"\" in col # Will be true if that slot has an empty space ", "title": "" }, { "docid": "72d8390c8bc7dfeb81493befd297e840", "score": "0.65943444", "text": "def seXvenceu(tab):\r\n # |\r\n if tab[1] == 'X' and tab[4] == 'X' and tab[7] == 'X':\r\n return True\r\n if tab[2] == 'X' and tab[5] == 'X' and tab[8] == 'X':\r\n return True\r\n if tab[3] == 'X' and tab[6] == 'X' and tab[9] == 'X':\r\n return True\r\n # /\\\r\n if tab[1] == 'X' and tab[5] == 'X' and tab[9] == 'X':\r\n return True\r\n if tab[3] == 'X' and tab[5] == 'X' and tab[7] == 'X':\r\n return True \r\n # -\r\n if tab[7] == 'X' and tab[8] == 'X' and tab[9] == 'X':\r\n return True\r\n if tab[4] == 'X' and tab[5] == 'X' and tab[6] == 'X':\r\n return True\r\n if tab[1] == 'X' and tab[2] == 'X' and tab[3] == 'X':\r\n return True", "title": "" }, { "docid": "3699c97eda382a9367f21345d3a7ce19", "score": "0.6559591", "text": "def __check_status(self, slot: int) -> bool:\n\n slot = slot - 1 # slot is 0-index rather than 1 of the normal game.\n stone_type = \"X\" if self.__current_player == self.__player_name_1 else \"O\"\n\n # index_last_stone is a coordinate of the last placed stone from the last player\n index_last_stone = (slot, self.__board[slot].index(stone_type))\n\n def filter_list(lst):\n \"\"\"removes out of bound coordinates\"\"\"\n return [t for t in lst if -1 < t[0] < 7 and -1 < t[1] < 6]\n\n def map_values(lst):\n \"\"\"replaces coordinates with current values\"\"\"\n return map(lambda x: self.__board[x[0]][x[1]], lst)\n\n # Four list with coordinates of the board are generated. 
\n # They are around the coordinate index_last_stone and reach out 3 extra coordinates in each direction\n vertical = list(map(lambda x: (index_last_stone[0], x + index_last_stone[1] - 3), range(7)))\n horizontal = list(map(lambda x: (x + index_last_stone[0] - 3, index_last_stone[1]), range(7)))\n\n # The diagonals are the product of the horizontal and vertical lines\n # On diagonal which is from a reversed vertical line\n diagonal_1 = list(map(lambda x, y: (y[0], x[1]), vertical, horizontal))\n diagonal_2 = list(map(lambda x, y: (y[0], x[1]), vertical[::-1], horizontal))\n\n check_placed_stone = [vertical, horizontal, diagonal_1, diagonal_2]\n\n # Filters out unwanted coordinates and maps the coordinates to the real values\n for i in range(len(check_placed_stone)):\n check_placed_stone[i] = list(map_values(filter_list(check_placed_stone[i])))\n\n # Checks if four stones of the same type are in fact together without space\n for stones in check_placed_stone:\n count_in_row = 0\n for stone in stones:\n if stone == stone_type:\n count_in_row += 1\n else:\n count_in_row = 0\n if count_in_row == 4:\n return True", "title": "" }, { "docid": "deb99dc1f9b55fe622c6c2f6d46391a0", "score": "0.6529732", "text": "def areAllSurrendered(matrix):\n return all(matrix[i][i] == 'x' for i in range(len(matrix)))", "title": "" }, { "docid": "51c6573dd7a8bf3f66fe8febf7231c15", "score": "0.6498064", "text": "def check_white_rings(self):\n white_rings = 0\n for x in range(1,19):\n for y in range(1,19):\n piece = self.return_piece([x,y])\n if piece == [[\"|O|\", \"|O|\", \"|O|\"], [\"|O|\", \"|-|\", \"|O|\"], [\"|O|\", \"|O|\", \"|O|\"]]:\n white_rings += 1\n if white_rings > 0:\n return True\n return False", "title": "" }, { "docid": "7e87bf07a01c201d07d99ad5ad42888a", "score": "0.64820343", "text": "def is_full(self):\r\n for y in range(8):\r\n for x in range(8):\r\n if self.board[x][y] == EMPTY:\r\n return False\r\n return True", "title": "" }, { "docid": "7e87bf07a01c201d07d99ad5ad42888a", "score": "0.64820343", "text": "def is_full(self):\r\n for y in range(8):\r\n for x in range(8):\r\n if self.board[x][y] == EMPTY:\r\n return False\r\n return True", "title": "" }, { "docid": "c84f10883c4415272e906caec605b88c", "score": "0.64652604", "text": "def is_box(self):\n for x in range(1, 4 + 1):\n if len(self.box[x]) == 12:\n return True", "title": "" }, { "docid": "c86f893371a599afcb7f0252117a726d", "score": "0.6461415", "text": "def is_full(self):\n return ('-' not in self.board[0:12])", "title": "" }, { "docid": "522caa809e3690a4eae6e14f90868412", "score": "0.64512914", "text": "def _helper_check_valid(self, row, col):\n for i in range(row+1, self.num_rows):\n return self.board[i][col] != Constants.EMPTY_SLOT\n return True", "title": "" }, { "docid": "a8f09d8c01376e1f44ee62b7dd583490", "score": "0.6441642", "text": "def winsFor(self, ox):\n #TODO figure out why this is still out of range\n for i in range(len(self.data)):\n for j in range(len(self.data[0])):\n if self.data[i][j] == ox:\n for u in range(3):\n if i+u +1 >= self.height:\n break\n if self.data[i+u+1][j] != ox:\n break\n if u != 2:\n continue\n return True\n for u in range(3):\n if i-u-1 <= -1:\n break\n if self.data[i-u-1][j] != ox:\n break\n if u != 2:\n continue\n return True\n for u in range(3):\n if j+u+1 >= self.width:\n break\n if self.data[i][j+u+1] != ox:\n break\n if u != 2:\n continue\n return True\n for u in range(3):\n if j-u-1 <= -1:\n break\n if self.data[i][j-u-1] != ox:\n break\n if u != 2:\n continue\n return True\n #Diagonoals\n for u 
in range(3):\n if i+u+1 >= self.height or j+u+1 >= self.width:\n break\n if self.data[i+u+1][j+u+1] != ox:\n break\n if u != 2:\n continue\n return True\n for u in range(3):\n if i-u-1 <= -1 or j-u-1 <= -1:\n break\n if self.data[i-u-1][j-u-1] != ox:\n break\n if u != 2:\n continue\n return True\n for u in range(3):\n if i-u-1 <= -1 or j+u+1 >= self.width:\n break\n if self.data[i-u-1][j+u+1] != ox:\n break\n if u != 2:\n continue\n return True\n for u in range(3):\n if i+u+1 >= self.height or j-u-1 <= -1:\n break\n if self.data[i+u+1][j-u-1] != ox:\n break\n if u != 2:\n continue\n return True\n return False", "title": "" }, { "docid": "e2b1b1b689e02c1ecbc7eaca30c617ae", "score": "0.6437306", "text": "def is_full(self):\n return np.count_nonzero(self.slots == 0) == 0", "title": "" }, { "docid": "1b5001fe57692a7d894cc0344f7239b9", "score": "0.6412611", "text": "def repeat(board):\n for n in board:\n if n != 'x':\n if board.count(n) > 1:\n return True\n return False", "title": "" }, { "docid": "858865b85cd554e6a07586dd5820a463", "score": "0.63849825", "text": "def full(self):\n return len(self.slots) == self.limit", "title": "" }, { "docid": "2c1886efe11aeb9af93c9094865a1436", "score": "0.63777786", "text": "def wincheck():\n xwin=False\n owin=False\n win=False\n for i in range (3) :\n \n if (a[i][0]==\"x\" and a[i][1]== \"x\" and a[i][2]== \"x\" ):\n xwin=True\n win=True\n elif (a[i][0]==\"o\" and a[i][1]== \"o\" and a[i][2]== \"o\"):\n owin=True\n win=True\n elif (a[0][i]==\"x\" and a[1][i]== \"x\" and a[2][i]== \"x\" ):\n xwin=True\n win=True\n elif (a[0][i]==\"o\" and a[1][i]== \"o\" and a[2][i]== \"o\"):\n owin=True\n win=True\n elif (a[i][0]==\"x\" and a[1][1]==\"x\" and a[2][2]==\"x\"):\n xwin=True\n win=True\n elif (a[0][2]==\"o\" and a[1][1]==\"o\" and a[2][0]==\"o\"):\n owin=True\n win=True\n if ((a[0][0]==\"x\" and a[1][1]==\"x\" and a[2][2]==\"x\") or (a[0][2]==\"x\" and a[1][1]==\"x\" and a[2][0]==\"x\")):\n xwin=True\n win=True\n ## print(\"x is winner\")\n \n \n elif ((a[0][0]==\"o\" and a[1][1]==\"o\" and a[2][2]==\"o\") or( a[0][2]==\"o\" and a[1][1]==\"o\" and a[2][0]==\"o\")):\n owin=True\n win=True\n ## print(\"o is winner\")\n \n\n if (xwin==True and owin==True):\n print(\"boring draw!!!\")\n \n elif(xwin==True and owin==False):\n print(\"X wins\")\n elif(xwin==False and owin==True):\n print(\"o wins\")\n elif (size==0 and (xwin==False and owin==False)):\n print(\"boring draw!!!\")\n \n \n return win", "title": "" }, { "docid": "9a6b58523a1bda18e950e9a76046e3de", "score": "0.6369146", "text": "def _is_winning_line(self, cells):\n return all(c == X for c in cells) or all(c == O for c in cells)", "title": "" }, { "docid": "247dfacbe430a41448ab522f50622682", "score": "0.6352087", "text": "def is_board_full(self):\n for i in range(3):\n for j in range(3):\n if self.is_space_available(str(i)+str(j)):\n return False\n return True", "title": "" }, { "docid": "3e296778763a4ca3b2950ea2f40450b1", "score": "0.632772", "text": "def xo(s):\n low = s.lower()\n if (low.count('x')) == (low.count('o')):\n return True\n else:\n return False", "title": "" }, { "docid": "ce9da927c381ac11aa0304d67bb04bde", "score": "0.6325393", "text": "def check_win(self, symbol):\r\n\t\tb = self.cells\r\n\r\n\t\tfor i in range(3):\r\n\t\t\tif b[i] == symbol and b[i + 3] == symbol and \\\r\n\t\t\t\t\tb[i + 6] == symbol:\r\n\t\t\t\treturn True\r\n\t\t\telif b[(i * 3)] == symbol and b[\r\n\t\t\t\t(i * 3) + 1] == symbol and b[\r\n\t\t\t\t(i * 3) + 2] == symbol:\r\n\t\t\t\treturn True\r\n\r\n\t\t\tif b[0] == 
symbol and b[4] == symbol and \\\r\n\t\t\t\t\tb[8] == symbol:\r\n\t\t\t\treturn True\r\n\t\t\telif b[2] == symbol and b[4] == symbol and \\\r\n\t\t\t\t\tb[6] == symbol:\r\n\t\t\t\treturn True\r\n\t\treturn False", "title": "" }, { "docid": "d1ae33f6a204ff73c5fc7e96039e35ab", "score": "0.63149035", "text": "def board_full(board):\n for i in range(1, 10):\n if space_check(board, i):\n return False\n return True", "title": "" }, { "docid": "e6bbdae056a7d4ea98ea536d7da17980", "score": "0.63125956", "text": "def is_ship(s):\n for i in s:\n if i not in ['*', \"X\"]:\n return False\n return True", "title": "" }, { "docid": "eefe1a26bd239daf16ee9049510a6419", "score": "0.63122314", "text": "def terminal(board):\n if any(row.count(EMPTY) != 0 for row in board):\n if winner(board) == X or winner(board) == O:\n return True\n else: \n return False\n else:\n return True", "title": "" }, { "docid": "d4742d29582ecf12f734ce96b2bafa36", "score": "0.6309931", "text": "def checkMovesLeft(self):\n #YOUR CODE HERE\n # loop over all possible positions. If there is an empty space, return True.\n for i in self.board:\n for j in i:\n if j=='_':\n return True\n # return false when all position possible are occupied by X or O\n return False", "title": "" }, { "docid": "ded4fe9e4ab06d74b8c13243c265f012", "score": "0.6304615", "text": "def seOvenceu(tab):\r\n # |\r\n if tab[1] == 'O' and tab[4] == 'O' and tab[7] == 'O':\r\n return True\r\n if tab[2] == 'O' and tab[5] == 'O' and tab[8] == 'O':\r\n return True\r\n if tab[3] == 'O' and tab[6] == 'O' and tab[9] == 'O':\r\n return True\r\n # /\\\r\n if tab[1] == 'O' and tab[5] == 'O' and tab[9] == 'O':\r\n return True\r\n if tab[3] == 'O' and tab[5] == 'O' and tab[7] == 'O':\r\n return True \r\n # -\r\n if tab[7] == 'O' and tab[8] == 'O' and tab[9] == 'O':\r\n return True\r\n if tab[4] == 'O' and tab[5] == 'O' and tab[6] == 'O':\r\n return True\r\n if tab[1] == 'O' and tab[2] == 'O' and tab[3] == 'O':\r\n return True", "title": "" }, { "docid": "1b472d65e5df5584c7312e89a42ed156", "score": "0.62805814", "text": "def is_valid(field):\n ship_cells = 0\n for i in field:\n for j in i:\n if j in ['*', 'X']:\n ship_cells += 1\n has_number_cells = (ship_cells == 20)\n one_cell = 0\n for i in range(len(field)):\n for j in range(len(i)):\n if field[i][j] in ['*', 'X']:\n if field[i-1][j] not in ['*', 'X'] and field[i][j - 1] not in ['*', 'X'] and field[i][j + 1] not in \\\n ['*', 'X'] and field[i + 1][j] not in ['*', 'X']:\n one_cell += 1\n has_number_one_cells = one_cell == 4\n four = 0\n for i in field:\n for j in range(6):\n if is_ship(''.join(i[j: j + 4])):\n four += 1\n for i in range(len(field)):\n pass", "title": "" }, { "docid": "f1f9734952e8cc9f9775b84a10ba9e3d", "score": "0.6265", "text": "def check_black_rings(self):\n black_rings = 0\n for x in range(1, 19):\n for y in range(1, 19):\n piece = self.return_piece([x, y])\n if piece == [[\"|X|\", \"|X|\", \"|X|\"], [\"|X|\", \"|-|\", \"|X|\"], [\"|X|\", \"|X|\", \"|X|\"]]:\n black_rings += 1\n if black_rings > 0:\n return True\n return False", "title": "" }, { "docid": "9d930158f6f42b0b4576dd10764ada93", "score": "0.62633705", "text": "def gameOver(self):\n\t\tif hasWon('X') or hasWon('O') or self.count>=8:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "d67998c1bb61b2002bfda3c2da235291", "score": "0.6259372", "text": "def check_occ(seats:List[str], i: int, j: int) -> bool:\n occupied_count = 0\n \n # Count how many visible seats are occupied\n occupied_count += (up(seats, i, j) == \"#\") +\\\n 
(down(seats, i, j) == \"#\") +\\\n (left(seats, i, j) == \"#\") +\\\n (right(seats, i, j) == \"#\") +\\\n (diagupleft(seats, i, j) == \"#\") +\\\n (diagupright(seats, i, j) == \"#\") +\\\n (diagdownleft(seats, i, j) == \"#\") +\\\n (diagdownright(seats, i, j) == \"#\")\n\n return occupied_count >= 5", "title": "" }, { "docid": "6ede60497a33d77641fbf6c944451c76", "score": "0.6258936", "text": "def checkGrid(self):\r\n for i in self.lines.keys():\r\n if self.lines[i][1]==3:\r\n print(\"X has won!\")\r\n return True\r\n if self.lines[i][2]==3:\r\n print(\"O has won!\")\r\n return True\r\n return False", "title": "" }, { "docid": "e1eab387e71b656663e5d8992fa8c874", "score": "0.6255784", "text": "def slot_free(self, x, y):\n # print(\"Slot free:\", x, y)\n try:\n return self.matrix[y][x] == self.EMPTY_CHAR\n except IndexError:\n # If the slot is not inside the grid consider it not free to move to\n return False", "title": "" }, { "docid": "e551fc9aafc7114f3912cae5b921da54", "score": "0.6253158", "text": "def is_winning_combo(list):\n\n if scoreboard[list[0][0]][list[0][1]] == \\\n scoreboard[list[1][0]][list[1][1]] == \\\n scoreboard[list[2][0]][list[2][1]] == 'X':\n return True\n elif scoreboard[list[0][0]][list[0][1]] == \\\n scoreboard[list[1][0]][list[1][1]] == \\\n scoreboard[list[2][0]][list[2][1]] == 'O':\n return True\n else:\n return False", "title": "" }, { "docid": "05446207751a4f8d7f39223af9b76489", "score": "0.62491316", "text": "def terminal(board):\n if winner(board) == X or winner(board) == O:\n return True\n full_board = 0\n for row in range(3):\n for cell in range(3):\n if board[row][cell] != EMPTY:\n full_board +=1\n if full_board == 9:\n return True\n return False", "title": "" }, { "docid": "69b52c1f73a581611fbee357a8ac86cd", "score": "0.62478334", "text": "def check_status(self):\r\n there_is_empty_cell = False\r\n for i in range(4):\r\n for j in range(4):\r\n if j < 3 and self.data[i][j] == self.data[i][j + 1] and self.data[i][j] != 0:\r\n there_is_empty_cell = True\r\n if i < 3 and self.data[i][j] == self.data[i + 1][j] and self.data[i][j] != 0:\r\n there_is_empty_cell = True\r\n if self.data[i][j] == 0:\r\n there_is_empty_cell = True\r\n if self.data[i][j] == 2048:\r\n self.gameFinish = True\r\n self.game_over()\r\n if not there_is_empty_cell:\r\n self.gameFinish = True\r\n self.game_over()", "title": "" }, { "docid": "bac23fbfe1d1a2f1c191f4344deb47dd", "score": "0.6240791", "text": "def terminal(board):\n if winner(board) == X or winner(board) == O:\n return True\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n return False\n return True", "title": "" }, { "docid": "efb1adc3410050b5073690cb6dabf15f", "score": "0.6223549", "text": "def isemptyseat(self, x, y):\n if (x, y) in self.emptyseatcoords:\n return True\n else:\n return False", "title": "" }, { "docid": "2ce6f94205a51c4a3797b71c78181ecd", "score": "0.62165767", "text": "def is_valid():\n temp = False\n i = 0\n j = 0\n cap_ship = 0 #size 4\n large_ship = 0 #size 3\n mid_ship = 0 #size 2\n small_ship = 0 #size 1\n while i < 11:\n while j < 11:\n if ship_size(i, j) == 4:\n cap_ship += 1\n j += 1\n elif ship_size(i, j) == 3:\n large_ship += 1\n j += 1\n elif ship_size(i, j) == 2:\n mid_ship += 1\n j += 1\n elif ship_size(i, j) == 1:\n small_ship += 1\n j += 1\n i += 1\n j = 0\n if (cap_ship == 4) and (large_ship == 6) and (mid_ship == 6) and (small_ship == 4):\n temp = True\n return temp", "title": "" }, { "docid": "b6b48f682bfc653eb4efacd1d67e00ea", "score": "0.6215728", "text": "def 
check_win(self):\n return all([len(self.homes[x]) == 13 for x in range(4)])", "title": "" }, { "docid": "42057b0a89bd5ed3bca297dceb5c1f4c", "score": "0.62110674", "text": "def full_check():\r\n open_seats = 0\r\n for i in bus:\r\n for seat in i[\"seat\"]:\r\n if step_through_whole_count(seat, i[\"seat\"]):\r\n open_seats += 1\r\n if open_seats == 0:\r\n print(\"The bus is full.\")\r\n return True\r\n return False", "title": "" }, { "docid": "5792bea9e89ccd1390075d567eb2a102", "score": "0.6210284", "text": "def is_any_free_slot():\n if len(read_parking_lots()) < SLOT_SIZE:\n return True", "title": "" }, { "docid": "3c49e4910774f9fb9ca3ed81b53deadc", "score": "0.6200344", "text": "def is_truck_empty():\n return (free_space == 1).all()", "title": "" }, { "docid": "2d7e0e5e4544be8e56ad395b8ac33f29", "score": "0.61993223", "text": "def checkBSVictory(hit_board, opposing_board):\n for z in hit_board:\n for i in z:\n if i is 'X':\n i = 0\n if hit_board == opposing_board:\n return True\n else:\n return False", "title": "" }, { "docid": "cc7ccc6049dd4b106a9dc04afda379ce", "score": "0.6198052", "text": "def is_full(self):\n for row in self.board:\n for elem in row:\n if elem == 0:\n return False\n\n return True", "title": "" }, { "docid": "55321fe89aaf97722178227db8f188dd", "score": "0.6191033", "text": "def has_full(self):\n if self.has_three() and self.has_pair():\n return True\n return False", "title": "" }, { "docid": "98cdbc3f0a0c3acc1129870a87c50808", "score": "0.6171824", "text": "def isFull(self) -> bool:", "title": "" }, { "docid": "22e8d2240c9896a661cf62fcbae1347f", "score": "0.61708826", "text": "def is_full(self):\n for c in range(self.width):\n if self.can_add_to(c) == True:\n return False\n return True", "title": "" }, { "docid": "a5f10f2570ac47fd78fa923550178384", "score": "0.616854", "text": "def is_full(puzzel):\n for r in range(9):\n for c in range(9):\n if puzzel[r][c]==0:\n return False\n return True", "title": "" }, { "docid": "a12cc089496af1bc9941106927e36ae5", "score": "0.6157833", "text": "def occupied(self):\r\n if self.creature == \"sheep\" or self.creature == \"wolf\":\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "0ff805144d7c31d40adf7909ea1c28ca", "score": "0.6145716", "text": "def check_full(array):\n for row in board:\n for piece in row:\n if piece == blank:\n return(False)\n return(True)", "title": "" }, { "docid": "e4fcc57813113e4a84450f342854a1ee", "score": "0.6145674", "text": "def bingo_check(curr_state_list):\n state = lis_repack(curr_state_list)\n bings = 0\n for comb in ind_criteria:\n c = 0\n for ind in comb:\n i, j = ind.split(' ', 1)\n if state[int(i)][int(j)] == \"x\":c+=1\n if c==5:bings+=1\n if bings>=1:return True\n else:return False", "title": "" }, { "docid": "ecaa4c3f4b85daad693fb89c5d10e7d7", "score": "0.61447364", "text": "def is_interesting(x):\n\n # XX XX 1[1, 3, 5] XX XX 1[1, 3, 5] will move right on blanks and nothing else\n if x[4] == '1' and x[5] in ('1', '3', '5') and x[10] == '1' and x[11] in ('1', '3', '5'):\n return False\n\n # 02 XX 00 XX XX XX will replace 0 with blank and vice versa forever\n if x[0] == '0' and x[1] == '2' and x[4] == '0' and x[5] == '0':\n return False\n # The dual of the above TM class\n if x[2] == '0' and x[3] == '2' and x[4] == '0' and x[5] == '4':\n return False\n\n # Never writes any symbol except blank\n if x[4] == '1' and x[5] in ('2', '5') and x[10] == '0' and x[11] in ('2', '5'):\n return False\n\n # Blank goes to 0, and 0 goes to blank, swapping states\n if x[4] == '1' and x[5] == '0' 
and x[6] == '0' and x[7] == '2':\n return False\n # Dual of above\n if x[4] == '1' and x[5] == '4' and x[8] == '0' and x[9] == '2':\n return False\n\n # Loops between states switching blank and 0\n if x[0] == '1' and x[1] == '2' and x[4] == '0' and x[5] == '0' and x[10] == '0' and x[11] in ('0', '2'):\n return False\n # Dual of above\n if x[2] == '1' and x[3] == '2' and x[4] == '0' and x[5] == '4' and x[10] == '0' and x[11] in ('2', '4'):\n return False\n\n if x[4] == '1' and x[5] in ('1', '3', '5') and x[10] == '0' and x[11] in ('1', '3', '5'):\n return False\n\n if x[4] == '1' and x[5] == '2' and x[10] == '1' and x[11] == '2':\n return False\n\n if x[4] == '1' and x[5] == '2' and x[10] == '1' and x[11] in ('1', '3', '5'):\n return False\n\n if x[4] == '1' and x[5] == '0' and x[6] == '1' and x[7] == '0':\n return False\n if x[4] == '1' and x[5] == '4' and x[8] == '1' and x[7] == '4':\n return False\n\n # Missed this at first! Moves right on blank\n if x[4] == '0' and x[5] in ('1', '3', '5'):\n return False\n\n if x[4] == '1' and x[5] == '0' and x[6] == '1' and x[7] in ('1', '3', '5') and x[10] == '1' and x[11] in ('1', '3', '5'):\n return False\n if x[4] == '1' and x[5] == '4' and x[8] == '1' and x[9] in ('1', '3', '5') and x[10] == '1' and x[11] in ('1', '3', '5'):\n return False\n\n if x[0] == '1' and x[1] == '2' and x[4] == '0' and x[5] == '0' and x[10] == '1' and x[11] in ('1', '3', '5'):\n return False\n if x[2] == '1' and x[3] == '2' and x[4] == '0' and x[5] == '4' and x[10] == '1' and x[11] in ('1', '3', '5'):\n return False\n\n # If it never writes a 0, and needs a 0 to halt, it must loop\n if x[6] == '2' and x[2] in ('0', '1') and x[3] in ('1', '2', '4', '5') and x[4] in ('0', '1') and x[5] in ('1', '2', '4', '5') and x[8] in ('0', '1') and x[9] in ('1', '2', '4', '5') and x[10] in ('0', '1') and x[11] in ('1', '2', '4', '5'):\n return False\n # Dual, needs a 1 to halt\n if x[8] == '2' and x[0] in ('0', '1') and x[1] in ('0', '2', '3', '5') and x[4] in ('0', '1') and x[5] in ('0', '2', '3', '5') and x[6] in ('0', '1') and x[7] in ('0', '2', '3', '5') and x[10] in ('0', '1') and x[11] in ('0', '2', '3', '5'):\n return False\n\n if x[4] == '1' and x[5] == '3' and x[6] == '1' and x[7] == '3' and x[10] == '1' and x[11] == '0':\n return False\n if x[4] == '1' and x[5] == '1' and x[8] == '1' and x[9] == '1' and x[10] == '1' and x[11] == '4':\n return False\n\n return True", "title": "" }, { "docid": "4ba2d56c41837d56ef8547f8d727a4da", "score": "0.61347467", "text": "def check_complete(self, symbol):\n if self.check_won(symbol, self.last_move_pos, self.board):\n return True, (1, symbol, 'won')\n elif len(self.possible_move_positions) == 0:\n return True, (0.5, 'draw')\n else:\n return False, ()", "title": "" }, { "docid": "91195717d2159cbcaf8e48562904e6a4", "score": "0.6132034", "text": "def winsFor(self,ox):\n H = self.height\n W = self.width\n D = self.data\n for row in range(H):\n for col in range(W):\n if inarow_Neast(ox,row,col,D,4):\n return True\n if inarow_Nnortheast(ox,row,col,D,4):\n return True\n if inarow_Nsouth(ox,row,col,D,4):\n return True\n if inarow_Nsoutheast(ox,row,col,D,4):\n return True\n return False", "title": "" }, { "docid": "953818f1e6349ba021454f6379e94d44", "score": "0.612622", "text": "def isFull(self):\n return (self.end + 1)%self.k == self.start", "title": "" }, { "docid": "357ee64ed538695b7c5dc67fc5d0e52a", "score": "0.6125745", "text": "def check_empty(seats: List[str], i: int, j: int) -> bool:\n # Check that all visible seats aren't occupied \n return 
up(seats, i, j) != \"#\" and\\\n down(seats, i, j) != \"#\" and\\\n left(seats, i, j) != \"#\" and\\\n right(seats, i, j) != \"#\" and\\\n diagupleft(seats, i, j) != \"#\" and\\\n diagupright(seats, i, j) != \"#\" and\\\n diagdownleft(seats, i, j) != \"#\" and\\\n diagdownright(seats, i, j) != \"#\"", "title": "" }, { "docid": "80ea80dce60ecb253ab7ddc1339c316b", "score": "0.6116339", "text": "def is_board_full(board):\n for i in range(1, 10):\n if is_space_free(board, i):\n return False\n return True", "title": "" }, { "docid": "55d86feba2053d2e124e40c58a7f4836", "score": "0.61119074", "text": "def full_board_check(board):\n for i in range(1,10):\n if space_check(board, i):\n return False\n return True", "title": "" }, { "docid": "55d398fdd943e5a7e6fa04b9ba3dea36", "score": "0.61097735", "text": "def tie(board):\n all_filled = True\n \n for field in BOARD_FIELDS.values():\n if board[field] == EMPTY_SYMBOL:\n all_filled = False\n\n return all_filled", "title": "" }, { "docid": "afb3d86d7a4438df2910feb8925ac499", "score": "0.6099069", "text": "def is_valid_build_space(self, x_val, y_val):\n space_list = get_adjacent(x_val, y_val)\n for i, j in space_list:\n if (is_valid_num(i) and is_valid_num(j) and\n self.board[i][j]['occupant'] == 'O'):\n return True\n return False", "title": "" }, { "docid": "b6b271fe362bcd325c92564d6c4e8808", "score": "0.60921824", "text": "def full_board_check(board):\n for i in range(1, 10):\n if space_check(board, i):\n return False\n return True", "title": "" }, { "docid": "23f38622db604905a3763c4c525a22f5", "score": "0.60909283", "text": "def is_board_full(board):\n for selected_move in range(1, 10):\n if is_space_free(board, selected_move):\n return False\n return True", "title": "" }, { "docid": "acd53b92c669fd1af81542f66d7353de", "score": "0.6076808", "text": "def isFilled(state, secX, secY):\n return (Cell(0) not in reduce(operator.add, state.square(secX, secY)))", "title": "" }, { "docid": "0da320c8f4078b4f8714486e18de0538", "score": "0.6070595", "text": "def isFull(self):\n for x in xrange(0, 7):\n if not self.isColumnFull(x):\n return False\n return True", "title": "" }, { "docid": "6048fff3efe25b7a139dce1f0feb4cd9", "score": "0.6070156", "text": "def winsFor(self, XO):\n H = self.height\n W = self.width\n D = self.data\n win = False\n # check for horizontal wins\n for row in range(0,H):\n for col in range(0,W-3):\n if D[row][col] == XO and \\\n D[row][col+1] == XO and \\\n D[row][col+2] == XO and \\\n D[row][col+3] == XO:\n win = True\n for row in range(0,H-3): #check vertical.\n for col in range(0,W):\n if D[row][col] == XO and \\\n D[row+1][col] == XO and \\\n D[row+2][col] == XO and \\\n D[row+3][col] == XO:\n win = True\n for row in range(0,H-3): #diagonal down\n for col in range(0,W-3):\n if D[row][col] == XO and \\\n D[row+1][col+1] == XO and \\\n D[row+2][col+2] == XO and \\\n D[row+3][col+3] == XO:\n win = True\n for row in range(3,H): #diagonal up\n for col in range(0,W-3):\n if D[row][col] == XO and \\\n D[row-1][col+1] == XO and \\\n D[row-2][col+2] == XO and \\\n D[row-3][col+3] == XO:\n win = True\n return win", "title": "" }, { "docid": "784fd7e5f6b02ecbc1fe4c61e89b80bf", "score": "0.606116", "text": "def is_full(self):\n for row in range(0, self.height):\n for col in range(0, self.width):\n if self.data[row][col] == ' ':\n return False\n return True", "title": "" }, { "docid": "f93e47e7adab8c308419a55356917aa7", "score": "0.6060629", "text": "def game_over_control(self):\n count = 0\n for cell in self.cells:\n if cell.is_open is False:\n 
count += 1\n if count == self.bomb_count:\n return True\n else:\n return False", "title": "" }, { "docid": "18e31c348d9f9a5f42595f9e76b342fa", "score": "0.60538626", "text": "def is_consistent(self):\n for i, j in [(i,j) for i in range(9) for j in range(9) if self.board[i,j] != 0]:\n value = self.board[i,j]\n if value not in self.get_values((i,j), assigned=False):\n return False\n return True", "title": "" }, { "docid": "e30a45408c2887b20de2ff1a67127a2a", "score": "0.6053099", "text": "def __has_space(self, piece):\n for row, row_val in enumerate(self.board): # ToDo: think of better way to do this\n for col, col_val in enumerate(row_val):\n if self.__is_placement_legal(row, col, piece):\n return True\n\n return False", "title": "" }, { "docid": "5143de2553e2663a17587474ac88f6c0", "score": "0.6045587", "text": "def end_check(self):\n\t\ttop = self.high_tile()\n\t\tself.spaces()\n\t\tif top == 2048:\n\t\t\treturn [True, True]\n\t\telse:\n\t\t\tif not self.empty:\n\t\t\t\treturn [True, False]\n\t\t\treturn [False, False]", "title": "" }, { "docid": "b70fa1a7078d8288285f5ac79fcfef56", "score": "0.60453516", "text": "def checkMovesLeft(self):\n #YOUR CODE HERE\n for line in self.board:\n for slot in line:\n if slot == '_':\n return True\n return False", "title": "" }, { "docid": "778f94db2a2875e7c7df0298faa1a6a3", "score": "0.6043966", "text": "def isFull(self) -> bool:\n for pos in range(1, 10):\n if self.isFreeSpace(pos):\n return False\n return True", "title": "" }, { "docid": "7544abd72d053ea61953866be5892e6c", "score": "0.6039745", "text": "def check_win(self):\n for pos in self.win_set:\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "title": "" }, { "docid": "7544abd72d053ea61953866be5892e6c", "score": "0.6039745", "text": "def check_win(self):\n for pos in self.win_set:\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "title": "" }, { "docid": "82332d8a54ad99941dd9e9bc42d926e9", "score": "0.6038827", "text": "def won(bo):\n b=bo[:]\n for i in range(len(b)):\n if b[i] == ' ':\n b[i]=str(i)\n return any([b[0]==b[1]==b[2],b[3]==b[4]==b[5],\\\n b[6]==b[7]==b[8],b[1]==b[4]==b[7],\\\n b[2]==b[5]==b[8],b[0]==b[3]==b[6],\\\n b[0]==b[4]==b[8],b[2]==b[4]==b[6]])", "title": "" }, { "docid": "5efd65e43b3ccb2c24c3ea8eddbf4f11", "score": "0.602383", "text": "def _board_is_full(board):\n for list in board:\n if '-' in list:\n return False\n return True", "title": "" }, { "docid": "6bc36cda423539bc1058bead1475fd7f", "score": "0.6017058", "text": "def has_fullhouse(self):\r\n return self.check_sets(3, 2)", "title": "" }, { "docid": "8baa3e1f6a819cb2e3f019522da2687b", "score": "0.6015534", "text": "def isBoardFull(board):\n for i in range(1, 10):\n if isSpaceFree(board, i):\n return False\n return True", "title": "" }, { "docid": "c30036e8907c413cf8cb0f83e739a05a", "score": "0.6015423", "text": "def isFull(self) -> bool:\n return self.count == self.k", "title": "" }, { "docid": "359126be3977154728c1fbbd5c5fe8bc", "score": "0.6007787", "text": "def check_full(board):\r\n hold = 1\r\n for x in board:\r\n if x == ' ':\r\n hold += 1\r\n return hold == 1", "title": "" }, { "docid": "f50661cc7c43428f27853b744de5d9c3", "score": "0.5998143", "text": "def x_winner_check(plays):\n return (plays[1] == \"X\" and plays[2] == \"X\" and plays[3] == \"X\") or\\\n (plays[4] == \"X\" and plays[5] == \"X\" and plays[6] == \"X\") or\\\n (plays[7] == \"X\" and plays[8] == \"X\" and plays[9] == \"X\") or\\\n 
(plays[1] == \"X\" and plays[4] == \"X\" and plays[7] == \"X\") or\\\n (plays[2] == \"X\" and plays[5] == \"X\" and plays[8] == \"X\") or\\\n (plays[3] == \"X\" and plays[6] == \"X\" and plays[9] == \"X\") or\\\n (plays[1] == \"X\" and plays[5] == \"X\" and plays[9] == \"X\") or\\\n (plays[3] == \"X\" and plays[5] == \"X\" and plays[7] == \"X\")", "title": "" }, { "docid": "8b1b3add5916ee808332eccc3c09ef05", "score": "0.59942687", "text": "def is_general_position(self) -> bool:\n x_s = [p.x for p in self.V]\n print(sorted(x_s))\n return len(x_s) == len(set(x_s))", "title": "" }, { "docid": "fc846c0c12489b2ba2637dffb4d3f472", "score": "0.59918094", "text": "def check_f(self, sing):\r\n if sing not in self.SINGS:\r\n return False\r\n other = 'x'\r\n if sing == other:\r\n other = 'o'\r\n k = self.c1(sing)\r\n m = self.c1(other)\r\n if k is True:\r\n return True\r\n elif m is True:\r\n return False\r\n\r\n if self.num_free() == 0:\r\n return \"stop\"\r\n else:\r\n return \"continue\"", "title": "" }, { "docid": "4209c79c4c1b19f2e3b9aadbd3a4e4c3", "score": "0.5981567", "text": "def can_add_to(self, col):\r\n if col<0 or col>=self.width:\r\n return False\r\n else:\r\n for i in range(self.height):\r\n if self.slots[i][col]==\" \":\r\n return True\r\n return False", "title": "" }, { "docid": "8b4080bb3c8134d510a8db127f78355a", "score": "0.59811485", "text": "def terminal(board):\n\n #Returns true if X or O won from previous method or if there is no space left\n winnerOfGame = winner(board)\n if winnerOfGame != None or sum(row.count(EMPTY) for row in board) == 0:\n return True\n\n return False", "title": "" }, { "docid": "17c5046706849c1849335a67ccd55c0c", "score": "0.5979894", "text": "def valid_box(self, row, col):\n values = []\n for i in range(row, row + 3):\n for j in range(col, col + 3):\n values.append(self.board[i][j])\n for k in range(1, 10):\n if values.count(k) > 1:\n return False\n return True", "title": "" }, { "docid": "e11f4e8c7d16f19b045fbfb605d64046", "score": "0.5977672", "text": "def check_winner(self):\n draw_condition = True\n for pos in self.pos_to_check:\n x_1 = self.board[pos[0][0]][pos[0][1]]\n x_2 = self.board[pos[1][0]][pos[1][1]]\n x_3 = self.board[pos[2][0]][pos[2][1]]\n line = {x_1, x_2, x_3}\n if len(line) == 1 and not '_' in line:\n return True\n draw_condition = draw_condition and 'x' in line and 'o' in line\n if draw_condition:\n return None\n return False", "title": "" }, { "docid": "aa84d204681287ccb6e46d17c44770cd", "score": "0.5971567", "text": "def is_occupied(board, n):\n return board[n] not in (INVALID, EMPTY)", "title": "" }, { "docid": "a4d5453e19b662165e9a44952d590233", "score": "0.59628224", "text": "def check_draw(self):\n return all(\"___\" not in row for row in self.board)", "title": "" }, { "docid": "7f754bffc614e14e4640f373782a3454", "score": "0.5959239", "text": "def check_pawn2x(self):\n\n return True if isinstance(self, Pawn) and any(row in self.current_field for row in ['1', '8']) else False", "title": "" }, { "docid": "e247abfc0f5cf89fc8508e27b9d82cb6", "score": "0.5958977", "text": "def x_wing(self):\n #print \"========= X-Wing ============\"\n found = False\n for possible in range(1,10):\n rows = []\n for row in range(0, 9):\n cols = [possible, row]\n for cell in self.get_cells_by_row(row):\n if cell.has_possible(possible):\n cols.append(cell.col)\n if len(cols) == 4:\n rows.append(cols)\n\n while len(rows) > 1:\n for i in range(1, len(rows)):\n first = str(rows[0][2]) + str(rows[0][3])\n second = str(rows[i][2]) + str(rows[i][3])\n if first 
== second:\n col_one = rows[0][2]\n col_two = rows[0][3]\n row_one = rows[0][1]\n row_two = rows[i][1]\n msg = \"X wing for {0} in rows {1} and {2}: \".format(possible, row_one, row_two)\n for cells in self.get_cells_by_col(col_one):\n if cell.row != row_one and cell.row != row_two:\n if cell.remove_possibles(possible, msg):\n found = True\n for cells in self.get_cells_by_col(col_two):\n if cell.row != row_one and cell.row != row_two:\n if cell.remove_possibles(possible, msg):\n found = True\n rows.pop(i)\n break\n rows.pop(0)\n\n for possible in range(1,10):\n cols = []\n for col in range(0, 9):\n rows = [possible, col]\n for cell in self.get_cells_by_col(col):\n if cell.has_possible(possible):\n rows.append(cell.row)\n if len(rows) == 4:\n cols.append(rows)\n\n while len(cols) > 1:\n for i in range(1, len(cols)):\n first = str(cols[0][2]) + str(cols[0][3])\n second = str(cols[i][2]) + str(cols[i][3])\n if first == second:\n row_one = cols[0][2]\n row_two = cols[0][3]\n col_one = cols[0][1]\n col_two = cols[i][1]\n msg = \"X wing for {0} in cols {1} and {2}\".format(possible, col_one, col_two)\n for cells in self.get_cells_by_row(row_one):\n if cell.col != col_one and cell.col != col_two:\n if cell.remove_possibles(possible, msg):\n found = True\n for cells in self.get_cells_by_row(row_two):\n if cell.col != col_one and cell.col != col_two:\n if cell.remove_possibles(possible, msg):\n found = True\n cols.pop(i)\n break\n cols.pop(0)\n\n return found", "title": "" }, { "docid": "9ab899cf6851298f52c3c1c139329284", "score": "0.59583265", "text": "def is_game_over():\n for player in [\"O\", \"X\"]:\n\n # First, check if any of the rows have the same symbol three time\n for row in [0, 1, 2]:\n if all(board[row][column] == player for column in [0, 1, 2]):\n return player\n\n # Then, do the same check for columns\n for column in [0, 1, 2]:\n if all(board[row][column] == player for row in [0, 1, 2]):\n return player\n\n # Check the first diagonal\n if all(board[row][column] == player for row, column in [(0, 0), (1, 1), (2, 2)]):\n return player\n\n # Check the second diagonal\n if all(board[row][column] == player for row, column in [(2, 0), (1, 1), (0, 2)]):\n return player\n\n # No player has won … let's check if it's a tie\n for row in [0, 1, 2]:\n for column in [0, 1, 2]:\n if board[row][column] == \" \":\n # There's still at least one empty spot … keep playing!\n return False\n\n # No player has won, and there are no empty fields left … we are sure it's a tie!\n return \"Tie\"", "title": "" }, { "docid": "b2863290b28de2b6ea767ac8ad81bcfd", "score": "0.595802", "text": "def sudokuCompleto(self):\n for x in xrange(0, 9):\n for y in xrange(0, 9):\n if self.tablero[x][y] == \"0\":\n return False\n return True", "title": "" }, { "docid": "83db5701b5f15581a629467362e064c5", "score": "0.59557223", "text": "def game_over(self):\n return self.middle_tower == [] and self.right_tower == []", "title": "" }, { "docid": "2b26c501304e8caeeed272ea8a411029", "score": "0.59551364", "text": "def wins_for(self, char):\n for row in range(self.height):\n for col in range(self.width):\n if in_a_row_n_east(char, row, col, self.data, self.win_length):\n return True\n if in_a_row_n_south(char, row, col, self.data, self.win_length):\n return True\n if in_a_row_n_northeast(char, row, col, self.data, self.win_length):\n return True\n if in_a_row_n_southeast(char, row, col, self.data, self.win_length):\n return True\n return False", "title": "" }, { "docid": "5f684ccd6cdf9366d48a000c676504df", "score": "0.59548295", 
"text": "def check_draw(self, board):\r\n for row in board:\r\n if EMPTY in row:\r\n return False\r\n \r\n return True", "title": "" }, { "docid": "340996ac3074f648e0bacf5a31fe0087", "score": "0.5952092", "text": "def isFull(self):\n return self.keyCount() == 3", "title": "" }, { "docid": "ec4de27a7553ad8dfd2c2b1d2f4b5a99", "score": "0.5950909", "text": "def is_full(self):\n for val in self.board:\n if val == \" \":\n return False\n return True", "title": "" }, { "docid": "0e50797fe439dcc24da1992c2d441a6c", "score": "0.59490514", "text": "def isFull(self):\n for x in range(self.width):\n if self.allowsMove(x):\n return False\n return True", "title": "" }, { "docid": "7dd1df56eb3708a8b84658256ba7499e", "score": "0.5944147", "text": "def terminal(board):\n \n trace = contiguous(board)\n for p in [X,O]:\n if 3 in trace[p]['row'] or \\\n 3 in trace[p]['col'] or \\\n 3 in trace[p]['diag']:\n return True\n\n for i in range(len(board)):\n for j in range(len(board[i])):\n if board[i][j] == EMPTY:\n return False\n\n # Tie Game\n return True", "title": "" }, { "docid": "84efe4126b5ac18f85f791fb519a1930", "score": "0.59421146", "text": "def space_check(board, position):\n return board[position] in [str(i) for i in range(10)]", "title": "" } ]
efecd4f2db07fcb8a9c5c70ca798be42
Get the map's content at the given x,y coordinates.
[ { "docid": "66c2928aa06147e1c947647c9b300532", "score": "0.58302176", "text": "def get_content(self, x: float, y: float) -> protocol.NavNodeContentType:\n return self._root_node.get_content(x, y)", "title": "" } ]
[ { "docid": "bad62267e3c84fb20f4d89fa0d887810", "score": "0.6279652", "text": "def get(self, x, y, z):\n cursor = self._conn.execute(\n \"\"\"\n SELECT tile_data FROM tiles\n WHERE zoom_level = :z AND\n tile_column = :x AND\n tile_row = :y\n \"\"\",\n {'x': x, 'y': y, 'z': z}\n )\n result = cursor.fetchone()\n if result is None:\n return None\n return result[0]", "title": "" }, { "docid": "924ceb9fd8df6e3e4ffe2a0a5bd36901", "score": "0.62369615", "text": "def cell_content(self, coordinate):\n x_location,y_location = coordinate\n return self.__board[x_location][y_location]", "title": "" }, { "docid": "e25c3ece2666f3ec51387ebc0e01a2c8", "score": "0.6175764", "text": "def get_map_tile_at_pixel(self, x, y):\n screen_rect = self.get_map_area()\n for map_tile_x in range(MAP_COLUMNS):\n for map_tile_y in range(MAP_ROWS):\n loc = self.map_tile_corner(map_tile_x, map_tile_y)\n tile_rect = pygame.Rect(loc, MAP_TILE_SIZE)\n if tile_rect.collidepoint(x - screen_rect.x, y - screen_rect.y):\n return (map_tile_x, map_tile_y)\n return (None, None)", "title": "" }, { "docid": "3934100ae942355ab8d89dc2929f91c7", "score": "0.61716515", "text": "def getAtomListAt(self, x, y):\r\n\t\treturn self.atomListMap[x%self.dimX][y%self.dimY]", "title": "" }, { "docid": "ece576be1e7ad77d94f501947ae8179a", "score": "0.607597", "text": "def get_tile(self, x, y):\n\n try:\n char = self.map[y][x]\n except IndexError:\n return {}\n try:\n return self.key[char]\n except KeyError:\n return {}", "title": "" }, { "docid": "80881c59b7290aa02a2119eacc6eb24d", "score": "0.6069618", "text": "def getMap(x, y):\n import requests\n import requests.packages.urllib3\n requests.packages.urllib3.disable_warnings()\n from flask import jsonify\n r = requests.get(_flask.config[\"REST_API_URL\"] + \"getTilesAroundMap/%f/%f/1\" % (x, y))\n if r.status_code != 200:\n return bad_request(r.status_code, u\"REST API request wasn't successful.\")\n return jsonify(results=r.content)", "title": "" }, { "docid": "9423476d6a918fca64211662d6c6dc85", "score": "0.60478497", "text": "def get(self, x, y):\r\n return self._data[y][x]", "title": "" }, { "docid": "cb277a73271ad08495a94cbd48d68e11", "score": "0.60385066", "text": "def getmap(self):\n address = self.getaddress( self.lat, self.lng )\n r = self.s.get( address )\n base64data = base64.encodebytes( r.content )\n image = tkinter.PhotoImage( data=base64data )\n self.window.cprint( 'Map Loaded' )\n self.window.update()\n return image", "title": "" }, { "docid": "2ee064f50de586470b37cbc188f9793b", "score": "0.6034975", "text": "def __findMap(self):\n self.__map = self.__pageSource.xpath('//*[@id=\"ContentPlaceHolder1_stuff\"]/text()[1]')[0]", "title": "" }, { "docid": "feb16b58c7989e30e0d46d8ee93d82b2", "score": "0.60275465", "text": "def cell_at(self, x, y):\n\n return self.maze_map[x][y]", "title": "" }, { "docid": "1f6f6d929c8456913a9725a309b7b3ac", "score": "0.60253125", "text": "def at(self, x, y):\n return self.board[x][y]", "title": "" }, { "docid": "e1a8c61c401b67a323cd9c5e87445891", "score": "0.6013908", "text": "def get(self, pos_x, pos_y):\n\n\t\treturn self.matrix[pos_y][pos_x]", "title": "" }, { "docid": "4187ceb9f668af6d6c2c44e548cd41d8", "score": "0.59941006", "text": "def _map_coords(self, x, y):\n return (self.windowSize/2 + 25 + self.windowSize *\n ((x - self.width / 2.0) / self.max_dim),\n self.windowSize/2 + 25 + self.windowSize *\n ((self.height / 2.0 - y) / self.max_dim))", "title": "" }, { "docid": "ca531c82e67511e58b1d629104deacf9", "score": "0.59737945", "text": "def 
get_tile_at_position(self, x, y):\r\n return board[x][y]", "title": "" }, { "docid": "ecd62ec08c55891167b1d7acc9f70722", "score": "0.5917928", "text": "def get_tile(self, x, y):\n return self.maze[x][y]", "title": "" }, { "docid": "44e37b4a292a0fcdc3034663c2c7f650", "score": "0.58826125", "text": "def at(self, x, y):\n return self._val[x + y*self._w]", "title": "" }, { "docid": "fcec221289bbbcf117e3ecae73d21688", "score": "0.587703", "text": "def image_at(self, coords):\n\t\treturn self.tile_images[coords[1]][coords[0]]", "title": "" }, { "docid": "bff8a7fff1e83d71ce2038028c1eba73", "score": "0.5874756", "text": "def get_tile_by_coordinates(self, x, y):\n # negative coordinates not allowed\n if x < 0 or y < 0:\n return False\n elif is_set(self.tiles_, x, y):\n return self.tiles_[x][y]\n else:\n return False", "title": "" }, { "docid": "25a9b91dfcba892916a2e79da0c87f97", "score": "0.5848992", "text": "def get_cell(self, x, y):\n l = self.cells[(x,y)]\n return l", "title": "" }, { "docid": "995655fb27a1370f9a03736cec4d4009", "score": "0.58282894", "text": "def get(self, coord):\n (local_offset_x, local_offset_y) = self.get_xy(coord)\n if self.inside(local_offset_x, local_offset_y):\n return self.data[local_offset_y][local_offset_x]\n return []", "title": "" }, { "docid": "7bc147830dd842b46cd008b19f1ec9ff", "score": "0.58276606", "text": "def mget(x: int, y: int) -> int:\n return map_sprites[(x % 128) + (y % 64) * 128]", "title": "" }, { "docid": "62b26b8f6d8f7c62a92bca22638f9eb7", "score": "0.5816128", "text": "def pix2coord(self, x, y):\n wcs = pywcs.WCS(self.header)\n return wcs.all_pix2world(x, y, 0)", "title": "" }, { "docid": "5d9d78fa0c33eabd592223f2228dc031", "score": "0.58151305", "text": "def extract_source_at_pos(MAP, x_pos, y_pos, PSF_npix_x, PSF_npix_y):\n #\n cpix_x = int(math.floor(PSF_npix_x / 2))\n cpix_y = int(math.floor(PSF_npix_y / 2))\n # corner pixels\n x_inf = int(round(x_pos)) - cpix_x; x_sup = x_inf + PSF_npix_x\n y_inf = int(round(y_pos)) - cpix_y; y_sup = y_inf + PSF_npix_y\n # extract map\n SRC_MAP = MAP[x_inf:x_sup, y_inf:y_sup]\n #\n return SRC_MAP", "title": "" }, { "docid": "35429bb0cb7b6aceaa9bed0984cc4f6f", "score": "0.5812354", "text": "def get(self, x, y):\n x = max(min(self.width - 1, int(x)), 0)\n y = max(min(self.height - 1, int(y)), 0)\n return self.grid[x][y]", "title": "" }, { "docid": "5b97ff146142e24f7b2a5c38f36e0872", "score": "0.5769584", "text": "def get_cell_from_coords(self, x: float, y: float) -> OccupancyCell:\n c = int((x - self.x_min) / self.resolution)\n r = int((y - self.y_min) / self.resolution)\n return self.get_cell(r, c)", "title": "" }, { "docid": "594f367ed8c51fc79d4c63bf0fae606e", "score": "0.57561576", "text": "def get_cell_nodes(self, x, y):\n return self.__map[x][y]", "title": "" }, { "docid": "d9955d22dd3672b2ab385129eb58f1fd", "score": "0.5742338", "text": "def get_at(self, x=None, y=None):\n # we use .get() here to avoid new empty Bags being inserted\n # into the index stores when a non-existant coordinate is requested.\n assert isinstance(x, int) or x is None, \"get_at takes integers (got {!r})\".format(x)\n assert isinstance(y, int) or y is None, \"get_at takes integers (got {!r})\".format(y)\n if x is None and y is None:\n raise TypeError('get_at requires at least one x or y value')\n if x is None:\n return self._y_index.get(y, Bag(self))\n if y is None:\n return self._x_index.get(x, Bag(self))\n return self._y_index.get((y), Bag(self)).filter(lambda cell: cell.x==x)", "title": "" }, { "docid": 
"31f63897bee5971f84579dd3986a3c4a", "score": "0.57173896", "text": "def find_element_with_coordinates(self, x, y):\n driver = self.driver_cache._get_current_driver()\n elem_str = \"return document.elementFromPoint(\" + str(x) + \", \" + str(y) + \")\"\n return driver.execute_script(elem_str)", "title": "" }, { "docid": "2b210112f5358160e03a7c3ebfb4e26e", "score": "0.57125443", "text": "def content(self):\r\n return self._kml['content']", "title": "" }, { "docid": "c762c876e79b3b0ab1f70d39ba65f7f8", "score": "0.567519", "text": "def posFromCell(self, x,y):\n return (x+0.5)*self.mapa.sizeCell/1000.0, (y+0.5)*self.mapa.sizeCell/1000.0", "title": "" }, { "docid": "d8a37217ea3af3487585af71cc72f342", "score": "0.56058675", "text": "def get_obj(self,x,y):\n if self.inworld(x,y):\n return self.objs[x][y]\n return None", "title": "" }, { "docid": "0048e42187de5b6f1162202198e7ea92", "score": "0.5571979", "text": "def get_cell(self, x, y):\n return self.cells[y][x]", "title": "" }, { "docid": "dae67064c605b4aa76cc00ad36146fca", "score": "0.5567073", "text": "def map_coords(self, svg_x, svg_y):\n easting = (svg_x - self.offset_x) * self.mpp\n southing = (svg_y - self.offset_y) * self.mpp\n\n return (self.ul_x_map + easting, self.ul_y_map - southing)", "title": "" }, { "docid": "bbc8008b52a9d507016860539cde8c10", "score": "0.55423564", "text": "def get_map_tile(\n self,\n tileset_id: Union[str, TilesetID],\n x: int,\n y: int,\n z: int,\n **kwargs: Any\n ) -> Iterator[bytes]:\n\n return self._render_client.get_map_tile(\n tileset_id=tileset_id,\n z=z,\n x=x,\n y=y,\n **kwargs\n )", "title": "" }, { "docid": "d089c7c15c926454685946c9fdbd2106", "score": "0.55251414", "text": "def read(self, *args):\n return _pymaxwell.CtextureMap_read(self, *args)", "title": "" }, { "docid": "8bf0c8df088432675a7b3d4725bb6c23", "score": "0.54737777", "text": "def geomap():\n return render_template(\"map.html\", cfMap=cfMap)", "title": "" }, { "docid": "f7f098c2ebb6f5ed7a136cf0b32f61ec", "score": "0.5464919", "text": "def get_tile(self, coords):\n if self.__valid_coords(coords):\n return self.tiles[coords[0]][coords[1]]\n else:\n return None", "title": "" }, { "docid": "cff433e94e0760d2081f255250fd6299", "score": "0.5455625", "text": "def get_current_tile(self, position):\r\n return self.tiles[position['y']][position['x']]", "title": "" }, { "docid": "6c44c31dfeb98d0f8691c36a87a2e08a", "score": "0.5443797", "text": "def pixel(self, x, y):\n return self.data[x + self.width * y]", "title": "" }, { "docid": "66000a3489d6cd43a16185f7de6791dd", "score": "0.5426729", "text": "def map(self) :\n\t\ttry :\n\t\t\treturn self._map\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "b48801f38e87de548e743cbef5c13d18", "score": "0.54166853", "text": "def grid_to_map_coords(self, xy):\n # type: ([int,int]) -> [int,int]\n x = self.resolution * xy[0] + self.origin.position.x\n y = self.resolution * xy[1] + self.origin.position.y\n return [x, y]", "title": "" }, { "docid": "93b9fb98f45aa8efb8485eb869802cbe", "score": "0.5405492", "text": "def get_coordinates_from_position(x, y):\n return y*GRID_SIZE+x+1", "title": "" }, { "docid": "51d88b6e5893418e366bb4f5f70c05ea", "score": "0.5402881", "text": "def _loc(_x):\n return _x.geometry.coords[0]", "title": "" }, { "docid": "d5927108c31046b426b1bbc01247957c", "score": "0.539877", "text": "def pix2world(self,x,y, withoffset=True):\n offset = self.image_offset[::-1] if withoffset else np.asarray([0,0])\n \n if \"__iter__\" not in dir(x):\n xoffset = x-offset[0]+1\n yoffset = 
y-offset[1]+1\n return self.wcs_pix2world([[xoffset,yoffset]],\n 1)[0]\n \n xyoffset = np.asarray([x,y]).T-offset+1 \n return self.wcs_pix2world(xyoffset.tolist(),1)", "title": "" }, { "docid": "019fbdb3c38f4f78af538fcac9ebedea", "score": "0.53908753", "text": "def pix2world(x, y, hdr):\n xp, yp = pix2proj(x, y, hdr)\n phi, theta = proj2natsph(xp, yp, hdr)\n lon, lat = natsph2celsph(phi, theta, hdr)\n return lon, lat", "title": "" }, { "docid": "78288f5c7d6a6e289f1055b6e3f19ec2", "score": "0.5380698", "text": "def get_map_data(self):\n return self.map_data", "title": "" }, { "docid": "df5263e4679a2f8e6c1e5d079677d2c5", "score": "0.5375769", "text": "def __getitem__(self, x_y_tuple):\n x, y = x_y_tuple\n return self.get_chip_at(x, y)", "title": "" }, { "docid": "ca30a8c1d2f30c284049e0c61fb8310f", "score": "0.5363664", "text": "def get_content(self, x: float, y: float) -> protocol.NavNodeContentType:\n node = self.get_node(x, y)\n if node:\n return node.content\n\n return NavNodeContentTypes.Unknown", "title": "" }, { "docid": "1a40eba25f486611564a28bde7db50f7", "score": "0.5361107", "text": "def __getitem__(self, value):\n\n if isinstance(value, tuple):\n assert len(value) == 2, 'slice must have two elements.'\n y, x = value\n return self.getSpaxel(x=x, y=y, xyorig='lower')\n elif isinstance(value, six.string_types):\n return self.getMap(value)\n else:\n raise marvin.core.exceptions.MarvinError('invalid type for getitem.')", "title": "" }, { "docid": "4c33970d1b100119738e42f9b8d51b67", "score": "0.53471804", "text": "def map(self, map, x=None, y=None, width=None, height=None, parent=None):\n pass", "title": "" }, { "docid": "20a9cb8d447e3595bf42ecc2d7d6ec79", "score": "0.5293636", "text": "def __call__(self, *args, **kwargs):\n super().__call__(*args, **kwargs)\n\n return self._wcs.pixel_to_world(*args)[1]", "title": "" }, { "docid": "220c96df3b74b93948ed95a0dbd3e875", "score": "0.5285481", "text": "def map_location(location):\n x, y = location\n return (x, y)", "title": "" }, { "docid": "0c5fa9e9b2606d95cbb8bfaf070fde11", "score": "0.5275251", "text": "def get_node(self, x: float, y: float) -> 'NavMapGridNode':\n return self._get_node(x, y, assumed_in_bounds=False)", "title": "" }, { "docid": "5246d0b7e0f5ca267123e5bec0fa763a", "score": "0.5268956", "text": "def get_coordinates(self, object):\n index = self.objects.index(object)\n print self.coordinates\n return self.coordinates[index, :]", "title": "" }, { "docid": "2273f187291e5b70c0c3ef6f46a29afa", "score": "0.526466", "text": "def geoFromPx(self, xPx, yPx, reverseY=False):\n\t\tif reverseY:#the users given y pixel in the image counting from bottom\n\t\t\tyPxRange = self.size.y - 1\n\t\t\tyPx = yPxRange - yPx\n\t\t#\n\t\tx = self.pxSize.x * xPx + self.rotation.y * yPx + self.origin.x\n\t\ty = self.pxSize.y * yPx + self.rotation.x * xPx + self.origin.y\n\t\treturn xy(x, y)", "title": "" }, { "docid": "b4fc6c63364f83697b1e91b483732326", "score": "0.52500314", "text": "def at(self, x, y=None):\n if y is None:\n # x is a point\n y = x.y\n x = x.x\n if x < 0 or y < 0 or x >= self.width or y >= self.height:\n return \"Out of Bounds\"\n return self._grid[x][y]", "title": "" }, { "docid": "93bc74e30be4a4b0a763483f3ce10174", "score": "0.524674", "text": "def extract(self,idxs,xcoords):\n mask = (idxs > 0) & (idxs < self.nbeams)\n pixel_mask = self.mask[(idxs[mask],xcoords[mask])]\n x = xcoords[mask][pixel_mask]\n return self.data[(idxs[mask][pixel_mask],x)],x", "title": "" }, { "docid": "27918cbc292127d4e824c97844e9e1be", "score": "0.5242802", 
"text": "def get_pixel_map(self):\n lng_map = self.lng_map\n lat_map = self.lat_map\n\n return lat_map.long(), lng_map.long()", "title": "" }, { "docid": "2e884df7762c8c0817ff840680401d36", "score": "0.5240197", "text": "def get_tile(self, row, col): \n return self.grid[row][col]", "title": "" }, { "docid": "13864982a0876e27ef329f2c83330c7a", "score": "0.52286017", "text": "def _convertWorldToPixels((worldX, worldY)):\n # First convert from meters to pixels, assuming 20 pixels per meter\n pixelX = worldX * 20.0\n pixelY = worldY * 20.0\n # Next flip x and y values around\n mapX = 2142 - 1 - pixelY\n mapY = 815 - 1 - pixelX\n return (int(mapX), int(mapY))", "title": "" }, { "docid": "61fe989aa57773653cca02586fdf5a31", "score": "0.5223156", "text": "def get(self, point):\n return self._grid.get(point)", "title": "" }, { "docid": "d4072f186f0609a376b5495a83892c29", "score": "0.5220302", "text": "def tileFromPos(self, x, y):\n tdim = float(self._tileSource.tileSize())\n return QPointF(x / tdim, y / tdim)", "title": "" }, { "docid": "7189f7f0324071d4f9774e8e4f9c653a", "score": "0.52085644", "text": "def cell_content(self, coordinate):\n board_cells= self.cell_list()\n\n if not coordinate in board_cells:\n return\n if self.__board[coordinate[0]][coordinate[1]] != \"_\":\n return self.__board[coordinate[0]][coordinate[1]]\n return", "title": "" }, { "docid": "40f1c771381830ab93996271d73f5b12", "score": "0.5207707", "text": "def get_node(self, x: float, y: float) -> NavMapGridNode:\n return self._root_node.get_node(x, y)", "title": "" }, { "docid": "290114463a66bdc8c67a3bbc6e9c1472", "score": "0.5199919", "text": "def findPixelsWithCells(x, y):\r\n xpos = 0\r\n ypos = 0\r\n if x <= 12 and x >= 0 or x == 26:\r\n xpos = position_dict['xdown'].get(x)\r\n ypos = position_dict['ydown'].get(y)\r\n elif x <= 25 and x >= 13:\r\n xpos = position_dict['xup'].get(x)\r\n ypos = position_dict['yup'].get(y)\r\n return xpos, ypos", "title": "" }, { "docid": "a29bb8bb1cefeaf721c7e5d93f626954", "score": "0.5199307", "text": "def get_map(self):\n self.map = self.map_dropdown.currentText()", "title": "" }, { "docid": "335c19a9bdadf684515b4c8ae3908926", "score": "0.5198939", "text": "def cell(self,x,y,lid=None):\n \n if not lid and self.lidOpen:\n lid = 'open'\n elif not lid:\n lid = 'closed'\n bounds = self.cell_bounds[lid]\n\n for cell,coords in bounds.items():\n if(x >= coords['x'][0] and x < coords['x'][1] and \\\n y >= coords['y'][0] and y < coords['y'][1]):\n return cell\n #if no matches\n return None", "title": "" }, { "docid": "797dfd5e374131320e4a97cc681e62ae", "score": "0.5186094", "text": "def get_map_state_tile(\n self,\n stateset_id: str,\n x: int,\n y: int,\n z: int,\n **kwargs: Any\n ) -> Iterator[bytes]:\n\n return self._render_client.get_map_state_tile(\n stateset_id=stateset_id,\n z=z,\n x=x,\n y=y,\n **kwargs\n )", "title": "" }, { "docid": "4b7ebca903f222980e66da7799989d27", "score": "0.5184862", "text": "def extract_from(self, img, force_square=False):\n x1, y1 = self.top_left.coords_in(img=img)\n x2, y2 = self.bottom_right.coords_in(img=img)\n x = slice(x1, x2)\n y = slice(y1, y2)\n if force_square:\n h, w = img.shape[:2]\n x, y = _make_square(x, y, w, h)\n return img[y, x, ...]", "title": "" }, { "docid": "cc26434a5a11e474dd205ed412c48e37", "score": "0.5180141", "text": "def get_patch(x,offset_h,h,offset_w,w):\n return x[offset_h:offset_h+h,offset_w:offset_w+w]", "title": "" }, { "docid": "8c5624af34d298c172e7a61f1ac5d080", "score": "0.517088", "text": "def getNode(self, x, y):\n return 
self.grid[y][x]", "title": "" }, { "docid": "471d07a713e5f20700f7b000940a54e4", "score": "0.5170602", "text": "def get_tile(self, row, col): \r\n return self.cells[row][col]", "title": "" }, { "docid": "96485f1c72779a9c1316b6a22e24c598", "score": "0.5165609", "text": "def get_pixel(self, x, y):\n return self._image[y][x]", "title": "" }, { "docid": "87c30857f92eec1122b2198ec368d62e", "score": "0.5165347", "text": "def getPixel(self, x, y):\n\n value = self.image.get( x,y)\n if type(value) == int:\n return [value, value, value]\n else:\n return map(int, value.split())", "title": "" }, { "docid": "8fdf52e18752161bd77bb62ada6261eb", "score": "0.51455235", "text": "def get_coordinates_from_index(self, x_index, y_index):\r\n x = x_index*self.cell_size\r\n y = y_index*self.cell_size\r\n\r\n return x, y", "title": "" }, { "docid": "12b502d8ad7c33941848d250c51a6887", "score": "0.5140834", "text": "def download_osm(left,bottom,right,top):\n from urllib.request import urlopen\n fp = urlopen( \"http://api.openstreetmap.org/api/0.5/map?bbox=%f,%f,%f,%f\"%(left,bottom,right,top) )\n return fp", "title": "" }, { "docid": "b018f235150bf5bf12e4098dc2e3be60", "score": "0.5138633", "text": "def getTile(self, row, col):\n return self._grid.getAt(row, col)", "title": "" }, { "docid": "43c265dffe5bdad6be4ed6daa6291e0d", "score": "0.51335526", "text": "def map(self):\n return self._map", "title": "" }, { "docid": "f43ca756061560914f6b2cff95e92a0b", "score": "0.51279503", "text": "def world_to_display(x, y, z, figure=None):\n if figure is None:\n f = get_engine().current_scene\n else:\n f = figure\n if f is None or f.scene is None:\n return 0, 0\n\n f.scene._renderer.world_point = [x, y, z, 1]\n f.scene._renderer.world_to_display()\n x, y, _ = f.scene._renderer.display_point\n return x, y", "title": "" }, { "docid": "c8cf561bf32cd5182c6c7951f06a7dd3", "score": "0.51263803", "text": "def get_cell(situation, x, y):\n return situation[x][y]", "title": "" }, { "docid": "4e6e5c797b2ac2f5efe19574e4fab0e5", "score": "0.5122923", "text": "def get(self, x, y):\n x = normalize(x)\n y = normalize(y)\n dot_index = pixel_map[(y % 4) // 2][x % 2]\n col, row = get_pos(x, y)\n char = self.chars.get(row, {}).get(col)\n\n if not char:\n return False\n\n if type(char) != int:\n return True\n\n return bool(char & dot_index)", "title": "" }, { "docid": "c932dd7792973590316d2b8d34cb60b6", "score": "0.511341", "text": "def extract_seq_by_coords(genome_fasta, chr_key,coord_tuple):\n genome_dict = SeqIO.index(genome_fasta,\"fasta\")\n return genome_dict[str(chr_key )].seq[coord_tuple[0]:coord_tuple[1]]\n genome_dict.close()", "title": "" }, { "docid": "44c613ea0669a92c3d18791d75cdb8ad", "score": "0.5111278", "text": "def __getitem__(self, xy):\n\n return self.getSpaxel(x=xy[1], y=xy[0], xyorig='lower')", "title": "" }, { "docid": "1b7c4c89046857556008e7b87e07d89f", "score": "0.51093566", "text": "def get_matrix_coords(self, y, x):\n delta_w, delta_h, mw, mh = self.conversion_factors()\n ix = int(x / delta_w)\n iy = int(y / delta_h)\n # Enforce boundaries\n if iy == mh: \n iy = mh - 1\n if ix == mw: \n ix = mw - 1\n return mh - iy - 1, ix", "title": "" }, { "docid": "0459ea2aa559c011827ea86ffbdcf7ce", "score": "0.5108479", "text": "def get_tile(self, row, col):\n return self.grid[row][col]", "title": "" }, { "docid": "0459ea2aa559c011827ea86ffbdcf7ce", "score": "0.5108479", "text": "def get_tile(self, row, col):\n return self.grid[row][col]", "title": "" }, { "docid": "0459ea2aa559c011827ea86ffbdcf7ce", "score": "0.5108479", "text": "def 
get_tile(self, row, col):\n return self.grid[row][col]", "title": "" }, { "docid": "83e03f4d5fd1e0d51f31515c2abd171f", "score": "0.5093538", "text": "def _get_map(self):\n\n if not self._needs_repaint:\n return self._map_cache.copy()\n\n obs = np.zeros((self.scenario.map_width * CELL_SIZE, self.scenario.map_height * CELL_SIZE, 3), dtype=np.uint8)\n obs[:, :, :] = self.GRASS_COLOR\n\n # paint trees\n for x in range(self.scenario.map_width):\n for y in range(self.scenario.map_height):\n if self.map[x, y] == self.MAP_TREE:\n self.draw_tile(obs, x, y, self.TREE_COLOR)\n\n # paint button\n if self.scenario.voting_button:\n self.draw_tile(obs, *self.button_location, self.BUTTON_COLOR)\n\n # padding (makes cropping easier...)\n self._map_padding = (self.scenario.max_view_distance + 1) # +1 for border\n padding = (self._map_padding*CELL_SIZE, self._map_padding*CELL_SIZE)\n obs = np.pad(obs, (padding, padding, (0, 0)), mode=\"constant\")\n\n self._map_cache = obs\n self._needs_repaint = False\n return self._map_cache.copy()", "title": "" }, { "docid": "c04067ba3eb33436a2dbe6c507b8d755", "score": "0.50889075", "text": "def get_tile(self, row, col):\r\n return self._grid[row][col]", "title": "" }, { "docid": "5648407b4d28b3a48576ba0525fc1586", "score": "0.5085311", "text": "def pxFromGeo(self, x, y, reverseY=False, round2Floor=False):\n\t\t# aliases for more readability\n\t\tpxSizex, pxSizey = self.pxSize\n\t\trotx, roty = self.rotation\n\t\toffx, offy = self.origin\n\t\t#\n\t\txPx = (pxSizey*x - rotx*y + rotx*offy - pxSizey*offx) / (pxSizex*pxSizey - rotx*roty)\n\t\tyPx = (-roty*x + pxSizex*y + roty*offx - pxSizex*offy) / (pxSizex*pxSizey - rotx*roty)\n\t\tif reverseY:#the users want y pixel position counting from bottom\n\t\t\tyPxRange = self.size.y - 1#number of pixels is range from 0 (not 1)\n\t\t\tyPx = yPxRange - yPx\n\t\t#offset the result of 1/2 px to get the good value\n\t\txPx += 0.5\n\t\tyPx += 0.5\n\t\t#round to floor\n\t\tif round2Floor:\n\t\t\txPx, yPx = math.floor(xPx), math.floor(yPx)\n\t\treturn xy(xPx, yPx)", "title": "" }, { "docid": "1292ef276157ad2947c510deb7fba223", "score": "0.5079597", "text": "def get_tile(self : object, z : int, x : int, y : int) -> object:\n\n return RasterTile.get(self, z, x, y)", "title": "" }, { "docid": "c38c62fc2ae2f372c29c1af220cc5f8a", "score": "0.5078752", "text": "def what_is(\n self,\n coord: typ.Tuple[int, int]\n ) -> str:\n x = coord[0] % self.xmax\n y = coord[1]\n return self.map[y][x]", "title": "" }, { "docid": "0ec0bea748baed81836449cfe16d577d", "score": "0.5072508", "text": "def query_coordinate(self, x, y):\n x = int(x)\n y = int(y)\n if x < 0 or x >= self.xsize or y < 0 or y >= self.ysize:\n raise IndexError(\"Position is not on board: %s\" % ((x, y),))\n return self._grid[x][y]", "title": "" }, { "docid": "5e2132a07573f32781a73e025e66cd3f", "score": "0.5072136", "text": "def get_pixel(self, x: int, y: int) -> int:\n return self.bitmap[x, y]", "title": "" }, { "docid": "5b750706be8966c0f9e439e0166ec46d", "score": "0.50720304", "text": "def _get_map_(self):\n return self.__map_", "title": "" }, { "docid": "c2ee5c22fb263db197c62fbd89ff5f37", "score": "0.5071165", "text": "def get_location( self ):\n return (self.x,self.y)", "title": "" }, { "docid": "d16a21af2f8571575693701509b64a93", "score": "0.5071042", "text": "def world2Pixel(geoMatrix, x, y):\n ulX = geoMatrix[0]\n ulY = geoMatrix[3]\n xDist = geoMatrix[1]\n yDist = geoMatrix[5]\n rtnX = geoMatrix[2]\n rtnY = geoMatrix[4]\n columna = int((x - ulX) / xDist)\n fila = int((ulY - y) / 
xDist)\n return (fila, columna)", "title": "" }, { "docid": "e054b824de63694779d03d4386b83a05", "score": "0.50682646", "text": "def get_map_2d(self):\n return None", "title": "" }, { "docid": "b7c92f6eb81f785d95f4883c28d7ddfd", "score": "0.5064807", "text": "def get_position(situation, x, y):\n return situation[x][y]['position']", "title": "" }, { "docid": "be61fb3864f6d8586d91d3ce599f758d", "score": "0.50628066", "text": "def get_coordinates(self):\n return self.data_store.select_as_coordinates(self.key)", "title": "" } ]
5c729b495a476373c33f734718cc8c46
find optimal parameters via cross-validation for xgboost using Hyperopt
[ { "docid": "9239610441cd07314801ad5c69e648da", "score": "0.71515936", "text": "def find_params_xgb(train_x, train_y, task, type, eval_metric, n_classes, folds, total_evals = 50, sparse = False, stopping_rounds = -1, missing = np.nan, verbose = False):\r\n\r\n np.random.seed(777)\r\n\r\n def score(params):\r\n \r\n CV_score = 0.0\r\n\r\n for f in range(len(folds)):\r\n\r\n fold = folds[f]\r\n train_index, val_index = fold[0], fold[1]\r\n\r\n if f == 0 and verbose:\r\n \r\n print (\"Training with params : \")\r\n print (params)\r\n \r\n if sparse:\r\n \r\n X_train = train_x.tocsr()[train_index,:].tocoo()\r\n X_val = train_x.tocsr()[val_index,:].tocoo()\r\n y_train = train_y[train_index]\r\n y_val = train_y[val_index]\r\n\r\n dtrain = xgb.DMatrix(X_train.tocsc(), y_train, missing = missing)\r\n dvalid = xgb.DMatrix(X_val.tocsc(), y_val, missing = missing)\r\n \r\n else:\r\n \r\n X_train = train_x[train_index,:]\r\n X_val = train_x[val_index,:]\r\n y_train = train_y[train_index]\r\n y_val = train_y[val_index]\r\n \r\n dtrain = xgb.DMatrix(X_train, y_train, missing = missing)\r\n dvalid = xgb.DMatrix(X_val, y_val, missing = missing)\r\n \r\n\r\n watchlist = [(dtrain, 'train'), (dvalid, 'eval')]\r\n \r\n if 'max_depth' in params:\r\n params['max_depth'] = int(params['max_depth'])\r\n \r\n\r\n if stopping_rounds < 0:\r\n model = xgb.train(params, dtrain, int(params['rounds']))\r\n preds_val = model.predict(dvalid)\r\n\r\n if eval_metric == 'auc':\r\n score = roc_auc_score(y_val, preds_val)\r\n elif eval_metric == 'logloss':\r\n score = log_loss(y_val, preds_val)\r\n elif eval_metric == 'mlogloss':\r\n score = log_loss(y_val, preds_val)\r\n elif eval_metric == 'error':\r\n score = 1 - accuracy_score(y_val, preds_val)\r\n elif eval_metric == 'merror':\r\n score = 1 - accuracy_score(y_val, preds_val) \r\n\r\n elif eval_metric == 'rmse':\r\n score = np.sqrt(mean_squared_error(y_val, preds_val))\r\n elif eval_metric == 'mse':\r\n score = mean_squared_error(y_val, preds_val)\r\n\r\n else:\r\n model = xgb.train(params, dtrain, 1000, evals = watchlist, verbose_eval = verbose, early_stopping_rounds = stopping_rounds)\r\n score = model.best_score\r\n\r\n \r\n CV_score += score\r\n\r\n if verbose:\r\n print ('FOLD', f, 'SCORE', score)\r\n\r\n CV_score /= float(len(folds))\r\n \r\n if verbose:\r\n print ('\\tCV_score {0}\\n\\n'.format(CV_score))\r\n print (params)\r\n\r\n if params['eval_metric'] != 'auc':\r\n return {'loss': CV_score, 'status': STATUS_OK}\r\n else:\r\n return {'loss': -CV_score, 'status': STATUS_OK}\r\n \r\n def optimize(trials):\r\n \r\n if task == 'regression':\r\n objective = 'reg:linear'\r\n elif task == 'binary_classification':\r\n objective = 'binary:logistic'\r\n elif task == 'multiclass_classification':\r\n objective = 'multi:softprob'\r\n\r\n if type == 'linear':\r\n \r\n space = {\r\n 'eta': hp.uniform('eta', 0.01, 0.51),\r\n 'lambda' : hp.uniform('lambda', 0.05, 0.7),\r\n 'lambda_bias' : hp.uniform('lambda_bias', 0.05, 0.7),\r\n 'alpha' : hp.uniform('alpha', 0.1, 0.5),\r\n 'scale_pos_weight' : 1,\r\n 'booster': 'gblinear',\r\n 'eval_metric': eval_metric,\r\n 'objective': objective,\r\n 'nthread' : 8,\r\n 'silent' : 1,\r\n 'seed': 7\r\n }\r\n \r\n\r\n elif type=='tree':\r\n\r\n space = {\r\n 'booster': 'gbtree',\r\n 'eval_metric': eval_metric,\r\n 'objective': objective,\r\n 'eta' : hp.uniform('eta', 0.003, 0.3),\r\n 'colsample_bytree' : hp.uniform('colsample_bytree', 0.3, 0.95),\r\n 'subsample' : hp.uniform('subsample', 0.6, 0.95),\r\n 'max_depth' : hp.quniform('max_depth', 3, 
15, 1),\r\n 'min_child_weight': hp.quniform('min_child_weight', 1, 100, 3),\r\n 'gamma': hp.loguniform('gamma', -3.0, 1.0),\r\n 'lambda': hp.loguniform('lambda', -3.0, 1.0),\r\n 'alpha': hp.uniform('alpha', 0.0, 1.0),\r\n 'scale_pos_weight' : 1,\r\n 'nthread' : 8,\r\n 'silent' : 1,\r\n 'seed': 7\r\n }\r\n\r\n if stopping_rounds < 0:\r\n space['rounds'] = hp.quniform('rounds', 30, 700, 5)\r\n\r\n if task == 'multiclass_classification':\r\n space['num_class'] = n_classes\r\n\r\n best = fmin(score, space, algo = tpe.suggest, trials = trials, max_evals = int(total_evals / len(folds)))\r\n for key in space:\r\n if key in best:\r\n space[key] = best[key]\r\n \r\n if verbose:\r\n print ('-'*50,'BEST PARAMS',space)\r\n return space\r\n\r\n trials = Trials()\r\n best = optimize(trials)\r\n losses = abs(np.array(trials.losses()))\r\n if eval_metric == 'auc':\r\n best_score = np.max(losses)\r\n else:\r\n best_score = np.min(losses)\r\n\r\n if verbose:\r\n \r\n print (losses)\r\n print ('BEST CV SCORE: ', best_score)\r\n\r\n return best, best_score", "title": "" } ]
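A bare-bones sketch of the task named in the query — Hyperopt's TPE search over XGBoost hyper-parameters, each candidate scored by cross-validation. The toy dataset, the search ranges and the 3-fold CV are illustrative assumptions, not the settings used in find_params_xgb above.

import numpy as np
from hyperopt import fmin, tpe, hp, Trials, STATUS_OK
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier

X, y = make_classification(n_samples=500, n_features=20, random_state=7)

def objective(params):
    # One trial: build a model from the sampled params and score it by CV.
    model = XGBClassifier(
        n_estimators=100,
        max_depth=int(params['max_depth']),
        learning_rate=params['learning_rate'],
        subsample=params['subsample'],
    )
    score = cross_val_score(model, X, y, cv=3, scoring='neg_log_loss').mean()
    return {'loss': -score, 'status': STATUS_OK}   # fmin minimises the loss

space = {
    'max_depth': hp.quniform('max_depth', 3, 10, 1),
    'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.3)),
    'subsample': hp.uniform('subsample', 0.6, 1.0),
}

trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=25, trials=trials)
print('best parameters:', best)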
[ { "docid": "3fb6f0fbafc0533ccb54bbf5551f4622", "score": "0.74352896", "text": "def xgb_hyperopt():\n\n xgb_best = xgb.XGBClassifier(n_estimators=300, **best_parameters, tree_method='gpu_hist')\n #xgb_best = xgboost.XGBClassifier(n_estimators=300, param_grid = best_parameters, tree_method='gpu_hist')\n\n xgb_best.fit(X_train, y_train)\n\n \"\"\"### **Predicting Presence of Fradulent Transactions for Test Set (X_test)**\"\"\"\n y_pred = xgb_best.predict(X_test)\n y_pred_proba = xgb_best.predict_proba(X_test)[:,1]", "title": "" }, { "docid": "56e4928935f0f45f964b84942c14158d", "score": "0.7100805", "text": "def hyperparameter_tuning(train_data: pd.DataFrame,\n train_data_target: pd.DataFrame,\n val_data: pd.DataFrame,\n val_data_target: pd.DataFrame,\n test1_data: pd.DataFrame,\n test1_data_target: pd.DataFrame,\n test2_data: pd.DataFrame,\n test2_data_target: pd.DataFrame,\n n_iteration: int,\n target: str,\n weight_balance: np.ndarray\n ):\n\n max_depth_dist = np.random.randint(3, 8, n_iteration)\n learning_rate_dist = np.random.uniform(0.01, 0.5, n_iteration)\n n_estimators_dist = np.random.randint(30, 120, n_iteration)\n gamma_dist = np.random.randint(1, 5, n_iteration)\n min_child_weight_dist = np.random.uniform(0.5, 4, n_iteration)\n subsample_dist = np.random.uniform(0.4, 1, n_iteration)\n colsample_bytree_dist = np.random.uniform(0.6, 1, n_iteration)\n reg_lambda_dist = np.random.uniform(1, 6, n_iteration)\n\n train_pr_auc = list()\n val_pr_auc = list()\n test1_pr_auc = list()\n test2_pr_auc = list()\n\n for i in range(n_iteration):\n print(\"Iteration {} running ...\".format(i))\n xgb_model = xgb.XGBClassifier(max_depth=max_depth_dist[i],\n learning_rate=learning_rate_dist[i],\n n_estimators=n_estimators_dist[i],\n verbosity=1,\n objective='binary:logistic',\n booster='gbtree',\n tree_method='auto',\n n_jobs=3,\n gamma=gamma_dist[i],\n min_child_weight=min_child_weight_dist[i],\n max_delta_step=0,\n subsample=subsample_dist[i],\n colsample_bytree=colsample_bytree_dist[i],\n colsample_bylevel=1,\n colsample_bynode=1,\n reg_alpha=0,\n reg_lambda=reg_lambda_dist[i],\n scale_pos_weight=1,\n base_score=0.5,\n random_state=123\n )\n\n xgb_model.fit(train_data.values, train_data_target[target].values,\n eval_set=[(train_data.values, train_data_target[target].values),\n (val_data.values, val_data_target[target].values)],\n sample_weight = weight_balance,\n verbose=False)\n\n # PR AUC value for train datset\n xgb_predict = xgb_model.predict_proba(train_data.values)[:, 1]\n precision, recall, thresholds = precision_recall_curve(train_data_target[target].values, xgb_predict)\n auc = sklearn_auc(recall, precision)\n train_pr_auc.append(auc)\n\n # PR AUC value for val datset\n xgb_predict = xgb_model.predict_proba(val_data.values)[:, 1]\n precision, recall, thresholds = precision_recall_curve(val_data_target[target].values, xgb_predict)\n auc = sklearn_auc(recall, precision)\n val_pr_auc.append(auc)\n\n # PR AUC value for test1 datset\n xgb_predict = xgb_model.predict_proba(test1_data.values)[:, 1]\n precision, recall, thresholds = precision_recall_curve(test1_data_target[target].values, xgb_predict)\n auc = sklearn_auc(recall, precision)\n test1_pr_auc.append(auc)\n\n # PR AUC value for val datset\n xgb_predict = xgb_model.predict_proba(test2_data.values)[:, 1]\n precision, recall, thresholds = precision_recall_curve(test2_data_target[target].values, xgb_predict)\n auc = sklearn_auc(recall, precision)\n test2_pr_auc.append(auc)\n\n print(\"Iteration {} completed.\".format(i))\n\n df = 
pd.DataFrame({'train_pr_auc': train_pr_auc,\n 'val_pr_auc': val_pr_auc,\n 'test1_pr_auc': test1_pr_auc,\n 'test2_pr_auc': test2_pr_auc\n })\n return df, max_depth_dist, learning_rate_dist, n_estimators_dist, gamma_dist, min_child_weight_dist, subsample_dist, colsample_bytree_dist, reg_lambda_dist", "title": "" }, { "docid": "7aab3fa27aa11303400dbad4e5a6ffe8", "score": "0.7062625", "text": "def tune_xgboost(train_X, train_y, verbose = 0):\n params = {\n \"max_depth\": (3, 4, 5, 6),\n \"learning_rate\": (0.1, 0.15, 0.20),\n \"gamma\": (0.0, 0.05, 0.1),\n \"min_child_weight\": (1,),\n \"subsample\": (0.8, 0.85, 0.9, 0.95, 1.0),\n \"reg_alpha\": (0, 0.05, 0.1, 0.15, 0.2),\n \"reg_lambda\": (1.0, 1.1, 1.2, 1.3),\n }\n xgb = xgboost.XGBClassifier(nthread=-1, seed=1234, n_estimators=150)\n cv = sklearn.model_selection.GridSearchCV(xgb, params, cv=5, verbose=verbose)\n cv.fit(train_X, train_y)\n if verbose:\n print(\"Optimal score:\")\n print(cv.best_score_)\n print(\"Optimal parameters:\")\n print(cv.best_params_)\n return cv", "title": "" }, { "docid": "9554f4c12e9a596ce3c7ee8de20cc9a7", "score": "0.69633067", "text": "def objective(params, n_folds=N_FOLDS):\n\n # Keep track of evals\n global ITERATION\n\n ITERATION += 1\n\n # Retrieve the subsample if present otherwise set to 1.0\n subsample = params['boosting_type'].get('subsample', 1.0)\n\n # Extract the boosting type\n params['boosting_type'] = params['boosting_type']['boosting_type']\n params['subsample'] = subsample\n\n # Make sure parameters that need to be integers are integers\n for parameter_name in ['num_leaves', 'subsample_for_bin', 'min_child_samples']:\n params[parameter_name] = int(params[parameter_name])\n\n start = timer()\n\n\n print('params',params)\n # Perform n_folds cross validation\n cv_results = lgb.cv(params, train_set, num_boost_round=10000, nfold=n_folds,\n stratified=True,\n\n early_stopping_rounds=100,\n feval=tpr_weight_funtion_lgb_cv,\n seed=50,\n verbose_eval=1#用于一次显示第几个几个cv-agg,cv_agg's TPR: -0.777223 + 0.0223719,均值和标准差\n )\n\n # print('cv_results',type(cv_results),'\\n',cv_results)\n print('df_cv_results\\n',pd.DataFrame(cv_results))\n\n run_time = timer() - start\n\n # Extract the best score\n best_score = np.min(cv_results['TPR-mean'])\n\n # Loss must be minimized\n loss = best_score\n\n TPR_stdv=cv_results['TPR-stdv'][cv_results['TPR-mean'].index(best_score)]\n print('TPR_stdv',TPR_stdv)\n\n\n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmin(cv_results['TPR-mean']) + 1)\n\n # Write to the csv file ('a' means append)\n of_connection = open(out_file, 'a')\n writer = csv.writer(of_connection)\n writer.writerow([loss,TPR_stdv, params, ITERATION, n_estimators, run_time])\n\n # Dictionary with information for evaluation\n return {'loss': loss, 'TPR_stdv':TPR_stdv,'params': params, 'iteration': ITERATION,\n 'estimators': n_estimators,\n 'train_time': run_time, 'status': STATUS_OK}", "title": "" }, { "docid": "ece73eac324442dbd3866d38a5cc31a0", "score": "0.68017083", "text": "def hyperopt_obj(self, params):\n optim_type = 'Hyperopt'\n #space = H_SPACE\n self.iteration += 1\n # Extract the bootstrap_type\n if self.GPU == True:\n params['task_type'] = 'GPU'\n if params['bootstrap_type']['bootstrap_type'] == 'Bayesian':\n params['bagging_temperature'] = params['bootstrap_type']['bagging_temperature']\n params['bootstrap_type'] = params['bootstrap_type']['bootstrap_type']\n else:\n params['bootstrap_type'] = params['bootstrap_type']['bootstrap_type']\n if 
params['grow_policy']['grow_policy'] == 'Lossguide':\n params['max_leaves'] = params['grow_policy']['max_leaves']\n params['grow_policy'] = params['grow_policy']['grow_policy']\n if self.GPU == False:\n params['score_function'] = 'L2'\n else:\n self.lossguide_verifier =True \n \n else:\n params['grow_policy'] = params['grow_policy']['grow_policy']\n print(params['grow_policy'])\n \n # Perform n_folds cross validation\n loss, params, n_estimators, run_time = self.ctb_crossval(params, optim_type)\n #Dictionary with information for evaluation\n \n return {'loss':loss, 'params':params, 'iteration':self.iteration,\n 'estimators':n_estimators, 'train_time':run_time, 'status':STATUS_OK}", "title": "" }, { "docid": "3f2898f3a9bc5a4a76f39f5b52768cc6", "score": "0.6750106", "text": "def hyper_parameters(X, y, model, solver):\n cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\n grid = {\"solver\": [solver,]}\n search_model = GridSearchCV(model, grid, scoring='accuracy', cv=cv, n_jobs=-1)\n results = search_model.fit(X, y)\n print(\"Solver: {}\".format(results.best_params_))\n print(\"Accuracy: %.8f\"%results.best_score_)\n return results", "title": "" }, { "docid": "7252b67acaf737527152eba6cca77c4a", "score": "0.6745147", "text": "def xgb_classifier(a,b):\n #Spliting data into train/validation sets \n from sklearn.model_selection import train_test_split\n #from sklearn.model_selection import KFold\n from sklearn.model_selection import StratifiedKFold\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import GridSearchCV\n from xgboost import XGBClassifier\n from sklearn import metrics\n X_train, X_test, y_train, y_test = train_test_split(a, b, test_size=0.25, random_state=42)\n \n xgb = XGBClassifier()\n #xgb.fit(X_train, y_train)\n param_grid = dict(learning_rate=[0.01,0.05,0.10,0.2,0.3], \n n_estimators=np.arange(1,50,10), \n reg_alpha = np.arange(0.1,1,0.2),\n max_depth=[2,4,6,8], \n gamma=[0,1,5])\n \n str_kfold = StratifiedKFold(n_splits=10, random_state=42)\n \n grid_model = GridSearchCV(xgb, param_grid, scoring='accuracy', n_jobs=-1, cv=str_kfold)\n grid_result = grid_model.fit(X_train, y_train)\n \n print(grid_result.best_params_ , grid_result.best_score_)\n best_parm = grid_result.best_params_ \n model = XGBClassifier(learning_rate=best_parm[\"learning_rate\"],\n objective=\"binary:logistic\", \n n_estimators=best_parm[\"n_estimators\"], \n reg_alpha = best_parm[\"reg_alpha\"],\n max_depth=best_parm[\"max_depth\"], \n gamma=best_parm[\"gamma\"])\n model.fit(X_train, y_train)\n accuracy = cross_val_score(model, X_test, y_test, cv=str_kfold,scoring='accuracy')\n print('cross_ validation Accuracy : ',np.mean(accuracy))\n return model", "title": "" }, { "docid": "9802ddd56ba8c7e1f88cfa943ecd9377", "score": "0.6728138", "text": "def tune_hyperparam(y, x, k_fold, seed, hprange, method, args, compute_loss):\n\tk_indices = build_k_indices(y, k_fold, seed)\n\n\t# define lists to store the loss of training data and test data\n\tloss_tr = []\n\tloss_te = []\n\n\t# cross validation\n\tfor hp in hprange:\n\t\t# compute the cv_losses\n\t\tcv_tr, cv_te = cv_loss(y, x, k_indices, method, args + [hp], compute_loss)\n\n\t\tloss_tr.append(cv_tr)\n\t\tloss_te.append(cv_te)\n\n\t# keep the hyperparam giving best loss on the test set\n\tind_hp_opt = np.argmin(loss_te)\n\tbest_hp = hprange[ind_hp_opt]\n\n\treturn best_hp, loss_tr, loss_te", "title": "" }, { "docid": "f76bf59a7e9347c3d32c9818e4aca6b8", "score": "0.66908693", "text": "def fit(train_data, 
train_target):\r\n for name in models.keys():\r\n est = models[name]\r\n est_params = params2[name]\r\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5)\r\n gscv.fit(train_data, train_target)\r\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\r\n print(\"Where we selected the parameters: {}\" .format(gscv.cv_results_['params'][gscv.best_index_]))\r\n print(\"with mean cross-validated score: {}\" .format(gscv.best_score_))", "title": "" }, { "docid": "7fdd4edcdb0944302480c03e9dd33809", "score": "0.6687812", "text": "def find_best_parameters(train_inputs, train_labels, cv_params):\n\n pass", "title": "" }, { "docid": "79919a1463b3f486a8112fc80fc7a774", "score": "0.6521779", "text": "def fitHyper(self):\n from sklearn.model_selection import GridSearchCV\n from sklearn.model_selection import LeaveOneOut\n from sklearn.svm import SVR\n from GridModule import Grid1D\n import numpy as np\n \n # [0.001, 0.01, 0.1, 1.0, 10, 100, 1000]\n LoO_CV=LeaveOneOut()\n Clow=-3\n Chigh=3\n NG=7\n Elow=-3\n Ehigh=3\n Clst=Grid1D(Min=Clow,Max=Chigh,NGrid=NG,GridType=\"log\")\n Elst=Grid1D(Min=Elow,Max=Ehigh,NGrid=NG,GridType=\"log\")\n done=False\n oldScore=1.0E10 #something very bad\n thress=1.0E-3\n \n cnt=0\n while not done:\n cnt+=1\n param_grid=[{'C':Clst, \n 'epsilon':Elst}]\n\n grid_search=GridSearchCV(self.model,param_grid,\n scoring='neg_mean_squared_error', #RMSE^2, neg-> higher value is better\n n_jobs=-1, # parallel, all cores\n pre_dispatch='n_jobs', # spawn as many jobs as there will run in parallel\n cv=LoO_CV, # Leave-One-Out Cross-validation (small data)\n error_score=np.NaN, # Some parameter-combos may lead to failure to fit. (Robustness to Failure)\n # Instead of crashing the entire CV job, just set \n # their score to NaN, and contine.. as such they \n # are ignored in the end compilation of the best result\n \n )\n grid_search.fit(self.feature_tf,self.target)\n newScore=grid_search.best_score_\n print(cnt,' best param= ',grid_search.best_params_)\n print(cnt,' best score= ',grid_search.best_score_)\n if abs((newScore-oldScore)/newScore)<thress:\n #score isn't changeing enough we reached the end\n done=True\n print(\" +-+-+-+ DONE abs< thress\")\n elif (cnt>10):\n #stop running, something bad is going on\n print(\"WARNING in TSVMLinearModel. Hypertunning takes too many runs. 
Stopping and hoping for the best.\")\n done=True\n else:\n print(\" +-+-+-+ ELSE abs< thress\")\n #now check if we are not on the edge:\n Cval=grid_search.best_params_.get('C')\n Cpow=int(np.log10(Cval))#get rid of rounding errors\n if ((Cpow>Clow)and(Cpow<Chigh)):#it is on the inside\n Cdone=True\n Clst=[Cval]\n else:#it is on an edge\n Cdone=False\n if (Cpow==Clow):#lower bound\n Chigh=Clow+1\n Clow=Chigh-NG+1#7 points have 6 steps in between\n else: #upper bound\n Clow=Chigh-1\n Chigh=Clow+NG-1\n Clst=Grid1D(Min=Clow,Max=Chigh,NGrid=NG,GridType=\"log\")\n \n print(\" ++++++ Clow < Cval < Chigh == \",Clow,\" < \",Cpow,\" < \",Chigh)\n print(\" ++++++ Clst =\",Clst )\n \n Eval=grid_search.best_params_.get('epsilon')\n Epow=int(np.log10(Eval))#get rid of rounding errors\n if ((Epow>Elow)and(Epow<Ehigh)):#it is on the inside\n Edone=True\n Elst=[Eval]\n else:#it is on an edge\n Edone=False\n if (Epow==Elow):#lower bound\n Ehigh=Elow+1\n Elow=Ehigh-NG+1#7 points have 6 steps in between\n else: #upper bound\n Elow=Ehigh-1\n Ehigh=Elow+NG-1\n Elst=Grid1D(Min=Elow,Max=Ehigh,NGrid=NG,GridType=\"log\")\n \n print(\" ++++++ Elow < Eval < Ehigh == \",Elow,\" < \",Epow,\" < \",Ehigh)\n print(\" ++++++ Elst =\",Elst )\n \n done=(Cdone and Edone)\n oldScore=newScore #store for the next round of while\n \n self.best_score=newScore \n print('best param= ',grid_search.best_params_)\n print('best score= ',grid_search.best_score_)\n self.best_C=grid_search.best_params_.get('C')\n self.best_e=grid_search.best_params_.get('epsilon')\n \n #and now we set the actual model\n self.hyperTuned=True\n self.model=SVR(kernel='poly',\n degree=self.degree,\n gamma=self.gamma,\n coef0=self.coef0,\n C=self.best_C,\n epsilon=self.best_e,\n cache_size=500, #cach size in MB, can improve speed\n max_iter=self.max_iter, # maximum number of iterations for the solver...default is -1 (infinite)...which it will sometimes take so it shoul dbe avoided\n )", "title": "" }, { "docid": "ab2825bf8ad8cb0a7e8c75c6f53a1fa0", "score": "0.64933795", "text": "def svmfit(X_train, y_train, bigC = 0.01):\n rnum = X_train.shape[0]\n cnum = X_train.shape[1]\n tmp_K = np.zeros((rnum, rnum))\n P = np.dot(y_train * X_train*1.0, (y_train * X_train*1.0).T)\n q = -1 * np.ones((rnum,1))\n G = np.vstack((np.eye(rnum)* -1 ,np.eye(rnum)))\n h = np.hstack((np.zeros(rnum), np.ones(rnum) * bigC))\n A = y_train.T\n b = np.array([0.0])\n \n P_cvm = cvxopt.matrix(P)\n q_cvm = cvxopt.matrix(q)\n G_cvm = cvxopt.matrix(G)\n h_cvm = cvxopt.matrix(h)\n A_cvm = cvxopt.matrix(A)\n b_cvm = cvxopt.matrix(b)\n sol=cvxopt.solvers.qp(P_cvm, q_cvm, G_cvm, h_cvm, A_cvm, b_cvm,solver='glpk',options={'glpk':{'msg_lev':'GLP_MSG_OFF'}})\n lamb = np.array(sol['x'])\n w = np.zeros(cnum,)\n for i in range(0, rnum):\n if (sol[\"x\"][i]>=0 and sol[\"x\"][i]<=bigC):\n w = w + np.multiply(y_t[i][0]*sol[\"x\"][i],list(X_t[i]))\n S = (lamb > 1e-5).flatten()\n b = y_train[S] - np.dot(X_train[S], w)\n model = (w,b[0][0])\n return model\n # return model", "title": "" }, { "docid": "c0376554fa5941e471ce68d60736aa49", "score": "0.6473927", "text": "def _set_lgb_parameters(\n X: np.ndarray,\n y: np.ndarray,\n objective: str,\n rf: bool,\n silent: bool,\n n_jobs: int = 0,\n lgbm_params: dict = None,\n) -> dict:\n\n n_feat = X.shape[1]\n\n params = lgbm_params if lgbm_params is not None else {}\n\n params[\"objective\"] = objective\n params[\"verbosity\"] = -1\n if objective == \"softmax\":\n params[\"num_class\"] = len(np.unique(y))\n\n if rf:\n feat_frac = (\n np.sqrt(n_feat) / n_feat\n if 
objective in [\"softmax\", \"binary\"]\n else n_feat / (3 * n_feat)\n )\n params.update(\n {\n \"boosting_type\": \"rf\",\n \"bagging_fraction\": 0.7,\n \"feature_fraction\": feat_frac,\n \"bagging_freq\": 1,\n }\n )\n\n clf_losses = [\n \"binary\",\n \"softmax\",\n \"multi_logloss\",\n \"multiclassova\",\n \"multiclass\",\n \"multiclass_ova\",\n \"ova\",\n \"ovr\",\n \"binary_logloss\",\n ]\n if objective in clf_losses:\n y = y.astype(int)\n y_freq_table = pd.Series(y.fillna(0)).value_counts(normalize=True)\n n_classes = y_freq_table.size\n if n_classes > 2 and objective != \"softmax\":\n params[\"objective\"] = \"softmax\"\n params[\"num_class\"] = len(np.unique(y))\n if not silent:\n print(\"Multi-class task, setting objective to softmax\")\n main_class = y_freq_table[0]\n if not silent:\n print(\"GrootCV: classification with unbalance classes\")\n if main_class > 0.8:\n params.update({\"is_unbalance\": True})\n\n params.update({\"num_threads\": n_jobs})\n\n # we are using early_stopping\n # we prevent the overridding of it by popping the n_iterations\n keys_to_pop = [\n \"num_iterations\",\n \"num_iteration\",\n \"n_iter\",\n \"num_tree\",\n \"num_trees\",\n \"num_round\",\n \"num_rounds\",\n \"nrounds\",\n \"num_boost_round\",\n \"n_estimators\",\n \"max_iter\",\n ]\n for key in keys_to_pop:\n params.pop(key, None)\n\n return params", "title": "" }, { "docid": "1d690754405074050f7f5429f6ab189b", "score": "0.6473025", "text": "def get_hyperparams_range():\n valid_keys = [\"loss\", \"learning_rate\"]\n return {\n \"model_type\": [[GradientBoostingClassifier, valid_keys]],\n \"learning_rate\": [0.1, 1, 100],\n \"loss\": [\"deviance\", \"exponential\"],\n \"n_estimators\": [100, 300, 1000],\n \"subsample\": [0.2, 1.2, 100],\n \"random_state\": [None, 123],\n \"max_features\": [\"None\", \"log2\", \"sqrt\"],\n \"ccp_alpha\": [0.0, 1, 100],\n }", "title": "" }, { "docid": "75b24863283eaaa51a436abaf0fb5fd4", "score": "0.645393", "text": "def _hyperparameter_tuning(self, train_data, algorithm):\n if algorithm == 'GB':\n parameters = {'n_estimators': [100],\n 'min_samples_split': [2],\n 'min_samples_leaf': [1],\n 'max_depth': [3]}\n # parameters = {'n_estimators': np.arange(50, 210, 10),\n # 'min_samples_split': np.arange(2, 6, 1),\n # 'min_samples_leaf': np.arange(1, 6, 1),\n # 'max_depth': np.arange(2, 6, 1)}\n model = GradientBoostingClassifier()\n\n elif algorithm == 'RF':\n parameters = {'n_estimators': [100],\n 'min_samples_split': [2],\n 'min_samples_leaf': [1]}\n # parameters = {'n_estimators': np.arange(50, 210, 10),\n # 'min_samples_split': np.arange(2, 6, 1),\n # 'min_samples_leaf': np.arange(1, 6, 1)}\n model = RandomForestClassifier()\n\n elif algorithm == 'XGB':\n parameters = {'n_estimators': [100],\n 'learning_rate': [0.3],\n 'max_depth': [3],\n 'min_child_weight': [1],\n 'gamma': [0],\n 'colsample_bytree': [1]}\n # parameters = {'n_estimators': np.arange(50, 210, 10),\n # 'learning_rate': [0.10, 0.20, 0.30],\n # 'max_depth': [4, 6, 8, 10, 12, 15],\n # 'min_child_weight': [1, 3, 5, 7],\n # 'gamma': [0.0, 0.1, 0.2, 0.3, 0.4],\n # 'colsample_bytree': [0.3, 0.4, 0.5, 0.7]}\n model = xgb.XGBClassifier()\n\n else:\n raise ValueError('{} is not supported.'.format(algorithm))\n\n grid_search = GridSearchCV(model, parameters, scoring=['balanced_accuracy', 'f1_macro'],\n n_jobs=-1, refit='f1_macro')\n grid_search.fit(train_data[['address_similarity', 'address_str_similarity', 'name_similarity']],\n train_data['label'])\n return grid_search", "title": "" }, { "docid": 
"45cfacf85f80a2af22fe8cf1f993a446", "score": "0.64479077", "text": "def find_top_features_xgb(train,preds,numvars,target,modeltype,verbose=0):\n subsample = 0.5\n col_sub_sample = 0.5\n train = copy.deepcopy(train)\n start_time = time.time()\n test_size = 0.2\n seed = 1\n n_splits = 5\n kf = KFold(n_splits=n_splits,random_state= 33)\n rem_vars = left_subtract(preds,numvars)\n if len(numvars) > 0:\n final_list = remove_variables_using_fast_correlation(train,numvars)\n else:\n final_list = numvars[:]\n print(' Adding %s categorical variables to reduced numeric variables of %d' %(\n len(rem_vars),len(final_list)))\n preds = final_list+rem_vars\n ######## Drop Missing value rows since XGB for some reason #########\n ######## can't handle missing values in early stopping rounds #######\n train.dropna(axis=0,subset=preds+[target],inplace=True)\n ######## Dont move this train and y definition anywhere else ########\n y = train[target]\n ######################################################################\n important_features = []\n if modeltype == 'Regression':\n model_xgb = XGBRegressor(objective='reg:linear', n_estimators=100,subsample=subsample,\n colsample_bytree=col_sub_sample,reg_alpha=0.5, reg_lambda=0.5, \n seed=1,n_jobs=-1,random_state=1)\n eval_metric = 'rmse'\n else:\n #### This is for Classifiers only\n classes = np.unique(train[target].values)\n if len(classes) == 2:\n model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,\n colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,\n max_depth=5, min_child_weight=1, missing=-999, n_estimators=100,\n n_jobs=-1, nthread=None, objective='binary:logistic',\n random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,\n seed=1, silent=True)\n eval_metric = 'logloss'\n else:\n model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,\n colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,\n max_depth=5, min_child_weight=1, missing=-999, n_estimators=100,\n n_jobs=-1, nthread=None, objective='multi:softmax',\n random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,\n seed=1, silent=True)\n eval_metric = 'mlogloss'\n #### This is where you start to Iterate on Finding Important Features ################\n train_p = train[preds]\n if train_p.shape[1] < 10:\n iter_limit = 2\n else:\n iter_limit = int(train_p.shape[1]/5+0.5)\n print('Selected No. of variables = %d ' %(train_p.shape[1],))\n print('Finding Important Features...')\n for i in range(0,train_p.shape[1],iter_limit):\n if verbose == 1:\n print(' in %d variables' %(train_p.shape[1]-i))\n if train_p.shape[1]-i < iter_limit:\n X = train_p.iloc[:,i:]\n if modeltype == 'Regression':\n train_part = int((1-test_size)*X.shape[0])\n X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]\n else:\n X_train, X_cv, y_train, y_cv = train_test_split(X, y, \n test_size=test_size, random_state=seed)\n try:\n model_xgb.fit(X_train,y_train,early_stopping_rounds=5,eval_set=[(X_cv,y_cv)],\n eval_metric=eval_metric,verbose=False)\n except:\n print('XGB is Erroring. 
Check if there are missing values in your data and try again...')\n return [], []\n try:\n [important_features.append(x) for x in list(pd.concat([pd.Series(model_xgb.feature_importances_\n ),pd.Series(list(X_train.columns.values))],axis=1).rename(columns={0:'importance',1:'column'\n }).sort_values(by='importance',ascending=False)[:25]['column'])]\n except:\n print('Model training error in find top feature...')\n important_features = copy.deepcopy(preds)\n return important_features, [], []\n else:\n X = train_p[list(train_p.columns.values)[i:train_p.shape[1]]]\n #### Split here into train and test ##### \n if modeltype == 'Regression':\n train_part = int((1-test_size)*X.shape[0])\n X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]\n else:\n X_train, X_cv, y_train, y_cv = train_test_split(X, y, \n test_size=test_size, random_state=seed)\n model_xgb.fit(X_train,y_train,early_stopping_rounds=5,\n eval_set=[(X_cv,y_cv)],eval_metric=eval_metric,verbose=False)\n try:\n [important_features.append(x) for x in list(pd.concat([pd.Series(model_xgb.feature_importances_\n ),pd.Series(list(X_train.columns.values))],axis=1).rename(columns={0:'importance',1:'column'\n }).sort_values(by='importance',ascending=False)[:25]['column'])]\n important_features = list(OrderedDict.fromkeys(important_features))\n except:\n print('Multi Label possibly no feature importances.')\n important_features = copy.deepcopy(preds)\n important_features = list(OrderedDict.fromkeys(important_features))\n print(' Found %d important features' %len(important_features))\n #print(' Time taken (in seconds) = %0.0f' %(time.time()-start_time))\n numvars = [x for x in numvars if x in important_features]\n return important_features, numvars", "title": "" }, { "docid": "4753582c69720e7dfa35f79ebefda8e4", "score": "0.64417326", "text": "def xgboost_training(train_x,train_y,valid_x=None,\r\n valid_y=None,eval_fun=None,\r\n num_round=500,max_depth=3,eta=0.1,subsample=1,colsample=1):\r\n dtrain = xgb.DMatrix(data=train_x,label=train_y)\r\n dvalid = xgb.DMatrix(data=valid_x,label=valid_y)\r\n param = {'max_depth':max_depth,'eta':eta,'silent':1,'objective':'binary:logistic',\r\n 'eval_metric':['logloss'],'subsample':subsample,'colsample_bytree':colsample}\r\n watchlist = [(dvalid,'eval'),(dtrain,'train')]\r\n num_round = num_round\r\n result_dict = {}\r\n bst = xgb.train(param,dtrain,num_round,watchlist,feval=eval_fun,evals_result=result_dict)\r\n xgb.plot_importance(bst,max_num_features=20)\r\n valid_score = result_dict['eval']['LoanGrantingScore'][-1]\r\n train_score = result_dict['train']['LoanGrantingScore'][-1]\r\n valid_logloss = result_dict['eval']['logloss'][-1]\r\n train_logloss = result_dict['train']['logloss'][-1]\r\n return {'train_logloss':train_logloss,'valid_logloss':valid_logloss,\r\n 'train_score':train_score,'valid_score':valid_score}", "title": "" }, { "docid": "348bcd3aac8b0b50acd23863b499566b", "score": "0.6423355", "text": "def train_improve(dataset_name, X, Y, outer_cv=10, inner_cv=3, random_search_trials=50, inner_epochs=1, outer_epochs=5):\n skf = StratifiedKFold(n_splits=outer_cv, random_state=7, shuffle=True)\n skf2 = StratifiedKFold(n_splits=inner_cv, random_state=7, shuffle=True)\n\n fold_var = 1\n\n list_of_res = []\n for train_index, val_index in skf.split(np.zeros(X.shape[0]), Y):\n results_dict = {}\n x_train = X[train_index]\n y_train = Y[train_index]\n x_val = X[val_index]\n y_val = Y[val_index]\n max_trail = 0\n max_acc = 0\n hyper_param_dropoutstudent = np.array([0.1, 
0.2, 0.3, 0.4])\n hyper_param_batchsize = np.array([32, 64, 128])\n hyper_param_droputteacher = np.array([0.1, 0.2, 0.3, 0.4])\n hyper_param_emarate = np.array([0.999, 0.95, 0.92, 0.98])\n hyper_params_dict = {}\n for trail in range(0, random_search_trials):\n ds = np.random.choice(hyper_param_dropoutstudent)\n b = np.random.choice(hyper_param_batchsize)\n dt = np.random.choice(hyper_param_droputteacher)\n e = np.random.choice(hyper_param_emarate)\n print(f\"hyper params {(ds, b, dt, e)}\")\n acc_list = []\n for train_ind2, val_inds2 in skf2.split(np.zeros(x_train.shape[0]), y_train):\n x_train_hyperopt = x_train[train_ind2]\n y_train_hyperopt = y_train[train_ind2]\n x_val_hyperopt = x_train[val_inds2]\n y_val_hyperopt = y_train[val_inds2]\n model = MT_doubleTeacher_MODEL(((x_train_hyperopt, y_train_hyperopt), (x_val_hyperopt, y_val_hyperopt)),\n b, dt, ds, dt, e)\n model.train(inner_epochs)\n results = model.evaluate()\n acc_list.append(results['acc'])\n tf.keras.backend.clear_session()\n\n mean_acc = np.array(acc_list).mean()\n if mean_acc > max_acc:\n max_trail = trail\n max_acc = mean_acc\n hyper_params_dict[trail] = (b, dt, ds, e, mean_acc)\n # for later need to save the results in the dict\n\n best_params = hyper_params_dict[max_trail]\n model = MT_doubleTeacher_MODEL(((x_train, y_train), (x_val, y_val)), best_params[0], best_params[1],\n best_params[2], best_params[1], best_params[3])\n start_timer = time.time()\n model.train(outer_epochs)\n end_timer = time.time()\n eval_res = model.evaluate()\n results_dict['dataset_name'] = dataset_name\n results_dict['k-fold'] = fold_var\n results_dict['train_time'] = end_timer - start_timer\n results_dict.update(eval_res)\n list_of_res.append(results_dict)\n tf.keras.backend.clear_session()\n fold_var += 1\n tmp = pd.DataFrame(list_of_res)\n tmp.to_csv(f'Results/Improve_{dataset_name}.csv')\n return pd.DataFrame(list_of_res)", "title": "" }, { "docid": "6268d7c790f0a7ee4d7ca769ae82096c", "score": "0.6404148", "text": "def get_best_hyper_param(vocab_path, train_path, list_values):\n\n def cross_validation(num_folds, initial_values):\n \"\"\"\n\n :param num_folds:\n :param initial_values: [lstm_size, batch_size]\n :return: avg of loss\n \"\"\"\n\n #Init hyper prams\n lstm_size, batch_size = initial_values[0], initial_values[1]\n print(\"--Try with: lstm_size = \", lstm_size, \" and batch_size = \", batch_size)\n\n #get train data\n with open(vocab_path) as f:\n vocab_size = len(f.read().splitlines())\n\n train_data_reader = DataReader(\n data_path=train_path,\n batch_size=batch_size,\n vocab_size = vocab_size\n )\n\n X_train, y_train, sentence_lenght_train = train_data_reader._data, train_data_reader._labels, train_data_reader._sentence_length\n\n #divide training set into num_folds parts follow indexes:\n row_ids = np.array(range(X_train.shape[0]))\n valid_ids = np.split(row_ids[:len(row_ids) - len(row_ids) % num_folds], num_folds)\n valid_ids[-1] = np.append(valid_ids[-1],\n row_ids[len(row_ids) - len(row_ids) % num_folds:])\n\n train_ids = [[k for k in row_ids if k not in valid_ids[i]] for i in range(num_folds)]\n avg_error_rate = 0\n\n for i in range(num_folds):\n # with each i, we have corresponding train-val sets:\n # (k-1) parts for train and the rest for val\n valid_X_part, valid_y_part, valid_sentence_length = X_train[valid_ids[i]], y_train[valid_ids[i]], sentence_lenght_train[valid_ids[i]]\n train_X_part, train_y_part, train_sentence_length = X_train[train_ids[i]], y_train[train_ids[i]], sentence_lenght_train[train_ids[i]]\n Valid = 
[valid_X_part, valid_y_part, valid_sentence_length]\n Train = [train_X_part, train_y_part, train_sentence_length]\n\n # fit and compute corresponding RSS:\n avg_error_rate += train_and_evaluate_RNN_choose_param(vocab_path, Train, Valid,\n lstm_size, batch_size)\n\n return avg_error_rate / num_folds\n\n def range_scan(best_values, min_error_rate, list_values):\n \"\"\"\n Use curr_values given, find the best_values from curr_values\n :param best_values:\n :param min_error_rate:\n :param curr_values:\n :return:\n \"\"\"\n \n for values in list_values:\n error_rate = cross_validation(5, initial_values=values)\n if error_rate < min_error_rate:\n min_error_rate = error_rate\n best_values = values\n \n return best_values, min_error_rate\n \n best_values, min_error_rate = range_scan(best_values = [32, 32], min_error_rate = 1000**2, list_values=list_values)\n\n return best_values", "title": "" }, { "docid": "741b366eefdaba9950a85289bf8c82cd", "score": "0.6371084", "text": "def xgboost():\n train_x, test_x, train_y, test_y = prepare_data()\n train_y = train_y.reshape((train_y.shape[0], ))\n \n \n clf = XGBClassifier(n_estimators=20)\n \n start = time.time()\n clf.fit(train_x, train_y)\n end = time.time()\n \n y_pred = clf.predict(test_x)\n\n time_ = end - start\n accuracy = 100 * accuracy_score(test_y, y_pred)\n\n print(\"### XGB ###\\n\")\n print(\"Training lasted %.2f seconds\" % time_)\n print(\"Accuracy = %.2f\" % (accuracy))\n\n return(time_, accuracy)", "title": "" }, { "docid": "d27bdf38c37e89f208d30020a7fbd0bb", "score": "0.63423693", "text": "def objective(params, iterations):\n max_depth, learning_rate, max_features = params\n\n reg.set_params(max_depth=max_depth,\n learning_rate=learning_rate,\n max_features=max_features)\n\n return -np.mean(cross_val_score(reg, X, y, cv=5, n_jobs=-1,\n scoring=\"neg_mean_absolute_error\"))", "title": "" }, { "docid": "62eca370bded45213f61c28fc4688e7f", "score": "0.6338585", "text": "def cross_validation_find_best(y, x, k_fold,seed, lambdas, gammas, initial_w, max_iters=10000):\r\n # split data in k fold\r\n k_indices = build_k_indices(y, k_fold, seed)\r\n # define lists to store the loss of training data and test data\r\n best_accuracy = 0\r\n best_lambda = 0\r\n best_gamma = 0\r\n best_prediction= []\r\n for gamma in gammas:\r\n for lambda_ in lambdas:\r\n currentw, currentaccuracy ,currentprediction =cross_validation_RegLogGD(y,x,k_fold,seed,lambda_, initial_w, max_iters, gamma)\r\n if currentaccuracy> best_accuracy: \r\n best_accuracy= currentaccuracy\r\n best_lambda =lambda_\r\n best_gamma=gamma\r\n best_prediction[0] = currentprediction\r\n print(\"Best accuracy={ba}, Best lambda={bl:.8f}, Best gamma={bg:.3f}\".format(ba=best_accuracy, bl=best_lambda, bg=best_gamma))\r\n return best_accuracy, best_lambda, best_gamma", "title": "" }, { "docid": "79de5d1f447463f23d0c942510355acb", "score": "0.6327437", "text": "def gridSearch(df, folds, k, algo, grid, features, sampling_method, ratio, pool):\n\n best_hyper = None\n best_precision = 0\n best_recall = 0\n best_fscore = 0\n\n for i in range(len(grid)):\n params = list(grid[i])\n print(params)\n classifier = generateClassifier(algo, params, features)\n modelPerformance = crossValidate(df, folds, k, classifier, features, sampling_method, ratio, pool)\n if modelPerformance[2] > best_fscore:\n best_hyper = params\n best_precision = modelPerformance[0]\n best_recall = modelPerformance[1]\n best_fscore = modelPerformance[2]\n\n return best_hyper, best_precision, best_recall, best_fscore", "title": "" }, { 
"docid": "ef363275a78060b1a56d9e5c2a2d55f3", "score": "0.6319666", "text": "def optimize_model(self):\n print('Feature importances:', list(self.gbm.feature_importances_))\n\n estimator = lgb.LGBMRegressor(num_leaves=31)\n\n param_grid = {\n 'learning_rate': [0.08, 0.085, 0.09, 0.095, 0.1],\n 'n_estimators': [25, 26, 27, 28, 29, 30]\n }\n\n gbm = GridSearchCV(estimator, param_grid)\n\n gbm.fit(self.X_train, self.y_train)\n\n print('Best parameters found by grid search are:', gbm.best_params_)", "title": "" }, { "docid": "159181f8ab31e304d13e9737777bda4d", "score": "0.630788", "text": "def build_xgboost_model(X_train, y_train, train_indices, validation_indices): # , sample_size \n \n # Record running time\n start = time.time()\n\n # Define model and parameters \n model = xgb.XGBClassifier() \n \n xgb_params = {\n 'objective': 'binary:logistic'\n , 'base_score': 0.5 \n , 'n_estimators': 40 \n , 'learning_rate': 0.1\n , 'max_depth': 4 \n , 'scale_pos_weight': 2\n , 'seed': 0\n }\n model.set_params(**xgb_params) \n \n # Store predictions\n predictions = np.zeros(X_train.shape[0], dtype = np.float64)\n\n # Train model on validation sets\n for i in range(0, n_folds): \n print \"Fold \" + str(i) + \" :\"\n \n # Fit model on sampled data \n model.fit(X_train.ix[train_indices[i]], y_train[train_indices[i]]\n , eval_set = [(X_train.ix[train_indices[i]], y_train[train_indices[i]]), (X_train.ix[validation_indices[i]], y_train[validation_indices[i]])] \n , eval_metric = \"auc\" \n , early_stopping_rounds = 5\n , verbose = True) \n \n # Evaluate predictions over 5-folds\n predictions[validation_indices[i]] = [x[1] for x in model.predict_proba(X_train.ix[validation_indices[i]])]\n score = str(round(roc_auc_score(y_train[validation_indices[i]], predictions[validation_indices[i]]), 4))\n print \"ROC AUC Score: \" + score\n \n # Print learning curves\n plot_learning_curves(model)\n \n # Evaluate predictions over 5-folds\n cv_score = str(round(roc_auc_score(y_train, predictions), 4))\n print \"ROC AUC Score: \" + cv_score \n \n # Print running time\n end = time.time()\n print \"\\nTime Elapsed: \" + str(end - start) + \" seconds\"\n \n return model, cv_score, predictions", "title": "" }, { "docid": "53bece4c3b687c7ae3d071be49ad9c1f", "score": "0.62965417", "text": "def evaluate(X, Y, hyperparams):\n\n# from scikits.learn.cross_val import LeaveOneOut\n# loo = LeaveOneOut(len(Y))\n from scikits.learn.cross_val import KFold\n K = 5\n# print >> sys.stderr, \"Using 10-fold cross-validation\"\n loo = KFold(len(Y), K)\n# print loo\n\n all_y_test = []\n all_y_test_predict = []\n\n nlltotal = 0.\n for train, test in loo:\n trainidx = [idx for idx in range(len(train)) if train[idx]]\n testidx = [idx for idx in range(len(test)) if test[idx]]\n X_train, X_test, y_train, y_test = X[trainidx], X[testidx], Y[trainidx], Y[testidx]\n# print \"train\", X_train.shape, y_train.shape\n# print \"test\", X_test.shape, y_test.shape\n\n if len(frozenset(y_train)) == 1:\n # Skip training on this LOO set if there is only one y-value in the training set\n continue\n\n clf = fit_classifier(X_train, y_train, hyperparams)\n\n# print \"target\", y_test\n## print \"predict\", clf.predict(X_test)\n# print \"predict\", clf.predict_proba(X_test)\n## print \"df\", clf.decision_function(X_test)\n## print \"score\", clf.score(X_test, y_test)\n\n# y_test_predict = clf.predict_proba(X_test)\n y_test_predict = clf.predict(X_test)\n# print y_test_predict\n\n all_y_test.append(y_test)\n all_y_test_predict.append(y_test_predict)\n\n## print 
clf.best_estimator\n# print precision_score(y_test, y_test_predict)\n# print recall_score(y_test, y_test_predict)\n# print classification_report(y_test, y_test_predict)\n#\n#\n# assert y_test.shape == (1,)\n# assert y_test_predict.shape == (1,)\n# if y_test_predict[0] >= 1.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f >= 1. !!!\" % y_test_predict[0]\n# y_test_predict[0] = 1-1e-9\n# elif y_test_predict[0] <= 0.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f <= 0. !!!\" % y_test_predict[0]\n# y_test_predict[0] = 1e-9\n#\n# if y_test[0] == 1:\n# probtarget = y_test_predict[0]\n# else:\n# assert y_test[0] == 0\n# probtarget = 1-y_test_predict[0]\n## print \"probtarget\", probtarget\n## print y_test[0], y_test_predict[0], repr(probtarget)\n# nll = -math.log(probtarget)\n## print \"nll\", nll\n## print\n#\n# nlltotal += nll\n# nlltotal /= len(Y)\n## print \"nlltotal %f (alpha=%f, n_iter=%d)\" % (nlltotal, alpha, n_iter)\n# return nlltotal\n\n y_test = numpy.hstack(all_y_test)\n y_test_predict = numpy.hstack(all_y_test_predict)\n assert y_test.ndim == 1\n assert y_test_predict.ndim == 1\n assert Y.shape == y_test.shape\n assert y_test.shape == y_test_predict.shape\n# import plot\n# print \"precision_recall_fscore_support\", scikits.learn.metrics.precision_recall_fscore_support(y_test, y_test_predict)\n f1 = f1_score(y_test, y_test_predict)\n# print \"\\tf1 = %0.3f when evaluating with %s\" % (f1, hyperparams)\n# sys.stdout.flush()\n# precision, recall, thresholds = scikits.learn.metrics.precision_recall_curve(y_test, y_test_predict)\n# plot.plot_precision_recall(precision, recall)\n# print \"confusion_matrix\", scikits.learn.metrics.confusion_matrix(y_test, y_test_predict)\n# print \"roc_curve\", scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# fpr, tpr, thresholds = scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# print \"auc\", scikits.learn.metrics.auc(fpr, tpr)\n# plot.plot_roc(fpr, tpr)\n return f1", "title": "" }, { "docid": "95912497408e65e5ab4532c2e8f48fd5", "score": "0.6288122", "text": "def TrainXGB(self,train_DF,test_DF, n_estimators=150, max_depth=3, min_child_weight=1,seed=0,multi=False,nclass = 4,class_weights=None):\n # Create XGB object with the hyperparameters desired\n xgb = XGBClassifier(n_estimators=n_estimators,\n max_depth=max_depth, \n min_child_weight=min_child_weight,\n objective='multi:softprob',#'multi:softmax',\n num_class=nclass,\n seed=seed,\n verbose_eval=True,\n verbosity=1,)\n #tree_method='gpu_hist',\n #gpu_id = 0)\n # Fit to the training set, making sure to include event weights\n history = xgb.fit(train_DF[self.var_list], # X\n train_DF[\"isSignal\"],\n verbose=True,\n eval_metric='logloss', # yii\n sample_weight=class_weights, # weights\n )\n \n # Score the testing set\n Xg_score_test = xgb.predict(test_DF[self.var_list])#[:,0] # predict_proba returns [prob_bkg, prob_sig] which have the property prob_bkg+prob_sig = 1 so we only need one. Chose signal-ness\n # Score the training set (for overtraining analysis)\n Xg_score_train = xgb.predict(train_DF[self.var_list])#[:,0] # predict_proba returns [prob_bkg, prob_sig] which have the property prob_bkg+prob_sig = 1 so we only need one. 
Chose signal-ness\n Xg_score_test = pd.Series(Xg_score_test, index=test_DF.index) \n Xg_score_train = pd.Series(Xg_score_train, index=train_DF.index) \n return Xg_score_train, Xg_score_test,history , xgb", "title": "" }, { "docid": "7bb836fab7ad06186c9d9be15eb56158", "score": "0.6240789", "text": "def compare_gradient_xg_boost(X,y): \n gb = GradientBoostingClassifier()\n xgb = XGBClassifier()\n\n gb_roc_results = []\n gb_brier_results = []\n xgb_roc_results = []\n xgb_brier_results = []\n\n skf = KFold(n_splits=5, shuffle=False)\n\n for train, test in skf.split(X,y):\n X_train = X.iloc[train]\n y_train = y.iloc[train]\n X_test = X.iloc[test]\n y_test = y.iloc[test]\n\n gb.fit(X_train,y_train)\n xgb.fit(X_train,y_train)\n gb_predictions = gb.predict(X_test)\n xgb_predictions = xgb.predict(X_test)\n # xgb_predictions = [round(value) for value in xgb_pre_dictions]\n\n gb_roc_auc = roc_auc_score(y_test, gb_predictions)\n gb_brier = brier_score_loss(y_test, gb_predictions)\n xgb_roc_auc = roc_auc_score(y_test, xgb_predictions)\n xgb_brier = brier_score_loss(y_test, xgb_predictions)\n\n gb_roc_results.append(gb_roc_auc)\n gb_brier_results.append(gb_brier)\n xgb_roc_results.append(xgb_roc_auc)\n xgb_brier_results.append(xgb_brier)\n \n print(f'GB Brier:{np.mean(gb_brier_results)}')\n print(f'XGB Brier:{np.mean(xgb_brier_results)}')\n print(f'GB ROC_AUC:{np.mean(gb_roc_results)}')\n print(f'XGB ROC_AUC:{np.mean(xgb_roc_results)}')\n\n\n # WHY LOWER THAN the method done with all models?!?!?!?!?!\n # SEEMS LIKE CROSS VAL IS ALWAYS SCORING HIGHER????? NOT GOING TO WORRY FOR NOW \n # SINCE GB SEEMS TO OUTPERFORM XGBOOST ANYWAYS\n # GB Brier:0.3334479696125048\n # XGB Brier:0.3567060173798983\n # GB ROC_AUC:0.622271279514713\n # XGB ROC_AUC:0.6104926538981456", "title": "" }, { "docid": "1a0ae96e9fcfc7aba97e995856fb131a", "score": "0.6232317", "text": "def fit_classifier_with_hyperparamter_search(\n X:np.ndarray,y:np.ndarray, basemod, cv:int, param_grid:Dict[str, List[Any]],\n scoring='f1_macro',verbose:bool=True):\n splitter = StratifiedShuffleSplit(n_splits=cv, test_size=0.20)\n crossvalidator = GridSearchCV(basemod, param_grid, cv=splitter, scoring=scoring)\n crossvalidator.fit(X, y)\n if verbose:\n print(\"Best params: {}\".format(crossvalidator.best_params_))\n print(\"Best score: {0:0.03f}\".format(crossvalidator.best_score_))\n return crossvalidator.best_estimator_", "title": "" }, { "docid": "88fe0eb6fc6796c230806fb20a1df31e", "score": "0.62235606", "text": "def optimize_lgbm_params(train_df, target_df):\n def _lgbm_evaluate(**params):\n \"\"\"Wrapper for KFold LGBM parameter evaluation\n Args:\n params(dict): Parameter to evaluate based on LGBM outcome\n\n Returns:\n roc_auc_score(float): ROC-AUC-value to optimize by Bayesian optimization\n \"\"\"\n\n warnings.simplefilter('ignore')\n params['num_leaves'] = int(params['num_leaves'])\n params['max_depth'] = int(params['max_depth'])\n\n clf = LGBMClassifier(**params, n_estimators=10000, nthread=4)\n\n folds = KFold(n_splits=2, shuffle=True, random_state=1001)\n test_pred_proba = np.zeros(train_df.shape[0])\n for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df, target_df)):\n train_x, train_y = train_df[feats].iloc[train_idx], target_df.iloc[train_idx]\n valid_x, valid_y = train_df[feats].iloc[valid_idx], target_df.iloc[valid_idx]\n\n clf.fit(train_x, train_y,\n eval_set=[(train_x, train_y), (valid_x, valid_y)], eval_metric='auc',\n verbose=False, early_stopping_rounds=100)\n\n test_pred_proba[valid_idx] = 
clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]\n\n del train_x, train_y, valid_x, valid_y\n gc.collect()\n\n return roc_auc_score(target_df, test_pred_proba)\n\n # parameter ranges for optimization\n params = {'colsample_bytree': (0.8, 1),\n 'learning_rate': (.015, .025),\n 'num_leaves': (33, 35),\n 'subsample': (0.8, 1),\n 'max_depth': (7, 9),\n 'reg_alpha': (.03, .05),\n 'reg_lambda': (.06, .08),\n 'min_split_gain': (.01, .03),\n 'min_child_weight': (38, 40)}\n\n bo = BayesianOptimization(_lgbm_evaluate, params)\n bo.maximize(init_points=5, n_iter=5)\n\n best_params = bo.max['params']\n best_params['n_estimators'] = 10000\n best_params['nthread'] = 4\n best_params['num_leaves'] = int(best_params['num_leaves'])\n best_params['max_depth'] = int(best_params['max_depth'])\n\n return best_params", "title": "" }, { "docid": "353d7eaec74ccebce6d8b3c0aa30305c", "score": "0.62216985", "text": "def gradient_boosting_regression(x, y):\n x_test, x_train, x_tv, x_validation, y_test, y_train, y_tv, y_validation = dataset_split(x, y)\n\n gbdt = GradientBoostingRegressor(\n loss='ls'\n , learning_rate=0.05\n , n_estimators=100\n , subsample=1\n , min_samples_split=2\n , min_samples_leaf=1\n , max_depth=3\n , init=None\n , random_state=None\n , max_features=None\n , alpha=0.9\n , verbose=0\n , max_leaf_nodes=None\n , warm_start=False\n )\n sum_criterion = 0\n sum_test_mae = 0\n\n for i in range(max_iteration):\n x_test, x_train, x_tv, x_validation, y_test, y_train, y_tv, y_validation = dataset_split(x, y)\n gbdt.fit(x_train, y_train)\n y_pred_train = evaluate(gbdt.predict(x_train), y_train).mae\n y_pred_validation = evaluate(gbdt.predict(x_validation), y_validation).mae\n train_criterion = weighted_cost(y_pred_train, y_pred_validation)\n\n gbdt.fit(x_tv, y_tv)\n y_pred_test = gbdt.predict(x_test)\n sum_criterion = sum_criterion + train_criterion\n sum_test_mae = sum_test_mae + evaluate(y_pred_test, y_test).mae\n\n return sum_criterion / max_iteration, sum_test_mae / max_iteration", "title": "" }, { "docid": "c06c491902d85202f1e9fc626f25034e", "score": "0.6200181", "text": "def grid_search_xgb(sampling, X_test, y_test, X_train, y_train):\n xgboost = XGBClassifier()\n pipeline = Pipeline(steps=[['sampling', sampling],\n ['classifier', xgboost]])\n param_grid_ = {\n 'n_estimators': [100, 1000, 10000],\n 'learning_rate': [0.01, 0.1, 0.2]\n }\n param_grid_clf = {\n 'classifier__n_estimators': [100, 1000, 10000],\n 'classifier__learning_rate': [0.01, 0.1, 0.2]\n }\n if sampling is None:\n estimator = xgboost\n param_grid = param_grid_\n else:\n estimator = pipeline\n param_grid = param_grid_clf\n # Fitting grid search to the train data with 5 folds\n gridsearch = GridSearchCV(estimator=estimator,\n param_grid=param_grid,\n cv=StratifiedKFold(\n n_splits=5, random_state=1, shuffle=True),\n scoring='f1')\n gridsearch.fit(X_train, y_train)\n y_pred = gridsearch.predict(X_test)\n y_pred_proba = gridsearch.predict_proba(X_test)[:, 1]\n print(\"Best: %f using %s\" %\n (gridsearch.best_score_, gridsearch.best_params_))\n conf_matrix = confusion_matrix(y_test, y_pred)\n # Calculating and printing the f1 score\n f1_train = gridsearch.best_score_\n f1_test = f1_score(y_test, y_pred)\n print('The f1 score for the testing data:', f1_test)\n auprc = average_precision_score(y_test, y_pred_proba)\n return f1_train, f1_test, auprc, conf_matrix, gridsearch", "title": "" }, { "docid": "9e8d978de1c50be92ff8caac8836517b", "score": "0.6195012", "text": "def param_cv(lm, max_units, max_layers, learning_rate, 
train_data, train_targets, target_mean, target_std):\n # Initialize array to hold CV results for all combinations of parameters\n results = np.zeros((1, (NUM_PARAM + NUM_MEASURES*2))) # Multiply by 2 to account for mean + standard deviation\n\n # Initialize data frame that holds the possible indices for the training data\n fold_indices = pd.DataFrame(np.arange(train_data.shape[0]))\n\n # Size of each fold that is added onto the existing training fold\n fold_size = int(np.floor(train_data.shape[0]/(NUM_FOLDS+1))) # Add 1 to number of folds due to rolling basis for CV\n leftover = train_data.shape[0] % (NUM_FOLDS+1) # Leftover number of points from not evenly dividing into NUM_FOLDS\n\n # Iterate over all possible combinations of hyperparameters\n for i in range(max_layers):\n num_layers = i + 1 # Current number of layers\n for j in range(max_units):\n num_units = j + 1 # Current number of units\n for k in range(len(learning_rate)):\n lr = learning_rate[k] # Current learning rate\n\n # Initialize DataFrame to store performance measures for each test fold\n cv_measures = pd.DataFrame(columns=['RMSE',\n 'MASE',\n 'NMSE'])\n\n # Perform k-fold cross validation for the current set of hyperparameters\n for fold in range(1, NUM_FOLDS+1):\n # Get current training and test fold indices\n train_indices = np.ravel(fold_indices.iloc[:(fold*fold_size+leftover)])\n test_indices = np.ravel(fold_indices.iloc[(fold*fold_size+leftover):((fold+1)*fold_size+leftover)])\n\n # Separate inputs and labels into training and test folds\n train_fold = train_data.iloc[train_indices]\n train_fold_targets = train_targets.iloc[train_indices]\n\n test_fold = train_data.iloc[test_indices]\n test_fold_targets = train_targets.iloc[test_indices]\n\n # Build and train the MLP\n mlp = build_ff_nn(len(train_data.keys()), lr, lm, num_layers, num_units)\n mlp.fit(train_fold.values,\n train_fold_targets,\n epochs=NUM_EPOCHS,\n verbose=False,\n callbacks=EARLY_STOPPING)\n\n # Predict test fold data\n test_fold_pred = mlp.predict(test_fold)\n\n # Inverse normalize the test predictions and targets\n test_fold_pred = target_std*test_fold_pred + target_mean\n test_fold_targets = target_std*test_fold_targets + target_mean\n\n # Calculate performance measures\n (rmse, nmse, mase) = calc_performance(test_fold_targets, test_fold_pred)\n\n # Add them to the results data frame\n cv_measures = cv_measures.append(pd.Series(np.concatenate((rmse, nmse, mase), axis=None), index=cv_measures.columns), ignore_index=True)\n\n tf.keras.backend.clear_session() # Destroy current TF graph to prevent cluttering from old models\n\n # Calculate mean and standard deviation of performance measures from cross-validation\n cv_mean = cv_measures.mean()\n cv_std = cv_measures.std()\n cv_results = [val for pair in zip(cv_mean, cv_std) for val in pair]\n results_vector = np.concatenate((num_units, num_layers, lr, lm, cv_results), axis=None)\n results = np.vstack((results, results_vector))\n\n # Print progress reports\n print(\"layers: {} ; units: {} ; lr: {} ; lm: {} completed\".format(num_layers, num_units, lr, lm))\n results = np.delete(results, obj=0, axis=0) # Remove first row that was used to initialize the array\n return results", "title": "" }, { "docid": "0b3519e7c5b2f227ce6cb9c33e5b0fae", "score": "0.61477506", "text": "def fit_gbm(X, y):\n\n # search for optimal parameters\n gridsearch = GridSearchCV(\n estimator=GradientBoostingClassifier(),\n param_grid={\n 'learning_rate': [0.01, 0.05, 0.1],\n 'max_depth': range(3, 10),\n 'n_estimators': [10, 50, 100]\n 
},\n cv=5, verbose=0, n_jobs=-1)\n\n # determine best parameters\n gridsearch_result = gridsearch.fit(X, y)\n best_params = gridsearch_result.best_params_\n\n # declare and fit best model\n gbm = GradientBoostingClassifier(learning_rate=best_params[\"learning_rate\"], max_depth=best_params['max_depth'],\n n_estimators=best_params[\"n_estimators\"], verbose=False)\n\n gbm.fit(X.values, y)\n\n return [gbm, best_params]", "title": "" }, { "docid": "e656dd63e154725702b182706cd24491", "score": "0.6080607", "text": "def cv_best_hyperparams(model: BaseEstimator, X, y, k_folds,\n degree_range, lambda_range):\n\n # TODO: Do K-fold cross validation to find the best hyperparameters\n # Notes:\n # - You can implement it yourself or use the built in sklearn utilities\n # (recommended). See the docs for the sklearn.model_selection package\n # http://scikit-learn.org/stable/modules/classes.html#module-sklearn.model_selection\n # - If your model has more hyperparameters (not just lambda and degree)\n # you should add them to the search.\n # - Use get_params() on your model to see what hyperparameters is has\n # and their names. The parameters dict you return should use the same\n # names as keys.\n # - You can use MSE or R^2 as a score.\n\n # ====== YOUR CODE: ======\n #raise NotImplementedError()\n # ========================\n\n kf = sklearn.model_selection.KFold(k_folds)\n smallest_loss = np.inf\n best_params = {\"bostonfeaturestransformer__degree\": 1, \"linearregressor__reg_lambda\": 0.2}\n count = 0\n\n\n for lam in lambda_range:\n for deg in degree_range:\n model.set_params(linearregressor__reg_lambda=lam, bostonfeaturestransformer__degree=deg)\n avg_mse = 0.0\n count += 1\n\n for train_i, test_i in kf.split(X):\n x_train = X[train_i]\n y_train = y[train_i]\n model.fit(x_train, y_train)\n y_pred = model.predict(X[test_i])\n avg_mse += np.square(y[test_i] - y_pred).sum() / (2 * X.shape[0])\n\n avg_mse /= k_folds\n\n #check if the current params are the best\n if avg_mse <= smallest_loss:\n smallest_loss = avg_mse\n best_params = {\"linearregressor__reg_lambda\": lam, \"bostonfeaturestransformer__degree\": deg}\n # ========================\n print(count)\n return best_params", "title": "" }, { "docid": "a4a307a7563e0569ef038de84cd384cf", "score": "0.6062954", "text": "def _tune_model_helper(self, n_tries, model_class, hyperparameters_dict, X_train, y_train, X_test, y_test):\n\n def objective(params): \n xgb_params = {\"objective\": \"multiclass\",\n \"booster\": \"gbtree\",\n \"metric\": \"qwk\",\n \"num_class\": 5,\n \"max_depth\": int(params[\"max_depth\"]),\n \"eta\": params[\"eta\"],\n \"subsample\": params[\"subsample\"],\n \"colsample_bytree\": params[\"colsample_bytree\"],\n \"gamma\": params[\"gamma\"],\n \"min_child_weight\": params[\"min_child_weight\"],\n \"verbosity\": 0,\n \"silent\": 1,\n \"seed\": 17,\n \"nthread\": 30}\n\n print(\"Params:\", xgb_params)\n\n xgb = BlendedXGBClassifier(xgb_params, early_stopping_rounds = 150, eval_size = 0.2, eval_split_type = \"random\", verbose_eval = 100, nrounds = 10000)\n\n # Train the model\n xgb.fit(X_train, y_train)\n \n # Make predictions\n predictions_npa = xgb.predict(X_test)\n \n # Evaluate the model\n qwk = quadratic_weighted_kappa(y_test, predictions_npa)\n print(xgb_params)\n print(\"QWK = \", qwk)\n\n return -qwk # Return negative value as we want to maximize it\n\n # Stores all information about each trial and about the best trial\n trials = hyperopt.Trials()\n\n best = fmin(fn = objective, trials = trials, space = hyperparameters_dict, 
algo = tpe.suggest, max_evals = n_tries)\n\n return best, trials", "title": "" }, { "docid": "a1c50ab444bc20b49b75a166cfdaca91", "score": "0.6056003", "text": "def xgbc_random(kwargs: dict, max_duration: int, train_X, train_y, test_X, test_y):\n\n best_kwargs = None\n best_score = None\n used_kwargs = []\n start = time.time()\n while time.time() - start < max_duration:\n logging.info(\"Trace xgbc_random() --- starting new iteration --- time elapsed = {} seconds\".format(time.time() - start))\n try_kwargs = {k:np.random.choice(v) for (k, v) in kwargs.items()}\n if try_kwargs not in used_kwargs:\n logging.info(\"Trace xgbc_random() --- trying hyperparameters = {}\".format(try_kwargs))\n used_kwargs.append(try_kwargs)\n classifier = XGBClassifier(**try_kwargs)\n classifier.fit(train_X, train_y, verbose=False)\n pred_y = classifier.predict_proba(test_X)\n score = log_loss(test_y, pred_y)\n if best_score is None or score < best_score:\n best_score = score\n best_kwargs = try_kwargs\n logging.info(\"Trace xgbc_random() --- best_score updated to {}\".format(best_score))\n logging.info(\"Trace xgbc_random() --- best_kwargs updated to {}\".format(best_kwargs))\n else:\n logging.info(\"Trace xgbc_random() --- skipping hyperparameters --- they have been tried.\")\n continue\n logging.info(\"Trace xgbc_random() --- duration exceeded --- process quitting with best_score = {}\".format(best_score))\n logging.info(\"Trace xgbc_random() --- duration exceeded --- process quitting with best_kwargs = {}\".format(best_kwargs))\n return best_kwargs, best_score", "title": "" }, { "docid": "3f2bfab96a02808de54e9211031e30a0", "score": "0.6048841", "text": "def feature_combination(df, label, num_boost_round=1000, params=XGB_PARAMS, pos_label=1,\n exclude_list=[]):\n if df.shape[1] >=10:\n raise Exception(\"Too many combinations to iterate\")\n col = list(set(df.columns) - set([label] + exclude_list))\n x = df[col]\n y = df[label]\n x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)\n xgb_res = pd.DataFrame()\n for i in range(2, x.shape[1] + 1):\n print(i)\n cc = list(combinations(x.columns, i))\n for j in range(len(cc)):\n print(list(cc[j]))\n model, pred_train_value, pred_val_value = xgbt(x_train[list(cc[j])]\n , y_train\n , x_val[list(cc[j])]\n , y_val\n , None\n , params=params\n , num_boost_round=num_boost_round\n , early_stopping_rounds=50\n , make_prediction=True)\n add = model_summary(pred_train_value, y_train, pred_val_value, y_val,\n pos_label=pos_label, use_formater=False, plot=False)\n add['combination'] = '+'.join(list(cc[j]))\n add = add.reset_index().set_index('combination')\n xgb_res = pd.concat([xgb_res, add], axis=0)\n if len(col) == 2:\n return xgb_res\n else:\n train_res = xgb_res.groupby(['index', 'combination']).sum().loc['train']\n val_res = xgb_res.groupby(['index', 'combination']).sum().loc['val']\n train_res = train_res.rename(columns={'auc': 'train_auc', 'ks': 'train_ks'})\n val_res = val_res.rename(columns={'auc': 'val_auc', 'ks': 'val_ks'})\n res = pd.concat([val_res, train_res], axis=1)\n res = res.sort_values(by='val_auc', ascending=False)\n return res[['val_auc', 'val_ks', 'train_auc', 'train_ks']]", "title": "" }, { "docid": "143caeea2f27516f58d58784b0dc74c7", "score": "0.60474753", "text": "def gridsearchCV(base_model, X, n_folds, hyperparams):\n _sparse_checker(X, '`X`')\n\n folds = cross_val_folds(X, n_folds=n_folds)\n\n keys, values = zip(*hyperparams.items())\n p_total = []\n hyperparam_vals = []\n max_score, min_score, mean_score = [], [], 
[]\n df = pd.DataFrame(columns=list(keys))\n for val in tqdm(itertools.product(*values)):\n params = dict(zip(keys, val))\n this_model = copy.deepcopy(base_model)\n print_line = []\n for k, v in params.items():\n setattr(this_model, k, v)\n print_line.append((k, v))\n print(' | '.join('{}: {}'.format(k, v) for (k, v) in print_line))\n precision = []\n for fold in np.arange(n_folds):\n X_train = folds[fold]['train']\n X_test = folds[fold]['test']\n p = _get_precision(this_model, X_train, X_test, K=10)\n precision.append(p)\n p_total.append(precision)\n hyperparam_vals.append(list(val))\n max_score.append(max(precision))\n min_score.append(min(precision))\n mean_score.append(np.mean(precision))\n\n results = pd.DataFrame(hyperparam_vals)\n results['mean_score'] = mean_score\n results['max_score'] = max_score\n results['min_score'] = min_score\n return results", "title": "" }, { "docid": "ee067fd3797daef828e61e21234f962e", "score": "0.6044487", "text": "def fit_classifier_with_hyperparameter_search(\n X:np.ndarray, y:List[int], basemod, cv, param_grid, scoring='f1_macro', verbose=True):\n splitter = StratifiedShuffleSplit(n_splits=cv, test_size=0.20)\n # Find the best model within param_grid:\n crossvalidator = GridSearchCV(basemod, param_grid, cv=splitter, scoring=scoring)\n crossvalidator.fit(X, y)\n # Report some information:\n if verbose:\n print(\"Best params: {}\".format(crossvalidator.best_params_))\n print(\"Best score: {0:0.03f}\".format(crossvalidator.best_score_))\n # Return the best model found:\n return crossvalidator.best_estimator_", "title": "" }, { "docid": "adfa6f055f97fe4cdb0a0e9398a98e2d", "score": "0.60438967", "text": "def xgboost_model(train_X, train_y):\n params = { 'subsample': 0.8, 'min_child_weight': 1, 'reg_lambda': 1.2,\n 'learning_rate': 0.1, 'reg_alpha': 0.15, 'max_depth': 3,\n 'gamma': 0.0 }\n xgb = xgboost.XGBClassifier(nthread=-1, seed=1234, n_estimators=150,\n **params)\n xgb.fit(train_X, train_y)\n return xgb", "title": "" }, { "docid": "598bff8a2f3001d2ab982ed480c9bb13", "score": "0.6036065", "text": "def optimize_catboost(x_train, y_train, pbounds, n_iter, init_points):\n\n def catboost_crossval(learning_rate, l2_leaf_reg, bagging_temperature, depth):\n \"\"\"\n Wrapper for catboost_cv\n :param learning_rate:\n :param l2_leaf_reg:\n :param bagging_temperature:\n :param depth:\n :return:\n \"\"\"\n return catboost_cv(learning_rate, l2_leaf_reg, bagging_temperature, depth, x_train, y_train)\n\n optimizer = BayesianOptimization(\n f=catboost_crossval,\n pbounds=pbounds,\n bounds_transformer=SequentialDomainReductionTransformer(),\n random_state=42,\n verbose=2\n )\n\n optimizer.maximize(n_iter=n_iter, init_points=init_points, acq='ei')\n\n return optimizer", "title": "" }, { "docid": "7bb7fbdcb9ccf81443dc18851022fc79", "score": "0.6034345", "text": "def cv (X, y, folds, alg, param_grid, regression):\n if regression:\n scoring = multi_scorer_regression()\n else:\n scoring = multi_scorer_classification()\n \n print (\"\\n\\n\\nDoing Gridsearch\\n\")\n\n kfold_cv = cross_validation.KFold(X.shape[0], n_folds=folds, shuffle=True)\n model = grid_search.GridSearchCV(cv = kfold_cv, estimator = alg, param_grid = param_grid, scoring = scoring, n_jobs=4)\n model = model.fit(X,y)\n # model trained on all data\n y_pred = model.predict(X)\n \n if regression:\n print (\"Best Model Train RMSE: %f\" % rmse(y, y_pred))\n print (\"Best Model Train Spearman %f\" % spearman(y, y_pred))\n else:\n print (\"Best Model Train AUC: %f\" % roc_auc_score(y, y_pred))\n print (\"Best 
Model Train F1 %f\" % f1_score(y, y_pred))\n print (\"Best Model Train Accuracy %f\" % accuracy_score(y, y_pred))\n \n\n\n print(\"\\nBest parameters set found:\")\n best_parameters, score, _ = max(model.grid_scores_, key=lambda x: x[1])\n print(best_parameters, score)\n print (\"\\n\")\n print(\"Grid scores:\")\n for params, mean_score, scores in model.grid_scores_:\n print(\"%0.5f (+/-%0.05f) for %r\"\n % (mean_score, scores.std() / 2, params))\n\n return model", "title": "" }, { "docid": "5169281ebe42440dc53c219a96cbf10c", "score": "0.60183525", "text": "def train_paper(dataset_name, X, Y, outer_cv=10, inner_cv=3, random_search_trials=50, inner_epochs=1, outer_epochs=5):\n skf = StratifiedKFold(n_splits=outer_cv, random_state=7, shuffle=True)\n skf2 = StratifiedKFold(n_splits=inner_cv, random_state=7, shuffle=True)\n\n fold_var = 1\n\n list_of_res = []\n for train_index, val_index in skf.split(np.zeros(X.shape[0]), Y):\n results_dict = {}\n x_train = X[train_index]\n y_train = Y[train_index]\n x_val = X[val_index]\n y_val = Y[val_index]\n max_trail = 0\n max_acc = 0\n hyper_param_dropoutstudent = np.array([0.1, 0.2, 0.3, 0.4])\n hyper_param_batchsize = np.array([32, 64, 128])\n hyper_param_droputteacher = np.array([0.1, 0.2, 0.3, 0.4])\n hyper_param_emarate = np.array([0.999, 0.95, 0.92, 0.98])\n hyper_params_dict = {}\n for trail in range(0, random_search_trials):\n ds = np.random.choice(hyper_param_dropoutstudent)\n b = np.random.choice(hyper_param_batchsize)\n dt = np.random.choice(hyper_param_droputteacher)\n e = np.random.choice(hyper_param_emarate)\n print(f\"hyper params {(ds, b, dt, e)}\")\n acc_list = []\n for train_ind2, val_inds2 in skf2.split(np.zeros(x_train.shape[0]), y_train):\n x_train_hyperopt = x_train[train_ind2]\n y_train_hyperopt = y_train[train_ind2]\n x_val_hyperopt = x_train[val_inds2]\n y_val_hyperopt = y_train[val_inds2]\n model = MT_MODEL(((x_train_hyperopt, y_train_hyperopt), (x_val_hyperopt, y_val_hyperopt)), b, dt, ds, e)\n # print(np.unique(y_train_hyperopt))\n model.train(inner_epochs)\n results = model.evaluate()\n acc_list.append(results['acc'])\n tf.keras.backend.clear_session()\n\n mean_acc = np.array(acc_list).mean()\n if mean_acc > max_acc:\n max_trail = trail\n max_acc = mean_acc\n hyper_params_dict[trail] = (b, dt, ds, e, mean_acc)\n # for later need to save the results in the dict\n\n best_params = hyper_params_dict[max_trail]\n model = MT_MODEL(((x_train, y_train), (x_val, y_val)), best_params[0], best_params[1], best_params[2],\n best_params[3])\n start_timer = time.time()\n model.train(outer_epochs)\n end_timer = time.time()\n eval_res = model.evaluate()\n results_dict['dataset_name'] = dataset_name\n results_dict['k-fold'] = fold_var\n results_dict['train_time'] = end_timer - start_timer\n results_dict.update(eval_res)\n list_of_res.append(results_dict)\n tf.keras.backend.clear_session()\n fold_var += 1\n tmp = pd.DataFrame(list_of_res)\n tmp.to_csv(f'Results/Paper_{dataset_name}.csv')\n return pd.DataFrame(list_of_res)", "title": "" }, { "docid": "3d2967babdb76ab056782e543754a1d9", "score": "0.60132754", "text": "def gridsearch_params(MLA_compare, X_train, y_train, top):\n best_classifiers = MLA_compare['MLA Name'].values[:top]\n best_cls_ind = MLA_compare['MLA Name'].index[:top]\n\n cv_split = model_selection.ShuffleSplit(n_splits=5, test_size=.2, train_size=.8, random_state=39)\n best_params_dict = {'cls': best_classifiers, 'param': [], 'score': []}\n start_total = time()\n\n for ind, clf in zip(best_cls_ind, best_classifiers):\n start = 
time()\n param = grid_param[ind]\n estimator = MLA[clf]\n # if estimator == 'XGBClassifier':\n # break\n # else:\n best_search = model_selection.GridSearchCV(estimator=estimator,\n param_grid=param,\n cv=cv_split,\n scoring='roc_auc',\n n_jobs=-1)\n best_search.fit(X_train, y_train)\n run = time() - start\n best_param = best_search.best_params_\n best_params_dict['param'].append(MLA[clf].set_params(**best_param))\n best_params_dict['score'].append(best_search.best_score_)\n print(f'{clf}\\nBest Parameters: {best_param}\\nRuntime: {run:.2f} seconds.')\n print('-' * 10)\n\n run_total = time() - start_total\n print(f'Total optimization time was {(run_total / 60):.2f} minutes.')\n return best_params_dict", "title": "" }, { "docid": "1f6a4a90673c8009a4ecbdbafe84b793", "score": "0.6008142", "text": "def get_optimal_parameters(self): \n raise NotImplementedError", "title": "" }, { "docid": "9a6af494256f85a39cc347a938ff9d12", "score": "0.60079306", "text": "def crossvalidation(x, y):\n c_array = np.logspace(0, 3, 4)\n gamma_array = np.logspace(-3, 3, 7)\n\n # feature scaling\n if feature_scaling:\n std_scale = preprocessing.StandardScaler().fit(x)\n x = std_scale.transform(x)\n\n for c in c_array:\n for gamma in gamma_array:\n clf = svm.SVC(kernel='linear', C=c, gamma=gamma) #kernel= rbf #kernel= poly #kernel= linear\n scores = cross_validation.cross_val_score(clf, x, y, cv=3)\n print(\"Accuracy: %0.2f (+/- %0.2f) %f %f\" % (scores.mean(), scores.std() * 2, c, gamma))\n pred = cross_validation.cross_val_predict(clf, x, y, cv=3)\n print(\"Classes accuracy: \", classes_accuracy(y, pred))\n\n print(np.array(y))\n print(pred)\n\n #plot last one, not best, CARE!!!\n if plot_confusion_matrix:\n confusion_matrix.prepare_plot(y, pred)\n\n if save_clf:\n clf.fit(x, y)\n joblib.dump(clf, 'classifiers\\\\'+configuration.get('clf_name')+'.pkl')", "title": "" }, { "docid": "1cd929776d885c17281feb5a4f41f917", "score": "0.59924436", "text": "def grid_search(model_class, init_args, param_grid, x_unvec, y,\n num_class, k=3, max_num_sample=10000):\n param_list = _param_combinations(param_grid)\n\n best_param_set, best_loss, worst_loss = _search(\n model_class, init_args, param_list, x_unvec, y, num_class=num_class,\n k=k, max_num_sample=max_num_sample)\n\n print('During parameter tuning, best loss: {:.4f} / Worst loss: {:.4f}'\n .format(best_loss, worst_loss))\n\n return best_param_set", "title": "" }, { "docid": "65683eebce9f83e31a7d8d5c18bedfaf", "score": "0.5968687", "text": "def grid_parameters():\n return {\n 'verbose': 3,\n 'cv': 5,\n 'refit': False,\n 'scoring': 'neg_mean_squared_error',\n 'return_train_score': True,\n }", "title": "" }, { "docid": "f15a4e4f6f78911ab1c143fea3d310fc", "score": "0.5950652", "text": "def getGridSearchParams(self):\n if self.valDataPath:\t\t# have a specified validation set \n docs_gs = self.trainSet.getDocs() + self.valSet.getDocs()\n y_gs = np.concatenate( (self.trainSet.getYvalues(),\n self.valSet.getYvalues()) )\n\n lenTrain = self.trainSet.getNumDocs()\n lenVal = self.valSet.getNumDocs()\n cv = [ (list(range(lenTrain)),\n list(range(lenTrain, lenTrain+lenVal)) ), ]\n\n else:\t\t\t\t# no val set, use k-fold\n docs_gs = self.trainSet.getDocs()\n y_gs = self.trainSet.getYvalues()\n cv = self.numCV\n\n return docs_gs, y_gs, cv", "title": "" }, { "docid": "7144061bfe2d672b66fe892d307da565", "score": "0.5914663", "text": "def executeLogit_collective_train(x,y,theta,Number_of_class):\n \n #lamda = 0.0001\n max_iterations = 200\n #Number_of_class = len(np.unique(y))\n feature_size 
= x.shape[0]\n xtrain = x\n ytrain = y\n encoder = Logit(Number_of_class, feature_size,lamda)\n \n opt_solution = scipy.optimize.minimize(encoder.softmax_cost, encoder.theta,args = (xtrain,ytrain,), method = 'L-BFGS-B',jac = True, options = {'maxiter': max_iterations,'disp': False})\n opt_W = opt_solution.x\n #opt_W1 = opt_W[encoder.limit0 : encoder.limit1].reshape(Number_of_class, feature_size)\n #encoder.W1 = opt_W1\n #opt_b1 = opt_W[encoder.limit1 : encoder.limit2].reshape(Number_of_class, 1)\n \"\"\" Visualize the obtained optimal W1 weights \"\"\"\n #print opt_W.shape\n #print np.linalg.norm(theta-opt_W)\n #return opt_W1,opt_b1\n return opt_W", "title": "" }, { "docid": "a0ee2a9734e97822c1bd7d5eab85762c", "score": "0.5902634", "text": "def _reduce_vars_lgb_cv(\n X,\n y,\n objective,\n n_folds,\n cutoff,\n n_iter,\n silent,\n weight,\n rf,\n fastshap,\n lgbm_params=None,\n n_jobs=0,\n):\n\n params = _set_lgb_parameters(\n X=X,\n y=y,\n objective=objective,\n rf=rf,\n silent=silent,\n n_jobs=n_jobs,\n lgbm_params=lgbm_params,\n )\n\n dtypes_dic = create_dtype_dict(X, dic_keys=\"dtypes\")\n category_cols = dtypes_dic[\"cat\"] + dtypes_dic[\"time\"] + dtypes_dic[\"unk\"]\n cat_idx = [X.columns.get_loc(col) for col in category_cols]\n\n rkf = RepeatedKFold(n_splits=n_folds, n_repeats=n_iter, random_state=2652124)\n iter = 0\n df = pd.DataFrame({\"feature\": X.columns})\n for tridx, validx in tqdm(\n rkf.split(X, y), total=rkf.get_n_splits(), desc=\"Repeated k-fold\"\n ):\n X_train, X_val, y_train, y_val, weight_tr, weight_val = _split_data(\n X, y, tridx, validx, weight\n )\n\n # Create the shadow variables and run the model to obtain importances\n new_x_tr, shadow_names = _create_shadow(X_train)\n new_x_val, _ = _create_shadow(X_val)\n\n bst, shap_matrix, bst.best_iteration = _train_lgb_model(\n new_x_tr,\n y_train,\n weight_tr,\n new_x_val,\n y_val,\n weight_val,\n category_cols=category_cols,\n early_stopping_rounds=20,\n fastshap=fastshap,\n **params,\n )\n\n importance = _compute_importance(\n new_x_tr, shap_matrix, params, objective, fastshap\n )\n df = _merge_importance_df(\n df=df,\n importance=importance,\n iter=iter,\n n_folds=n_folds,\n column_names=new_x_tr.columns,\n silent=silent,\n )\n iter += 1\n\n df[\"Med\"] = df.select_dtypes(include=[np.number]).mean(axis=1)\n # Split them back out\n real_vars = df[~df[\"feature\"].isin(shadow_names)]\n shadow_vars = df[df[\"feature\"].isin(shadow_names)]\n\n # Get median value from the shadows, comparing predictor by predictor. 
Not the same criteria\n # max().max() like in Boruta but max of the median to mitigate.\n # Otherwise too conservative (reject too often)\n cutoff_shadow = shadow_vars.select_dtypes(include=[np.number]).max().mean() / cutoff\n real_vars = real_vars[(real_vars.Med.values >= cutoff_shadow)]\n\n return real_vars[\"feature\"], df, cutoff_shadow", "title": "" }, { "docid": "59604c8455c94229463efda1b4ea552d", "score": "0.59000033", "text": "def set_hyperparams(self, max_depth, n_estimators):\n \n self.model = XGBRegressor(max_depth=max_depth, n_estimators=n_estimators, objective=\"reg:squarederror\")", "title": "" }, { "docid": "b1151eaeb0664f8b5521afca50bfbc2d", "score": "0.58974737", "text": "def _search(model_class, init_args, param_list, x_unvec, y, num_class, k=3,\n max_num_sample=10000):\n\n if len(x_unvec) > max_num_sample:\n x_unvec = x_unvec[:max_num_sample]\n y = y[:max_num_sample]\n\n num_sample = len(x_unvec)\n perm_indices = np.random.permutation(num_sample)\n\n best_param_set = None\n best_loss = float('inf')\n worst_loss = -1\n for param_set in param_list:\n y_test_probas_all = np.empty([0, num_class])\n y_test_all = np.empty([0, ])\n for k_idx in range(k):\n x_train_unvec, y_train, x_val_unvec, y_val, x_test_unvec, y_test = (\n get_k_fold_partition(x_unvec, y, k_idx=k_idx, k=k,\n perm_indices=perm_indices))\n\n full_init_args = dict(init_args, **param_set)\n model = model_class(**full_init_args)\n model.train(x_train_unvec, y_train, x_val_unvec, y_val,\n verbose=False)\n y_test_probas = model.predict_proba(x_test_unvec, verbose=False)\n\n y_test_probas_all = np.append(\n y_test_probas_all, y_test_probas, axis=0)\n y_test_all = np.append(y_test_all, y_test, axis=0)\n\n K.clear_session()\n\n loss = log_loss(y_test_all, y_test_probas_all, labels=range(num_class))\n if loss < best_loss:\n best_param_set = param_set\n best_loss = loss\n\n if loss > worst_loss:\n worst_loss = loss\n\n return best_param_set, best_loss, worst_loss", "title": "" }, { "docid": "91402ff4f4fa32828c52faf78aef98a3", "score": "0.5884534", "text": "def parameter_search(X, y, algorithm, scaler, pca, selector, metric):\n model = define_model(algorithm)\n X = apply_transforms(X, scaler, pca, selector)\n\n param_grid = None\n if algorithm == 'logistic':\n param_grid = [{'penalty': ['l1', 'l2'], 'C': [0.1, 0.3, 1.0, 3.0]}]\n elif algorithm == 'svm':\n param_grid = [{'C': [1, 10, 100, 1000], 'kernel': ['linear']},\n {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}]\n elif algorithm == 'sgd':\n param_grid = [{'loss': ['hinge', 'log', 'modified_huber'], 'penalty': ['l1', 'l2'],\n 'alpha': [0.0001, 0.001, 0.01], 'iter': [100, 1000, 10000]}]\n elif algorithm == 'forest':\n param_grid = [{'n_estimators': [10, 30, 100, 300], 'criterion': ['gini', 'entropy'],\n 'max_features': ['auto', 'log2', None], 'max_depth': [3, 5, 7, None],\n 'min_samples_split': [2, 10, 30, 100], 'min_samples_leaf': [1, 3, 10, 30, 100]}]\n elif algorithm == 'boost':\n param_grid = [{'learning_rate': [0.1, 0.3, 1.0], 'subsample': [1.0, 0.9, 0.7, 0.5],\n 'n_estimators': [100, 300, 1000], 'max_features': ['auto', 'log2', None],\n 'max_depth': [3, 5, 7, None], 'min_samples_split': [2, 10, 30, 100],\n 'min_samples_leaf': [1, 3, 10, 30, 100]}]\n\n t0 = time.time()\n grid_estimator = GridSearchCV(model, param_grid, scoring=metric, cv=3, n_jobs=-1)\n grid_estimator.fit(X, y)\n t1 = time.time()\n print('Grid search completed in {0:3f} s.'.format(t1 - t0))\n\n return grid_estimator.best_estimator_, grid_estimator.best_params_, 
grid_estimator.best_score_", "title": "" }, { "docid": "fbfee94dfee5289b015d0a481815ca13", "score": "0.5879154", "text": "def big_loop(models_n_params, x, y, isClassification,\n test_size = 0.2, n_splits = 5, random_state=None, doesUpsample=True,\n scoring=None,\n verbose=False, n_jobs = cpu_count()-1):\n\n def cv_():\n return cv_clf(x, y, test_size, n_splits, random_state, doesUpsample) \\\n if isClassification \\\n else cv_reg(x, test_size, n_splits, random_state)\n\n res = []\n num_features = x.shape[1]\n scoring = scoring or ('accuracy' if isClassification else 'neg_mean_squared_error')\n print('Scoring criteria:', scoring)\n\n for i, (clf_Klass, parameters) in enumerate(models_n_params):\n try:\n print('-'*15, 'model %d/%d' % (i+1, len(models_n_params)), '-'*15)\n print(clf_Klass.__name__)\n\n if clf_Klass == KMeans:\n parameters['n_clusters'] = [len(np.unique(y))]\n elif clf_Klass in TREE_N_ENSEMBLE_MODELS:\n parameters['max_features'] = [v for v in parameters['max_features']\n if v is None or type(v)==str or v<=num_features]\n\n clf_search = GridSearchCV(clf_Klass(), parameters, scoring, cv=cv_(), n_jobs=n_jobs)\n clf_search.fit(x, y)\n\n timespent = timeit(clf_Klass, clf_search.best_params_, x, y)\n print('best score:', clf_search.best_score_, 'time/clf: %0.3f seconds' % timespent)\n print('best params:')\n pprint(clf_search.best_params_)\n\n if verbose:\n print('validation scores:', clf_search.cv_results_['mean_test_score'])\n print('training scores:', clf_search.cv_results_['mean_train_score'])\n\n res.append((clf_search.best_estimator_, clf_search.best_score_, timespent))\n\n except Exception as e:\n print('ERROR OCCURRED')\n if verbose: print(e)\n res.append((clf_Klass(), -np.inf, np.inf))\n\n\n print('='*60)\n print(tabulate([[m.__class__.__name__, '%.3f'%s, '%.3f'%t] for m, s, t in res], headers=['Model', scoring, 'Time/clf (s)']))\n winner_ind = np.argmax([v[1] for v in res])\n winner = res[winner_ind][0]\n print('='*60)\n print('The winner is: %s with score %0.3f.' % (winner.__class__.__name__, res[winner_ind][1]))\n\n return winner, res", "title": "" }, { "docid": "214d33b74a3217f4b8772db0561f9913", "score": "0.5865949", "text": "def get_best_params():\n\t\tif not self.has_been_run:\n\t\t\tprint(\"Tuning needs to be run first. Results are invalid.\")\n\t\treturn self.best_parameters", "title": "" }, { "docid": "035e836fc9d831d6595c109ed4666ba0", "score": "0.5865833", "text": "def abGridSearchCV(defaultParams, paramGrid, features, labels, validationSplit, winnerCriteria, log=True, topn=5, logToBeReturned=[]):\n \n assert winnerCriteria in [\"meanLosses\", \"meanTrainingLoss\", \"meanValidationLoss\"], \"This function currently doesn't support the winner criteria provided. 
Please make sure it's 'meanLosses' or 'meanTrainingLoss', or 'meanValidationLoss'\"\n \n listParams=list(paramGrid.values())\n winners=[]\n allCombinations=list(product(*listParams))\n trainData_, validationData_, trainLabels_, validationLabels_=train_test_split(features, labels, test_size=validationSplit) \n for index, val in enumerate(allCombinations):\n print(\" {}/{}\".format(index, len(allCombinations)), end=\"\\r\")\n param={}\n for index_ in range(len(paramGrid)):\n param[list(paramGrid.keys())[index_]]=val[index_]\n model=perceptron(**defaultParams)\n model.set_params(**param)\n model.fit(trainData_, trainLabels_, validationData_, validationLabels_, earlyStoppingLog=False, comingFromGridSearch=True)\n meanLoss=np.mean(model.losses)\n meanValidationLoss=np.mean(model.validationLosses)\n if model.newEpochNotification:\n param['epochs']=model.bestEpoch\n tempLog={\n 'params': param, \n 'meanTrainingLoss':meanLoss, \n 'meanValidationLoss':meanValidationLoss,\n 'meanLosses':(meanLoss+meanValidationLoss)/2\n }\n if log:\n logToBeReturned.append(tempLog)\n \n if len(winners)<topn:\n winners.append(tempLog)\n else:\n winners = sorted(winners, key=lambda k: k[winnerCriteria])\n if tempLog[winnerCriteria]<winners[-1][winnerCriteria]:\n winners[-1]=tempLog\n \n if log:\n return logToBeReturned, winners\n else:\n return winners", "title": "" }, { "docid": "ff89cb51db1742565074082a334e97f7", "score": "0.5863152", "text": "def tune_parameters(self, X: pd.DataFrame, y: pd.Series) -> dict:\n\n X, _ = self._create_pipeline(X, y, \"training\",\n \"constant\", -9999)\n\n parameters = {# \"criterion\": ['mse', 'friedman_mse'],\n # \"max_features\": ['auto', 'sqrt'],\n # \"max_depth\": [1, 3, 5, 9, 15, 25, 100],\n \"min_samples_leaf\": [1, 3, 5, 10, 15, 25, 50, 100, 1000]\n # \"ccp_alpha\": [0.0, 0.1, 0.3, 0.5]\n }\n\n self.clf = GridSearchCV(self.clf, parameters, scoring=('neg_mean_absolute_error',\n 'neg_root_mean_squared_error'),\n refit=\"neg_root_mean_squared_error\", n_jobs=-1, verbose=3)\n\n self.clf.fit(X, y)\n\n cv_results = self.clf.cv_results_\n results_df = pd.DataFrame({\"params\": cv_results['params'],\n \"mean_fit_time\": cv_results['mean_fit_time'],\n \"mean_score_time\": cv_results['mean_score_time'],\n \"mae_rank\": cv_results['rank_test_neg_mean_absolute_error'],\n \"mae_results\": cv_results['mean_test_neg_mean_absolute_error'],\n \"rmse_rank\": cv_results['rank_test_neg_root_mean_squared_error'],\n \"rmse_results\": cv_results['mean_test_neg_root_mean_squared_error']\n })\n\n return self.clf, results_df", "title": "" }, { "docid": "9e6a7fccc24a3f95728d2723694ed166", "score": "0.5854674", "text": "def cross_validation(Ps, data, algo, kfolds=5, **kwargs):\n scores_tr = np.zeros((kfolds, len(Ps)))\n scores_te = np.zeros((kfolds, len(Ps)))\n X_tr, y_tr, X_te, y_te, _ = data\n X_train_ = pd.concat((X_tr, X_te)).reset_index(drop=True).sample(frac=1)\n y_train_ = pd.concat((y_tr, y_te)).reset_index(drop=True).iloc[X_train_.index]\n X_train_, y_train_ = X_train_.reset_index(drop=True), y_train_.reset_index(drop=True)\n n = X_train_.shape[0]\n p = int(n // kfolds)\n for k in tqdm(range(kfolds)):\n print('Fold {}'.format(k+1))\n q = p * (k + 1) + n % kfolds if k == kfolds - 1 else p * (k + 1)\n idx_val = np.arange(p * k, q)\n idx_train = np.setdiff1d(np.arange(n), idx_val)\n X_train, y_train = X_train_.iloc[idx_train, :], y_train_.iloc[idx_train, :]\n X_val, y_val = X_train_.iloc[idx_val, :], y_train_.iloc[idx_val, :]\n s_tr, s_te = [], []\n for P in Ps:\n if algo == 'CSVM':\n alg = 
C_SVM(C=P, print_callbacks=False, **kwargs)\n elif algo == 'KLR':\n alg = KLR(lbda=P, **kwargs)\n elif algo == 'KRR':\n alg = KRR(lbda=P, **kwargs)\n else:\n NotImplementedError('Please choose between \"CSVM\", \"KRR\" or \"KLR\"')\n alg.fit(X_train, y_train)\n pred_tr = alg.predict(X_train)\n score_tr = alg.score(pred_tr, y_train)\n pred_te = alg.predict(X_val)\n score_te = alg.score(pred_te, y_val)\n s_tr.append(score_tr)\n s_te.append(score_te)\n print('Constant={}, train_acc={:0.4f}, val_acc={:0.4f}'.format(P, score_tr, score_te))\n scores_tr[k], scores_te[k] = s_tr, s_te\n mean_scores_tr, mean_scores_te = np.mean(scores_tr, axis=0), np.mean(scores_te, axis=0)\n p_opt = Ps[np.argmax(mean_scores_te)]\n print('Best constant={}, val_acc={:0.4f}'.format(p_opt, np.max(mean_scores_te)))\n return p_opt, scores_tr, scores_te, mean_scores_tr, mean_scores_te", "title": "" }, { "docid": "c9bc391cee3b3efdce36f54adaecbd26", "score": "0.58538795", "text": "def tune_random_forest_parameter(train_x, train_y):\n param_grid = {\"criterion\": [\"gini\", \"entropy\"], \"min_samples_leaf\": [1, 5, 10, 25, 50, 70],\n \"min_samples_split\": [2, 4, 10, 12, 16, 18, 25, 35], \"n_estimators\": [100, 400, 700, 1000, 1500]}\n\n rf = RandomForestClassifier(n_estimators=100, max_features='auto', oob_score=True, random_state=0, n_jobs=-1)\n clf = GridSearchCV(estimator=rf, param_grid=param_grid, n_jobs=-1)\n clf.fit(train_x, train_y)\n print(clf.best_params_)", "title": "" }, { "docid": "12808a1ca57f1e43f83d5869f0e8145e", "score": "0.585311", "text": "def select_params(X, y, kf, metrics=[\"accuracy\"]) :\n\n # part 4b: for each metric, select optimal hyperparameters using cross-validation\n\n # create grid of hyperparameters\n # hint: use a small 2x2 grid of hyperparameters for debugging\n depth_range = range(5,21)\n min_samples_range = range(1,15)\n scores = np.empty((len(metrics), len(depth_range), len(min_samples_range)))\n\n # compute CV scores using cv_performance(...)\n for depth_ind, max_depth in enumerate(depth_range):\n for samples_ind, min_samples in enumerate(min_samples_range):\n clf = tree.DecisionTreeClassifier(criterion=\"entropy\", max_depth=max_depth, min_samples_leaf=min_samples)\n # compute CV scores using cv_performance(...)\n score = cv_performance(clf, X, y, kf, metrics)\n scores[:,depth_ind,samples_ind] = score\n\n # get best hyperparameters\n best_params = []\n for met_ind, metric in enumerate(scores):\n depth_ind, samples_ind = np.unravel_index(metric.argmax(), metric.shape)\n params = (depth_range[depth_ind], min_samples_range[samples_ind])\n\n best_params.append(params)\n\n return best_params", "title": "" }, { "docid": "5c826cd9820a3be5a459af384022a1df", "score": "0.58494264", "text": "def cross_validation_demo():", "title": "" }, { "docid": "f85994d0f802cafb3a5b71fab3c373db", "score": "0.58488446", "text": "def fit_xrt(X, y):\n\n # search for optimal parameters\n gridsearch = GridSearchCV(\n estimator=ExtraTreesClassifier(),\n param_grid={\n 'max_depth': range(3, 7),\n 'n_estimators': (10, 50, 100, 1000),\n },\n cv=5, verbose=0, n_jobs=-1)\n\n # determine best parameters\n gridsearch_result = gridsearch.fit(X, y)\n best_params = gridsearch_result.best_params_\n\n # declare and fit best model\n xrt = ExtraTreesClassifier(n_estimators=best_params[\"n_estimators\"], max_depth=best_params[\"max_depth\"],\n random_state=False, verbose=False)\n xrt.fit(X, y)\n\n return [xrt, best_params]", "title": "" }, { "docid": "fd89362033d3ea4d6fb20e39f4a2cb99", "score": "0.58475596", "text": "def 
learnStackingLayer(X_train_meta,\n y_train,\n X_test_meta,\n n_folds):\n\n model_param_space = range(1,12)\n meta_penalty_eval = np.zeros((n_folds, len(model_param_space)))\n\n print \"CV to select best lambda for stacked layer...\"\n k_fold = KFold(n_splits=n_folds, shuffle=True, random_state=0703)\n fold_cnt = 0\n total_cnt = 0\n for train_index, test_index in k_fold.split(X_train_meta):\n\n X_train_meta_cv = X_train_meta[train_index]\n y_train_meta_cv = y_train[train_index]\n\n X_test_meta_cv = X_train_meta[test_index]\n y_test_meta_cv = y_train[test_index]\n\n lambda_cnt = 0\n for lam in model_param_space:\n start_model = datetime.now()\n\n #model_meta_cv = LogisticRegression(penalty='l2',\n # C=lam,\n # fit_intercept=True,\n # multi_class='multinomial',\n # solver='sag'\n # )\n #model_meta_cv.fit(X=X_train_meta_cv, y=y_train_meta_cv)\n #meta_probs_cv = model_meta_cv.predict_proba(X=X_test_meta_cv)\n\n #beta = model_meta_cv.coef_\n #np.savetxt(fname='output/meta-logit-beta.txt',X=beta)\n\n xgb_param_map = {'num_class': 12,\n 'max_depth': 4,\n 'eta': .1,\n 'silent': 1,\n 'objective': 'multi:softprob',\n 'booster': 'gbtree',\n 'gamma': 2.0,\n 'min_child_weight': lam,\n 'subsample': .5\n }\n num_round = 4\n\n dtrain = xgb.DMatrix(X_train_meta_cv, label=y_train_meta_cv)\n dtest = xgb.DMatrix(X_test_meta_cv)\n\n mod_meta_cv = xgb.train(xgb_param_map, dtrain, num_round)\n meta_probs_cv = mod_meta_cv.predict(dtest)\n\n model_time = datetime.now() - start_model\n\n eval_ndcg = ndcg_score(ground_truth=y_test_meta_cv, predictions=meta_probs_cv)\n meta_penalty_eval[fold_cnt, lambda_cnt] = eval_ndcg\n\n lambda_cnt += 1\n total_cnt += 1\n\n # Print progress to screen\n progress = np.round(total_cnt / float(n_folds * len(model_param_space)),4)\n print \"progress: \" + str(progress) + \" time: \" + str(model_time)\n\n fold_cnt += 1\n\n mean_eval_ndcg_meta = np.mean(meta_penalty_eval, axis=1)\n best_meta_lambda_idx = np.argmax(mean_eval_ndcg_meta)\n best_meta_lambda = model_param_space[best_meta_lambda_idx]\n\n print \"Best lambda: \" + str(best_meta_lambda)\n\n #model_meta = LogisticRegression(penalty='l2',\n # C=best_meta_lambda,\n # fit_intercept=True,\n # multi_class='multinomial',\n # solver='sag'\n # )\n #model_meta.fit(X=X_train_meta, y=y_train)\n #meta_probs = model_meta.predict_proba(X=X_test_meta)\n\n xgb_param_map = {'num_class': 12,\n 'max_depth': 4,\n 'eta': .1,\n 'silent': 1,\n 'objective': 'multi:softprob',\n 'booster': 'gbtree',\n 'gamma': 2.0,\n 'min_child_weight': best_meta_lambda,\n 'subsample': .5\n }\n num_round = 4\n\n dtrain = xgb.DMatrix(X_train_meta, label=y_train)\n dtest = xgb.DMatrix(X_test_meta)\n\n model_meta = xgb.train(xgb_param_map, dtrain, num_round)\n meta_probs = model_meta.predict(dtest)\n\n y_hat_meta = classify(probs=meta_probs)\n\n return y_hat_meta", "title": "" }, { "docid": "1b660bf8d3012e6bd119db62366ce2ba", "score": "0.584658", "text": "def train(\n self,\n x_train,\n y_train,\n x_cv,\n y_cv,\n gammas=(0.00001,),\n epochs=1,\n batchsize=30000,\n method=\"BFGS\",\n gtol=1 * 1e-05,\n maxiter=10,\n renormalize=False,\n ):\n # Cast input parameters to desired precision\n x_train = np.array(x_train, dtype=dtype)\n y_train = np.array(y_train, dtype=dtype)\n x_cv = np.array(x_cv, dtype=dtype)\n y_cv = np.array(y_cv, dtype=dtype)\n gammas = np.array(gammas, dtype=dtype)\n\n # Cast to numpy array\n gammas = np.array(gammas)\n\n # If predict_log_value is True, the cost function will be of the form:\n # (ln(m)-ln(y))^2 instead of (m-y)^2.\n # When m approx y, this can 
be Taylor expanded into ((m-y)/y)^2.\n # Hence the relative error is minimized instead of the absolute error.\n if self.data[\"predict_log_value\"]:\n # Fit to logarithm of the output (instead of fitting to the output).\n y_train = np.log(y_train)\n y_cv = np.log(y_cv)\n\n r = get_number_of_NN_parameters(self.data[\"layers\"])\n\n print(\"{:d} parameters, {:d} training examples\".format(r, len(y_train)))\n\n # Normalization and scaling parameters\n mu_x, std_x = get_norm_and_scale(x_train)\n mu_y, std_y = get_norm_and_scale(y_train)\n\n # Set default values\n if \"p\" not in self.data:\n # Randomly initialize parameters\n self.data[\"p\"] = np.random.randn(r).astype(dtype)\n if \"mu_x\" not in self.data or renormalize:\n self.data[\"mu_x\"] = mu_x\n if \"std_x\" not in self.data or renormalize:\n self.data[\"std_x\"] = std_x\n if \"mu_y\" not in self.data or renormalize:\n self.data[\"mu_y\"] = mu_y\n if \"std_y\" not in self.data or renormalize:\n self.data[\"std_y\"] = std_y\n # Normalized and scaled data\n x_train = (x_train - self.data[\"mu_x\"]) / self.data[\"std_x\"]\n x_cv = (x_cv - self.data[\"mu_x\"]) / self.data[\"std_x\"]\n y_train = (y_train - self.data[\"mu_y\"]) / self.data[\"std_y\"]\n y_cv = (y_cv - self.data[\"mu_y\"]) / self.data[\"std_y\"]\n\n history = []\n cost_cv_best = np.inf\n for i, gamma in enumerate(gammas):\n print(\"Regularization parameter value:\", gamma)\n p, hist = get_minimization_solution(\n self.data[\"p\"],\n x_train,\n y_train,\n self.data[\"layers\"],\n self.data[\"activation_type\"],\n gamma=gamma,\n epochs=epochs,\n batchsize=batchsize,\n method=method,\n gtol=gtol,\n maxiter=maxiter,\n )\n cost_cv = cost_NN(p, x_cv, y_cv, self.data[\"layers\"], self.data[\"activation_type\"], output=\"value\")\n hist[\"cost cv without regularization\"] = cost_cv\n history.append(hist)\n if cost_cv <= cost_cv_best:\n i_best = i\n cost_cv_best = cost_cv\n p_best = p\n self.data[\"p\"] = p_best\n print(\"Best regularization value: {:.6f}\".format(gammas[i_best]))\n\n # Training and cross-validation errors,\n # without any regularization.\n self.data[\"cost_train\"] = cost_NN(\n self.data[\"p\"], x_train, y_train, self.data[\"layers\"], self.data[\"activation_type\"], output=\"value\"\n )\n self.data[\"cost_cv\"] = cost_NN(\n self.data[\"p\"], x_cv, y_cv, self.data[\"layers\"], self.data[\"activation_type\"], output=\"value\"\n )\n return history", "title": "" }, { "docid": "e852f37820e3c6bcbea9600750b340d3", "score": "0.5846127", "text": "def optimize_model(self, model, index=None):\n # print('Feature importances:', list(self.gbm.feature_importance()))\n print(self.X_train.iloc[0, ], self.X_train.columns, len(\n self.X_train.columns), self.y_train[0])\n dtrain = lgb.Dataset(self.X_train,\n label=self.y_train,\n feature_name=list(self.X_train.columns),\n categorical_feature=[])\n\n eval_hist = lgb.cv(self.params,\n dtrain,\n nfold=8,\n num_boost_round=self.MAX_ROUNDS,\n early_stopping_rounds=self.EARLY_STOP,\n verbose_eval=50,\n seed=self.seed,\n shuffle=True,\n feval=self.eval_auc,\n metrics=\"None\"\n )\n result = [self.version]\n result.append('best n_estimators:' + str(len(eval_hist['auc-mean'])))\n result.append('best cv score:' + str(eval_hist['auc-mean'][-1]) + '\\n')\n with open(model_path + 'result', 'a') as f:\n f.write('\\n'.join([str(index) for index in result]))\n print('best n_estimators:', len(eval_hist['auc-mean']))\n print('best cv score:', eval_hist['auc-mean'][-1])\n self.OPT_ROUNDS = len(eval_hist['auc-mean'])\n if (eval_hist['auc-mean'][-1] > 
self.basic_auc):\n self.basic_auc = eval_hist['auc-mean'][-1]\n if not index is None and index != -1:\n self.good_columns.append(self.wait_columns[index])\n with open(model_path + 'columns.csv', 'w') as f:\n f.write(','.join([str(index) for index in self.good_columns]))", "title": "" }, { "docid": "e1623b6146020c45015782d21ed9cef5", "score": "0.5840862", "text": "def multi_bw(init, y, X, n, k, family, tol, max_iter, rss_score, gwr_func,\n bw_func, sel_func, multi_bw_min, multi_bw_max, bws_same_times,\n verbose=False):\n if init is None:\n bw = sel_func(bw_func(y, X))\n optim_model = gwr_func(y, X, bw)\n else:\n bw = init\n optim_model = gwr_func(y, X, init)\n bw_gwr = bw\n err = optim_model.resid_response.reshape((-1, 1))\n param = optim_model.params\n\n XB = np.multiply(param, X)\n if rss_score:\n rss = np.sum((err)**2)\n iters = 0\n scores = []\n delta = 1e6\n BWs = []\n bw_stable_counter = 0\n bws = np.empty(k)\n gwr_sel_hist = []\n\n try:\n from tqdm.auto import tqdm #if they have it, let users have a progress bar\n except ImportError:\n\n def tqdm(x, desc=''): #otherwise, just passthrough the range\n return x\n\n for iters in tqdm(range(1, max_iter + 1), desc='Backfitting'):\n new_XB = np.zeros_like(X)\n params = np.zeros_like(X)\n\n for j in range(k):\n temp_y = XB[:, j].reshape((-1, 1))\n temp_y = temp_y + err\n temp_X = X[:, j].reshape((-1, 1))\n bw_class = bw_func(temp_y, temp_X)\n\n if bw_stable_counter >= bws_same_times:\n #If in backfitting, all bws not changing in bws_same_times (default 5) iterations\n bw = bws[j]\n else:\n bw = sel_func(bw_class, multi_bw_min[j], multi_bw_max[j])\n gwr_sel_hist.append(deepcopy(bw_class.sel_hist))\n\n optim_model = gwr_func(temp_y, temp_X, bw)\n err = optim_model.resid_response.reshape((-1, 1))\n param = optim_model.params.reshape((-1, ))\n new_XB[:, j] = optim_model.predy.reshape(-1)\n params[:, j] = param\n bws[j] = bw\n \n #If bws remain the same as from previous iteration\n if (iters > 1) and np.all(BWs[-1] == bws):\n bw_stable_counter += 1\n else:\n bw_stable_counter = 0\n \n num = np.sum((new_XB - XB)**2) / n\n den = np.sum(np.sum(new_XB, axis=1)**2)\n score = (num / den)**0.5\n XB = new_XB\n\n if rss_score:\n predy = np.sum(np.multiply(params, X), axis=1).reshape((-1, 1))\n new_rss = np.sum((y - predy)**2)\n score = np.abs((new_rss - rss) / new_rss)\n rss = new_rss\n scores.append(deepcopy(score))\n delta = score\n BWs.append(deepcopy(bws))\n\n if verbose:\n print(\"Current iteration:\", iters, \",SOC:\", np.round(score, 7))\n print(\"Bandwidths:\", ', '.join([str(bw) for bw in bws]))\n\n if delta < tol:\n break\n\n opt_bws = BWs[-1]\n return (opt_bws, np.array(BWs), np.array(scores), params, err, gwr_sel_hist, bw_gwr)", "title": "" }, { "docid": "a91d529a37d67e5ea645ae2487f4c818", "score": "0.58404744", "text": "def getBestClassifier(self, X, y):\n hyperparams_data = {\n 'KFold': {},\n 'Avg': []\n }\n\n fold = 1\n\n for train_idx, test_idx in self.outer_cv.split(X, y):\n X_train, y_train, X_test, y_test = X[train_idx], y[train_idx], X[test_idx], y[test_idx]\n\n for clf_key, clf_val in self.clf_names.iteritems():\n clf = None\n for score in self.scores:\n if self.search_method.lower() == 'grid':\n clf = GridSearchCV(\n clf_val[0](probability=True) if clf_key == 'SVM' else clf_val[0](), clf_val[1],\n cv=self.inner_cv, scoring=score, verbose=1, n_jobs=self.n_jobs)\n # elif self.search_method.lower() == 'hyperband' and clf_key in ['XGBoost', 'Extra-Trees', 'Random Forest']:\n # HyperbandSearchCV(\n # clf_val[0](probability=True) if 
clf_key == 'SVM' else clf_val[0](), clf_val[2].copy().pop('n_estimators'),\n # resource_param='n_estimators',\n # min_iter=500 if clf_key == 'XGBoost' else 200,\n # max_iter=3000 if clf_key == 'XGBoost' else 1000,\n # cv=self.inner_cv, random_state=seed_no, scoring=score\n # )\n else: # randomized is used as default\n clf = RandomizedSearchCV(\n clf_val[0](probability=True) if clf_key == 'SVM' else clf_val[0](), clf_val[2],\n cv=self.inner_cv, scoring=score, verbose=1, n_jobs=self.n_jobs, n_iter=self.n_iter)\n clf.fit(X_train, y_train)\n\n y_pred = clf.predict(X_test)\n\n hyperparams_found = dict()\n hyperparams_found['accuracy'] = accuracy_score(y_test, y_pred)\n hyperparams_found['f1_weighted'] = f1_score(y_test, y_pred, average='weighted')\n hyperparams_found['f1_macro'] = f1_score(y_test, y_pred, average='macro')\n hyperparams_found['fold'] = fold\n hyperparams_found['Best Hyperparameters'] = clf.best_params_\n\n if clf_key in hyperparams_data['KFold']:\n hyperparams_data['KFold'][clf_key].append(hyperparams_found)\n else:\n hyperparams_data['KFold'][clf_key] = [hyperparams_found]\n\n fold += 1\n\n for clf_key in self.clf_names.keys():\n clf_metrics = dict()\n clf_metrics['accuracy'] = sum(float(x['accuracy']) for x in hyperparams_data['KFold'][clf_key]) / self.kfold\n clf_metrics['f1_weighted'] = sum(\n float(x['f1_weighted']) for x in hyperparams_data['KFold'][clf_key]) / self.kfold\n clf_metrics['f1_macro'] = sum(float(x['f1_macro']) for x in hyperparams_data['KFold'][clf_key]) / self.kfold\n clf_metrics['classifier'] = clf_key\n\n hyperparams_data['Avg'].append(clf_metrics)\n\n _, best_clf = max(enumerate(hyperparams_data['Avg']), key=(lambda x: x[1]['accuracy']))\n\n return best_clf", "title": "" }, { "docid": "1a037bff5223acd77a4d489cabfeab84", "score": "0.58303463", "text": "def cross_validation_iter(data, labels, params, metrics, n_rounds, nfold,\n stratified, shuffle, early_stopping_rounds, seed,\n show_stdv, verbose_eval=0):\n dftrainXGB = xgb.DMatrix(data=data, label=labels, feature_names=list(data), silent=1, nthread=-1)\n cv_results = xgb.cv(\n params,\n dftrainXGB,\n num_boost_round=n_rounds,\n nfold=nfold,\n metrics=metrics,\n stratified=stratified,\n shuffle=shuffle,\n early_stopping_rounds=early_stopping_rounds,\n seed=seed,\n show_stdv=show_stdv,\n verbose_eval=verbose_eval\n )\n return cv_results", "title": "" }, { "docid": "82a26d9406c21deb550c573d826f4fef", "score": "0.5824066", "text": "def get_hyperparameter_search_space(seed):\n cs = ConfigSpace.ConfigurationSpace('mlr.xgboost', seed)\n\n imputer = ConfigSpace.CategoricalHyperparameter(\n name='num.impute.selected.cpo', choices=['impute.hist', 'impute.median', 'impute.mean'], default_value='impute.hist')\n booster = ConfigSpace.CategoricalHyperparameter(\n name='booster', choices=['gbtree', 'dart', 'gblinear'], default_value='gbtree')\n nrounds = ConfigSpace.UniformIntegerHyperparameter(\n name='nrounds', lower=1, upper=5973, default_value=100)\n eta = ConfigSpace.UniformFloatHyperparameter(\n name='eta', lower=2**-12, upper=2**0, log=True, default_value=0.1)\n lmbda = ConfigSpace.UniformFloatHyperparameter(\n name='lambda', lower=2**-12, upper=2**12, log=True, default_value=0.1)\n gamma = ConfigSpace.UniformFloatHyperparameter(\n name='gamma', lower=0.0, upper=32768, log=True, default_value=0.0)\n alpha = ConfigSpace.UniformFloatHyperparameter(\n name='alpha', lower=2**-12, upper=2**12, log=True, default_value=0.1)\n subsample = ConfigSpace.UniformFloatHyperparameter(\n name='subsample', lower=0.0003, upper=1, 
default_value=0.8)\n max_depth = ConfigSpace.UniformIntegerHyperparameter(\n name='max_depth', lower=1, upper=36, default_value=3)\n min_child_weight = ConfigSpace.UniformIntegerHyperparameter(\n name='min_child_weight', lower=0, upper=2^7, default_value=1)\n colsample_bytree = ConfigSpace.UniformFloatHyperparameter(\n name='colsample_bytree', lower=0.0001, upper=1, default_value=0.8)\n colsample_bylevel = ConfigSpace.UniformFloatHyperparameter(\n name='colsample_bylevel', lower=0.0001, upper=1, default_value=0.8)\n rate_drop = ConfigSpace.UniformFloatHyperparameter(\n name='rate_drop', lower=0.0003, upper=1, default_value=0.8)\n skip_drop = ConfigSpace.UniformFloatHyperparameter(\n name='skip_drop', lower=0.0003, upper=1, default_value=0.8)\n\n cs.add_hyperparameters([\n imputer,\n booster,\n nrounds,\n eta,\n lmbda,\n gamma,\n alpha,\n subsample,\n max_depth,\n min_child_weight,\n colsample_bytree,\n colsample_bylevel,\n rate_drop,\n skip_drop\n ])\n\n pars_depth = ConfigSpace.InCondition(max_depth, booster , ['gbtree', 'dart'])\n pars_cw = ConfigSpace.InCondition(min_child_weight, booster , ['gbtree', 'dart'])\n pars_eta = ConfigSpace.InCondition(eta, booster, ['gbtree', 'dart'])\n pars_cs_bt = ConfigSpace.InCondition(colsample_bylevel, booster, ['gbtree', 'dart'])\n pars_cs_bl = ConfigSpace.InCondition(colsample_bytree, booster, ['gbtree', 'dart'])\n pars_cs_ga = ConfigSpace.InCondition(gamma, booster, ['gbtree', 'dart'])\n skip = ConfigSpace.EqualsCondition(skip_drop, booster, 'dart')\n rate = ConfigSpace.EqualsCondition(rate_drop, booster, 'dart')\n cs.add_condition(pars_depth)\n cs.add_condition(pars_cw)\n cs.add_condition(pars_eta)\n cs.add_condition(pars_cs_bt)\n cs.add_condition(pars_cs_bl)\n cs.add_condition(pars_cs_ga)\n cs.add_condition(skip)\n cs.add_condition(rate)\n return cs", "title": "" }, { "docid": "61729c472d5b3f1d8ce2f35fbbf18bfe", "score": "0.5817382", "text": "def optimize_hyperparameters(self,\n train_x,\n train_y,\n test_x,\n test_y,\n param_dist,\n early_stop_monitor='val_loss',\n early_stop_patience=5,\n n_iter_search=75,\n n_epochs=5000,\n batchsize=512,\n val_split=0.2,\n num_hid_layers=1,\n out_fname=None):\n\n # Quit training if validation loss increases for N epochs\n early_stop = EarlyStopping(monitor=early_stop_monitor, patience=early_stop_patience)\n\n # ------------------------------------------\n # Build models for optimize hyper-parameters\n # Start with initial parameters\n # ------------------------------------------\n init_param_dict = {}\n init_param_dict['neurons'] = 128\n init_param_dict['dropout'] = 0\n init_param_dict['activation'] = 'relu'\n init_param_dict['optimizer'] = 'adam'\n init_param_dict['input_dim'] = 4098\n init_param_dict['task_type'] = 'classification'\n\n # --------------\n # Classification\n # --------------\n if self.pred_type == 'classification':\n\n score = make_scorer(geometric_mean_score)\n # Optimizer network with convolutional features\n conv_model = KerasClassifier(\n build_fn=self.build_model,\n epochs=n_epochs,\n batch_size=batchsize,\n validation_data=(test_x, test_y),\n # validation_split=val_split,\n class_weight=self._get_class_weights(train_y, neural_net=True),\n shuffle=True\n )\n\n # ----------\n # Regression\n # ----------\n if self.pred_type == 'regression':\n\n score = 'r2'\n conv_model = KerasRegressor(\n build_fn=self.build_model,\n epochs=n_epochs,\n batch_size=batchsize,\n validation_data=(test_x, test_y),\n # validation_split=val_split,\n shuffle=True\n )\n\n # -------------------\n # Conduct grid 
search\n # -------------------\n random_search_conv = RandomizedSearchCV(\n conv_model, param_distributions=param_dist, n_iter=n_iter_search, scoring=score,\n fit_params={'callbacks': [early_stop]}\n )\n\n random_search_conv = random_search_conv.fit(train_x, train_y)\n\n # ------------------------------\n # save optimized hyperparameters\n # ------------------------------\n if not (out_fname is None):\n np.save(out_fname, random_search_conv.best_params_)\n\n return random_search_conv.best_params_", "title": "" }, { "docid": "57c1ca85b1690d1ed0596ab701163f18", "score": "0.5813697", "text": "def brt_train_plot_func(X,y,feats,train_model = False):\n\n X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.15,random_state=1)\n \n #y_train = y_train.as_matrix().ravel()\n #y_test = y_test.as_matrix().ravel() using only when coming from pandas dataframe\n \n param_grid_brt = {'learning_rate': np.logspace(-4,0,50),'max_depth': range(2,8),'min_samples_leaf': range(3,10)}\n #from sklearn.metrics import mean_squared_error, make_scorer\n #param_grid_brt = {'learning_rate': np.logspace(-4,0,3),'max_depth': [2,6],'min_samples_leaf': [3,10]}\n clf = GradientBoostingRegressor(n_estimators=500) \n #cross-validation grid to search the best parameters\n \n \n #str_in = raw_input(\"(T)raining or (U)sed selected (Default: U): \")\n \n if train_model:\n print \"Training model\"\n #mse_scorer = make_scorer(mean_squared_error,greater_is_better = False)\n brt_complete = GridSearchCV(clf, param_grid_brt,n_jobs = -1,verbose = True,cv = 10)\n brt_complete.fit(X_train,y_train)\n brt = brt_complete.best_estimator_\n else:\n brt = GradientBoostingRegressor(n_estimators=2000,learning_rate=0.0008,max_depth = 4,min_samples_leaf=5)\n brt.fit(X_train,y_train)\n \n #str_in = raw_input(\"Descomp-(T)raining or (U)sed selected (Default: U): \")\n #\n #if str_in == 'T':\n # print \"Training descomp model\"\n # brt_descomp_complete = GridSearchCV(clf_descomp, param_grid_brt,n_jobs = -1,verbose = True,cv = 10)\n # brt_descomp_complete.fit(X_descomp_train,y_descomp_train)\n # brt_descomp = brt_descomp_complete.best_estimator_\n #else:\n # brt_descomp = GradientBoostingRegressor(n_estimators=2000,learning_rate=0.006,max_depth = 4,min_samples_leaf=5)\n # brt_descomp.fit(X_descomp_train,y_descomp_train)\n \n \n plt.close('all')\n # ####### IAM %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #relative importance\n \n feature_importance = brt.feature_importances_\n # make importances relative to max importance\n feature_importance = 100.0 * (feature_importance / feature_importance.max()) \n sorted_idx = np.argsort(feature_importance)\n pos = np.arange(sorted_idx.shape[0]) + .5\n #plt.sca(axs[5])\n #plt.cla()\n #feats = np.array(features)\n plt.barh(pos, feature_importance[sorted_idx], align='center')\n plt.yticks(pos,feats[sorted_idx], fontsize=20) \n plt.title(\"TS Group 3\", fontsize=20)\n plt.xlabel('Relative Importance (%)', fontsize=20)\n plt.subplots_adjust(top=0.9, left=0.18, bottom=0.15)\n #partial dependence plot\n \n #mse\n from sklearn.metrics import mean_squared_error, r2_score\n \n y_pred = brt.predict(X_test)\n \n print \"MSE\",mean_squared_error(y_test,y_pred)\n print 'R2',r2_score(y_test,y_pred)\n \n #plot for IAM\n #plt.figure()\n #4 features AVNN, age, sex, ci\n #features = ['SDNN','HRVTriangIndex','SDSD','AVNN','logIndex','RMSSD','ci','sex','age']\n #target_features = [features[3],features[-1],features[-2],features[-3]]\n target_features_idx = [0,4,7,3,9,(0,4)]\n fig_hrt, axs = 
plot_partial_dependence(brt, X_train, target_features_idx, feature_names=feats,n_jobs=-1, grid_resolution=80)\n fig_hrt.suptitle('TS Group 3 = f(HRV)', fontsize=20)\n plt.subplots_adjust(top=0.9, hspace=0.4, wspace=0.5)\n for a in range(5):\n axs[a].set_ylabel(\"TS\", fontsize=20) # tight_layout causes overlap with suptitle\n axs[a].set_xlabel(feats[target_features_idx[a]], fontsize=20)\n axs[5].set_xlabel(feats[target_features_idx[5][0]], fontsize=20) \n axs[5].set_ylabel(feats[target_features_idx[5][1]], fontsize=20)\n plt.show()\n \n target_features_idx = [8,7]\n fig_hrt, axs = plot_partial_dependence(brt, X_train, target_features_idx, feature_names=feats,n_jobs=-1, grid_resolution=80)\n fig_hrt.suptitle('TS Group 3 = f(HRV)', fontsize=20)\n plt.subplots_adjust(top=0.9, left=0.12)\n for a in range(2):\n axs[a].set_ylabel(\"TS partial dependence\", fontsize=20) # tight_layout causes overlap with suptitle\n axs[a].set_xlabel(feats[target_features_idx[a]], fontsize=20)\n axs[a].set_ylim(-2.5,1.5) \n plt.show()\n \n \n fig = plt.figure()\n \n target_feature = (7, 3)\n pdp, (x_axis, y_axis) = partial_dependence(brt, target_feature, X=X_train, grid_resolution=80)\n XX, YY = np.meshgrid(x_axis, y_axis)\n Z = pdp.T.reshape(XX.shape).T\n ax = Axes3D(fig)\n surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)\n ax.set_xlabel(feats[target_feature[0]], fontsize=18)\n ax.set_ylabel(feats[target_feature[1]], fontsize=18)\n ax.set_zlabel('$TS$', fontsize=18)\n # pretty init view\n ax.view_init(elev=22, azim=122)\n plt.colorbar(surf)\n plt.suptitle('$TS = f(Scl,TINN)$', fontsize=18)\n #plt.subplots_adjust(top=0.91)", "title": "" }, { "docid": "f545980615b739f0fc27ec1158acb1cc", "score": "0.58109933", "text": "def train_baseline(dataset_name, X, Y, outer_cv=10, inner_cv=3, random_search_trials=50, inner_epochs=1,\n outer_epochs=5):\n skf = StratifiedKFold(n_splits=outer_cv, random_state=7, shuffle=True)\n skf2 = StratifiedKFold(n_splits=inner_cv, random_state=7, shuffle=True)\n\n fold_var = 1\n\n list_of_res = []\n for train_index, val_index in skf.split(np.zeros(X.shape[0]), Y):\n results_dict = {}\n x_train = X[train_index]\n y_train = Y[train_index]\n x_val = X[val_index]\n y_val = Y[val_index]\n max_trail = 0\n max_acc = 0\n hyper_param_batchsize = np.array([32, 64, 128])\n hyper_param_lr = np.array([0.01, 0.001, 0.0001, 0.005])\n hyper_param_pooling = np.array(['max', 'avg'])\n hyper_params_dict = {}\n for trail in range(0, random_search_trials):\n b = np.random.choice(hyper_param_batchsize)\n lr = np.random.choice(hyper_param_lr)\n po = np.random.choice(hyper_param_pooling)\n print(f\"hyper params {(b, lr, po)}\")\n acc_list = []\n for train_ind2, val_inds2 in skf2.split(np.zeros(x_train.shape[0]), y_train):\n x_train_hyperopt = x_train[train_ind2]\n y_train_hyperopt = y_train[train_ind2]\n x_val_hyperopt = x_train[val_inds2]\n y_val_hyperopt = y_train[val_inds2]\n classes = len(np.unique(y_train_hyperopt))\n model = BaselineModel(((x_train_hyperopt, y_train_hyperopt), (x_val_hyperopt, y_val_hyperopt)), classes,\n lr, b, po)\n model.train(inner_epochs)\n results = model.evaluate()\n acc_list.append(results['acc'])\n tf.keras.backend.clear_session()\n\n mean_acc = np.array(acc_list).mean()\n if mean_acc > max_acc:\n max_trail = trail\n max_acc = mean_acc\n hyper_params_dict[trail] = (lr, b, po, mean_acc)\n # for later need to save the results in the dict\n\n best_params = hyper_params_dict[max_trail]\n model = BaselineModel(((x_train, y_train), (x_val, y_val)), classes, 
best_params[0], best_params[1],\n best_params[2])\n start_timer = time.time()\n model.train(outer_epochs)\n end_timer = time.time()\n eval_res = model.evaluate()\n results_dict['dataset_name'] = dataset_name\n results_dict['k-fold'] = fold_var\n results_dict['train_time'] = end_timer - start_timer\n results_dict.update(eval_res)\n list_of_res.append(results_dict)\n tf.keras.backend.clear_session()\n fold_var += 1\n tmp = pd.DataFrame(list_of_res)\n tmp.to_csv(f'Results/Baseline_{dataset_name}.csv')\n return pd.DataFrame(list_of_res)", "title": "" }, { "docid": "3fccf984e292f581e1470545efcf105d", "score": "0.5808092", "text": "def fit(self, X, y, verbosity=0):\n self.classes = list(set(y))\n n_points = len(y)\n if len(X) != n_points:\n raise Exception('Number of training points should be the same as training labels.')\n\n if not self._state_machine:\n self._state_machine = DefaultStateMachine(self.classes)\n\n # Initialize the parameters given the state machine, features, and target classes.\n self.parameters = self._initialize_parameters(self._state_machine, X[0].shape[2])\n\n # Create a new model object for each training example\n models = [self._Model(self._state_machine, x, ty) for x, ty in zip(X, y)]\n\n self._evaluation_count = 0\n\n def _objective(parameters):\n gradient = np.zeros(self.parameters.shape)\n ll = 0.0 # Log likelihood\n # TODO: Embarrassingly parallel\n for model in models:\n dll, dgradient = model.forward_backward(parameters.reshape(self.parameters.shape))\n ll += dll\n gradient += dgradient\n\n parameters_without_bias = np.array(parameters, dtype='float64') # exclude the bias parameters from being regularized\n parameters_without_bias[0] = 0\n ll -= self.l2_regularization * np.dot(parameters_without_bias.T, parameters_without_bias)\n gradient = gradient.flatten() - 2.0 * self.l2_regularization * parameters_without_bias\n\n if verbosity > 0:\n if self._evaluation_count == 0:\n print('{:10} {:10} {:10}'.format('Iteration', 'Log-likelihood', '|gradient|'))\n if self._evaluation_count % verbosity == 0:\n print('{:10} {:10.4} {:10.4}'.format(self._evaluation_count, ll, (abs(gradient).sum())))\n self._evaluation_count += 1\n\n # TODO: Allow some of the parameters to be frozen. ie. not trained. 
Can later also completely remove\n # TODO: the computation associated with these parameters.\n return -ll, -gradient\n\n def _objective_copy_gradient(paramers, g):\n nll, ngradient = _objective(paramers)\n g[:] = ngradient\n return nll\n\n if self._optimizer:\n self.optimizer_result = self._optimizer(_objective, self.parameters.flatten(), **self._optimizer_kwargs)\n self.parameters = self.optimizer_result[0].reshape(self.parameters.shape)\n else:\n optimizer = lbfgs.LBFGS()\n final_betas = optimizer.minimize(_objective_copy_gradient,\n x0=self.parameters.flatten(),\n progress=None)\n self.optimizer_result = final_betas\n self.parameters = final_betas.reshape(self.parameters.shape)\n\n self.parameters = np.asfortranarray(self.parameters)\n return self", "title": "" }, { "docid": "beaa471631c3b9d19370c11f99017bce", "score": "0.57987833", "text": "def run_output(df):\n features = [i for i in df.columns if i != 'price_range']\n X = df[features].values\n y = df['price_range'].values\n\n optimization_function = partial(opt_func, param_names=param_names, x=X, y=y)\n\n result = gp_minimize(optimization_function, dimensions=param_space, n_calls=15, n_random_starts=10, verbose=10)\n best_parameters = dict(zip(param_names, result.x))\n\n return best_parameters", "title": "" }, { "docid": "d5a6b3d2038d4e66416494d4070e5075", "score": "0.5798312", "text": "def bayesian_optimization_search(model, bo_n_init, bo_n_iterations, Ytrain, Ftrain, ftest, ytest, do_print=False):\n\n predictions = bayesian_optimization.BayesianOptimization(model.dim, model.kernel, bayesian_optimization.expected_improvement,\n variance=transform_forward(model.variance))\n ix_evaluated = []\n ix_candidates = np.where(np.invert(np.isnan(ytest)))[0].tolist()\n ybest_list = []\n\n def _process_ix(ix, predictions, model, ytest, ix_evaluated, ix_candidates):\n predictions.add(model.X[ix], ytest[ix])\n ix_evaluated.append(ix)\n ix_candidates.remove(ix)\n\n def _print_status(ix, bo_iteration, ytest, ybest, do_print):\n if do_print:\n print('Iteration: %d, %g [%d], Best: %g' % (bo_iteration, ytest[ix], ix, ybest))\n\n ix_init = bayesian_optimization.init_l1(Ytrain, Ftrain, ftest).tolist()\n for bo_iteration in range(bo_n_init):\n ix = ix_init[bo_iteration]\n if not np.isnan(ytest[ix]):\n _process_ix(ix, predictions, model, ytest, ix_evaluated, ix_candidates)\n ybest = predictions.ybest\n if ybest is None:\n ybest = np.nan\n ybest_list.append(ybest)\n\n _print_status(ix, bo_iteration, ytest, ybest, do_print)\n\n for bo_iteration in range(bo_n_init, bo_n_iterations):\n ix = ix_candidates[predictions.next(model.X[ix_candidates])]\n _process_ix(ix, predictions, model, ytest, ix_evaluated, ix_candidates)\n ybest = predictions.ybest\n ybest_list.append(ybest)\n\n _print_status(ix, bo_iteration, ytest, ybest, do_print)\n\n return np.asarray(ybest_list)", "title": "" }, { "docid": "6f1547867a1f2095c314d0c375635b5d", "score": "0.57941216", "text": "def randomsrch_obj(self, params, iteration):\n optim_type = 'Random'\n self.iteration += 1\n #random.seed(SEED) ##For True Randomized Search deactivate the fixated SEED\n if self.GPU == True:\n params['task_type'] = 'GPU'\n if self.GPU == False:\n params['task_type'] = 'CPU'\n \n bagging_temperature_dist = list(np.linspace(0,1))\n if params['bootstrap_type'] == 'Bayesian':\n params['bagging_temperature'] = random.sample(bagging_temperature_dist,1)[0]\n max_leaves_dist = list(range( 2, 32, 1))\n if params['grow_policy'] == 'Lossguide':\n params['max_leaves'] = random.sample(max_leaves_dist,1)[0] \n if 
self.GPU == False:\n params['score_function'] = 'L2'\n else:\n self.lossguide_verifier = True\n # Perform n_folds cross validation\n loss, params, n_estimators, run_time = self.ctb_crossval(params, optim_type)\n\n # Return list of results\n return [loss, params,iteration, n_estimators, run_time]", "title": "" }, { "docid": "8057194efeff47219f56f6b321517bb9", "score": "0.57940066", "text": "def _train_gradient_boosting(self):\n\t\tfrom sklearn.ensemble import GradientBoostingClassifier\n\t\tprint(self.experiments)\n\t\tfor experiment in self.experiments:\n\t\t\tprint(experiment)\n\t\t\tmodel = GradientBoostingClassifier(max_depth=int(experiment['maxTreeDepth']), learning_rate=experiment['shrinkage'],\n\t\t\t\t\t\t\t\t\t\t\t n_estimators=int(experiment['numberTrees']), min_samples_leaf=int(experiment['minTerminalNodeSize']))\n\t\t\tmodel.fit(self.train_data, self.train_labels)\n\t\t\tpredictions = model.predict(self.test_data)\t\t\n\t\t\tscores = model.decision_function(self.test_data)\n\t\treturn predictions, scores", "title": "" }, { "docid": "0c7c75b8bc0bbec68a60073ec0567dfb", "score": "0.5790052", "text": "def find_best_voter(estimators, X, y, voting='hard', scoring='accuracy',\n calc_diff=True, cv=5):\n start_time = datetime.datetime.now()\n best_score = 0\n num_ests = len(estimators)\n num_of_combos = sum((binom(num_ests, i)) for i in range(2, num_ests + 1))\n count = 0\n per_divider = 10\n per_div_step = per_divider\n per_increment = num_of_combos / per_divider\n per_target = per_increment\n combo_score_std = {}\n\n print('Starting search for best estimator combination for voting.')\n print('Num of estimators : {}'.format(num_ests))\n print('Num of combinations : {}'.format(num_of_combos))\n for i in range(0, num_ests - 1):\n for each in combinations(estimators, num_ests - i):\n voting_clf = VotingClassifier(estimators=each,\n voting=voting)\n cross_val_raw = cross_val_score(voting_clf, X, y, cv=cv,\n scoring=scoring, n_jobs=-1)\n current_score = np.mean(cross_val_raw)\n current_std = np.std(cross_val_raw)\n if calc_diff:\n current_diff = train_val_diff(voting_clf, X, y, cv=cv)\n else:\n current_diff = None\n key = str([k for k, _ in each]).replace(' ', '')\n combo_score_std[key] = [current_score, current_std, current_diff]\n if current_score > best_score:\n best_score = current_score\n best_combo = each\n\n if count == int(np.floor(per_target)):\n print('{} % complete; {} elapsed '.format(per_div_step, str(\n datetime.datetime.now() - start_time)))\n per_target += per_increment\n per_div_step += per_divider\n count += 1\n\n print('Best score: {}'.format(best_score))\n return best_score, best_combo, combo_score_std", "title": "" }, { "docid": "5b0d87d092b41cc191209c22aec2fa5d", "score": "0.5784555", "text": "def best_params_(self):\r\n if self.is_fitted():\r\n return {k: self.cv_results_[\"param_\" + k][self.best_index_] for k in\r\n self.param_grid}\r\n else:\r\n raise ValueError(\"model not fitted!\")", "title": "" }, { "docid": "3ff55d210cc4636be324b4a522cccfc6", "score": "0.57814586", "text": "def gridSearch(ABP,CBFV,ABP_G,CBFV_G):\r\n #Parametros a utilizar\r\n learn_rate = [0.001,0.003,0.01,0.03,0.1,0.3]\r\n hidden_layer_sizes = [2,3,4,5,6,cantidadDeNeuronas(retrasos(ABP))]\r\n activation = [\"identity\",\"logistic\", \"tanh\", \"relu\"]\r\n\r\n #Modelo obtenidocon Optimizacion de parametros\r\n model = MLPRegressor(max_iter=10000)\r\n grid = GridSearchCV(estimator=model, param_grid=dict(activation = activation,alpha=learn_rate, hidden_layer_sizes = hidden_layer_sizes))\r\n CBFV 
= CBFV[0:len(CBFV)-3]\r\n grid.fit(retrasos(ABP),CBFV)\r\n model = grid.best_estimator_\r\n params = grid.best_params_\r\n CBFV_G1 = model.predict(ABP_G)\r\n mser = mean_squared_error(CBFV_G,CBFV_G1)\r\n return model, mser, params", "title": "" }, { "docid": "164d57a21fb54028928e6afe00f22c38", "score": "0.57782257", "text": "def train_NN(\n x_train,\n y_train,\n x_cv,\n y_cv,\n layers,\n gammas,\n activation_type=\"sigmoid\",\n logistic_output=False,\n epochs=100,\n batchsize=None,\n method=\"BFGS\",\n gtol=1e-05,\n maxiter=None,\n):\n # Number of parameters\n r = get_number_of_NN_parameters(layers)\n\n # Initialize parameters\n p0 = np.random.randn(r)\n\n # Loop over regularization values\n # Minimize validation error as a function of the\n # regularization parameter.\n for i, gamma in enumerate(gammas):\n print(\"Regularization parameter value:\", gamma)\n p = get_minimization_solution(\n p0,\n x_train,\n y_train,\n layers,\n activation_type,\n logistic_output,\n gamma,\n epochs,\n batchsize,\n method,\n gtol,\n maxiter,\n )\n\n cost_cv = cost_NN(p, x_cv, y_cv, layers, activation_type, logistic_output, output=\"value\")\n if i == 0:\n i_best = 0\n cost_cv_best = cost_cv\n p_best = p\n elif cost_cv < cost_cv_best:\n i_best = i\n cost_cv_best = cost_cv\n p_best = p\n return p_best, i_best", "title": "" }, { "docid": "39fd00e116bea184eb52e2a8d93447ad", "score": "0.5768876", "text": "def train_hyperparameters(inputs_train, labels_train, # Training data\n inputs_val, labels_val, # Validation data\n learning_rate_initial, learning_rate_min, max_epochs, # LR schedule\n n_hidden_vals, batch_size_vals, regulariser_vals, momentum_vals, # Hyperparameters\n verbose = True, profile = False):\n combs = itertools.product(batch_size_vals, regulariser_vals, n_hidden_vals,\n momentum_vals)\n cost = math.inf\n n = 0\n for batch_size,regulariser,n_hidden,momentum in combs:\n print(\"\\nHyperparameters (batch size/regulariser/neurons/momentum):\",\n \"{0} {1} {2} {3}\".format(batch_size,regulariser,n_hidden,momentum))\n nn = gradient_descent(inputs_train, labels_train, learning_rate_initial,\n learning_rate_min, max_epochs, n_hidden, batch_size,\n regulariser, momentum, verbose, profile)\n output_val = forward_propagation(inputs_val, nn[\"weights\"],\n nn[\"biases\"])[-1]\n cost_val = sigmoid_cost(output_val, labels_val, nn[\"weights\"], 0)\n print(\"Validation error:\", cost_val)\n if cost_val < cost:\n out = {\"batch_size\":batch_size,\n \"regulariser\":regulariser, \"weights\":nn[\"weights\"],\n \"biases\": nn[\"biases\"],\n \"trace\":nn[\"costs\"], \"n_hidden\":n_hidden,\n \"epochs_run\":nn[\"epochs_run\"], \"momentum\":momentum}\n cost = cost_val\n print(\"\\nBest hyperparameters (batch size/regulariser/neurons/momentum):\",\n \"{0} {1} {2} {3}\".format(out[\"batch_size\"],out[\"regulariser\"],\n out[\"n_hidden\"],out[\"momentum\"]))\n if verbose: print(\"Best validation error:\", cost)\n return out", "title": "" }, { "docid": "a7c25c5e554cb86be0a2f2f428f3d0a8", "score": "0.57633966", "text": "def fit_model(X, Y, parameters, dataset):\n X_train, X_test, y_train, y_test = train_test_split(X, Y, random_state=1)\n grid_search = GridSearchCV(SVC(kernel='rbf', probability=True), parameters, cv=5)\n grid_search.fit(X_train, y_train)\n test_score = grid_search.score(X_test, y_test)\n best_parameters = grid_search.best_params_\n mean_fit_time = grid_search.cv_results_['mean_fit_time']\n return mean_fit_time, grid_search, test_score, best_parameters", "title": "" }, { "docid": "f93675002da81dcd2eebc1ebd20600e5", 
"score": "0.5762532", "text": "def cost(params):\n\n # get the F(x) response\n Fx = model(params)\n\n # compute goodness of fit\n return scale * (Fx - G)**2", "title": "" }, { "docid": "3d914400387f3eb2ebd1f409ad6b6728", "score": "0.5758873", "text": "def fit(self):\n objective = self._create_objective()\n space = self._create_hyperparameter_domain()\n\n # Using the Trials object allows us to keep track of every trial.\n trials = Trials()\n best_parameters = fmin(\n fn=objective,\n space=space,\n algo=tpe.suggest,\n trials=trials,\n max_evals=self.n_iter\n )\n\n return best_parameters, trials", "title": "" }, { "docid": "607177a0837c4a1447deaacf1a577045", "score": "0.57580745", "text": "def xgbr_random(kwargs: dict, max_duration: int, train_X, train_y, test_X, test_y):\n\n best_kwargs = None\n best_error = None\n used_kwargs = []\n start = time.time()\n while time.time() - start < max_duration:\n logging.info(\"Trace xgbr_random() --- starting new iteration --- time elapsed = {} seconds\".format(time.time() - start))\n try_kwargs = {k:np.random.choice(v) for (k, v) in kwargs.items()}\n if try_kwargs not in used_kwargs:\n logging.info(\"Trace xgbr_random() --- trying hyperparameters = {}\".format(try_kwargs))\n used_kwargs.append(try_kwargs)\n classifier = XGBRegressor(**try_kwargs)\n classifier.fit(train_X, train_y, verbose=False)\n pred_y = classifier.predict(test_X)\n error = mean_squared_error(test_y, pred_y)\n if not best_error or error < best_error:\n best_error = error\n best_kwargs = try_kwargs\n logging.info(\"Trace xgbr_random() --- best_error updated to {}\".format(best_error))\n logging.info(\"Trace xgbr_random() --- best_kwargs updated to {}\".format(best_kwargs))\n else:\n logging.info(\"Trace xgbr_random() --- skipping hyperparameters --- they have been tried.\")\n continue\n logging.info(\"Trace xgbr_random() --- duration exceeded --- process quitting with best_error = {}\".format(best_error))\n logging.info(\"Trace xgbr_random() --- duration exceeded --- process quitting with best_kwargs = {}\".format(best_kwargs))\n return best_kwargs, best_error", "title": "" }, { "docid": "f207be19046750241c78d9fdfd4e7625", "score": "0.5744937", "text": "def test_cross_validation():\n features=[]\n param=[1,2,3]\n assert CovEstHard._cross_validation(features, param, 5) == None", "title": "" }, { "docid": "795227e61129abca1a4e8c7be937561d", "score": "0.57427216", "text": "def vlb_objective(params, x, y, layer_sizes, n_samples, model_sd=0.1, act=np.tanh):\n qw_mean, qw_log_std, qz_mean, qz_log_std = params\n\n weights = sample_weights((qw_mean, qw_log_std), n_samples)\n latents = sample_latents((qz_mean, qz_log_std)) # []\n entropy = gaussian_entropy(qw_log_std)+gaussian_entropy(qz_log_std)\n print(x.shape, latents.shape)\n f_bnn= bnn_predict(weights, np.concatenate([x, latents], 1), layer_sizes, act)[:, :, 0] # [ns, nd]\n\n\n #f_bnn = sample_bnn(params, x, n_samples,layer_sizes, act)\n log_likelihood = diag_gaussian_log_density(y.T, f_bnn, .1)\n qw_log_prior = diag_gaussian_log_density(weights, 0, 1)\n qz_log_prior = diag_gaussian_log_density(latents, 0, 1)\n\n return -entropy - np.mean(log_likelihood+qw_log_prior) -np.mean(qz_log_prior)", "title": "" }, { "docid": "5d4d4c798107124c29431690119ee31b", "score": "0.5741536", "text": "def svc():\n\n #Take a look: get_dataset() is defined by yourself\n #train_x : [None, feature_nb]\n #train_y : [None, 1]\n train_x, train_y, test_x, test_y = get_dataset()\n\n\n params = {'C':[0.1,1,10,100,1000], 'gamma':[0.0001, 0.001],'kernel':['rbf']}\n 
gs_all = GridSearchCV(estimator = SVC(probability=True), param_grid=params, scoring='neg_log_loss', cv=5, n_jobs=20)\n gs_all.fit(train_x, np.ravel(train_y))\n\n\n \"\"\" train best_params' model \"\"\"\n print(gs_all.grid_scores_)\n print(gs_all.best_params_)\n print(gs_all.best_score_)\n\n c = gs_all.best_params_[\"C\"]\n g = gs_all.best_params_[\"gamma\"]\n k = gs_all.best_params_[\"kernel\"]\n best_svc = SVC(C=c, gamma=g, kernel=k, probability=True)\n best_svc.fit(train_x, np.ravel(train_y))\n\n \"\"\" save best model \"\"\"\n joblib.dump(best_svc, model_path)\n\n \"\"\" predict test_data \"\"\"\n pred = best_svc.predict_proba(test_x)\n\n \"\"\" save predict result to csv \"\"\"\n os.chdir(test_result_filepath)\n with open('pre_label.csv','w') as file:\n write = csv.writer(file)\n for i in range(len(test_x)):\n row = []\n row.append(pred[i][1])\n row.append(test_y[i])\n write.writerow(row)\n\n \"\"\" metric evaluate \"\"\"\n\n os.chdir(test_result_filepath)\n with open('evaluate_metrics.csv','w') as file:\n writer = csv.writer(file,lineterminator='\\n')\n writer.writerow(['Threshold','TP','TN','FP','FN','precision','recall','FDR','TDR'])\n for i in range(200):\n threshold = i/199\n evaulate(threshold)\n (TP,TN,FP,FN),(precision,recall),(FPR,TPR) = calc_metrics()\n writer.writerow([threshold,TP,TN,FP,FN,precision,recall,FPR,TPR])\n\n \"\"\" plot PR and ROC Curve \"\"\"\n evaluate_plot()", "title": "" }, { "docid": "c888615244a59307ce858a0680804e1d", "score": "0.5727211", "text": "def train_model_classification(X, X_test, y, params, folds, model_type='lgb', eval_metric='auc', columns=None, plot_feature_importance=False, model=None,\n verbose=10000, early_stopping_rounds=200, n_estimators=50000):\n columns = X.columns if columns == None else columns\n X_test = X_test[columns]\n \n # to set up scoring parameters\n metrics_dict = {'auc': {'lgb_metric_name': eval_auc,\n 'catboost_metric_name': 'AUC',\n 'sklearn_scoring_function': metrics.roc_auc_score},\n }\n \n result_dict = {}\n print(\"len y.values: \")\n print(len(set(y.values)))\n print(y.values)\n #print(set(y.values))\n # # out-of-fold predictions on train data\n # oof = np.zeros((len(X), len(set(y.values))))\n \n # # averaged predictions on train data\n # prediction = np.zeros((len(X_test), oof.shape[1]))\n # out-of-fold predictions on train data\n oof = np.zeros(len(X))\n \n # averaged predictions on train data\n prediction = np.zeros(len(X_test))\n # list of scores on folds\n scores = []\n feature_importance = pd.DataFrame()\n \n # split and train on folds\n for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\n print(f'Fold {fold_n + 1} started at {time.ctime()}')\n if type(X) == np.ndarray:\n X_train, X_valid = X[columns][train_index], X[columns][valid_index]\n y_train, y_valid = y[train_index], y[valid_index]\n else:\n X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]\n y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]\n \n if model_type == 'lgb':\n model = lgb.LGBMClassifier(**params, n_estimators=n_estimators, n_jobs = -1)\n model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_valid, y_valid)], eval_metric=metrics_dict[eval_metric]['lgb_metric_name'], verbose=verbose, early_stopping_rounds=early_stopping_rounds)\n \n y_pred_valid = model.predict_proba(X_valid)\n y_pred = model.predict_proba(X_test, num_iteration=model.best_iteration_)\n \n if model_type == 'xgb':\n train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)\n valid_data = 
xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)\n\n watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]\n model = xgb.train(dtrain=train_data, num_boost_round=n_estimators, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbose, params=params)\n y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns), ntree_limit=model.best_ntree_limit)\n y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit)\n \n if model_type == 'sklearn':\n model = model\n model.fit(X_train, y_train)\n \n y_pred_valid = model.predict(X_valid).reshape(-1,)\n score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)\n print(f'Fold {fold_n}. {eval_metric}: {score:.4f}.')\n print('')\n \n y_pred = model.predict_proba(X_test)\n \n if model_type == 'cat':\n model = CatBoostClassifier(iterations=n_estimators, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'], **params,\n loss_function=metrics_dict[eval_metric]['catboost_metric_name'])\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False)\n\n y_pred_valid = model.predict(X_valid)\n y_pred = model.predict(X_test)\n \n oof[valid_index] = y_pred_valid\n scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid[:, 1]))\n\n prediction += y_pred \n \n if model_type == 'lgb' and plot_feature_importance:\n # feature importance\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = columns\n fold_importance[\"importance\"] = model.feature_importances_\n fold_importance[\"fold\"] = fold_n + 1\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n\n prediction /= folds.n_splits\n \n print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))\n \n result_dict['oof'] = oof\n result_dict['prediction'] = prediction\n result_dict['scores'] = scores\n \n if model_type == 'lgb':\n if plot_feature_importance:\n feature_importance[\"importance\"] /= folds.n_splits\n cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\n by=\"importance\", ascending=False)[:50].index\n\n best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\n\n plt.figure(figsize=(16, 12));\n sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\n plt.title('LGB Features (avg over folds)');\n \n result_dict['feature_importance'] = feature_importance\n \n return result_dict", "title": "" }, { "docid": "096e40bcffbf002209d78eac967245f8", "score": "0.5719901", "text": "def search_params_nonlinear(trainX, trainY, group_ids, rprange, gammarange):\n bestperf = -1.\n for loggamma in range(*gammarange):\n gamma = 2. 
** loggamma\n kwargs = {\n 'train_features': trainX,\n 'train_labels': trainY,\n #'kernel_obj': LinearKernel(trainX),\n 'kernel_obj': GaussianKernel(trainX, gamma=gamma),\n #'kernel_obj': PolynomialKernel(trainX, gamma=gamma, coef0=1, degree=2),\n }\n rls = RLS.createLearner(**kwargs)\n rls.train()\n perf, perf_groups, rp = search_rp(rls, trainY, group_ids, rprange)\n if perf > bestperf:\n bestperf = perf\n bestperf_groups = perf_groups\n bestrp = rp\n bestgamma = gamma\n return bestperf, bestperf_groups, bestrp, bestgamma", "title": "" }, { "docid": "cbb59b4e4d330431cf019936da32d105", "score": "0.57134324", "text": "def cross_validation(dataset, acronym, method, seed=6, k_fold=10, deg=12, max_iters=100, gamma=0.1, lambda_=0.1):\n # Create the indices lists for k-flod split of teh dataset\n k_idx = create_k_indices(dataset, k_fold, seed)\n accuracies = []\n np.random.seed(seed)\n\n for k in range(k_fold):\n # Test set and train set for kth cross validation step\n validation_set = dataset[k_idx[k-1]] # only 1/k_flod of the rows\n train_set = np.delete(dataset, k_idx[k-1], axis=0) # all other rows\n\n # Polynomial expansion of the training and test set's features (without the prediction column)\n validation_features = build_poly(validation_set[:,1:], deg)\n train_features = build_poly(train_set[:,1:],deg)\n\n # Weight computation using our criterion on the train_set\n if (acronym == 'LSGD'): #train_set[:,0] = actual output values\n initial_w = np.random.uniform(low=1e-4, high=0.5, size=train_features.shape[1]) #random initialization of weights\n weights, _ = method(train_set[:,0], train_features, initial_w, max_iters, gamma)\n elif (acronym == 'LSSGD'):\n initial_w = np.random.uniform(low=1e-4, high=0.5, size=train_features.shape[1]) #random initialization of weights\n weights, _ = method(train_set[:,0], train_features, initial_w, max_iters, gamma)\n elif (acronym == 'LS'):\n weights, _ = method(train_set[:,0], train_features)\n elif (acronym == 'RR'):\n weights, _ = ridge_regression(train_set[:,0], train_features, lambda_)\n elif (acronym == 'LR'):\n initial_w = np.random.uniform(low=1e-4, high=0.5, size=train_features.shape[1]) #random initialization of weights\n weights, _ = method(train_set[:,0], train_features, initial_w, max_iters, gamma)\n elif (acronym == 'RLR'):\n initial_w = np.random.uniform(low=1e-4, high=0.5, size=train_features.shape[1]) #random initialization of weights\n weights, _ = method(train_set[:,0], train_features, lambda_, initial_w, max_iters, gamma)\n\n # Computation of prediction with trained weights on validation set\n yhat = validation_features.dot(weights) > 0.5\n\n # Accuracy computation for one fold and store to compute mean after all folds\n accuracy = np.sum(yhat == validation_set[:,0]) / len(yhat) #validation[:,0] = actual output values\n accuracies.append(accuracy)\n return (np.mean(accuracies))", "title": "" }, { "docid": "fed44e6c259fc784618f21c6d82f449c", "score": "0.57129425", "text": "def get_global_optimum(self):", "title": "" }, { "docid": "676641cb612199145be11ad73a1868d9", "score": "0.56965274", "text": "def fit(self,X_b_train,y_train):\n # Fit tuning curve\n input_x_range=np.arange(np.min(y_train[:,0]),np.max(y_train[:,0])+.01,np.round((np.max(y_train[:,0])-np.min(y_train[:,0]))/self.res))\n input_y_range=np.arange(np.min(y_train[:,1]),np.max(y_train[:,1])+.01,np.round((np.max(y_train[:,1])-np.min(y_train[:,1]))/self.res))\n # Get all combinations of x/y values\n input_mat = np.meshgrid(input_x_range,input_y_range)\n xs = 
np.reshape(input_mat[0],[input_x_range.shape[0]*input_y_range.shape[0],1])\n ys = np.reshape(input_mat[1],[input_x_range.shape[0]*input_y_range.shape[0],1])\n input_xy=np.concatenate((xs,ys),axis=1)\n if self.encoding_model=='quadratic':\n input_xy_modified = np.empty([input_xy.shape[0],5])\n input_xy_modified[:,0] = input_xy[:,0]**2\n input_xy_modified[:,1] = input_xy[:,0]\n input_xy_modified[:,2] = input_xy[:,1]**2\n input_xy_modified[:,3] = input_xy[:,1]\n input_xy_modified[:,4] = input_xy[:,0]*input_xy[:,1]\n y_train_modified = np.empty([y_train.shape[0],5])\n y_train_modified[:,0] = y_train[:,0]**2\n y_train_modified[:,1] = y_train[:,0]\n y_train_modified[:,2] = y_train[:,1]**2\n y_train_modified[:,3] = y_train[:,1]\n y_train_modified[:,4] = y_train[:,0]*y_train[:,1]\n # Create tuning curves\n num_nrns=X_b_train.shape[1] # Number of neurons to fit tuning curves for\n tuning_all=np.zeros([num_nrns,input_xy.shape[0]]) # Matrix that stores tuning curves for all neurons\n # Loop through neurons and fit tuning curves\n for j in range(num_nrns): #Neuron number\n if self.encoding_model == \"linear\":\n tuning=glm_run(y_train,X_b_train[:,j:j+1],input_xy)\n if self.encoding_model == \"quadratic\":\n tuning=glm_run(y_train_modified,X_b_train[:,j:j+1],input_xy_modified)\n tuning_all[j,:] = np.squeeze(tuning)\n # Save tuning curves to be used in \"predict\" function\n self.tuning_all = tuning_all\n self.input_xy = input_xy\n n = y_train.shape[0]\n dx = np.zeros([n-1,1])\n for i in range(n-1):\n dx[i] = np.sqrt((y_train[i+1,0]-y_train[i,0])**2+(y_train[i+1,1]-y_train[i,1])**2) # Change in state across time steps\n std = np.sqrt(np.mean(dx**2)) # dx is only positive. this gets approximate stdev of distribution (if it was positive and negative)\n self.std = std #Save for use in \"predict\" function", "title": "" }, { "docid": "79cd30259e5f171ec4774a1c7a20bd75", "score": "0.5690847", "text": "def _tune_model_helper2(self, n_tries, model_class, hyperparameters_dict, X_train, y_train, X_test, y_test):\n\n def objective(params):\n lgb_params = {\"application\": \"multiclass\",\n \"boosting\": \"gbdt\",\n \"metric\": \"qwk\",\n \"num_class\": 5,\n \"num_leaves\": int(params[\"num_leaves\"]),\n \"max_depth\": -1,\n \"learning_rate\": \"{:.4f}\".format(params[\"learning_rate\"]),\n \"bagging_fraction\": \"{:.4f}\".format(params[\"bagging_fraction\"]),\n \"feature_fraction\": \"{:.4f}\".format(params[\"feature_fraction\"]),\n \"min_split_gain\": \"{:.4f}\".format(params[\"min_split_gain\"]),\n \"min_child_samples\": int(params[\"min_child_samples\"]),\n \"min_child_weight\": \"{:.4f}\".format(params[\"min_child_weight\"]),\n \"verbosity\": -1,\n \"seed\": 17,\n \"nthread\": 16,\n \"device\": \"cpu\"}\n\n lgbm = BlendedLGBMClassifier(lgb_params, early_stopping_rounds = 150, eval_size = 0.2, eval_split_type = \"random\", verbose_eval = 100, nrounds = 10000)\n\n # Train the model\n lgbm.fit(X_train, y_train)\n \n # Make predictions\n predictions_npa = lgbm.predict(X_test)\n \n # Evaluate the model\n qwk = quadratic_weighted_kappa(y_test, predictions_npa)\n print(\"QWK = \", qwk)\n\n return -qwk # Return negative value as we want to maximize it\n\n # Stores all information about each trial and about the best trial\n trials = hyperopt.Trials()\n\n best = fmin(fn = objective, trials = trials, space = hyperparameters_dict, algo = tpe.suggest, max_evals = n_tries)\n\n return best, trials", "title": "" }, { "docid": "727db329a9ff03ab574fda42925918e1", "score": "0.5688919", "text": "def train_model_regression(X, 
X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None, plot_feature_importance=False, model=None,\n verbose=10000, early_stopping_rounds=200, n_estimators=50000):\n columns = X.columns if columns is None else columns\n X_test = X_test[columns]\n \n # to set up scoring parameters\n metrics_dict = {'mae': {'lgb_metric_name': 'mae',\n 'catboost_metric_name': 'MAE',\n 'sklearn_scoring_function': metrics.mean_absolute_error},\n 'group_mae': {'lgb_metric_name': 'mae',\n 'catboost_metric_name': 'MAE',\n 'scoring_function': group_mean_log_mae},\n 'mse': {'lgb_metric_name': 'mse',\n 'catboost_metric_name': 'MSE',\n 'sklearn_scoring_function': metrics.mean_squared_error}\n }\n\n \n result_dict = {}\n \n # out-of-fold predictions on train data\n oof = np.zeros(len(X))\n \n # averaged predictions on train data\n prediction = np.zeros(len(X_test))\n \n # list of scores on folds\n scores = []\n feature_importance = pd.DataFrame()\n \n # split and train on folds\n for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\n print(f'Fold {fold_n + 1} started at {time.ctime()}')\n if type(X) == np.ndarray:\n X_train, X_valid = X[columns][train_index], X[columns][valid_index]\n y_train, y_valid = y[train_index], y[valid_index]\n else:\n X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]\n y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]\n \n if model_type == 'lgb':\n model = lgb.LGBMRegressor(**params, n_estimators = n_estimators, n_jobs = -1)\n model.fit(X_train, y_train, \n eval_set=[(X_train, y_train), (X_valid, y_valid)], eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],\n verbose=verbose, early_stopping_rounds=early_stopping_rounds)\n \n y_pred_valid = model.predict(X_valid)\n y_pred = model.predict(X_test, num_iteration=model.best_iteration_)\n \n if model_type == 'xgb':\n train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)\n valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)\n\n watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]\n model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200, verbose_eval=verbose, params=params)\n y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns), ntree_limit=model.best_ntree_limit)\n y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit)\n \n if model_type == 'sklearn':\n model = model\n model.fit(X_train, y_train)\n \n y_pred_valid = model.predict(X_valid).reshape(-1,)\n score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)\n print(f'Fold {fold_n}. 
{eval_metric}: {score:.4f}.')\n print('')\n \n y_pred = model.predict(X_test).reshape(-1,)\n \n if model_type == 'cat':\n model = CatBoostRegressor(iterations=20000, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'], **params,\n loss_function=metrics_dict[eval_metric]['catboost_metric_name'])\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False)\n\n y_pred_valid = model.predict(X_valid)\n y_pred = model.predict(X_test)\n \n oof[valid_index] = y_pred_valid.reshape(-1,)\n if eval_metric != 'group_mae':\n scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))\n else:\n scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type']))\n\n prediction += y_pred \n \n if model_type == 'lgb' and plot_feature_importance:\n # feature importance\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = columns\n fold_importance[\"importance\"] = model.feature_importances_\n fold_importance[\"fold\"] = fold_n + 1\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n\n prediction /= folds.n_splits\n \n print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(), np.std(scores)))\n \n result_dict['oof'] = oof\n result_dict['prediction'] = prediction\n result_dict['scores'] = scores\n \n if model_type == 'lgb':\n if plot_feature_importance:\n feature_importance[\"importance\"] /= folds.n_splits\n cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\n by=\"importance\", ascending=False)[:50].index\n\n best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\n\n plt.figure(figsize=(16, 12));\n sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\n plt.title('LGB Features (avg over folds)');\n \n result_dict['feature_importance'] = feature_importance\n \n return result_dict", "title": "" }, { "docid": "9c9106c0620e4963b4e3e6df7d0fd3c2", "score": "0.56734776", "text": "def compute_mse_rbf_tuned(x_train, y_train, x_test, y_test):\n global optimal_parameters, clf\n # define objective function for tuning\n @optunity.cross_validated(x=x_train, y=y_train, num_iter=2, num_folds=2)\n def tune_cv(x_train, y_train, x_test, y_test, C, gamma):\n # sample_weights = my_scaling_odr(y_train)\n # sample_weights = [i / max(Events[-1]) for i in Events[-1]]\n\n model = svm.SVR(C=C, gamma=gamma).fit(x_train, y_train)#, sample_weight=sample_weights\n predictions = model.predict(x_test)\n return optunity.metrics.mse(y_test, predictions)\n\n # optimize parameters\n optimal_pars, _, _ = optunity.minimize(tune_cv, 200, C=[1, 4000], gamma=[0, 10], pmap=optunity.pmap)\n logging.info(\"Optimal hyperparameters: \" + str(optimal_pars))\n # sample_weights = my_scaling_odr(y_train)\n\n tuned_model = svm.SVR(**optimal_pars).fit(x_train, y_train)\n predictions = tuned_model.predict(x_test)\n mse = optunity.metrics.mse(y_test, predictions)\n logging.info('mse: ' + str(mse))\n if mse < mse_old:\n optimal_parameters = optimal_pars\n clf = tuned_model\n return mse", "title": "" }, { "docid": "285b5545ec3e6adfb8983a14465e1d17", "score": "0.5669436", "text": "def my_cross_validate(X, Y,\n mltype,\n model_name='lgb_reg',\n cv=5,\n lr_crv_ticks=5, data_sizes_frac=None,\n args=None, fit_params=None, init_params=None,\n n_jobs=1, random_state=None, logger=None, outdir='.'):\n X = pd.DataFrame(X).values\n Y = pd.DataFrame(Y).values\n\n if 
isinstance(cv, int):\n cv_folds = cv\n cv = KFold(n_splits=cv_folds, shuffle=False, random_state=random_state)\n else:\n cv_folds = cv.get_n_splits()\n\n # ... Now start a nested loop of train size and cv folds ...\n tr_scores_all = [] # list dicts\n vl_scores_all = [] # list dicts\n\n if mltype == 'cls':\n if Y.ndim > 1 and Y.shape[1] > 1:\n splitter = cv.split(X, np.argmax(Y, axis=1))\n else:\n splitter = cv.split(X, Y)\n elif mltype == 'reg':\n splitter = cv.split(X, Y)\n\n # Placeholder to save the best model\n best_model = None\n best_score = -np.Inf\n\n # Start CV iters\n for fold_id, (tr_idx, vl_idx) in enumerate(splitter):\n if logger is not None:\n logger.info(f'Fold {fold_id+1}/{cv_folds}')\n\n # Samples from this dataset are sampled for training\n xtr = X[tr_idx, :]\n ytr = np.squeeze(Y[tr_idx, :])\n\n # A fixed set of validation samples for the current CV split\n xvl = X[vl_idx, :]\n yvl = np.squeeze(Y[vl_idx, :]) \n\n # Get the estimator\n estimator = ml_models.get_model(model_name, init_params)\n\n if 'nn' in model_name:\n from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard\n\n # Create output dir\n out_nn_model = outdir / ('cv'+str(fold_id+1))\n os.makedirs(out_nn_model, exist_ok=False)\n \n # Callbacks (custom)\n clr = CyclicLR(base_lr=0.0001, max_lr=0.001, mode='triangular')\n \n # Keras callbacks\n checkpointer = ModelCheckpoint(str(out_nn_model/'autosave.model.h5'), verbose=0, save_weights_only=False, save_best_only=True)\n csv_logger = CSVLogger(out_nn_model/'training.log')\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=20, verbose=1, mode='auto',\n min_delta=0.0001, cooldown=3, min_lr=0.000000001)\n early_stop = EarlyStopping(monitor='val_loss', patience=60, verbose=1, mode='auto')\n \n # Callbacks list\n callback_list = [checkpointer, csv_logger, early_stop, reduce_lr]\n if (args is not None) and (args['opt']=='clr'):\n callback_list = callback_list + [clr]\n # TODO: which val set should be used??\n # fit_params['validation_data'] = (xvl, yvl)\n # fit_params['validation_split'] = 0.2\n\n # Fit params\n fit_params['validation_data'] = (xvl, yvl)\n fit_params['callbacks'] = callback_list\n\n # Train model\n history = estimator.model.fit(xtr, ytr, **fit_params)\n \n # Dump model\n estimator.dump_model(out_nn_model)\n\n # Calc preds and scores TODO: dump preds\n # ... training set\n y_preds, y_true = utils.calc_preds(estimator.model, x=xtr, y=ytr, mltype=mltype)\n tr_scores = utils.calc_scores(y_true=y_true, y_preds=y_preds, mltype=mltype)\n # ... 
val set\n y_preds, y_true = utils.calc_preds(estimator.model, x=xvl, y=yvl, mltype=mltype)\n vl_scores = utils.calc_scores(y_true=y_true, y_preds=y_preds, mltype=mltype)\n\n # Save the best model\n if mltype == 'cls':\n vl_scores['f1_score'] > best_score\n best_score = vl_scores['f1_score']\n best_model = estimator\n elif mltype == 'reg':\n vl_scores['r2'] > best_score\n best_score = vl_scores['r2']\n best_model = estimator\n\n # Plot training curves\n if 'nn' in model_name:\n ml_models.plot_prfrm_metrics(history, title=f'cv fold: {fold_id+1}',\n skp_ep=7, add_lr=True, outdir=out_nn_model)\n\n # Add info\n tr_scores['tr_set'] = True\n vl_scores['tr_set'] = False\n tr_scores['fold'] = 'f'+str(fold_id)\n vl_scores['fold'] = 'f'+str(fold_id)\n\n # Aggregate scores\n tr_scores_all.append(tr_scores)\n vl_scores_all.append(vl_scores)\n\n # Delete the estimator/model\n del estimator, history\n\n \n tr_df = scores_to_df(tr_scores_all)\n vl_df = scores_to_df(vl_scores_all)\n scores_all_df = pd.concat([tr_df, vl_df], axis=0)\n\n return scores_all_df, best_model", "title": "" } ]
1b1435d1683487ecf3365967c1afada2
Define test class to handle assertEqual with `pandas.DataFrame`.
[ { "docid": "fea0b94710088a4ab085ced5d9880434", "score": "0.70121086", "text": "def add_data_frame_equality_func(test):\n def frame_equal(lhs, rhs, msg=None):\n \"\"\"Adapter for pandas.testing.assert_frame_equal.\"\"\"\n if msg:\n try:\n pdt.assert_frame_equal(lhs, rhs)\n except AssertionError:\n raise test.failureException(msg)\n else:\n # Getting weird errors on categorical differences.\n pdt.assert_frame_equal(lhs, rhs, check_categorical=False)\n test.addTypeEqualityFunc(pd.DataFrame, frame_equal)", "title": "" } ]
[ { "docid": "7b1984d44b85fdf72e8e1b13af6da2c9", "score": "0.7090182", "text": "def test_dataframe(self):\n self.assertTrue(test_create_dataframe(DATA_FRAME, LIST_COL_NAME))", "title": "" }, { "docid": "7470b99ef3d62c2b4e8d0eaa6ab5121f", "score": "0.69681454", "text": "def test_evaluation_get_df_equality(self):\n df = pd.DataFrame([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}])\n result = Evaluation(df).get_df()\n\n self.assertIsInstance(result, pd.DataFrame)\n self.assertTrue(result.equals(df))", "title": "" }, { "docid": "d4c4dc3c2a8d8c5c9757a98592703665", "score": "0.69517213", "text": "def verify(cls, df: pd.DataFrame) -> None:", "title": "" }, { "docid": "84dac6dae169bb7eb10788a3885aa9e7", "score": "0.6562528", "text": "def test_from_dict(self):\n first = pd.DataFrame({'x': [1]})\n second = pd.DataFrame({'y': [2]})\n\n test_dfc = DataFrameCollection.from_dict(\n {\"first\": first, \"second\": second})\n self.assertEqual([\"first\", \"second\"], test_dfc.get_table_names())\n pd.testing.assert_frame_equal(first, test_dfc.get(\"first\"))\n pd.testing.assert_frame_equal(second, test_dfc.get(\"second\"))", "title": "" }, { "docid": "5f6486f196fcf610d5cf9ba4e428ef23", "score": "0.63043046", "text": "def test_weekly_data(self):\n \n try:\n pd_df = pd.read_csv('./data/pandas_weekly_report.csv')\n db_df = pd.read_csv('./data/sql_weekly_report.csv') \n except Exception as e:\n raise Exception(e)\n \n assert_frame_equal(pd_df.astype(str), db_df.astype(str), check_dtype=False, check_names=False)", "title": "" }, { "docid": "1a81984ba6ce01195102aa6341efa66b", "score": "0.6288631", "text": "def test_dataframe_loaded(self):\n assert isinstance(self.predictor.dataset, pandas.DataFrame)", "title": "" }, { "docid": "f8a48c1b4560ffb5bf3b2e741c6201f0", "score": "0.6203429", "text": "def test_load_data(dataset):\n df = load_dataset(dataset)\n assert type(df) == DataFrame", "title": "" }, { "docid": "bf20e31b4db9f6eb64d9338317f710f7", "score": "0.61996394", "text": "def assert_frame_equal(self, expected_df: Union[str, pd.DataFrame], actual_df: pd.DataFrame\n , compare_just_head: bool = False):\n if isinstance(expected_df, str):\n expected_df = self.get_test_data(expected_df, actual_df.index.name)\n if compare_just_head:\n expected_df = expected_df.head(actual_df.shape[0])\n\n dtypes = actual_df.dtypes.to_dict()\n for column, expected_type in expected_df.dtypes.to_dict().items():\n if expected_type != dtypes[column]:\n actual_df[column] = actual_df[column].astype(expected_type)\n pd.testing.assert_frame_equal(expected_df, actual_df)", "title": "" }, { "docid": "6de11062ee5631052f6723eaa7975858", "score": "0.6185821", "text": "def test_create_dataframe(df_test, columns):\n df_col = list(df_test)\n # check that the dataframe contains only specified columns\n if df_col != columns:\n return False\n # check that there are at least 10 rows in the dataframe\n if df_test.shape[0] < 10:\n return False\n # check that the values in each column have the same python type\n for col in df_col:\n types = []\n for element in list(df_test[col]):\n types.append(type(element))\n if len(set(list(types))) == 1:\n continue\n else:\n return False", "title": "" }, { "docid": "d1841fafcc8b7a70c2866f05379bffff", "score": "0.6141549", "text": "def __init__(\n self, *, expected: Tuple[str, ...], extra_cols_check: bool = True\n ):\n if len(expected) == 0:\n raise ValueError(\n \"You must define what columns you expect to find in the \"\n f\"DataFrame in order to use {self.__class__.__name__}.\"\n )\n\n self._expected = expected\n 
self._extra_cols_check = extra_cols_check\n super().__init__()", "title": "" }, { "docid": "b6c05298ea86ad4c3ce824fbf7abb41f", "score": "0.599524", "text": "def test_dataframe(self):\n df = self.a.evolve(order=1)\n print \"This is length of dataframe\", len(df)\n assert len(df)-1 == 10000 # subrtact 1 for the title row", "title": "" }, { "docid": "d72329d4874c216bf26aa3d3c92d18cb", "score": "0.5992019", "text": "def test_simple_df_retrieval(self):\n df_in = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})\n self.dfc.store('test_table', df_in)\n self.assertEqual(['test_table'], self.dfc.get_table_names())\n\n df_out = self.dfc.get('test_table')\n pd.testing.assert_frame_equal(df_in, df_out)\n\n items = list(self.dfc.items())\n self.assertEqual(1, len(items))\n\n table_name, df_out = items[0]\n self.assertEqual(\"test_table\", table_name)\n pd.testing.assert_frame_equal(df_in, df_out)", "title": "" }, { "docid": "cb9d53e8173b3ab9a57e991e93c04730", "score": "0.59414893", "text": "def test_dataframe(self):\n df = self.a.evolve(order=4)\n print \"This is length of dataframe\", len(df)\n assert len(df)-1 == 10000 # subrtact 1 for the title row", "title": "" }, { "docid": "a2da82010e1edd77c3657fa8d585b993", "score": "0.59348536", "text": "def test_table_metadata(self):\n in1_df = pd.DataFrame({\"a\": [1.0, 2.0, 3.0]})\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n in1_df.spotfire_table_metadata = {'bravo': ['The second letter of the phonetic alphabet.']}\n out_md_df = pd.DataFrame(in1_df.spotfire_table_metadata)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n out_md_df.spotfire_table_metadata = {'a': ['Alpha']}\n\n def expected():\n # pylint: disable=no-else-return\n if sys.version_info.major == 3 and sys.version_info.minor < 8:\n return \"\"\"\nStandard error:\n<string>:3: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access\n\"\"\"\n else:\n return \"\"\"\nStandard error:\n<data_function>:3: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access\n\"\"\"\n\n self._run_analytic(\"\"\"import pandas as pd\nout_md = pd.DataFrame(in1.spotfire_table_metadata)\nout_md.spotfire_table_metadata = {'a': ['Alpha']}\"\"\", {\"in1\": in1_df}, {\"out_md\": out_md_df}, True, expected)", "title": "" }, { "docid": "6fae969bf64207405c01f947bcaa9230", "score": "0.59136724", "text": "def expected_df_1():\n\n df = pd.DataFrame(\n {\"a\": [1.1, 2.4, 3.9, 5.6, 7.5, 9.6], \"b\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]}\n )\n\n return df", "title": "" }, { "docid": "ea31c26ccc7fb1aaeb81b62514af8cca", "score": "0.5892942", "text": "def expected_df_2():\n\n df = pd.DataFrame(\n {\"a\": [1.1, 2.4, 3, 4, 5, 6], \"b\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]}\n )\n\n return df", "title": "" }, { "docid": "5fce07717b2e61553dfe52490a13f61b", "score": "0.5884576", "text": "def __init__(self, pandas_obj):\n self._df = pandas_obj", "title": "" }, { "docid": "40780cc0eb0d7a8d448d7197bb4fc2a3", "score": "0.58802086", "text": "def test_function_call_with_pandas_data():\n try:\n import pandas as pd\n except ImportError:\n raise SkipTest(\"Pandas not installed\")\n\n data = pd.DataFrame({\"a\": [1, 2], \"b\": [8, 9], \"w\": [\"NOT\", \"NOT\"]})\n\n for func in all_funcs:\n assert_equal(func(None, \"a\", \"b\", data=data),\n \"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b\")\n 
assert_equal(func(None, x=\"a\", y=\"b\", data=data),\n \"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b\")\n assert_equal(func(None, \"a\", \"b\", label=\"\", data=data),\n \"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: \")\n assert_equal(func(None, \"a\", \"b\", label=\"text\", data=data),\n \"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text\")\n assert_equal(func(None, x=\"a\", y=\"b\", label=\"\", data=data),\n \"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: \")\n assert_equal(func(None, x=\"a\", y=\"b\", label=\"text\", data=data),\n \"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text\")", "title": "" }, { "docid": "f402e2915f6a6409f8ecb2f566f35ac8", "score": "0.5864293", "text": "def test_get_df(mssql_connector, mssql_datasource):\n datasource = mssql_datasource(\n query='SELECT Name, CountryCode, Population ' 'FROM City WHERE ID BETWEEN 1 AND 3'\n )\n expected = pd.DataFrame(\n {'Name': ['Kabul', 'Qandahar', 'Herat'], 'Population': [1780000, 237500, 186800]}\n )\n expected['CountryCode'] = 'AFG'\n expected = expected[['Name', 'CountryCode', 'Population']]\n\n # LIMIT 2 is not possible for MSSQL\n res = mssql_connector.get_df(datasource)\n res['Name'] = res['Name'].str.rstrip()\n assert res.equals(expected)", "title": "" }, { "docid": "7ac2c086d544e4f42b6e4a0188dba59b", "score": "0.5813557", "text": "def create_testdata(self):\n # create a dataframe\n x= pd.DataFrame({'A' : ['a', 'a', 'a', 'c'], 'B' : ['0', '1', '1', '4'] }, dtype=object)\n xnew= pd.DataFrame({'A' : ['a', 'a', 'd', 'd'], 'B' : ['1', '2', '2', '4'] }, dtype=object)\n return x, xnew", "title": "" }, { "docid": "40fd928b121acf146890edb10fcffc18", "score": "0.577378", "text": "def test_type(self):\n self.assertTrue(isinstance(LambdataTest.df, pd.DataFrame))\n self.assertTrue(dft.total_nul(df=LambdataTest.df), str)", "title": "" }, { "docid": "73ccb4fc49dd2cc3d06f9d37ad77ae53", "score": "0.5756567", "text": "def assertFrameEqual(df1, df2, **kwds ):\n\n from pandas.util.testing import assert_frame_equal\n return assert_frame_equal(df1.sort_index(axis=1), df2.sort_index(axis=1), check_names=True, **kwds)", "title": "" }, { "docid": "73ccb4fc49dd2cc3d06f9d37ad77ae53", "score": "0.5756567", "text": "def assertFrameEqual(df1, df2, **kwds ):\n\n from pandas.util.testing import assert_frame_equal\n return assert_frame_equal(df1.sort_index(axis=1), df2.sort_index(axis=1), check_names=True, **kwds)", "title": "" }, { "docid": "0c8c215aa1671a5e644b43f089313acd", "score": "0.5721781", "text": "def test_process_dataframe(self):\n preprocessor_simple = Preprocessor(self.simple_dataframe, self.config,\n '_INFO_')\n preprocessor_simple.process_dataframe()\n self.assertEqual(preprocessor_simple.df['_INFO_'][0], \"\")\n self.assertEqual(preprocessor_simple.df['_INFO_'][1],\n \"we have an error that contains USEFUL_INFORMATION\")\n self.assertEqual(\n preprocessor_simple.df['_INFO_'][2],\n \"This line should be kept since it has an error that is USEFUL_INFORMATION\"\n )", "title": "" }, { "docid": "7ff408e15ff730ccf0c6af14f16ab196", "score": "0.5710437", "text": "def verify_series(\n self,\n ts: Optional[Timeseries],\n expected_name: str,\n expected_raw_vals: Dict[int, float],\n expected_df_vals: Iterable[Tuple[str, float]],\n ) -> None:\n self.assertIsNotNone(ts)\n self.assertEqual(ts.get_name(), expected_name)\n raw_vals = ts.get_raw_vals()\n for key in expected_raw_vals:\n self.assertEqual(expected_raw_vals[key], raw_vals[key])\n df = ts.get_dataframe()\n self.assertIsNotNone(df)\n for i, val in enumerate(df.values):\n 
self.assertEqual(str(val[0]), expected_df_vals[i][0])\n self.assertEqual(val[1], expected_df_vals[i][1])", "title": "" }, { "docid": "cb54c44bedebe06245eafdcee4a4b900", "score": "0.56959784", "text": "def check_if_dataframes_are_equal(df1, df2):\n from pandas.testing import assert_frame_equal\n\n try:\n assert_frame_equal(df1, df2)\n return True\n except AssertionError:\n return False", "title": "" }, { "docid": "b34f39200db1b57e197cca7de9c37765", "score": "0.5689894", "text": "def assert_dataframe_equal(left, right, columns_order=True):\n assert isinstance(left, DataFrame), \"left is not an DataFrame\"\n assert isinstance(right, DataFrame), \"right is not an DataFrame\"\n\n if columns_order:\n assert left.schema == right.schema, \"schema are not equal\"\n else:\n assert left.schema == right.select(left.columns).schema, \"schema are not equal\"\n\n assert left.count() == left.join(right, left.columns, 'inner').count(), \"data is not equal\"", "title": "" }, { "docid": "0b4006a21e9acdabfa7c28f9f5d642bf", "score": "0.56768376", "text": "def test_returns_dataframe(self, input_df):\n out_df = add_columnwise(input_df, 'a', 'b')\n assert isinstance(out_df, DataFrame)", "title": "" }, { "docid": "b9a3ccdc2207a04780812faee955e1dd", "score": "0.56483966", "text": "def check_new_df():\n new_df = pull_modis()\n\n #compares new_df to existing df, if equal it passes\n try:\n if assert_frame_equal(df, new_df):\n pass\n else:\n pass\n\n except:\n df = new_df.copy()\n return df", "title": "" }, { "docid": "9a68445ef463c12bb128a3230b225294", "score": "0.5631713", "text": "def test_data_getter(drop_rows: bool, dataframe: pd.DataFrame) -> None:\n grid = DataGrid(dataframe)\n if drop_rows:\n grid.data = grid.data.drop([\"One\", \"Two\", \"Three\"]) # Drop all rows\n grid_data = grid.data # calling data getter\n assert list(grid_data.columns) == [\"A\", \"B\"]\n assert grid_data.index.name == \"key\"\n assert list(grid_data.values) == []\n else:\n grid_data = grid.data\n assert grid_data.equals(dataframe)", "title": "" }, { "docid": "f7fe0c4dbfcd906eb7be086aa4f87710", "score": "0.56271315", "text": "def test_expected_type(self):\n for col in DF.columns:\n if col != 'quality':\n self.assertEqual(str(DF[col].dtypes), 'float64')\n elif col == 'quality':\n self.assertEqual(str(DF[col].dtypes), 'int64')", "title": "" }, { "docid": "9c29447ac8aee9275bdef968409426b1", "score": "0.56183356", "text": "def test_clean_valid():\n true_data_path = project_path + \"/\" + test_conf['clean_data']\n true_df = pd.read_csv(true_data_path, dtype=str)\n\n test_df = clean(raw_df, **func_conf['clean'])\n test_df = test_df.astype(str)\n\n assert true_df.equals(test_df)", "title": "" }, { "docid": "39a655bde6b6355fecb93b53576c28ec", "score": "0.5599156", "text": "def __init__(self,\n df_left_data: pd.DataFrame,\n df_right_data: pd.DataFrame,\n verbose: bool = True,\n minimal: bool = False):\n is_dataframe_left_data = isinstance(df_left_data, pd.DataFrame)\n is_dataframe_right_data = isinstance(df_right_data, pd.DataFrame)\n\n if not (is_dataframe_left_data and is_dataframe_right_data):\n raise TypeError(\n 'Both `left_data` and `right_data` '\n 'needed in pandas.DataFrame type, '\n f'current types are {type(df_left_data)} '\n f'and {type(df_right_data)}'\n )\n\n set_right_data_column_names = set(df_right_data.columns)\n set_left_data_column_names = set(df_left_data.columns)\n\n if set_right_data_column_names != set_left_data_column_names:\n column_name_right_not_in_left = (\n set_right_data_column_names\n 
.difference(set_left_data_column_names)\n )\n\n column_name_left_not_in_right = (\n set_left_data_column_names\n .difference(set_right_data_column_names)\n )\n\n raise ColumnsNotMatchException(\n 'Different columns for left and right dataframes\\n\\n'\n f'Columns in right dataframe but not in left one: '\n f'{\", \".join(column_name_right_not_in_left) or \"None\"}\\n'\n f'Columns in left dataframe but not in right one: '\n f'{\", \".join(column_name_left_not_in_right) or \"None\"}'\n )\n\n self.df_left_data = df_left_data\n self.df_right_data = df_right_data\n self.verbose = verbose\n self.minimal = minimal\n\n self.cat_features = (df_left_data\n .select_dtypes(include=['category', 'object'])\n .columns\n .tolist())\n\n self.num_features = (df_left_data\n .select_dtypes(include='number')\n .columns\n .tolist())\n\n self.ml_discriminate_model = None\n self.auc_discriminate_model = None\n self.drift = False\n self.interpretable_drift = None", "title": "" }, { "docid": "3208470c6db98ccf4a2ba84aeb871418", "score": "0.5592523", "text": "def __init__(self, options: Dict, df, dbg=False):\n self.options = options.copy()\n self.dbg = dbg\n\n if isinstance(df, pd.DataFrame) and all(df.shape) > 0:\n self.df = df.copy()\n else:\n raise ValueError(\"DF must be pandas dataframe, with non-zero shape\")\n\n self.test_config()", "title": "" }, { "docid": "472eb7ca94612b5d90146c0f8e442265", "score": "0.5588632", "text": "def test_to_pandas_index(self, use_IndexedTable):\n import pandas as pd\n\n class IndexedTable(table.QTable):\n \"\"\"Always index the first column\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.add_index(self.colnames[0])\n\n row_index = pd.RangeIndex(0, 2, 1)\n tm_index = pd.DatetimeIndex(\n [\"1998-01-01\", \"2002-01-01\"], dtype=\"datetime64[ns]\", name=\"tm\", freq=None\n )\n\n tm = Time([1998, 2002], format=\"jyear\")\n x = [1, 2]\n table_cls = IndexedTable if use_IndexedTable else table.QTable\n t = table_cls([tm, x], names=[\"tm\", \"x\"])\n tp = t.to_pandas()\n\n if not use_IndexedTable:\n assert np.all(tp.index == row_index)\n tp = t.to_pandas(index=\"tm\")\n assert np.all(tp.index == tm_index)\n t.add_index(\"tm\")\n\n tp = t.to_pandas()\n assert np.all(tp.index == tm_index)\n # Make sure writing to pandas didn't hack the original table\n assert t[\"tm\"].info.indices\n\n tp = t.to_pandas(index=True)\n assert np.all(tp.index == tm_index)\n\n tp = t.to_pandas(index=False)\n assert np.all(tp.index == row_index)\n\n with pytest.raises(ValueError) as err:\n t.to_pandas(index=\"not a column\")\n assert \"index must be None, False\" in str(err.value)", "title": "" }, { "docid": "b47b1dd5a0147a2f292bf9a741e15ee1", "score": "0.5574741", "text": "def test_evaluation_get_df_defensive_copy(self):\n df = pd.DataFrame([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}])\n result = Evaluation(df).get_df()\n\n result[\"a\"][0] = 5 # mutate result\n\n self.assertFalse(result.equals(df))", "title": "" }, { "docid": "8a5a4e96136763810f7263433cf21bd7", "score": "0.55651283", "text": "def test_data_type():\n\n puts1 = [10, 20, 30, 40, 50]\n puts2 = {'apples': 10, 'bananas': 20, 'oranges': 30}\n\n gets1 = [{'col': 'data', 'idx': 0, 'val': 10},\n {'col': 'data', 'idx': 1, 'val': 20},\n {'col': 'data', 'idx': 2, 'val': 30},\n {'col': 'data', 'idx': 3, 'val': 40},\n {'col': 'data', 'idx': 4, 'val': 50}]\n gets2 = [{'col': 'data', 'idx': 'apples', 'val': 10},\n {'col': 'data', 'idx': 'bananas', 'val': 20},\n {'col': 'data', 'idx': 'oranges', 'val': 30}]\n\n for ins, 
outs in zip([puts1, puts2], [gets1, gets2]):\n test = data_type(ins)\n nt.assert_list_equal(test.values, outs)\n\n #From Iters\n puts = {'x': [1, 2, 3], 'y': [10, 20, 30], 'z': [40, 50, 60]}\n gets = [{'col': 'y', 'idx': 1, 'val': 10},\n {'col': 'y', 'idx': 2, 'val': 20},\n {'col': 'y', 'idx': 3, 'val': 30},\n {'col': 'z', 'idx': 1, 'val': 40},\n {'col': 'z', 'idx': 2, 'val': 50},\n {'col': 'z', 'idx': 3, 'val': 60}]\n\n test = data_type(puts, iter_idx='x')\n nt.assert_list_equal(test.values, gets)\n\n #Pandas\n df = pd.DataFrame({'one': [1, 2, 3], 'two': [4, 5, 6]})\n series = pd.Series([1, 2, 3], name='test')\n gets1 = [{'col': 'one', 'idx': 0, 'val': 1},\n {'col': 'two', 'idx': 0, 'val': 4},\n {'col': 'one', 'idx': 1, 'val': 2},\n {'col': 'two', 'idx': 1, 'val': 5},\n {'col': 'one', 'idx': 2, 'val': 3},\n {'col': 'two', 'idx': 2, 'val': 6}]\n gets2 = [{'col': 'test', 'idx': 0, 'val': 1},\n {'col': 'test', 'idx': 1, 'val': 2},\n {'col': 'test', 'idx': 2, 'val': 3}]\n test_df = data_type(df)\n test_series = data_type(series)\n nt.assert_list_equal(test_df.values, gets1)\n nt.assert_list_equal(test_series.values, gets2)\n\n #Bad type\n class BadType(object):\n \"\"\"Bad data type\"\"\"\n pass\n\n test = BadType()\n nt.assert_raises(ValueError, data_type, test)", "title": "" }, { "docid": "e6a9f25ac6f797eb822005c971bade19", "score": "0.55621123", "text": "def is_pandas_dataframe(data):\n\n return isinstance(data, pd.DataFrame)", "title": "" }, { "docid": "1fe0581a064017a2b8404fd0148b7bcd", "score": "0.5554753", "text": "def test_inspection(self):\n # Check inspection methods\n self._meta.get_name()\n self._meta.get_uuid()\n # Check dataframe\n self.assertIsInstance(self._meta.export_results(), pd.DataFrame)\n # Check Params\n self.assertIsInstance(self._meta.export_params(), dict)", "title": "" }, { "docid": "082817d2fbd92ceb05cf55c9fbb1dc54", "score": "0.5551665", "text": "def test_sas7bdat_international(self):\n # in addition, this works only in python 3\n if sys.version_info[0]>2:\n df, meta = pyreadstat.read_sas7bdat(os.path.join(self.international_data_folder, \"sample.sas7bdat\"))\n self.assertTrue(df.equals(self.df_pandas))\n self.assertTrue(meta.number_columns == len(self.df_pandas.columns))\n self.assertTrue(meta.number_rows == len(self.df_pandas))", "title": "" }, { "docid": "57823862eef08fc1cc060b97123fe1b3", "score": "0.5544558", "text": "def is_pandas_object(data):\n\n return is_pandas_series(data) or is_pandas_dataframe(data)", "title": "" }, { "docid": "aa595e75a46ed7d54d5883a3cc594c49", "score": "0.5541494", "text": "def test_dataframe(self):\n with dw.open(self.d7dname) as d7d:\n self.assertFalse(d7d.closed, 'd7d did not open')\n self.assertEqual((18771, 20), d7d.dataframe().shape)", "title": "" }, { "docid": "6a6ebdfa77758523f8b2af2e8df2ae96", "score": "0.5532071", "text": "def test_get_data(self):\n df = self.get_data()\n for c in self.columns:\n self.assertTrue(\n c in df.columns\n )\n self.assertTrue(\n self.county_col_name in df.columns\n )", "title": "" }, { "docid": "184054968833c1af38a5000a62063acb", "score": "0.55083656", "text": "def apply_assert_equal(df):\n\n return df.apply(lambda r: assert_equal_csv(r.fp_bq, r.fp_sf), axis=1)", "title": "" }, { "docid": "6ee8268470f516383e81cbdee8d5d455", "score": "0.55016464", "text": "def test_df_names(attr_timeseries: str = ATTR_TIMESERIES) -> None:\r\n df_out, _ = _get_df()\r\n assert df_out.columns[0] == ATTR_ID and df_out.columns[1] == attr_timeseries", "title": "" }, { "docid": "43c933c2b642d91631173d8489c51983", 
"score": "0.5495057", "text": "def check_load_table_from_dataframe(dataframe, destination, job_config):\n expected_row_count = _get_csv_row_count()\n self.assertIsInstance(dataframe, DataFrame)\n self.assertEqual(expected_row_count, len(dataframe))\n self.assertEqual(destination, self.cleaning_rule.lookup_table)\n self.assertEqual(job_config.write_disposition,\n bigquery.WriteDisposition.WRITE_TRUNCATE)\n # return a mock for the job result\n return mock.MagicMock()", "title": "" }, { "docid": "cde2e6bd242e880d17745e654a1357e8", "score": "0.548522", "text": "def test_simple_1(self):\n for table_cls in (table.Table, table.QTable):\n col_c_class = u.Quantity if table_cls is table.QTable else table.Column\n for cpy in (False, True):\n st = self.SimpleTable()\n # Test putting in a non-native kwarg `extra_meta` to Table initializer\n t = table_cls(st, copy=cpy, extra_meta=\"extra!\")\n assert t.colnames == [\"a\", \"b\", \"c\"]\n assert t.meta == {\"extra_meta\": \"extra!\"}\n assert np.all(t[\"a\"] == st.columns[0])\n assert np.all(t[\"b\"] == st.columns[1])\n vals = t[\"c\"].value if table_cls is table.QTable else t[\"c\"]\n assert np.all(st.columns[2].value == vals)\n\n assert isinstance(t[\"a\"], table.Column)\n assert isinstance(t[\"b\"], table.MaskedColumn)\n assert isinstance(t[\"c\"], col_c_class)\n assert t[\"c\"].unit is u.m\n assert type(t) is table_cls\n\n # Copy being respected?\n t[\"a\"][0] = 10\n assert st.columns[0][0] == 1 if cpy else 10", "title": "" }, { "docid": "64d031a6de924e4dc119693bb96ca467", "score": "0.54688334", "text": "def test_column_names(self):\n for col in COLS:\n self.assertIn(col, DF.columns, \"DataFrame doesn't have the expected column names.\")", "title": "" }, { "docid": "5ba779866f40fec9647c40771409016e", "score": "0.54377276", "text": "def test_get_data_set_list():\n df = pybea.get_data_set_list(UserID=USER_ID)\n nose.tools.assert_is_instance(df, pd.DataFrame)\n nose.tools.assert_equals(df.shape, (12, 2))", "title": "" }, { "docid": "543cfa04453b562233ae13c756649c38", "score": "0.5435725", "text": "def test_weekly_dataframe(self):\n _df = Transactions.create_df(self.spark)\n _correct_weekly_df = TestDataFrames.CORRECT_WEEKLY_DF\n _incorrect_weekly_df = TestDataFrames.INCORRECT_WEEKLY_DF\n\n _df = RegularSpacedTransactions.transform_reguarly_spaced_transactions(_df, 'weekly', 7, 7)\n\n print('Assert against a correctly defined weekly schema named dataframe')\n self.assertEqual(_df.schema.names, _correct_weekly_df.schema.names)\n self.assertEqual(_df.dtypes, _correct_weekly_df.dtypes)\n\n print('Assert against a correctly defines weekly schema names in dataframe')\n self.assertEqual(_df.schema.names, _incorrect_weekly_df.schema.names)\n\n print('Assert that it had correct weekly schema names but wrong datatypes')\n self.assertNotEqual(_df.dtypes, _incorrect_weekly_df.dtypes)\n\n for col in _correct_weekly_df.schema.names:\n print('Assert that records in column: ' + col + ' is matching weekly dataframe')\n _df_list = get_records(_df, col)\n _df_correct_list = get_records(_correct_weekly_df, col)\n _df_incorrect_list = get_records(_incorrect_weekly_df, col)\n # with the given dataset they should be 0\n self.assertEqual(len(_df_list), 0)\n self.assertEqual(len(_df_correct_list), 0)\n self.assertEqual(len(_df_incorrect_list), 0)\n self.assertEqual(len(_df_list), len(_df_correct_list))\n self.assertEqual(len(_df_list), len(_df_incorrect_list))", "title": "" }, { "docid": "1d015033a6e47fc202771bad5b98d29e", "score": "0.5429899", "text": "def 
test_fitData(self):\n self.test_df[\"dummy_str\"] = [\"hi\" for i in range(self.test_df.shape[0])]\n self.assertRaises(ValueError, self.engine.fitData, self.test_df)", "title": "" }, { "docid": "9f172b1e99845ae3a896833928659e5b", "score": "0.5428505", "text": "def verify_dataframe_equality(df1, df2):\n if df1.equals(df2):\n print('...verified.')\n else:\n print(df1.shape)\n print(df2.shape)\n print(\"Dataframes not equal.\")\n return False\n\n return True", "title": "" }, { "docid": "b93b8e999655607bd07dde992d0c8749", "score": "0.5427376", "text": "def test_same_type(self):\n for i in range(len(list(DATA_FRAME.dtypes))):\n for j in range(list(DATA_FRAME.count())[i]):\n self.assertTrue(isinstance(DATA_FRAME.iloc[j, i], type(DATA_FRAME.iloc[0, i])))", "title": "" }, { "docid": "8c8f470e4e1d1347d44df3c6b297cafe", "score": "0.5426686", "text": "def test_to_pandas():\n X, y = BaseTransformer._prepare_input(X_bin_array, y_bin_array)\n assert isinstance(X, pd.DataFrame) and isinstance(y, pd.Series)", "title": "" }, { "docid": "ef7e84ae50eee9d948292bb0d609ba89", "score": "0.5425242", "text": "def _ensure_data_frame(obj, name):\r\n #we accept pandas Dataframe, and also dictionaries, lists, tuples\r\n #we'll just convert them to Pandas Dataframe\r\n if isinstance(obj, pd.DataFrame):\r\n df = obj\r\n elif isinstance(obj, (tuple, list)) :\r\n #tuple and list case\r\n if len(obj)==0:\r\n return pd.Dataframe()\r\n\r\n firstrow = obj[0]\r\n\r\n if isinstance(firstrow, (tuple, list)):\r\n #multiple-columns\r\n colnames = [\"c%d\" % i for i in range(len(firstrow))]\r\n df = pd.DataFrame(obj, columns=colnames)\r\n else:\r\n #mono-column\r\n df = pd.DataFrame(obj, columns=[\"c0\"])\r\n\r\n if not isinstance(df, pd.DataFrame) :\r\n raise Exception(\"%s is not a Dataframe, tuple, list, nor dictionary\" % name)\r\n\r\n for col in df:\r\n if df[col].dtype==np.int64:\r\n df[col] = df[col].astype(np.float)\r\n\r\n return df", "title": "" }, { "docid": "e24c5bcc088cf06004e5b9795398517c", "score": "0.54250133", "text": "def setUp(self):\n utils.add_data_frame_equality_func(self)\n self.run_output = textwrap.dedent(\n \"\"\"\\\n language,filename,blank,comment,code,\"http://cloc.sourceforge.net\"\n Python,internals.py,55,50,130\n Python,tests.py,29,92,109\n Python,setup.py,4,2,30\n C#,.NETFramework,Version=v4.7.2.AssemblyAttributes.cs,0,1,3\n \"\"\"\n )\n cmi = \"codemetrics.internals.\"\n self.run_patcher = mock.patch(\n cmi + \"run\", autospec=True, return_value=self.run_output\n )\n self.check_patcher = mock.patch(cmi + \"check_run_in_root\", autospec=True)\n self.run_ = self.run_patcher.start()\n self.check_run_from_root = self.check_patcher.start()", "title": "" }, { "docid": "c99a3d93032a1810a98b7524a8e3aad0", "score": "0.5417636", "text": "def test_load(self):\n df = self._load()\n\n assert df.index.names == ['date', 'asset'], \\\n \"df.index.names should be ['date', 'asset'] \"\n assert not any(df.index.duplicated()), \\\n \"There are duplicate indexes in df, you need handle them up.\"\n assert df.index.is_lexsorted(), \\\n \"df.index must be sorted, try using df.sort_index(level=0, inplace=True)\"\n assert str(df.index.levels[0].tzinfo) == 'UTC', \\\n \"df.index.date must be UTC timezone.\"\n assert df.index.levels[-1].ordered, \\\n \"df.index.asset must ordered categorical.\"\n assert self.time_category in df, \\\n \"You must create a time_category column, convert time to category id\"\n\n if self.adjustments:\n assert all(x in df for x in self.adjustments), \\\n \"Adjustments columns `{}` not 
found.\".format(self.adjustments)\n assert all(x in df for x in self.adjustment_multipliers), \\\n \"Adjustment multipliers columns `{}` not found.\".format(self.adjustment_multipliers)\n assert not any(df[self.adjustments[0]].isna()), \\\n \"There is nan value in ex-dividend column, should be filled with 0.\"\n assert not any(df[self.adjustments[1]].isna()), \\\n \"There is nan value in split_ratio column, should be filled with 1.\"\n\n return df", "title": "" }, { "docid": "b1c603533c7662e60b03a978f61ac360", "score": "0.54168665", "text": "def assert_unordered_frame_equal(left: pd.DataFrame, right: pd.DataFrame, **kwargs) -> None:\n assert_frame_equal(\n left.sort_index(axis=1), right.sort_index(axis=1), check_names=True, **kwargs # type: ignore # check_names OK\n )", "title": "" }, { "docid": "69f839210a99c2442c717d60466e6a0a", "score": "0.5386229", "text": "def __init__(self, options: Dict, df, dbg=False):\n self.options = options.copy()\n self.dbg = dbg\n\n if isinstance(df, pd.DataFrame) and all(df.shape) > 0:\n self.df = df.copy()\n else:\n raise ValueError(\"DF must be pandas dataframe, with non-zero shape\")\n\n self.test_config()\n self.tests = []\n for itm in [\"first\", \"second\"]:\n if \"type\" in self.options[itm].keys() and\\\n self.options[itm][\"type\"].upper() == \"NUMERIC\":\n self.tests.append(fixed_income_filter_numeric(self.options[itm], df, self.dbg))\n else:\n self.tests.append(fixed_income_filter(self.options[itm], df, self.dbg))", "title": "" }, { "docid": "2f454d7d755d30d41354d332971a7548", "score": "0.53802174", "text": "def check_is_dataframe(X):\n if not isinstance(X, pd.DataFrame):\n raise TypeError(\"Input must be an instance of pandas.DataFrame.\")", "title": "" }, { "docid": "adfe9316dbe800740d4bfda49f2bed2b", "score": "0.5380199", "text": "def test_plot_series(self):\n # Check assertion when no datetime index.\n columns = ['name', 'age', 'country']\n df_no_date = pd.DataFrame([['John', 24, 'China'],\n ['Mary', 20, 'China'],\n ['Jane', 25, 'Switzerland'],\n ['James', 28, 'China']],\n columns=columns)\n self.assertRaises(AssertionError, bplt.plot_series, df_no_date, 'age')\n\n # Check assertion when column doesn't exist.\n df_date = pd.DataFrame({'datetime': pd.date_range('2019-02-01', periods=50, freq='S'),\n 'values': np.random.randn(50)}).set_index('datetime')\n self.assertRaises(AssertionError, bplt.plot_series, df_date, 'col')", "title": "" }, { "docid": "f0b729b001c4f1ef6c967beb25d3b8d4", "score": "0.53695124", "text": "def df_test():\n df = pd.read_csv(\"../datafiles_for_tests/testing_set_test_model.csv\")\n return df", "title": "" }, { "docid": "2c7181c1d83f22e066a9ce6bbbd1bcaf", "score": "0.5366751", "text": "def test_two_dfc_do_not_collide(self):\n dfc1 = DataFrameCollection(storage_path=self.temp_dir)\n dfc2 = DataFrameCollection(storage_path=self.temp_dir)\n\n dfc1.store(\"test\", pd.DataFrame({\"t1\": [1]}))\n dfc2.store(\"test\", pd.DataFrame({\"t2\": [1]}))\n\n t1_out = dfc1.get(\"test\")\n t2_out = dfc2.get(\"test\")\n self.assertEqual([\"t1\"], t1_out.columns)\n self.assertEqual([\"t2\"], t2_out.columns)", "title": "" }, { "docid": "74ff4be37f58f958baea4a6b8566e05c", "score": "0.5359104", "text": "def test_to_dict(self):\n first = pd.DataFrame({'x': [1]})\n second = pd.DataFrame({'y': [2]})\n\n self.dfc.store(\"first\", first)\n self.dfc.store(\"second\", second)\n\n d = self.dfc.to_dict()\n self.assertEqual([\"first\", \"second\"], sorted(d))\n pd.testing.assert_frame_equal(first, d[\"first\"])\n pd.testing.assert_frame_equal(second, 
d[\"second\"])", "title": "" }, { "docid": "a5e4ed5af6e0fe446e0ae94b17b2e0ef", "score": "0.53573895", "text": "def func_create_dataframe(cls, storage):\n if storage == 'Pandas':\n return pd.DataFrame", "title": "" }, { "docid": "8a2a55a0577fc9be02cb0252042262bf", "score": "0.53497565", "text": "def test_stdErr1(self):\n\n # Create an SQLContext\n conf = SparkConf().setMaster(\"local[*]\").setAppName(\"StandardErrorTest\")\\\n .set(\"spark.jars\", \"../../../target/sparkts-0.4.0-SNAPSHOT-jar-with-dependencies.jar\")\n sc = SparkContext(conf=conf)\n sqlContext = SQLContext(sc)\n\n # Input Data\n inData = \"../../../src/test/resources/sml/inputs/StandardErrorDataIn.json\"\n inDf = sqlContext.read.json(inData)\n print(\"Input Data\")\n inDf.show()\n\n # Expected data\n expectedData = \"../../../src/test/resources/sml/outputs/StandardErrorExpected.json\"\n expectedDf = sqlContext.read.json(expectedData).select(\"ref\", \"xColumn\", \"yColumn\", \"zColumn\", \"stdError\")\n print(\"Expected Data\")\n expectedDf.show()\n\n # Create the class object\n stdErrorObj = standardError(inDf)\n\n # Calling the method to get the resulted dataframe\n resultDf = stdErrorObj.stdErr1(df=inDf, xCol=\"xColumn\", yCol=\"yColumn\", zCol=\"zColumn\", outputCol=\"stdError\")\n print(\"Result Data\")\n resultDf.show()\n\n # Assert they are equal\n self.assertEqual(expectedDf.collect(), resultDf.collect())", "title": "" }, { "docid": "4aee2b14a426e510bdb8be52ae8ec48d", "score": "0.53461015", "text": "def test_columns(self):\n expected = [\n 'Environment.name',\n 'Environment.account_number',\n 'GitRepo.path',\n 'GitRepo.merge_base_name',\n 'GitUrl.url'\n ]\n r = GitReport(self.params)\n for i, col in enumerate(r.columns()):\n self.assertEquals(expected[i], col)", "title": "" }, { "docid": "65cde668bca52366bfc750e04a70beed", "score": "0.534515", "text": "def testDfAttributeDate(self):\n self.assertSetEqual(set(TBRMMData(self.df, self.response).df.columns),\n set(self.df.date))", "title": "" }, { "docid": "0429dbfaa06199fa41a9462e02d2b952", "score": "0.5342546", "text": "def __init__(self, df):\n self.df = df", "title": "" }, { "docid": "e911c2ef90de466ed55ccff2f63c3e60", "score": "0.5330774", "text": "def setUpClass(cls) -> None:\n cls.df = pd.read_csv(PATH)", "title": "" }, { "docid": "11f8e4bfac3ef75fa057f236af895de6", "score": "0.53254867", "text": "def test_table_conflicts_are_prevented(self):\n test_df = pd.DataFrame({'x': [1, 2]})\n self.dfc.store(\"test\", test_df)\n self.assertRaises(dfc.TableExistsError, self.dfc.store, \"test\", pd.DataFrame())\n pd.testing.assert_frame_equal(test_df, self.dfc.get(\"test\"))", "title": "" }, { "docid": "507ed57d4a8414573c76351e118d39b9", "score": "0.5319372", "text": "def test_pandas_data_frame():\n try:\n from numpy.random import randint\n import pandas as pd\n except ImportError:\n raise SkipTest\n\n with closing(StringIO()) as our_file:\n tqdm.pandas(file=our_file, leave=True, ascii=True)\n df = pd.DataFrame(randint(0, 50, (100, 200)))\n\n def task_func(x):\n return x + 1\n\n # applymap\n res1 = df.progress_applymap(task_func)\n res2 = df.applymap(task_func)\n assert res1.equals(res2)\n\n # apply unhashable\n res1 = []\n df.progress_apply(res1.extend)\n assert len(res1) == df.size\n\n # apply\n for axis in [0, 1, 'index', 'columns']:\n res3 = df.progress_apply(task_func, axis=axis)\n res4 = df.apply(task_func, axis=axis)\n assert res3.equals(res4)\n\n our_file.seek(0)\n if our_file.read().count('100%') < 3:\n our_file.seek(0)\n raise 
AssertionError(\"\\nExpected:\\n{0}\\nIn:\\n{1}\\n\".format(\n '100% at least three times', our_file.read()))\n\n # apply_map, apply axis=0, apply axis=1\n expects = ['20000/20000', '200/200', '100/100']\n for exres in expects:\n our_file.seek(0)\n if our_file.getvalue().count(exres) < 1:\n our_file.seek(0)\n raise AssertionError(\n \"\\nExpected:\\n{0}\\nIn:\\n {1}\\n\".format(\n exres + \" at least once.\", our_file.read()))", "title": "" }, { "docid": "9202048139384fa753eb5ade17d7019f", "score": "0.531166", "text": "def test_pandas_isinstalled(self):\n\n import pandas as pd\n\n self.assertGreaterEqual(pd.__version__, '0.19.2')", "title": "" }, { "docid": "517c2f869801d3f3df092cdc99c854bb", "score": "0.53114736", "text": "def run_and_compare(\n fn,\n data,\n data2=None,\n force_lazy=True,\n force_arrow_execute=False,\n allow_subqueries=False,\n comparator=df_equals,\n **kwargs,\n):\n\n def run_modin(\n fn,\n data,\n data2,\n force_lazy,\n force_arrow_execute,\n allow_subqueries,\n constructor_kwargs,\n **kwargs,\n ):\n kwargs[\"df1\"] = pd.DataFrame(data, **constructor_kwargs)\n kwargs[\"df2\"] = pd.DataFrame(data2, **constructor_kwargs)\n kwargs[\"df\"] = kwargs[\"df1\"]\n\n if force_lazy:\n set_execution_mode(kwargs[\"df1\"], \"lazy\")\n set_execution_mode(kwargs[\"df2\"], \"lazy\")\n elif force_arrow_execute:\n set_execution_mode(kwargs[\"df1\"], \"arrow\")\n set_execution_mode(kwargs[\"df2\"], \"arrow\")\n\n exp_res = fn(lib=pd, **kwargs)\n\n if force_arrow_execute:\n set_execution_mode(exp_res, \"arrow\", allow_subqueries)\n elif force_lazy:\n set_execution_mode(exp_res, None, allow_subqueries)\n\n return exp_res\n\n constructor_kwargs = kwargs.pop(\"constructor_kwargs\", {})\n try:\n kwargs[\"df1\"] = pandas.DataFrame(data, **constructor_kwargs)\n kwargs[\"df2\"] = pandas.DataFrame(data2, **constructor_kwargs)\n kwargs[\"df\"] = kwargs[\"df1\"]\n ref_res = fn(lib=pandas, **kwargs)\n except Exception as e:\n with pytest.raises(type(e)):\n exp_res = run_modin(\n fn=fn,\n data=data,\n data2=data2,\n force_lazy=force_lazy,\n force_arrow_execute=force_arrow_execute,\n allow_subqueries=allow_subqueries,\n constructor_kwargs=constructor_kwargs,\n **kwargs,\n )\n _ = exp_res.index\n else:\n exp_res = run_modin(\n fn=fn,\n data=data,\n data2=data2,\n force_lazy=force_lazy,\n force_arrow_execute=force_arrow_execute,\n allow_subqueries=allow_subqueries,\n constructor_kwargs=constructor_kwargs,\n **kwargs,\n )\n comparator(ref_res, exp_res)", "title": "" }, { "docid": "55cc7c752e5c9abbdabeb1db04dfc843", "score": "0.5293021", "text": "def __init__(self, array_dict = None, units_dict = None, rows_list = None):\r\n if array_dict is None:\r\n array_dict = {}\r\n\r\n if units_dict is None:\r\n units_dict = {}\r\n\r\n if type(array_dict) != dict:\r\n raise dfException, \"Initialization argument must be a dictionary with values np arrays\"\r\n for value in array_dict.values():\r\n\t if type(value) != np.ndarray:\r\n raise dfException, \"Data frame requires numpy arrays as data types\"\r\n if array_dict == {}:\r\n self.nCols = 0\r\n self.nRows = 0\r\n \r\n\r\n else:\r\n #Check to make sure all of the items are the same size\r\n s = array_dict.values()[0].size\r\n for value in array_dict.values():\r\n if value.size != s:\r\n raise dfException, \"Array_dict must have numpy vectors all of the same length\"\r\n\r\n self.nCols = len(array_dict.keys())\r\n self.nRows = s\r\n\r\n \r\n self.data = array_dict.copy()\r\n self.colnames = array_dict.keys()\r\n\t\r\n\r\n for key in units_dict.keys():\r\n if key 
not in self.data.keys():\r\n raise dfException, \"Unit given for non-existent column\"\r\n\r\n for value in units_dict.values():\r\n if type(value) != str and value is not None:\r\n raise dfException, \"Units must be given as strings\"\r\n\r\n self.units = units_dict.copy()\r\n \r\n\t#Initialize row names, if given a row dictionary\r\n self.rows_dict = {}\r\n\tif rows_list is not None:\r\n\t if not isinstance(rows_list, list):\r\n raise dfException, \"An initialization rows list must be a list of strings\"\r\n if len(rows_list) != self.nRows:\r\n raise dfException, \"An initialization rows list must have the same number of row names as rows\"\r\n index = 0\r\n for name in rows_list:\r\n self.rows_dict[name] = index\r\n index += 1", "title": "" }, { "docid": "8163e81c9d20ee94a1e1e0398405602f", "score": "0.5291239", "text": "def test_mixed_non_string_col(self):\n\n test_file = os.path.join(test_root_path, 'data', 'parquet',\n 'mixed_datetime_data_col.parquet')\n parq_data = ParquetData(test_file)\n\n # assert str and not bytes\n self.assertIsInstance(parq_data.data['col2'][1], str)\n self.assertIsInstance(parq_data.data['col2'][3], str)\n\n # assert no 'b\"data\"' encapsulated, just 'data'\n self.assertNotIn('b\"', parq_data.data['col2'][1])\n self.assertNotIn(\"b'\", parq_data.data['col2'][1])\n self.assertNotIn('b\"', parq_data.data['col2'][3])\n self.assertNotIn(\"b'\", parq_data.data['col2'][3])", "title": "" }, { "docid": "bb6021e7f58d6cb810ee84711fa969ed", "score": "0.52876055", "text": "def __eq__(self, other):\n try:\n pd.testing.assert_frame_equal(\n self.property_df, other.property_df, check_less_precise=8)\n pd.testing.assert_frame_equal(\n self.dft_df, other.dft_df, check_less_precise=8)\n assert self.geometries == other.geometries\n return True\n except AssertionError:\n return False", "title": "" }, { "docid": "1cc2e381963dcae54e8470d6ba4c818f", "score": "0.5273434", "text": "def test_set_2d_pandas_data(self):\n\n test_inst = pysat.Instrument('pysat', 'testing2d', use_header=True)\n test_date = pysat.instruments.pysat_testing2d._test_dates['']['']\n test_inst.load(date=test_date)\n with warnings.catch_warnings(record=True) as war:\n test_inst['new_profiles'] = 2 * test_inst['profiles']\n\n warn_msgs = [\" \".join([\"Support for 2D pandas instrument\",\n \"data has been deprecated and will\",\n \"be removed in 3.2.0+.\"])]\n\n # Ensure the minimum number of warnings were raised.\n assert len(war) >= len(warn_msgs)\n\n # Test the warning messages, ensuring each attribute is present.\n testing.eval_warnings(war, warn_msgs)\n return", "title": "" }, { "docid": "67b0a2835ba421266f2f80c234886b16", "score": "0.5270727", "text": "def data_frame(self, arg=None):\n if arg is None:\n # noinspection PyUnresolvedReferences\n return self.pd.DataFrame()\n # noinspection PyUnresolvedReferences\n return self.pd.DataFrame(arg)", "title": "" }, { "docid": "63f406eac2bc2290c7b98da78b1da937", "score": "0.5269596", "text": "def test_evaluation_process_empty(self):\n df = pd.DataFrame([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}])\n result = Evaluation(df).process([]).get_df()\n\n self.assertTrue(result.equals(df))", "title": "" }, { "docid": "1b55f83d3d5e2a972b3afec15291a3ef", "score": "0.5260433", "text": "def assert_arrow_table_equals(actual: pa.Table, expected: pa.Table) -> None:\n assert (\n actual.column_names == expected.column_names\n ), \"actual columns != expected columns\\n-%r\\n+%r\" % (\n expected.column_names,\n actual.column_names,\n )\n assert (\n actual.schema.metadata == 
expected.schema.metadata\n ), \"actual table metadata != expected table metadata\\n-%r\\n+%r\" % (\n expected.schema.metadata,\n actual.schema.metadata,\n )\n for i in range(actual.num_columns):\n actual_field = actual.field(i)\n expected_field = expected.field(i)\n assert (\n actual_field == expected_field\n ), \"column %r: actual field != expected field\\n-%r\\n+%r\" % (\n actual_field.name,\n expected_field,\n actual_field,\n )\n assert (\n actual_field.metadata == expected_field.metadata\n ), \"column %r: actual metadata != expected metadata\\n-%r\\n+%r\" % (\n actual_field.name,\n expected_field.metadata,\n actual_field.metadata,\n )\n for column_name, actual_column, expected_column in zip(\n actual.column_names, actual.itercolumns(), expected.itercolumns()\n ):\n assert actual_column.num_chunks == expected_column.num_chunks\n for chunk_index, (actual_chunk, expected_chunk) in enumerate(\n zip(actual_column.chunks, expected_column.chunks)\n ):\n diff = actual_chunk.diff(expected_chunk)\n assert not diff, \"actual != expected data in column %r, chunk %d:%s\" % (\n column_name,\n chunk_index,\n diff,\n )", "title": "" }, { "docid": "e20ff69c4fbf75518064bf9d0996f8fb", "score": "0.5251738", "text": "def sanity_checks(df: pd.DataFrame) -> None:\n df_temp = df.copy()\n # checks that the max date is less than tomorrow's date.\n assert datetime.datetime.strptime(df_temp['Date'].max(), '%Y-%m-%d') < (datetime.datetime.utcnow() + datetime.timedelta(days=1))\n # checks that there are no duplicate dates\n assert df_temp['Date'].duplicated().sum() == 0, 'One or more rows share the same date.'\n if 'Cumulative total' not in df_temp.columns:\n df_temp['Cumulative total'] = df_temp['Daily change in cumulative total'].cumsum()\n # checks that the cumulative number of tests on date t is always greater than the figure for t-1:\n assert (df_temp['Cumulative total'].iloc[1:] >= df_temp['Cumulative total'].shift(1).iloc[1:]).all(), \"On one or more dates, `Cumulative total` is greater on date t-1.\"\n # df.iloc[1:][df['Cumulative total'].iloc[1:] < df['Cumulative total'].shift(1).iloc[1:]]\n # cross-checks a sample of scraped figures against the expected result.\n assert len(sample_official_data) > 0\n for dt, d in sample_official_data:\n val = df_temp.loc[df_temp['Date'] == dt, SERIES_TYPE].squeeze().sum()\n assert val == d[SERIES_TYPE], f\"scraped value ({val:,d}) != official value ({d[SERIES_TYPE]:,d}) on {dt}\"\n return None", "title": "" }, { "docid": "30ac5962d171dfc7eb905cac9c29ddd4", "score": "0.5251113", "text": "def test_df_date_type(attr_timeseries: str = ATTR_TIMESERIES) -> None:\r\n df_out, _ = _get_df()\r\n assert isinstance(\r\n df_out[attr_timeseries][0][0],\r\n tuple\r\n ) and isinstance(\r\n df_out[attr_timeseries][0][0][0],\r\n datetime.datetime\r\n )", "title": "" }, { "docid": "733271c44f486904f10ef694652e06b9", "score": "0.52378", "text": "def test_get_data_set():\n df = pybea.get_data(UserID=USER_ID,\n DataSetName='RegionalData',\n KeyCodes=['POP_MI'],\n GeoFips='MSA',\n Year=['2000', '2005', '2010'])\n nose.tools.assert_is_instance(df, pd.DataFrame)\n nose.tools.assert_equals(df.shape, (1146, 8))\n\n with nose.tools.assert_raises(ValueError):\n pybea.get_data(UserID=USER_ID, DataSetName='InvalidDataSetName')", "title": "" }, { "docid": "3b6e9b6ea7e32eda5e9ac809de2cdd45", "score": "0.522856", "text": "def test_read_data_from_excel(self):\n pass", "title": "" }, { "docid": "f4aa1ccb7096b4e282c8d2ec4ebd301e", "score": "0.5227455", "text": "def 
test_validation_invalid_content(vcf_class):\n vcfDf = pd.DataFrame(\n {\n \"#CHROM\": [\"chr2\", \"chrM\", float(\"nan\"), \"chr2\"],\n \"POS\": [69688533, 99401860, 53701241, 69688533],\n \"ID\": [\"AAK1\", \"AAED1\", \"AAAS\", \"AAK1\"],\n \"REF\": [\"AAK1\", \"AAED1\", \"AAAS\", \"AAK1\"],\n \"ALT\": [\"AAK1\", \"AAED1\", \"AAAS\", \"AAK1\"],\n \"QUAL\": [\"AAK1\", \"AAED1\", \"AAAS\", \"AAK1\"],\n \"FILTER\": [\"AAK1\", \"AAED1\", \"AAAS\", \"AAK1\"],\n \"INFO\": [\"AAK1\", \"AAED1\", \"AAAS\", \"AAK1\"],\n }\n )\n error, warning = vcf_class._validate(vcfDf)\n expectedError = (\n \"vcf: Must not have duplicate variants.\\n\"\n \"vcf: May contain rows that are \"\n \"space delimited instead of tab delimited.\\n\"\n \"vcf: Please double check your #CHROM column. This column must only be these values: \"\n \"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, X, Y, MT\\n\"\n )\n expectedWarning = \"vcf: Should not have the chr prefix in front of chromosomes.\\n\"\n assert error == expectedError\n assert warning == expectedWarning", "title": "" }, { "docid": "327e48d3a5f33cebe48e6855aca84700", "score": "0.52273786", "text": "def test_data(df1: DataFrame, df2: DataFrame):\n data1 = df1.collect()\n data2 = df2.collect()\n return set(data1) == set(data2)", "title": "" }, { "docid": "07da0e2a05efedcbca833b26ede19812", "score": "0.5222966", "text": "def test_ouput_types(nbltdata): # pylint: disable=redefined-outer-name\n assert isinstance(nbltdata.failed_success, pd.DataFrame)\n assert isinstance(nbltdata.logon_sessions, pd.DataFrame)\n assert isinstance(nbltdata.logon_matrix, pd.io.formats.style.Styler)\n assert isinstance(nbltdata.plots, dict)\n assert isinstance(nbltdata.plots[\"User Pie Chart\"], Figure)\n assert isinstance(nbltdata.timeline, Column)", "title": "" }, { "docid": "5e1bab02ee7b69b86b5e7c61d112928b", "score": "0.52204275", "text": "def df():\n data = generate_df_for_tests(freq=\"D\", periods=30*8)\n df = data[\"df\"][[TIME_COL, VALUE_COL]]\n return df", "title": "" }, { "docid": "f4d36583af00e471d2c15ee88da51fcd", "score": "0.52187693", "text": "def test_df(\n categorical: bool = False, datetime: bool = False\n) -> Tuple[pd.DataFrame, pd.Series]:\n X, y = make_classification(\n n_samples=1000,\n n_features=12,\n n_redundant=4,\n n_clusters_per_class=1,\n weights=[0.50],\n class_sep=2,\n random_state=1,\n )\n\n # transform arrays into pandas df and series\n colnames = [\"var_\" + str(i) for i in range(12)]\n X = pd.DataFrame(X, columns=colnames)\n y = pd.Series(y)\n\n if categorical is True:\n X[\"cat_var1\"] = [\"A\"] * 1000\n X[\"cat_var2\"] = [\"B\"] * 1000\n\n if datetime is True:\n X[\"date1\"] = pd.date_range(\"2020-02-24\", periods=1000, freq=\"T\")\n X[\"date2\"] = pd.date_range(\"2021-09-29\", periods=1000, freq=\"H\")\n\n return X, y", "title": "" }, { "docid": "2554767581e2fcff84ccc3167f8095d5", "score": "0.5217195", "text": "def _load(self) -> pd.DataFrame:\n raise NotImplementedError(\"abstractmethod\")", "title": "" }, { "docid": "385b399b75ecda859671b0c078b098dd", "score": "0.5213595", "text": "def setUpClass(cls):\n forecastsTests.sc = SparkContext('local' )#spark://192.168.100.180:9090\n forecastsTests.sqlCtx = SQLContext(forecastsTests.sc)\n forecastsTests.version = str(forecastsTests.sqlCtx._sc.version)\n \n data = [\n ['20181029192000', 'host', 20], ['20181029192010', 'host', 25], ['20181029192020', 'host', 30],\n ['20181029192030', 'host', 35], ['20181029192040', 'host', 40], ['20181029192050', 'host', 45],\n ['20181029192100', 
'host', 50], ['20181029192110', 'host', 55], ['20181029192120', 'host', 60],\n ['20181029192130', 'host', 65], ['20181029192140', 'host', 70], ['20181029192150', 'host', 75],\n ['20181029192200', 'host', 80], ['20181029192210', 'host', 85], ['20181029192220', 'host', 90],\n ['20181029192230', 'host', 95], ['20181029192240', 'host', 100], ['20181029192250', 'host', 105],\n ['20181029192300', 'host', 110], ['20181029192310', 'host', 115], ['20181029192320', 'host', 120],\n ['20181029192330', 'host', 125], ['20181029192340', 'host', 130], ['20181029192350', 'host', 135],\n ['20181029192400', 'host', 140], ['20181029192410', 'host', 145], ['20181029192420', 'host', 150],\n ['20181029192430', 'host', 155], ['20181029192440', 'host', 160], ['20181029192450', 'host', 165],\n ]\n\n cls.test_df = forecastsTests.sqlCtx.createDataFrame(data,['time','host','value'])\n cls.test_df2 = forecastsTests.sqlCtx.read.format('csv') \\\n .option(\"header\", \"true\") \\\n .option(\"inferschema\", \"true\") \\\n .load(\"plaform2_DATE_CNT.csv\")", "title": "" }, { "docid": "f424e2b1f6cb20fbca0257eaa27937e5", "score": "0.5212829", "text": "def is_DataFrame(data, verbose=3):\n if isinstance(data, list):\n data = pd.DataFrame(data)\n elif isinstance(data, np.ndarray):\n data = pd.DataFrame(data)\n elif isinstance(data, pd.DataFrame):\n pass\n else:\n if verbose>=3: print('Typing should be pd.DataFrame()!')\n data=None\n\n return(data)", "title": "" }, { "docid": "477e09b158723904ae8e599342038fdc", "score": "0.52075213", "text": "def test_column_name_type(process_test_df):\n with pytest.raises(TypeError):\n process_test_df.process_text([\"text\"])", "title": "" }, { "docid": "35eb4604c5cee9868bd2fc0d7be7da59", "score": "0.5198441", "text": "def test_complex_df_report():\n tz_df = pd.DataFrame(\n dict(\n duration_col=[timedelta(seconds=x) for x in range(30)],\n date_col=[date.today() for _ in range(30)],\n datetime_col=[datetime.utcnow() for _ in range(30)],\n datetimez_col=[datetime.now(timezone.utc) for _ in range(30)],\n )\n )\n\n raw_data = {\n \"first_name\": [\"Jason\", \"Molly\", \"Tina\", \"Jake\", \"Amy\"],\n \"last_name\": [\"Miller\", \"Jacobson\", \"Ali\", \"Milner\", \"Cooze\"],\n \"age\": [42, 52, 36, 24, 73],\n \"preTestScore\": [4, 24, 31, 2, 3],\n \"postTestScore\": [25, 94, 57, 62, 70],\n }\n index_df = pd.DataFrame(raw_data, columns=[\"first_name\", \"last_name\", \"age\", \"preTestScore\", \"postTestScore\"])\n df_desc = index_df.describe()\n df_desc_2 = df_desc.reset_index()\n\n tz_t = dp.DataTable(tz_df)\n index_t = dp.DataTable(index_df)\n df_desc_t = dp.DataTable(df_desc)\n df_desc_2_t = dp.DataTable(df_desc_2)\n\n with deletable(dp.Report(tz_t, index_t, df_desc_t, df_desc_2_t)) as dp_report:\n dp_report.upload(name=gen_name())\n\n # NOTE - as above, downloading embedded assets from a report currently not supported in API\n # check_df_equal(tz_df, tz_t.download_df())\n # check_df_equal(index_df, index_t.download_df())\n # check_df_equal(df_desc, df_desc_t.download_df())\n # check_df_equal(df_desc_2, df_desc_2_t.download_df())", "title": "" }, { "docid": "7ef7b7f9eebd6f8ace112d1403107637", "score": "0.519826", "text": "def __init__(self, dataframe, logger=None):\n self._dataframe = dataframe\n self._logger = logger", "title": "" }, { "docid": "89adb5820091fb71eb35475ee6551284", "score": "0.5189214", "text": "def test_create_clean_dataframe(provide_dummy_feature_list, provide_label_dict):\n _, expected_table = provide_label_dict\n feature_list = provide_dummy_feature_list\n\n df = 
FeatureCollector._create_clean_dataframe( # pylint:disable=invalid-name,protected-access\n feature_list, expected_table\n )\n assert isinstance(df, pd.DataFrame) # make sure we have a dataframe\n assert len(df) == len(expected_table) # there are no N/As\n\n # an interesting case is UKUDIP01 with two metals\n assert len(df[df[\"name\"] == \"UKUDIP01\"]) == 2\n assert (\n df[(df[\"name\"] == \"UKUDIP01\") & (df[\"metal\"] == \"Cu\")][\"oxidationstate\"].values\n == 2\n )\n assert (\n df[(df[\"name\"] == \"UKUDIP01\") & (df[\"metal\"] == \"Gd\")][\"oxidationstate\"].values\n == 3\n )", "title": "" }, { "docid": "af8ecd80f684d805e202b2c017ba73fc", "score": "0.51869124", "text": "def _parse_dataframe(self):\n raise NotImplementedError('Method needs to be implemented by subclass')", "title": "" }, { "docid": "e56fbdc269aa3b4f19b77a6a1b87f252", "score": "0.5184486", "text": "def __init__(self,df):\n self.df = df", "title": "" }, { "docid": "c98ce161966a7174cb0596f48e8d7f5e", "score": "0.51833117", "text": "def test_df_match_borders():\n\n ref_df = pd.DataFrame(\n {\"data\": np.arange(5)},\n index=pd.date_range(datetime(2007, 1, 1, 0), \"2007-01-05\", freq=\"D\"),\n )\n match_df = pd.DataFrame(\n {\"matched_data\": np.arange(5)},\n index=[\n datetime(2007, 1, 1, 9),\n datetime(2007, 1, 2, 9),\n datetime(2007, 1, 3, 9),\n datetime(2007, 1, 4, 9),\n datetime(2007, 1, 5, 9),\n ],\n )\n with pytest.deprecated_call():\n matched = tmatching.df_match(ref_df, match_df)\n\n nptest.assert_allclose(\n np.array([0.375, 0.375, 0.375, 0.375, 0.375]), matched.distance.values\n )\n nptest.assert_allclose(np.arange(5), matched.matched_data)", "title": "" } ]
e1c767c8f6243e23f22514657e32bb27
Sends rows to a BigQuery table. Iterates until all rows are sent.
[ { "docid": "5d1b54000dae9618b2358fb1d06f5ad1", "score": "0.79960674", "text": "def send_to_bq(table_name, rows):\n\n if rows:\n logging.info('Sending %d rows', len(rows))\n _send_to_bq_raw('swarming', table_name, rows)", "title": "" } ]
[ { "docid": "afe5eff31f8c2d1e4f035edc6ff89430", "score": "0.7848489", "text": "def _send_to_bq_raw(dataset, table_name, rows):\n # BigQuery API doc:\n # https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll\n url = (\n 'https://www.googleapis.com/bigquery/v2/projects/%s/datasets/%s/tables/'\n '%s/insertAll') % (app_identity.get_application_id(), dataset, table_name)\n payload = {\n 'kind':\n 'bigquery#tableDataInsertAllRequest',\n # Do not fail entire request because of one bad row.\n # We handle invalid rows below.\n 'skipInvalidRows':\n True,\n 'ignoreUnknownValues':\n False,\n 'rows': [{\n 'insertId': row_id,\n 'json': bqh.message_to_dict(row)\n } for row_id, row in rows],\n }\n res = net.json_request(\n url=url,\n method='POST',\n payload=payload,\n scopes=bqh.INSERT_ROWS_SCOPE,\n deadline=600)\n\n dropped = 0\n # Use this error message string to detect the error where we're pushing data\n # that is too old. This can occasionally happen as a cron job looks for old\n # entity and by the time it's sending them BigQuery doesn't accept them, just\n # skip these and log a warning.\n out_of_time = (\n 'You can only stream to date range within 365 days in the past '\n 'and 183 days in the future relative to the current date')\n # https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response\n for line in res.get('insertErrors', []):\n i = line['index']\n err = line['errors'][0]\n if err['reason'] == 'invalid' and out_of_time in err['message']:\n # Silently drop it. The rationale is that if it is not skipped, the loop\n # will get stuck on it.\n dropped += 1\n continue\n\n logging.error('Failed to insert row %s: %r', i, err)\n # TODO(crbug.com/1139745): exclude retryable error.\n raise BQError(\"failed to insert rows: %s\" % err)\n if dropped:\n logging.warning('%d old rows silently dropped', dropped)", "title": "" }, { "docid": "ba0d1ed079a24b0c773b61b1704c9a23", "score": "0.68184465", "text": "def _insert_rows(self):\n client, dataset_reference, table_reference = self._setup_bq_objects()\n self._create_dataset(client, dataset_reference)\n self._create_table(client, table_reference)\n\n table = client.get_table(table_reference)\n rows: list = list()\n # for item in self._data:\n # row = [f'{v}' for v in item.values()]\n # rows.append(row)\n\n rows = self._data\n log.debug(\"ATTEMPTING TO WRITE BQ ROWS TO TABLE\")\n try:\n count = 0\n for row in rows:\n count += 1\n errors = client.insert_rows(table_reference, [row], selected_fields=table.schema)\n if errors:\n log.error(errors)\n else:\n if count % 100000 == 0:\n log.debug(f\"{count} BQ ROWS INSERTED\")\n\n except gcx.BadRequest as b:\n log.error(b)\n except OverflowError as e:\n log.error(f\"\"\"\n Ignoring this error, just skipping this row\n {e}\"\"\")\n\n log.debug(\"BQ WRITE ATTEMPT COMPLETE\")", "title": "" }, { "docid": "2debbf392acc7a7470daf33c2177e38e", "score": "0.66137856", "text": "def flush_rows(\n self,\n ) -> Callable[[storage.FlushRowsRequest], storage.FlushRowsResponse]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"flush_rows\" not in self._stubs:\n self._stubs[\"flush_rows\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.bigquery.storage.v1.BigQueryWrite/FlushRows\",\n request_serializer=storage.FlushRowsRequest.serialize,\n response_deserializer=storage.FlushRowsResponse.deserialize,\n )\n return 
self._stubs[\"flush_rows\"]", "title": "" }, { "docid": "3d5ac0ba2fe87f8596ea09fd8fe0d68e", "score": "0.6133726", "text": "async def send_table_data(state, table, conds=None, orient='split'):\n # get the table data in another thread, just in case.\n task = state.loop.run_in_executor(state.executor, table_data_as_json,\n state, table, conds, orient)\n await asyncio.wait([task])\n data = task.result()\n params = {'table': table, 'conds': conds, 'orient': orient}\n await send_message(state, \"table_data\", params=params, data=data)", "title": "" }, { "docid": "c6aa3fe8a4ffc20a748289dc68ca7fc0", "score": "0.61239326", "text": "def stream(table, rows_to_insert, unique_ids):\n\n\trow_ids = []\n\tfor row in rows_to_insert:\n\t\tidx = ''\n\t\tfor col in unique_ids:\n\t\t\tidx += str(row[col]) + '_'\n\t\trow_ids.append(idx[:-1])\n\tlogging.info(\"BigQuery Streaming indexIds/uniqueIds/row_ids: {}\".format(row_ids))\n\n\terrors = client.insert_rows_json(table, rows_to_insert, row_ids=row_ids)\n\tif errors == []:\n\t\treturn True\n\telse:\n\t\traise Exception(errors)\n\t\treturn False", "title": "" }, { "docid": "3a66b777500b8e0e8fc99796e8b1cec5", "score": "0.5909472", "text": "def append_rows(\n self,\n ) -> Callable[[storage.AppendRowsRequest], storage.AppendRowsResponse]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"append_rows\" not in self._stubs:\n self._stubs[\"append_rows\"] = self.grpc_channel.stream_stream(\n \"/google.cloud.bigquery.storage.v1.BigQueryWrite/AppendRows\",\n request_serializer=storage.AppendRowsRequest.serialize,\n response_deserializer=storage.AppendRowsResponse.deserialize,\n )\n return self._stubs[\"append_rows\"]", "title": "" }, { "docid": "f07386d6997d4adac6d57d41a95a493c", "score": "0.58994865", "text": "def batch_write(table_name, rows):\n table = dynamodb.Table(table_name)\n\n with table.batch_writer() as batch:\n for row in rows:\n batch.put_item(Item=row)\n\n return True", "title": "" }, { "docid": "b3b4980a4a278ece057a05f3fb4d27fd", "score": "0.5836527", "text": "def table_insert_all(self, table_id, rows, dataset_id):\n def _try_once():\n result = self._table_insert_many(table_id, rows, dataset_id)\n if result.get('insertErrors'):\n raise Exception('Error inserting batch into %s: %s' % (\n dataset_id, result))\n return result\n\n try:\n return _try_once()\n except Exception as e:\n text = str(e)\n bad_gateway_error = 'HttpError 502' in text and '\"Bad Gateway\"' in text\n socket_error = 'Unable to connect to server at URL' in text\n if socket_error or bad_gateway_error:\n logging.info('Retrying _table_insert_many(): %s',\n 'socket error' if socket_error else 'bad gateway')\n return _try_once()\n raise", "title": "" }, { "docid": "9554207bced6dff999ce5c6ef62be338", "score": "0.5804904", "text": "def upload(self) -> None:\n with self.lock:\n for database, tables in self.upload_queue.items():\n for table, rows in tables.items():\n # Deduplicate\n # In case of duplicate keys, the first key is preserved, and the last value is preserved.\n patch: Dict[str, Row] = {r.key: r for r in rows}\n\n self._upload_batch(database=database, table=table, patch=list(patch.values()))\n\n # Perform post-upload logic if applicable\n try:\n self._post_upload(rows)\n except Exception as e:\n self.logger.error(\"Error in upload callback: %s\", str(e))\n\n self.upload_queue.clear()\n self.logger.info(f\"Uploaded {self.upload_queue_size} 
rows in total\")\n self.upload_queue_size = 0", "title": "" }, { "docid": "5f49006c462c7557fde9b40b05d4381f", "score": "0.5804677", "text": "def insert_to_bigquery(rows_to_insert,dataset,table):\n bq_client = bigquery.Client()\n dataset_ref = bq_client.dataset(dataset)\n table_ref=dataset_ref.table(table)\n \n table = bq_client.get_table(table_ref)\n \n errors = bq_client.insert_rows(table,rows_to_insert)\n print(errors)\n \n assert errors == []", "title": "" }, { "docid": "83d67ba9fd7974446b31253f0cbac42f", "score": "0.5784526", "text": "def do_insert_table(self, txn, table):\n \n self.stats['inserts_received'] = self.stats.get('inserts_received', 0) + 1\n\n rowcount = 0\n\n base_query = \"INSERT INTO %s (%s) VALUES \" % (table.get_tablename(), \",\".join( table.get_column_names() ) )\n\n nr_columns = len(table.get_column_names())\n max_nr_values = 400 # supposedly the max is 500..\n\n nr_values = 0\n bind_values = []\n\n one_row = '({})'.format(','.join(['?'] * nr_columns))\n\n sql = base_query\n\n for row_index, row in enumerate(table.get_rows()):\n\n if nr_values + nr_columns > max_nr_values:\n\n # execute this batch and start again.\n\n sql += ','.join([ one_row ] * (nr_values / nr_columns))\n\n #logging.info(\"DEBUG: hit limit: executing (values %d, rows %d): %s\", nr_values, rowcount, sql)\n\n txn.execute(sql, bind_values)\n bind_values = []\n nr_values = 0\n sql = base_query\n\n nr_values += nr_columns\n\n # add this row to our bind values\n\n for value in row:\n bind_values.append(value)\n\n rowcount += 1\n\n # handle last batch\n if rowcount > 0:\n sql += ','.join([ one_row ] * (nr_values / nr_columns))\n\n #logging.info(\"DEBUG: final: executing (values %d, rows %d): %s\", nr_values, rowcount, sql)\n\n txn.execute(sql, bind_values)\n\n self.stats['inserts_completed'] = self.stats.get('inserts_completed', 0) + 1\n\n return rowcount", "title": "" }, { "docid": "5b3404aec6b4689a5fca526cdb40705c", "score": "0.57687086", "text": "def _do_request(self):\n self._request.Request()\n while self._wait_for_data():\n self._request.RequestNext()\n log.debug('%i rows returned.', self._request.RowCount)", "title": "" }, { "docid": "bb87ecacdd91c196cdb4ce42961ff076", "score": "0.5765905", "text": "def write_to_bq_table(self, dataset: str, gcs_bucket: str,\n filename: str, table_name: str, project=None):\n chunked_frame = gcs_to_bq_util.load_csv_as_dataframe(\n gcs_bucket, filename, chunksize=1000)\n\n # For the very first chunk, we set the mode to overwrite to clear the\n # previous table. 
For subsequent chunks we append.\n overwrite = True\n for chunk in chunked_frame:\n self.clean_frame_column_names(chunk)\n gcs_to_bq_util.add_dataframe_to_bq(\n chunk, dataset, table_name, project=project,\n overwrite=overwrite)\n overwrite = False", "title": "" }, { "docid": "573027a455bc6d1c3213d9bd84201ac6", "score": "0.57179433", "text": "def insert_all(\n self,\n project_id: str,\n dataset_id: str,\n table_id: str,\n rows: list,\n ignore_unknown_values: bool = False,\n skip_invalid_rows: bool = False,\n fail_on_error: bool = False,\n ) -> None:\n self.log.info(\"Inserting %s row(s) into table %s:%s.%s\", len(rows), project_id, dataset_id, table_id)\n\n table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)\n bq_client = self.get_client(project_id=project_id)\n table = bq_client.get_table(table_ref)\n errors = bq_client.insert_rows(\n table=table,\n rows=rows,\n ignore_unknown_values=ignore_unknown_values,\n skip_invalid_rows=skip_invalid_rows,\n )\n if errors:\n error_msg = f\"{len(errors)} insert error(s) occurred. Details: {errors}\"\n self.log.error(error_msg)\n if fail_on_error:\n raise AirflowException(f\"BigQuery job failed. Error was: {error_msg}\")\n else:\n self.log.info(\"All row(s) inserted successfully: %s:%s.%s\", project_id, dataset_id, table_id)", "title": "" }, { "docid": "96efdc9a2cda6510e47894879261296f", "score": "0.5694242", "text": "def write_to_bq(bigquery):\n tweets = []\n CHUNK = 50 # The size of the BigQuery insertion batch.\n twstring = ''\n tweet = None\n mtweet = None\n while True:\n while len(tweets) < CHUNK:\n # We'll use a blocking list pop -- it returns when there is\n # new data.\n res = r.brpop(REDIS_LIST)\n twstring = res[1]\n try:\n tweet = json.loads(res[1])\n except Exception, bqe:\n print bqe\n continue\n # First do some massaging of the raw data\n mtweet = utils.cleanup(tweet)\n # We only want to write tweets to BigQuery; we'll skip 'delete' and\n # 'limit' information.\n if 'delete' in mtweet:\n continue\n if 'limit' in mtweet:\n print mtweet\n continue\n tweets.append(mtweet)\n # try to insert the tweets into bigquery\n utils.bq_data_insert(bigquery, PROJECT_ID, os.environ['BQ_DATASET'],\n os.environ['BQ_TABLE'], tweets)\n tweets = []", "title": "" }, { "docid": "0ac0d6324e79fcadeed0f6976eec5790", "score": "0.56315917", "text": "def _importRows(self, filehandle, table_id, cols):\n max_per_batch = 100\n current_row = 0\n queries = []\n rows = []\n for line in filehandle:\n values = dict(zip(cols, line))\n query = SQL().insert(table_id, values)\n queries.append(query)\n current_row += 1\n if current_row == max_per_batch:\n full_query = ';'.join(queries)\n try:\n rows += self.ftclient.query(full_query).split(\"\\n\")[1:-1]\n print \"%s rows added\" % (len(rows))\n except:\n print str(sys.exc_info()[1])\n# print full_query + \"\\n\"\n time.sleep(5)\n\n current_row = 0\n queries = []\n\n if len(queries) > 0:\n full_query = ';'.join(queries)\n try:\n rows += self.ftclient.query(full_query).split(\"\\n\")[1:-1]\n print \"%s rows added\" % (len(rows))\n except:\n print str(sys.exc_info()[1])\n# print full_query\n\n return rows", "title": "" }, { "docid": "46dda5abba23b6724c8ed9fbc6c4e73e", "score": "0.5617979", "text": "def batch_write(conn, sleep_interval, table_name, put_requests):\n\n request_items = {table_name: put_requests}\n i = 1\n sleep = sleep_interval\n while True:\n response = conn.batch_write_item(RequestItems=request_items)\n unprocessed_items = response[\"UnprocessedItems\"]\n\n if 
len(unprocessed_items) == 0:\n break\n if len(unprocessed_items) > 0 and i <= MAX_RETRY:\n logging.debug(\n str(len(unprocessed_items))\n + \" unprocessed items, retrying after %s seconds.. [%s/%s]\"\n % (str(sleep), str(i), str(MAX_RETRY))\n )\n request_items = unprocessed_items\n time.sleep(sleep)\n sleep += sleep_interval\n i += 1\n else:\n logging.info(\n \"Max retries reached, failed to processed batch write: \"\n + json.dumps(unprocessed_items, indent=JSON_INDENT)\n )\n logging.info(\"Ignoring and continuing..\")\n break", "title": "" }, { "docid": "60aded6f52e6bbae2b0f9a230e46d914", "score": "0.5608625", "text": "def send_table(self):\n print(\"Sending table update\")\n\n # Schedule another unsolicted update\n self.scheduler.enter(30+random.randint(0, 5), 1, self.send_table, argument=())\n\n # Clear any triggered updates yet to be sent\n self.update_packet.clear()\n\n # Build the table entries packet\n rip_packet = packet.RIP_Packet(int(self.router_id), RIP_RESPONSE_COMMAND)\n for entry in self.routing_table:\n\n rip_packet.add_entry(entry)\n\n # Send Response packets to each neighbour\n if rip_packet.entries:\n for neighbour in self.routing_table:\n self.send_packet(neighbour.address, rip_packet.pack(neighbour.address))", "title": "" }, { "docid": "5b5639dde4750e9f6013756f4bd70dda", "score": "0.5583838", "text": "def upload_how_many_rows_we_want_main(self):\r\n self.uploade_how_many_rows_we_want(self.df, self.q, self.table_name)\r\n if len(self.l)==0:\r\n print(\"done\")\r\n else:\r\n # union all tables\r\n with t.connect('{\"host\":\"tdprd\",\"logmech\":\"krb5\"}') as con:\r\n with con.cursor () as cur:\r\n q_union = \"sel * from {0}\".format(self.l[0])\r\n for item in self.l[1:]:\r\n q_union +=\" union all sel * from {0}\".format(item)\r\n\r\n q_final = \"\"\"insert into {0}\r\n {1}\r\n \"\"\".format(self.table_name, q_union)\r\n cur.execute(q_final)\r\n #print(\"l: \", self.l)\r\n for item in list(set(self.l)):\r\n cur.execute(\"drop table {0}\".format(item))\r\n print(\"num_of_tables: \" + str(self.num))\r\n print('done')", "title": "" }, { "docid": "c0e5fca1adb8ed35cd1d2bee51713dba", "score": "0.55014646", "text": "def query_all_rows(self, table):\n rowid = 1\n rows = []\n while True:\n row = getattr(ManageDB, 'query_' + table)\n print(row)\n if row:\n rows.append(row)\n rowid += 1\n else:\n break", "title": "" }, { "docid": "4ab74db9a1e709339973072c5bf47127", "score": "0.5496636", "text": "def run(self):\n job_config = bigquery.QueryJobConfig(use_query_cache=True)\n self.job = self.bq_client.query(self.stmt, job_config=job_config)\n\n # wait\n self.job.result()\n\n # build session\n table_ref = self.job.destination.to_bqstorage()\n parent = \"projects/{}\".format(self.PROJECT)\n\n # default number of streams chosen by google to\n # get reasonable read throughput\n self.session = self.bq_storage_client.create_read_session(\n table_ref,\n parent,\n requested_streams=self.num_streams,\n format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW,\n )", "title": "" }, { "docid": "e9e44f6c8800fb3c2b0796467422e448", "score": "0.5493731", "text": "def update(self, chunksize=100000, progress_bar=True):\n\n # form request body and create a session for data uploads\n self.__form_upload_body()\n response = datasets.upload_session(connection=self._connection,\n dataset_id=self._dataset_id, body=self.__upload_body)\n\n if not response.ok:\n self.__response_handler(response=response, msg=\"Error creating new data upload session.\")\n else:\n response_json = response.json()\n self._session_id = 
response_json['uploadSessionId']\n\n # upload each table\n for ix, _table in enumerate(self._tables):\n\n _df, _name = _table[\"data_frame\"], _table[\"table_name\"]\n\n # break the data up into chunks using a generator\n chunks = (_df[i:i + chunksize] for i in range(0, _df.shape[0], chunksize))\n\n total = _df.shape[0]\n\n # Count the number of iterations\n it_total = int(total/chunksize) + (total % chunksize != 0)\n\n pbar = tqdm(chunks, total=it_total, disable=(not progress_bar))\n for index, chunk in enumerate(pbar):\n if progress_bar:\n pbar.set_description(\"Uploading {}/{}\".format(ix+1, len(self._tables)))\n\n # base64 encode the data\n encoder = Encoder(data_frame=chunk, dataset_type='multi')\n b64_enc = encoder.encode\n\n # form body of the request\n body = {\"tableName\": _name,\n \"index\": index + 1,\n \"data\": b64_enc}\n\n # make request to upload the data\n response = datasets.upload(connection=self._connection,\n dataset_id=self._dataset_id,\n session_id=self._session_id,\n body=body)\n\n if not response.ok:\n # on error, cancel the previously uploaded data\n self.__response_handler(response=response, msg=\"Error uploading data.\")\n datasets.publish_cancel(connection=self._connection,\n dataset_id=self._dataset_id,\n session_id=self._session_id)\n\n if progress_bar:\n pbar.set_postfix(rows=min((index+1)*chunksize, total))\n pbar.close()\n self._tables = []", "title": "" }, { "docid": "d0e4d2e9e909839b8561aa40e520526f", "score": "0.5445403", "text": "def write_table_iter(self, **kwargs) -> None: # pragma: no cover\n\n self._write_table_iter(**kwargs)", "title": "" }, { "docid": "c98a59d593169749fe11cd60ddd9a006", "score": "0.5436738", "text": "def send_table():\n while True:\n message = make_message()\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n for k, v in neighbour_table.iteritems(): # must have to send corresponding node IP\n sock.sendto(message, (k, UDP_SERVER_PORT)) # it sends msg to its neigh whose IP address if k\n\n time.sleep(1)", "title": "" }, { "docid": "5469c023b1c9137b9b28a006e5cc29b1", "score": "0.5418332", "text": "def async_run(self, n_row=25000):\n print('Sending and opening an async-query to EMS ...', end=' ')\n resp_h, content = self._conn.request(\n rtype=\"POST\",\n uri_keys=('database', 'open_asyncq'),\n uri_args=(self._ems_id, self.__flight.get_database()['id']),\n jsondata=self.__queryset\n )\n if 'id' not in content:\n sys.exit(\"Opening Async query did not return the query Id.\")\n query_id = content['id']\n query_header = content['header']\n print('Done.')\n\n ctr = 0\n df = None\n while True:\n print(\" === Async call: %d ===\" % (ctr+1))\n try:\n resp_h, content = self._conn.request(\n rtype=\"GET\",\n uri_keys=('database', 'get_asyncq'),\n uri_args=(\n self._ems_id,\n self.__flight.get_database()['id'],\n query_id,\n n_row * ctr,\n n_row * (ctr+1) - 1)\n )\n content['header'] = query_header\n dff = self.__to_dataframe(content)\n except:\n print(\"Something's wrong. 
Returning what has been sent so far.\")\n # from pprint import pprint\n # pprint(resp_h)\n # pprint(content)\n return df\n\n if ctr == 0:\n df = dff\n else:\n df = pd.concat([df, dff], axis=0, join='outer', ignore_index=True)\n\n print(\"Received up to %d rows.\" % df.shape[0])\n if dff.shape[0] < n_row:\n break\n ctr += 1\n\n print(\"Done.\")\n return df", "title": "" }, { "docid": "76146592c154388de3aa6cb4d2125d7f", "score": "0.54175615", "text": "def flush_records(self):\n if run_config.TCP_SEGMENTATION and self.chunk == 0:\n self.chunk = run_config.TCP_SEGMENTATION\n\n if self.chunk is not None:\n tf_cfg.dbg(3, \"Trying to send data by chunk\")\n _s = b\"\".join(p.raw_stateful() for p in self.buffer_out)\n n = self.chunk\n for chunk in [_s[i : i + n] for i in range(0, len(_s), n)]:\n self.socket.send(chunk)\n else:\n s = b\"\".join(p.raw_stateful() for p in self.buffer_out)\n self.socket.send(s)\n self.buffer_out = []", "title": "" }, { "docid": "1176e486f0ce20f7af77007c877207ca", "score": "0.5411762", "text": "def insert_rows(db_params, table, column_names, data):\n\n records_received = len(data)\n records_processed = 0\n\n # Create a string like \"%s, %s, %s, %s\" corresponding to number of columns\n values = values_string(len(column_names))\n\n # Convert data from list of dictionaries to list of tuples\n processed_data = dicts_to_tuples(column_names, data)\n\n # Convert list of fields to a single string\n column_names = ', '.join(column_names)\n \n insert_statement = 'INSERT INTO ' + table + '(' + column_names + ') VALUES(' + values + ')'\n \n # Max number of records to try to insert at a time\n step = 30000\n n = 0 \n \n while n < len(processed_data):\n \n sub_list = processed_data[n:(n + step)]\n \n # Connect to database... some of this should be outside of the while loop\n con = create_db_connection(db_params)\n cursor = con.cursor()\n \n cursor.execute('SET NAMES utf8;')\n cursor.execute('SET CHARACTER SET utf8;')\n cursor.execute('SET character_set_connection=utf8;')\n \n try:\n # Insert data (up to the max number of records specified above) into the table\n cursor.executemany(insert_statement, sub_list)\n con.commit()\n \n # In case of error executing the inserts, rollback any changes and make sure\n # to close the connection, then raise the error again so scripts stop\n except MySQLdb.Error as e:\n con.rollback()\n cursor.close()\n con.close()\n raise e\n \n cursor.close()\n con.close()\n \n n += step\n \n records_processed += len(sub_list)\n \n print(str(records_processed) + ' out of ' + str(records_received) + ' records processed')\n \n print('inserted ' + str(records_received) + ' rows into ' + table)", "title": "" }, { "docid": "54d6d008e5b598f4b01bdc6710646409", "score": "0.53712887", "text": "def list_rows(dataset_id, table_id, project=None):\r\n bigquery_client = bigquery.Client(project=project)\r\n dataset_ref = bigquery_client.dataset(dataset_id)\r\n table_ref = dataset_ref.table(table_id)\r\n\r\n # Get the table from the API so that the schema is available.\r\n table = bigquery_client.get_table(table_ref)\r\n\r\n # Load at most 25 results.\r\n rows = bigquery_client.list_rows(table, max_results=25)\r\n\r\n # Use format to create a simple table.\r\n format_string = '{!s:<16} ' * len(table.schema)\r\n\r\n # Print schema field names\r\n field_names = [field.name for field in table.schema]\r\n print(format_string.format(*field_names))\r\n\r\n for row in rows:\r\n print(format_string.format(*row))", "title": "" }, { "docid": "2ed9c2b0d4fadc760507642d3a4fbf8f", "score": 
"0.53454363", "text": "def uploade_how_many_rows_we_want(self, df, q, table_name):\r\n try:\r\n if len(df) > 300000 or df.memory_usage(deep=True).sum() > self.memory:\r\n raise Exception(\"batch request\")\r\n try:\r\n self.insert(q, df)\r\n except Exception as ex:\r\n if 'string contains an untranslatable character' in str(ex):\r\n for i in np.where(df.dtypes != np.float)[0]:\r\n df['drop'] = df[df.columns[i]].apply(lambda x: self.is_ascii(x))\r\n l_tmp = (df['drop'][df['drop']].index)\r\n if len(l_tmp) > 0:\r\n print(\"rows remove: \" + str(list(l_tmp)))\r\n self.bar.update(len(l_tmp))\r\n df.drop(l_tmp, inplace=True)\r\n df.drop('drop', axis=1, inplace=True)\r\n elif 'batch request' in str(ex) or 'LAN message' in str(ex):\r\n raise Exception(\"batch request\")\r\n else:\r\n with t.connect('{\"host\":\"tdprd\",\"logmech\":\"krb5\"}') as con:\r\n with con.cursor () as cur:\r\n for item in list(set(self.l)):\r\n cur.execute(\"drop table {0}\".format(item))\r\n print('error')\r\n print(ex)\r\n raise error\r\n self.rows += len(df)\r\n self.bar.update(len(df))\r\n\r\n\r\n except Exception as ex:\r\n if \"batch request\" in str(ex):\r\n with t.connect('{\"host\":\"tdprd\",\"logmech\":\"krb5\"}') as con:\r\n with con.cursor () as cur:\r\n # create new tables in tera\r\n if table_name != self.table_name:\r\n cur.execute(\"drop table {0}\".format(table_name))\r\n self.l.remove(table_name)\r\n\r\n if table_name != self.table_name:\r\n tmp_num = len(str(self.num))\r\n table_name1 = table_name[:-tmp_num] + str(self.num)\r\n self.num += 1\r\n table_name2 = table_name[:-tmp_num] + str(self.num)\r\n self.num += 1\r\n else:\r\n table_name1 = table_name + str(self.num)\r\n self.num += 1\r\n table_name2 = table_name + str(self.num)\r\n self.num += 1\r\n create_statment1 = self.create_statment.replace(self.table_name, table_name1)\r\n create_statment2 = self.create_statment.replace(self.table_name, table_name2)\r\n cur.execute(create_statment1)\r\n cur.execute(create_statment2)\r\n\r\n # usally, tera upload some of the data before crashing.\r\n # we dont want duplicates.\r\n\r\n # split the data to 2 dataframes\r\n len_data = math.ceil(len(df)/2)\r\n df1 = df.iloc[:len_data]\r\n df2 = df.iloc[len_data:]\r\n\r\n # replace query\r\n q1 = q.replace(table_name, table_name1)\r\n q2 = q.replace(table_name, table_name2)\r\n\r\n self.l.append(table_name1)\r\n self.uploade_how_many_rows_we_want(df1, q1, table_name1)\r\n self.l.append(table_name2)\r\n self.uploade_how_many_rows_we_want(df2, q2, table_name2)\r\n\r\n\r\n else:\r\n print (ex)\r\n with t.connect('{\"host\":\"tdprd\",\"logmech\":\"krb5\"}') as con:\r\n with con.cursor () as cur:\r\n for item in list(set(self.l)):\r\n cur.execute(\"drop table {0}\".format(item))\r\n raise error", "title": "" }, { "docid": "40366a04ac35236979738465657b4bff", "score": "0.52972186", "text": "def TestQuery():\n # Certify BigQuery access credentials.\n credentials = AppAssertionCredentials(\n scope='https://www.googleapis.com/auth/bigquery')\n http = credentials.authorize(httplib2.Http(memcache))\n service = build('bigquery', 'v2', http=http)\n job_runner = service.jobs()\n\n # Run a query against the BigQuery database.\n logging.debug('Query: %s' % TEST_QUERY)\n jobdata = {'configuration': {'query': {'query': TEST_QUERY}}}\n insert = job_runner.insert(projectId=PROJECT_ID,\n body=jobdata).execute()\n logging.debug('Response: %s' % insert)\n\n currentRow = 0\n queryReply = job_runner.getQueryResults(\n projectId=PROJECT_ID,\n jobId=insert['jobReference']['jobId'],\n 
startIndex=currentRow).execute()\n results = queryReply\n\n while 'rows' in queryReply and currentRow < queryReply['totalRows'] :\n currentRow += len(queryReply['rows'])\n queryReply = job_runner.getQueryResults(\n projectId=PROJECT_ID,\n jobId=queryReply['jobReference']['jobId'],\n startIndex=currentRow).execute()\n if 'schema' not in results or 'fields' not in results['schema']:\n if 'schema' in queryReply and 'fields' in queryReply['schema']:\n results['schema'] = queryReply['schema']\n if 'rows' in queryReply:\n results['rows'].extend(queryReply['rows'])\n\n # Format the results as an HTML page.\n body = '<h2>The Query</h2><pre>%s</pre>\\n<hr>\\n' % TEST_QUERY\n\n tablerows = '<tr>'\n for field in results['schema']['fields']:\n tablerows += '<th>%s</th>' % field['name']\n\n for row in results['rows']:\n tablerows += '</tr><tr>'\n for value in row['f']:\n tablerows += '<td>%s</td>' % value['v']\n tablerows += '</tr>'\n\n body += '<table border=1>\\n%s\\n</table>\\n' % tablerows\n\n return '<!DOCTYPE html><html><body>%s</body></html>' % body", "title": "" }, { "docid": "d224d8b500082ab8c21eeb95f11a1c06", "score": "0.52285665", "text": "def background_thread():\r\n #count = 0\r\n while True:\r\n #print 'sending...%d'% count\r\n #count += 1\r\n mysqlCon = MySQLdb.connect(mysql_HOST, mysql_ID, mysql_PWD, mysql_DBNAME)\r\n mysqlCur = mysqlCon.cursor()\r\n # get data from database (mysql)\r\n datar = pandas.read_sql('SELECT * FROM %s' % tableName, mysqlCon)\r\n datar = datar.to_json()\r\n socketio.sleep(10)\r\n \r\n socketio.emit('refresh_data', {'table': datar}, namespace='/app_start_mysql')", "title": "" }, { "docid": "c547059122b35c82e7e18c2b721ada35", "score": "0.52088416", "text": "def _write_multiple_rows(self, data):\n\n for row in data:\n self._write_row(row)", "title": "" }, { "docid": "79695454cd438259063d6751685fd35c", "score": "0.5197244", "text": "def stress_test_kafka_producer():\n producer = kafka_manager.get_kafka_producer()\n transaction_data_set = generate_transaction_data()\n for index, row in transaction_data_set.iterrows():\n # Send a piece of transaction every 0.1 second.\n data = row.to_json()\n\n producer.send(topic='test_data', value=data)\n print(\"{} has been send\".format(data))\n return None", "title": "" }, { "docid": "869c0834d2629445f69c0b3aa43303d3", "score": "0.51970416", "text": "def process_all_rows(table):\n for row in iterate_over_rows(table):\n yield process_row(row)", "title": "" }, { "docid": "5bcc578661a2cad44be9af672767a7a7", "score": "0.5188386", "text": "def insert_rows(self, table, rows, target_fields = None, commit_every = 1000):\n if target_fields:\n target_fields = ', '.join(target_fields)\n target_fields = '({})'.format(target_fields)\n else:\n target_fields = ''\n conn = self.get_conn()\n cur = conn.cursor()\n if self.supports_autocommit:\n cur.execute('SET autocommit = 0')\n conn.commit()\n i = 0\n for row in rows:\n i += 1\n l = []\n for cell in row:\n if isinstance(cell, basestring):\n l.append(\"'\" + str(cell).replace(\"'\", \"''\") + \"'\")\n elif cell is None:\n l.append('NULL')\n elif type(cell) == float and numpy.isnan(cell): #coerce numpy NaN to NULL\n l.append('NULL')\n elif isinstance(cell, numpy.datetime64):\n l.append(\"'\" + str(cell) + \"'\")\n elif isinstance(cell, datetime):\n l.append(\"to_date('\" + cell.strftime('%Y-%m-%d %H:%M:%S') + \"','YYYY-MM-DD HH24:MI:SS')\")\n else:\n l.append(str(cell))\n values = tuple(l)\n sql = 'INSERT /*+ APPEND */ INTO {0} {1} VALUES ({2})'.format(table, target_fields, ','.join(values))\n 
cur.execute(sql)\n if i % commit_every == 0:\n conn.commit()\n logging.info('Loaded {i} into {table} rows so far'.format(**locals()))\n conn.commit()\n cur.close()\n conn.close()\n logging.info('Done loading. Loaded a total of {i} rows'.format(**locals()))", "title": "" }, { "docid": "fec3c5a27ee188175e51f3f11b314daf", "score": "0.5172335", "text": "def main():\n\n bq = _bq.BQHandler()\n io = _io.IO()\n\n times = []\n # times.append({'starttime': dt.datetime.strptime('2011-02-01', \"%Y-%m-%d\"),\n # 'endtime': dt.datetime.strptime('2011-02-28', \"%Y-%m-%d\")\n # })\n times.append({'starttime': dt.datetime.strptime('2012-01-01', \"%Y-%m-%d\"),\n 'endtime': dt.datetime.strptime('2012-01-31', \"%Y-%m-%d\")\n })\n # times.append({'starttime': dt.datetime.strptime('2013-06-01', \"%Y-%m-%d\"),\n # 'endtime': dt.datetime.strptime('2013-06-30', \"%Y-%m-%d\")})\n\n #logging.info('Using times: {}'.format(times))\n\n for t in times:\n start = t['starttime']\n end = t['endtime']\n\n logging.info('Processing time range {} - {}'.format(start.strftime('%Y-%m-%d %H:%M'),\n end.strftime('%Y-%m-%d %H:%M')))\n\n logging.info('Reading data...')\n data = bq.get_rows(start,\n end,\n parameters=['*'],\n project=options.project,\n dataset=options.src_dataset,\n table=options.src_table)\n\n #print(data.shape)\n #data.set_index(['time', 'trainstation'], inplace=True)\n #print(data)\n bq.dataset_to_table(data, options.dst_dataset, options.dst_table)", "title": "" }, { "docid": "871f69669e51dba18a2f68bad2e6bde0", "score": "0.5160392", "text": "def enqueue():\n\tfrappe.enqueue(method=send_statements, queue='long',\n\t\t\t\t\ttimeout=300, is_async=True)", "title": "" }, { "docid": "b557847a19d8f357349fbae5c9479dc3", "score": "0.51529014", "text": "def test_batch(self):\n exc = self._perform_cql_statement(\"\"\"\n BEGIN BATCH\n INSERT INTO mytable (key, value) VALUES ('key2', 'Value 2') USING TIMESTAMP 1111111111111111\n INSERT INTO mytable (key, value) VALUES ('key3', 'Value 3') USING TIMESTAMP 1111111111111112\n APPLY BATCH\n \"\"\")\n if self.supports_v5_protocol:\n self._assert_error_code_map_exists_with_code(exc, 0x0000)", "title": "" }, { "docid": "376bb924112585c483793582a3303cf0", "score": "0.51155293", "text": "def load_batch_bigquery(network,\n s3_prefix=\"\",\n access_key_id=\"\",\n secret_access_key=\"\",\n region_name=\"us-east-1\",\n project_id=None,\n dataset_id=None,\n table_id=None,\n dry_run=False):\n bq_conn = None\n if not dry_run:\n bq_conn = google_build(\n 'bigquery', 'v2',\n credentials=GoogleCredentials.get_application_default())\n s3_stream = events_s3(network, prefix=s3_prefix, access_key_id=access_key_id,\n secret_access_key=secret_access_key,\n region_name=region_name)\n\n def chunked(seq, chunk_size):\n chunk = []\n for item in seq:\n chunk.append(item)\n if len(chunk) >= chunk_size:\n yield chunk\n chunk = []\n for events in chunked(s3_stream, 500):\n write_events_bigquery(events, bq_conn=bq_conn, project_id=project_id,\n dataset_id=dataset_id, table_id=table_id)", "title": "" }, { "docid": "746c4ce1bb8765d4113f40e81f32f64d", "score": "0.50975853", "text": "def _update_db_finish(self, rows):", "title": "" }, { "docid": "8988c13fe3b345ff9f82049e55338213", "score": "0.5096378", "text": "def send_all(self):\n queue = self.queue\n count = {}\n self.queue = {}\n for person in queue.keys():\n count[person] = 0\n\n # Function within a function\n # For use with the while loop, to save have to do these tests within the loop itself (looks messy)\n def count_unprocessed():\n result = False\n for c in 
count:\n if count[c] < len(queue[c]):\n result = True\n return result\n\n while count_unprocessed():\n sending = []\n for person in queue:\n # Should never exceed 25, but playing safe in case of other errors\n if len(sending) >= 25:\n break\n\n add = min(5, 25-len(sending))\n if count[person] < len(queue[person]):\n # Add the next messages by getting the appropriate splice of the queue\n # Based upon rate limit of 5 messages per person, 25 messages per batch\n sending += queue[person][count[person]:count[person] + add]\n count[person] += add\n\n # Be certain there is actually something to send\n if len(sending) > 0:\n try:\n self.kik.send_messages(sending)\n except KikError as e:\n # Log the error, will appear in apache error logs when running under wsgi\n error_handler(self, e, queue, count, sending)\n # Also need to make certain we don't cause Kik server to go into a loop of resending the message\n # Returning 500 would cause the message to be resent up to a total of 4 times.\n # Hence 202 is a received and being processed but no guarantee of any outcome.\n #\n # NOTE: returning 202 has not actually been tested yet (to confirm it doesn't cause loop).\n # Other option might be to return 504 instead.\n return 202\n\n return 200", "title": "" }, { "docid": "90530b3dd591aa0bfa3fe710ba9ec25c", "score": "0.5092372", "text": "def _write_rows(self):\n try:\n xf = self._writer.xf.send(True)\n except StopIteration:\n self._already_saved()\n\n with xf.element(\"sheetData\"):\n row_idx = 1\n try:\n while True:\n row = (yield)\n row = self._values_to_row(row, row_idx)\n self._writer.write_row(xf, row, row_idx)\n row_idx += 1\n except GeneratorExit:\n pass\n\n self._writer.xf.send(None)", "title": "" }, { "docid": "74f92d83c303cb2a0126bfa58148b994", "score": "0.5077987", "text": "def _send_batch(self, stream_name, record_batch, function_name):\n @backoff.on_predicate(backoff.fibo,\n lambda resp: resp['FailedPutCount'] > 0,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n max_value=self.MAX_BACKOFF_FIBO_VALUE,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n @backoff.on_exception(backoff.fibo,\n self.EXCEPTIONS_TO_BACKOFF,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n def _firehose_request_helper(data):\n \"\"\"Firehose request wrapper to use with backoff\"\"\"\n # Use the current length of data here so we can track failed records that are retried\n LOGGER.debug('Sending %d records to firehose %s', len(data), stream_name)\n\n response = self._client.put_record_batch(DeliveryStreamName=stream_name, Records=data)\n\n # Log this as an error for now so it can be picked up in logs\n if response['FailedPutCount'] > 0:\n LOGGER.warning('Received non-zero FailedPutCount: %d', response['FailedPutCount'])\n # Strip out the successful records so only the failed ones are retried. 
This happens\n # to the list of dictionary objects, so the called function sees the updated list\n self._strip_successful_records(data, response)\n\n return response\n\n # The record here already contains a newline, so do not append one\n records_data = [\n {'Data': record}\n for record in record_batch\n ]\n\n # The try/except here is to catch the raised error at the end of the backoff\n try:\n return _firehose_request_helper(records_data)\n except self.EXCEPTIONS_TO_BACKOFF:\n LOGGER.exception('Firehose request failed')\n # Use the current length of the records_data in case some records were\n # successful but others were not\n self._log_failed(len(records_data), function_name)", "title": "" }, { "docid": "e1b717b41f61268402ed756e1577e6d5", "score": "0.50601166", "text": "def query(\n self, tq: str, row_type: Any[Dict, List, Tuple] = None\n ) -> Generator[Any[Dict, List, Tuple], None, None]:\n params = {\n \"key\": self.spreadsheet.id,\n \"tq\": self._update_tq_cols(tq),\n \"gid\": self.id,\n }\n response = requests.get(TQ_BASE_URL, params=params)\n result = handle_tq_response(response)\n return self._result_handler(result, row_type=row_type)", "title": "" }, { "docid": "b1d5acff50604f45cd7aac84919fcb0d", "score": "0.5051986", "text": "def execute_batch(self):\n batch_request = ODataV4BatchRequest(self)\n queries = [qry for qry in self.pending_request().next_query()]\n batch_request.add_query(BatchQuery(self, queries)) # Aggregate requests into batch request\n batch_request.execute_query()", "title": "" }, { "docid": "920e4c0d66fecf0e793fddfc493b5662", "score": "0.5048642", "text": "def send_thread(thread_name,q):\n while True:\n output = {}\n output['stop_id'] = STOP_ID\n output['batches'] = []\n if not q.empty():\n while not q.empty():\n b = q.get()\n if ( b is not None):\n output['batches'].append(b)\n\t message = fog_agent_new_tests.compressMessage(output)\n\t print message\n\t \n #print cloud_client(message) \n time.sleep(30)", "title": "" }, { "docid": "41bcff4dd2d032e6fea122abeb2b401b", "score": "0.5042067", "text": "def batch(self):\n raise NotImplementedError('FakeDatastoreClient.batch not implemented')", "title": "" }, { "docid": "c294bcfd5798e4518556f2d80979e58d", "score": "0.5011305", "text": "def submit(self):\n request = {\n 'type': 'batch',\n 'tasks': self.tasks,\n 'count': self.count\n }\n\n self.client.send(request)\n results = self.client.recv()\n\n self.tasks = {}\n self.count = 0\n return results", "title": "" }, { "docid": "5e86dbb91db33af43c431d5cb659e946", "score": "0.50079894", "text": "def execute_many(df):\n print(\"started writting\")\n # Create a list of tupples from the dataframe values\n tuples = [tuple(x) for x in df.to_numpy()]\n # Comma-separated dataframe columns\n cols = \",\".join(list(df.columns))\n # SQL quert to execute\n query = 'INSERT INTO \"Mega\"(%s) VALUES(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s, %%s)' % (\n cols,\n )\n cursor = connection.cursor()\n\n try:\n print(\"writting more\")\n cursor.executemany(query, tuples)\n connection.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(tuples)\n\n print(\"Error: %s\" % error)\n connection.rollback()\n cursor.close()\n return 1\n print(\"execute_many() done\")\n cursor.close()", "title": "" }, { "docid": "6b0c50606df479b9badc6187710f9052", "score": "0.50019884", "text": "def insert_rows(self, rows):\n self.n_rows += len(rows)\n self._write_rows(rows)", "title": "" }, { "docid": "13af23e1d132fd66defdcf9048f4ad2c", "score": "0.5000181", "text": "def send_batch_load_request(self):\n batch 
= json.dumps(self.__get_batch_data())\n # print json.dumps(batch)\n kafka_producer.push(json.dumps(batch), TOPIC)", "title": "" }, { "docid": "1583ce4e985e89bd09d687e5ebdf7f7c", "score": "0.49949905", "text": "def async_add_table_data(self, *args, **kwargs):\n return self.writer_dbpool.runInteraction(self.add_table_data, *args, **kwargs)", "title": "" }, { "docid": "3f89fa7c994cf0e23eadca2239ba33f6", "score": "0.49654976", "text": "def send_in_batches(self, email_send_task):\n massmail_send_in_batches.delay(self, email_send_task)", "title": "" }, { "docid": "610963822dbd7d7331bdb3a24c8ae90e", "score": "0.49349558", "text": "def query(self, query):\n logging.debug(\"Launching BigQuery query\")\n q = self.connection.run_sync_query(query)\n q.timeout_ms = 600000 # 10 minutes to execute the BQ query should be more than enough. 1 minute was too short\n # TODO use maxResults https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query? :\n q.use_legacy_sql = False\n q.run()\n logging.debug(\"Fetching BigQuery results\")\n return q.fetch_data()", "title": "" }, { "docid": "711e35c2a1d297d0f6ea5afda2a4143c", "score": "0.4932583", "text": "def bulk_op(iter, chunk_size=500):\n bulk(es, iter,\n stats_only=True,\n chunk_size=chunk_size,\n request_timeout=200.0)", "title": "" }, { "docid": "e26ca104b87e615203718be61e25e3a8", "score": "0.49320462", "text": "def insert_rows(\n self,\n table: Any,\n rows: Any,\n target_fields: Any = None,\n commit_every: Any = 1000,\n replace: Any = False,\n **kwargs,\n ) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "b6a182a688955f1531bcb0f25f12d884", "score": "0.49135655", "text": "def write_row(self, message=None):\n self.file.write(message)\n self.file.write('\\n')\n now = int(datetime.now().timestamp())\n # flush every 5 secs\n if now - self.last_flush > 5:\n self.last_flush = now\n self.file.flush()", "title": "" }, { "docid": "987efe741690f18244b484a596256b7b", "score": "0.49058977", "text": "def BatchQuery(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "ec104dc39cf9e34aaed5efc5616b6ef1", "score": "0.4902151", "text": "def write(self):\n self.build_statement() # Builds self.query_base\n if not self.conn.is_connected(): # TODO Replace with a forced timeout?\n self.renew_connection()\n cursor = self.conn.cursor()\n cursor.executemany(self.query_base, self.emit_buffer[:self.emit_buffer_index+1])\n self.conn.commit()\n cursor.close()", "title": "" }, { "docid": "b169b1456c4b1c5af4b2fe2eac48059b", "score": "0.48951352", "text": "async def send_data(client: AsyncClient, q: asyncio.Queue):\n while True:\n data_list = await q.get()\n \n try:\n response = await client.index_documents(ENGINE, data_list)\n except Exception as e:\n print(e)\n q.task_done()", "title": "" }, { "docid": "91cd41d828de89b4ce65f592090a20ca", "score": "0.48940787", "text": "def batch_submit_records(self):\n if not self.records_to_submit:\n print(\" Nothing new to submit\")\n return\n\n n_batches = ceil(len(self.records_to_submit) / self.submit_batch_size)\n for i in range(n_batches):\n records = self.records_to_submit[\n i * self.submit_batch_size : (i + 1) * self.submit_batch_size\n ]\n print(\n \" Submitting {} records: {}\".format(\n len(records), [r[\"submitter_id\"] for r in records]\n )\n )\n\n response = requests.put(\n \"{}/api/v0/submission/{}/{}\".format(\n self.base_url, self.program_name, 
self.project_code\n ),\n headers=self.headers,\n data=json.dumps(records),\n )\n assert (\n response.status_code == 200\n ), \"Unable to submit to Sheepdog: {}\\n{}\".format(\n response.status_code, response.text\n )\n\n self.records_to_submit = []", "title": "" }, { "docid": "6ac7f97f48f2fc328a6d83b2ea8a0a78", "score": "0.48885602", "text": "def send_bulk(self, requests):\n for entry in requests:\n self.prometheus.put(entry)", "title": "" }, { "docid": "be52d8f0e1f8db1b59c55a4c68ece7b8", "score": "0.48819563", "text": "def write_rows_stream(self, request: WriteRowsRequest) -> StreamWriter:\n if not isinstance(request, WriteRowsRequest):\n raise ValueError(\"Use WriteRowsRequest class to build request for write rows interface\")\n\n url = self._get_resource(\"sessions\", request.session_id, \"data\")\n headers = {\n \"Content-Type\": \"application/octet-stream\",\n \"Content-Encoding\": \"deflate\",\n \"Transfer-Encoding\": \"chunked\"\n }\n\n params = {\n \"attempt_number\": str(request.attempt_number),\n \"block_number\": str(request.block_number)\n }\n if request.data_format.type != None:\n params[\"data_format_type\"] = str(request.data_format.type)\n if request.data_format.version != None:\n params[\"data_format_version\"] = str(request.data_format.version)\n\n def upload():\n return self.tunnel_rest.post(url, params=params, headers=headers, file_upload=True)\n\n return StreamWriter(upload)", "title": "" }, { "docid": "b2a813a8cfda68d6f66e73d8d97d48a7", "score": "0.48687708", "text": "async def send_table_names(state):\n names = sorted(state.file_backend.tables)\n data = json.dumps(names)\n await send_message(state, \"table_names\", data=data)", "title": "" }, { "docid": "12e04afd31947a612ac98c92741583a2", "score": "0.4849183", "text": "def send_bulk_tweets( self, tweets):\n self.producer.produce(json.dumps(tweets))", "title": "" }, { "docid": "5def6a865b05ad52fb583a512229a6b3", "score": "0.4833008", "text": "def add_all(self, rows):\n try:\n for row in rows:\n self.add(row)\n self.commit()\n except sql.Error as error:\n self.cursor.execute(\"rollback\")\n raise error", "title": "" }, { "docid": "76b26c382786bdfe8f4e86cf74194878", "score": "0.48316255", "text": "def fetch_batch(self, batch_size=10000, subset=None):\n nrecords = len(self)\n table_name = self._schema_plus_table_name()\n\n for i in range(1, nrecords + 1, batch_size):\n start_i = i\n end_i = i + batch_size - 1 if i + batch_size - 1 <= nrecords else nrecords\n\n # Build query\n select_str = \"*\" if subset is None else \", \".join(subset)\n\n query = \"\"\"\n SELECT *\n FROM (\n SELECT ROW_NUMBER() OVER() AS row_number, {select_str}\n FROM {from_str}) t1\n WHERE row_number BETWEEN {start_i} AND {end_i};\n \"\"\".format(\n select_str=select_str,\n from_str=table_name,\n start_i=start_i,\n end_i=end_i)\n\n # Fetch data\n yield self._client.fetch(query)", "title": "" }, { "docid": "55783730313fa0661176018cb9f72fc4", "score": "0.48171827", "text": "def sql_bulk_insert_from_records_incremental(\n *,\n connection: Connection,\n table: Table,\n columns: T.Tuple[str],\n record_iterable: T.Iterable,\n):\n dict_iterable = (dict(zip(columns, row)) for row in record_iterable)\n for group in iterate_in_chunks(10000, dict_iterable):\n with connection.begin():\n connection.execute(table.insert(), group)\n # self.session.flush() -- Did this line do anything?\n yield", "title": "" }, { "docid": "a563a9ff9c93f86b0f66e41f8602af65", "score": "0.48157364", "text": "def run(self):\n while self.running:\n # Get a write job\n logTuple = 
self_.pop_from_flush_queue()\n\n if logTuple:\n (keyspace, columnFamily, columnDictionary) = logTuple\n\n retry = True\n\n try:\n retry = not self.flush_to_cassandra(keyspace,\n columnFamily, columnDictionary)\n except Exception, e:\n pstderr(str(e))\n finally:\n # Note: we want to finish the flush even if it\n # failed; otherwise we don't call task_done enough\n # times...\n self_.finished_flush()\n\n if retry:\n # Could not flush to cassandra so add it\n # back to the queue\n pstderr(\"Flush was not successful so add it back \"\n \"to the queue...\")\n self_.push_to_flush_queue(logTuple)\n\n # Note: success or fail, we don't want to try to flush *constantly*\n time.sleep(float(self_.conf[\"server\"][\"maxFlushInterval\"]))", "title": "" }, { "docid": "46155cbe875c9db2d88265b72c66f45e", "score": "0.4813233", "text": "def insert_into_table_batched(df, table, cursor, batch_size=5000):\n columns = df.columns\n columns_comma = '\"' + '\", \"'.join(columns) + '\"'\n\n query = 'INSERT INTO {table} ({columns_comma}) VALUES '\n query = query.format(table=table,\n columns_comma=columns_comma)\n\n lsts = df.values.tolist()\n subs = list(chunker(lsts, batch_size))\n\n args = '(' + ', '.join(len(columns) * ['%s']) + ')'\n\n log.info(\"insert {} batches with {} rows each\".format(len(subs), batch_size))\n\n n = len(subs)\n for i, lst in enumerate(subs):\n values = ', '.join(cursor.mogrify(args, x).decode('utf-8') for x in lst)\n equery = query + values\n\n cursor.execute(equery)\n\n log.info(' batch {0:>4}/{1:>4}'.format(i+1, n))", "title": "" }, { "docid": "898eefb5142fdb4194c80585697409aa", "score": "0.48067272", "text": "def get_table_rows(self, conn):\n raise NotImplementedError(\"Please implement this method\")", "title": "" }, { "docid": "0e579f065df65040d246512663fba752", "score": "0.4804486", "text": "async def async_send_flush_command( self, future=None, repeat=15 ):\n print('requesting queue flush')\n\n tasks = [ asyncio.ensure_future(self.http_client.fetch( self.url, method=\"GET\" )) for _ in range( 0, repeat ) ]\n for future in asyncio.as_completed(tasks):\n data = await future", "title": "" }, { "docid": "8a80479312681cb2b0a91761b369e39c", "score": "0.47989482", "text": "def retrieve_batch(self, table_name, batch_size, offset):\n\n cur = self.__connection.cursor()\n cur.execute(\"SELECT * FROM \" + table_name + \" LIMIT ? 
OFFSET ?\",\n (batch_size, offset))\n rows = cur.fetchall()\n return rows", "title": "" }, { "docid": "fafd0276bac0336b91d1c3709544ccdc", "score": "0.47852337", "text": "def _send_to_firehose(self):\n def _chunk(record_list, chunk_size):\n \"\"\"Helper function to chunk payloads\"\"\"\n for item in range(0, len(record_list), chunk_size):\n yield record_list[item:item + chunk_size]\n\n def _check_record_batch(batch):\n \"\"\"Helper function to verify record size\"\"\"\n for index, record in enumerate(batch):\n if len(str(record)) > MAX_RECORD_SIZE:\n # Show the first 1k bytes in order to not overload\n # CloudWatch logs\n LOGGER.error('The following record is too large'\n 'be sent to Firehose: %s', str(record)[:1000])\n MetricLogger.log_metric(FUNCTION_NAME,\n MetricLogger.FIREHOSE_FAILED_RECORDS,\n 1)\n batch.pop(index)\n\n delivery_stream_name_pattern = 'streamalert_data_{}'\n\n # Iterate through each payload type\n for log_type, records in self.categorized_payloads.items():\n # This same method is used when naming the Delivery Streams\n formatted_log_type = log_type.replace(':', '_')\n\n for record_batch in _chunk(records, MAX_BATCH_SIZE):\n stream_name = delivery_stream_name_pattern.format(formatted_log_type)\n _check_record_batch(record_batch)\n\n resp = self.firehose_client.put_record_batch(\n DeliveryStreamName=stream_name,\n # The newline at the end is required by Firehose,\n # otherwise all records will be on a single line and\n # unsearchable in Athena.\n Records=[{'Data': json.dumps(record, separators=(\",\", \":\")) + '\\n'}\n for record\n in record_batch])\n\n # Error handle if failures occured\n # TODO(jack) implement backoff here once the rule processor is split\n if resp.get('FailedPutCount') > 0:\n failed_records = [failed\n for failed\n in resp['RequestResponses']\n if failed.get('ErrorCode')]\n MetricLogger.log_metric(FUNCTION_NAME,\n MetricLogger.FIREHOSE_FAILED_RECORDS,\n resp['FailedPutCount'])\n # Only print the first 100 failed records\n LOGGER.error('The following records failed to Put to the'\n 'Delivery stream %s: %s',\n stream_name,\n json.dumps(failed_records[:100], indent=2))\n else:\n MetricLogger.log_metric(FUNCTION_NAME,\n MetricLogger.FIREHOSE_RECORDS_SENT,\n len(record_batch))\n LOGGER.info('Successfully sent %d messages to Firehose:%s',\n len(record_batch),\n stream_name)", "title": "" }, { "docid": "1df9009ed593a4127ea2a8556632ed56", "score": "0.47834793", "text": "def gcs_to_bq(source_bucket_name, destination_bucket_name, dataset_id, table_id):\n # configure BQ details\n bq_client = bigquery.Client()\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON\n dataset_ref = bq_client.dataset(dataset_id)\n\n # configure GCS details\n gcs_client = storage.Client()\n source_bucket = gcs_client.get_bucket(source_bucket_name)\n destination_bucket = gcs_client.get_bucket(destination_bucket_name)\n\n # list files in source bucket\n for blob in source_bucket.list_blobs():\n filename = blob.name\n print_log(\"found file: {}\".format(filename))\n file_uri = \"gs://{}/{}\".format(source_bucket_name, filename)\n\n # load file to BQ\n load_job = bq_client.load_table_from_uri(file_uri, dataset_ref.table(table_id), job_config=job_config)\n print_log(\"starting job {}\".format(load_job.job_id))\n load_job.result()\n destination_table = bq_client.get_table(dataset_ref.table(table_id))\n print_log(\"loaded {} rows to BigQuery\".format(destination_table.num_rows))\n\n # transfer file to processed bucket\n source_blob 
= source_bucket.blob(filename)\n destination_blob = source_bucket.copy_blob(source_blob, destination_bucket, filename)\n print_log(\"Transfered file to processed bucket: {}\".format(filename))\n\n # delete file from staging bucket\n source_blob.delete()\n print_log(\"Deleted file from staging bucket: {}\".format(filename))", "title": "" }, { "docid": "11386d048a97016f064655886aba34e4", "score": "0.4780855", "text": "def send_flush_command( self, future=None, repeat=100 ):\n print('requesting queue flush')\n for _ in range( 0, repeat ):\n self.http_client.fetch( self.url, method=\"GET\" )", "title": "" }, { "docid": "11ec5cb67c82858869301ef52e83138c", "score": "0.47746924", "text": "def get_data(self) -> iter:\r\n with self.__connection.cursor() as cursor:\r\n columns_str = \", \".join(self.columns)\r\n cursor.execute(f\"SELECT {columns_str} FROM {self.__table_name}\")\r\n\r\n self.__connection.commit()\r\n\r\n for row in cursor:\r\n yield self.get_supboard_from_row(row)", "title": "" }, { "docid": "79b8cc76cbb0760102a118000070f79d", "score": "0.47569862", "text": "def next_record(self) -> Iterable[TableQuery]:\n for row in self._get_raw_extract_iter():\n tq = TableQuery(\n query=row[\"query\"],\n user_name=row[\"usename\"],\n starttime=str(row[\"starttime\"]),\n endtime=str(row[\"endtime\"]),\n analysis_date=str(self.analysis_date),\n database=row[\"database\"],\n aborted=row[\"aborted\"],\n sql=row[\"querytxt\"],\n )\n yield tq", "title": "" }, { "docid": "0ce7d7177f6c3c6c293df38cc896586c", "score": "0.47527298", "text": "def write_events_bigquery(events,\n bq_conn=None,\n project_id=None,\n dataset_id=None,\n table_id=None):\n insert_body = {\n \"kind\": \"bigquery#tableDataInsertAllRequest\",\n \"skipInvalidRows\": False,\n \"ignoreUnknownValues\": False,\n \"rows\": [event_to_bigquery(event) for event in events]\n }\n if bq_conn is None:\n pprint.PrettyPrinter(indent=0).pprint(insert_body)\n return False\n query = bq_conn.tabledata().insertAll(projectId=project_id,\n datasetId=dataset_id,\n tableId=table_id,\n body=insert_body)\n response = query.execute(num_retries=5)\n if 'insertErrors' in response:\n for error_set in response['insertErrors']:\n for error in error_set['errors']:\n log.error(error)\n return False\n return True", "title": "" }, { "docid": "999a5b41501589ec7593c703824b126a", "score": "0.4752722", "text": "def get100records(self, session):\n queryStr = \"select * from simpletimeseries limit 100\"\n rows = session.execute(queryStr)\n df = pd.DataFrame(rows)\n # columns = timestampcol, geohash, energy\n df.to_csv('100rowsCassandra.csv')\n # return df", "title": "" }, { "docid": "87bc61ffa23a7686b4dd66dc07b98a6e", "score": "0.47421634", "text": "def read_gbq(\n project_id: str,\n dataset_id: str,\n table_id: str,\n row_filter: str = \"\",\n columns: list[str] = None,\n read_kwargs: dict = None,\n):\n read_kwargs = read_kwargs or {}\n with bigquery_clients(project_id) as (bq_client, bqs_client):\n table_ref = bq_client.get_table(f\"{dataset_id}.{table_id}\")\n if table_ref.table_type == \"VIEW\":\n raise TypeError(\"Table type VIEW not supported\")\n\n def make_create_read_session_request(row_filter=\"\"):\n return bigquery_storage.types.CreateReadSessionRequest(\n max_stream_count=0, # 0 -> use as many streams as BQ Storage will provide\n parent=f\"projects/{project_id}\",\n read_session=bigquery_storage.types.ReadSession(\n data_format=bigquery_storage.types.DataFormat.ARROW,\n read_options=bigquery_storage.types.ReadSession.TableReadOptions(\n row_restriction=row_filter, 
selected_fields=columns\n ),\n table=table_ref.to_bqstorage(),\n ),\n )\n\n # Create a read session in order to detect the schema.\n # Read sessions are light weight and will be auto-deleted after 24 hours.\n session = bqs_client.create_read_session(\n make_create_read_session_request(row_filter=row_filter)\n )\n schema = pyarrow.ipc.read_schema(\n pyarrow.py_buffer(session.arrow_schema.serialized_schema)\n )\n meta = schema.empty_table().to_pandas()\n\n label = \"read-gbq-\"\n output_name = label + tokenize(\n project_id,\n dataset_id,\n table_id,\n row_filter,\n read_kwargs,\n )\n\n layer = DataFrameIOLayer(\n output_name,\n meta.columns,\n [stream.name for stream in session.streams],\n partial(\n bigquery_read,\n make_create_read_session_request,\n project_id,\n read_kwargs,\n ),\n label=label,\n )\n divisions = tuple([None] * (len(session.streams) + 1))\n\n graph = HighLevelGraph({output_name: layer}, {output_name: set()})\n return new_dd_object(graph, output_name, meta, divisions)", "title": "" }, { "docid": "eecf300c8f83df56af97a267f69bd7e1", "score": "0.4738013", "text": "def iter(self, query, *parameters):\n with self._conn_pool.cursor() as cursor:\n self._execute(cursor, query, parameters)\n column_names = [d[0] for d in cursor.description]\n for row in cursor:\n yield Row(zip(column_names, row))", "title": "" }, { "docid": "588e56ac4a580e490687e71074fd4dc2", "score": "0.47376627", "text": "def send_loop():\n\n while True:\n while not Message.objects.all():\n logger.debug(\"sleeping for %s seconds before checking queue again\" % EMPTY_QUEUE_SLEEP)\n time.sleep(EMPTY_QUEUE_SLEEP)\n send_all()", "title": "" }, { "docid": "9cb47cc4f8164d0fc82d99fdb3814dd7", "score": "0.47308686", "text": "async def create_rows(self, model: Model, rows: list):\n if not self._connection:\n await self.connection()\n table = f\"{model.Meta.schema}.{model.Meta.name}\"\n fields = model.columns(model)\n results = []\n stmt = None\n for row in rows:\n source = []\n pk = []\n cols = []\n for col, field in fields.items():\n print(\"HERE \", col, field)\n if col not in row:\n # field doesnt exists\n default = field.default\n if default is not None:\n if callable(default):\n source.append(default())\n else:\n source.append(default)\n cols.append(col)\n else:\n # val = getattr(model, col)\n # if val is not None:\n # source.append(val)\n # elif field.required is True or field.primary_key is True:\n if field.required is True:\n raise StatementError(f\"Missing Required Field: {col}\")\n else:\n try:\n val = row[col]\n source.append(val)\n cols.append(col)\n except (KeyError, TypeError):\n continue\n try:\n if field.primary_key is True:\n pk.append(col)\n except AttributeError:\n pass\n if not stmt:\n columns = \", \".join(cols)\n n = len(cols)\n values = \",\".join([\"${}\".format(a) for a in range(1, n + 1)])\n primary = \"RETURNING *\"\n insert = f\"INSERT INTO {table} ({columns}) VALUES ({values}) {primary}\"\n logging.debug(f\"INSERT: {insert}\")\n try:\n stmt = await self._connection.prepare(insert)\n except Exception as err:\n print(traceback.format_exc())\n raise Exception(\n \"Exception creating Prepared Sentence {}: {}\".format(\n model.Meta.name, err\n )\n )\n try:\n result = await stmt.fetchrow(*source, timeout=2)\n logging.debug(stmt.get_statusmsg())\n if result:\n results.append(result)\n except asyncpg.exceptions.UniqueViolationError as err:\n raise StatementError(\"Constraint Error: {}\".format(err))\n except Exception as err:\n print(traceback.format_exc())\n raise Exception(\"Error Bulk Insert {}: 
{}\".format(table, err))\n else:\n return results", "title": "" }, { "docid": "fb9248626d3c159d4b059ba62fcc8cf9", "score": "0.4729991", "text": "def rows(self):\n\n with self.input().engine.begin() as con:\n metadata = sqlalchemy.MetaData()\n engine = self.input().engine\n table = self.input().target_table\n table_bound = sqlalchemy.Table(table,\n metadata,\n autoload=True,\n autoload_with=engine)\n\n result = con.execute(table_bound.select())\n for row in result:\n yield dict(row)", "title": "" }, { "docid": "4a8eae3fff7dfded8c6ea4ba2a977eb9", "score": "0.4728787", "text": "async def runQuery(\n self, query: Query, parameters: Parameters | None = None\n ) -> Rows:", "title": "" }, { "docid": "4857b6b6cc8f6c378ac27cb904de77b3", "score": "0.47272712", "text": "def set_rows(self, rows):\n if not isinstance(rows, int):\n raise ApiError(f\"Rows must be an integer. {rows} is a {type(rows)}.\")\n if rows > 10000:\n raise ApiError(\"Maximum allowed value for rows is 10000\")\n self._batch_size = rows\n return self", "title": "" }, { "docid": "820b260c5cecc0a0b7b33c84ba78d7d8", "score": "0.47267073", "text": "def run(self):\n\n query_handler, reply = self.process_query()\n\n if not query_handler.PENDING:\n MessageTriage.ACTIVE_QUERIES[self.user_id] = None\n else:\n MessageTriage.ACTIVE_QUERIES[self.user_id] = query_handler\n\n for msg in reply:\n text = msg['text']\n attachments = msg['attachments']\n self.send_message(self.username, text, attachments, self.channel)", "title": "" }, { "docid": "024d3330568ebddb96fe1b2cf20ccd4e", "score": "0.47214937", "text": "def produce_events(events, queue_url, batch_size=10, randomize_delay=0):\n client = boto3.client('sqs', region_name=CURRENT_REGION)\n\n for chunk in chunks(events, batch_size):\n records = [make_sqs_record(event, delay_seconds=get_random_delay(randomize_delay)) for event in chunk]\n\n client.send_message_batch(Entries=records, QueueUrl=queue_url)", "title": "" }, { "docid": "8322c34316262a5e99383fcac2a26383", "score": "0.47214234", "text": "def iter(self, query, *parameters, **kwparameters):\n\n\t\tself._ensure_connected()\n\t\tcursor = MySQLConnector.cursor.MySQLCursor(self._db)\n\t\ttry:\n\t\t\tself._execute(cursor, query, parameters, kwparameters)\n\t\t\tcolumn_names = [d[0] for d in cursor.description]\n\t\t\tfor row in cursor:\n\t\t\t\tyield Row(zip(column_names, row))\n\t\tfinally:\n\t\t\tcursor.close()", "title": "" }, { "docid": "56e2f9afc794e499b65762d1a2f9f2fc", "score": "0.47153994", "text": "def sqs_push(attr, bucket_name, table):\n response = table.scan(FilterExpression=Attr(attr + '_seen').eq(0))\n item_ids = [cl_post['id'] for cl_post in response['Items']]\n app_queue = '_' + bucket_name\n try:\n queue = sqs.get_queue_by_name(QueueName=attr + app_queue)\n queue.delete()\n except:\n pass\n time.sleep(65)\n queue = sqs.create_queue(QueueName=attr + app_queue)\n for itm in item_ids:\n response = queue.send_message(MessageBody=itm)\n return", "title": "" }, { "docid": "885e9dda53f6ffae11c3617ffd803b9b", "score": "0.471293", "text": "def consume(self, rowid: int, row: List):\n self.writer.write(row)", "title": "" }, { "docid": "716d17108681a195e99886b3c2c992c5", "score": "0.4712533", "text": "def run_query(\n self,\n sql: str,\n destination_dataset_table: str | None = None,\n write_disposition: str = \"WRITE_EMPTY\",\n allow_large_results: bool = False,\n flatten_results: bool | None = None,\n udf_config: list | None = None,\n use_legacy_sql: bool | None = None,\n maximum_billing_tier: int | None = None,\n maximum_bytes_billed: 
float | None = None,\n create_disposition: str = \"CREATE_IF_NEEDED\",\n query_params: list | None = None,\n labels: dict | None = None,\n schema_update_options: Iterable | None = None,\n priority: str | None = None,\n time_partitioning: dict | None = None,\n api_resource_configs: dict | None = None,\n cluster_fields: list[str] | None = None,\n location: str | None = None,\n encryption_configuration: dict | None = None,\n ) -> str:\n warnings.warn(\n \"This method is deprecated. Please use `BigQueryHook.insert_job` method.\",\n AirflowProviderDeprecationWarning,\n )\n if not self.project_id:\n raise ValueError(\"The project_id should be set\")\n\n labels = labels or self.labels\n schema_update_options = list(schema_update_options or [])\n\n priority = priority or self.priority\n\n if time_partitioning is None:\n time_partitioning = {}\n\n if not api_resource_configs:\n api_resource_configs = self.api_resource_configs\n else:\n _validate_value(\"api_resource_configs\", api_resource_configs, dict)\n configuration = deepcopy(api_resource_configs)\n if \"query\" not in configuration:\n configuration[\"query\"] = {}\n\n else:\n _validate_value(\"api_resource_configs['query']\", configuration[\"query\"], dict)\n\n if sql is None and not configuration[\"query\"].get(\"query\", None):\n raise TypeError(\"`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`\")\n\n # BigQuery also allows you to define how you want a table's schema to change\n # as a side effect of a query job\n # for more details:\n # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions # noqa\n\n allowed_schema_update_options = [\"ALLOW_FIELD_ADDITION\", \"ALLOW_FIELD_RELAXATION\"]\n\n if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):\n raise ValueError(\n f\"{schema_update_options} contains invalid schema update options.\"\n f\" Please only use one or more of the following options: {allowed_schema_update_options}\"\n )\n\n if schema_update_options:\n if write_disposition not in [\"WRITE_APPEND\", \"WRITE_TRUNCATE\"]:\n raise ValueError(\n \"schema_update_options is only \"\n \"allowed if write_disposition is \"\n \"'WRITE_APPEND' or 'WRITE_TRUNCATE'.\"\n )\n\n if destination_dataset_table:\n destination_project, destination_dataset, destination_table = self.split_tablename(\n table_input=destination_dataset_table, default_project_id=self.project_id\n )\n\n destination_dataset_table = { # type: ignore\n \"projectId\": destination_project,\n \"datasetId\": destination_dataset,\n \"tableId\": destination_table,\n }\n\n if cluster_fields:\n cluster_fields = {\"fields\": cluster_fields} # type: ignore\n\n query_param_list: list[tuple[Any, str, str | bool | None | dict, type | tuple[type]]] = [\n (sql, \"query\", None, (str,)),\n (priority, \"priority\", priority, (str,)),\n (use_legacy_sql, \"useLegacySql\", self.use_legacy_sql, bool),\n (query_params, \"queryParameters\", None, list),\n (udf_config, \"userDefinedFunctionResources\", None, list),\n (maximum_billing_tier, \"maximumBillingTier\", None, int),\n (maximum_bytes_billed, \"maximumBytesBilled\", None, float),\n (time_partitioning, \"timePartitioning\", {}, dict),\n (schema_update_options, \"schemaUpdateOptions\", None, list),\n (destination_dataset_table, \"destinationTable\", None, dict),\n (cluster_fields, \"clustering\", None, dict),\n ]\n\n for param, param_name, param_default, param_type in query_param_list:\n if param_name not in configuration[\"query\"] and param in 
[None, {}, ()]:\n if param_name == \"timePartitioning\":\n param_default = _cleanse_time_partitioning(destination_dataset_table, time_partitioning)\n param = param_default\n\n if param in [None, {}, ()]:\n continue\n\n _api_resource_configs_duplication_check(param_name, param, configuration[\"query\"])\n\n configuration[\"query\"][param_name] = param\n\n # check valid type of provided param,\n # it last step because we can get param from 2 sources,\n # and first of all need to find it\n\n _validate_value(param_name, configuration[\"query\"][param_name], param_type)\n\n if param_name == \"schemaUpdateOptions\" and param:\n self.log.info(\"Adding experimental 'schemaUpdateOptions': %s\", schema_update_options)\n\n if param_name != \"destinationTable\":\n continue\n\n for key in [\"projectId\", \"datasetId\", \"tableId\"]:\n if key not in configuration[\"query\"][\"destinationTable\"]:\n raise ValueError(\n \"Not correct 'destinationTable' in \"\n \"api_resource_configs. 'destinationTable' \"\n \"must be a dict with {'projectId':'', \"\n \"'datasetId':'', 'tableId':''}\"\n )\n\n configuration[\"query\"].update(\n {\n \"allowLargeResults\": allow_large_results,\n \"flattenResults\": flatten_results,\n \"writeDisposition\": write_disposition,\n \"createDisposition\": create_disposition,\n }\n )\n\n if (\n \"useLegacySql\" in configuration[\"query\"]\n and configuration[\"query\"][\"useLegacySql\"]\n and \"queryParameters\" in configuration[\"query\"]\n ):\n raise ValueError(\"Query parameters are not allowed when using legacy SQL\")\n\n if labels:\n _api_resource_configs_duplication_check(\"labels\", labels, configuration)\n configuration[\"labels\"] = labels\n\n if encryption_configuration:\n configuration[\"query\"][\"destinationEncryptionConfiguration\"] = encryption_configuration\n\n job = self.insert_job(configuration=configuration, project_id=self.project_id, location=location)\n self.running_job_id = job.job_id\n return job.job_id", "title": "" }, { "docid": "eda9e10c74c3a571c660a0242878c7e9", "score": "0.47078162", "text": "async def async_get_next_all_link_records(self):\n self._clear_read_queue()\n retries = 3\n result = None\n while retries:\n result = await self._get_next_handler.async_send()\n retries -= 1\n\n if result == ResponseStatus.SUCCESS:\n try:\n async with async_timeout.timeout(TIMEOUT):\n yield await self._record_queue.get()\n except asyncio.TimeoutError:\n pass\n else:\n retries = 3\n return", "title": "" }, { "docid": "5ef2f3799bec164afbc58db55f113e3f", "score": "0.47022575", "text": "def insert_into_db(\n db_conn_params: dict, batched: list, query_template: str, ticker: str,\n) -> None:\n with psycopg2.connect(**db_conn_params) as conn:\n with conn.cursor() as cur:\n for b in batched:\n try:\n psycopg2.extras.execute_values(cur, query_template, b)\n conn.commit()\n except psycopg2.errors.InFailedSqlTransaction as e:\n print(f\"Insert did not work, ticker: {ticker}\")\n conn.rollback()\n pass", "title": "" }, { "docid": "75820083df4c1908146ca22efa497d90", "score": "0.4700969", "text": "def iter(self, query, *parameters):\n self._ensure_connected()\n cursor = self._cursor()\n try:\n self._execute(cursor, query, parameters)\n column_names = [d[0] for d in cursor.description]\n for row in cursor:\n yield Row(zip(column_names, row))\n finally:\n cursor.close()", "title": "" }, { "docid": "4e8cbc29202ef7476064ee05bc40923d", "score": "0.47004843", "text": "def run(self):\n self.create_table()\n\n for date, arr in self.data.items():\n for obj in arr:\n self.add_entry(date, 
obj)\n\n self.conn.commit()\n self.conn.close()", "title": "" }, { "docid": "bcb0e286c4661bb96bc5f0c6ed7fbc1d", "score": "0.46964183", "text": "def send_all(self) -> None:\n if not self.send_queue.empty():\n print(f'🪣 [{self.name.upper()}] Adding new products into the database')\n\n while(not self.send_queue.empty()):\n pid = self.send_queue.get() \n self._send_pid(pid)\n sleep(self.delay)\n with self.db_lock:\n print(c.yellow + f'🔌 [{self.name.upper()}] Added {pid} ({self.site.format_pid(pid)}) into the database' + c.reset)\n self.db.add_data(self.name, int(pid), self.site.format_pid(pid), self.site.image_url(pid))", "title": "" }, { "docid": "935413ba0dafd6416b379e998fe32f1b", "score": "0.4689283", "text": "def worker(\n host: str,\n port: str,\n user: str,\n password: str,\n query: str,\n chunk: list) -> None:\n with connect(\n host=host,\n port=port,\n user=user,\n password=password\n ) as connection:\n with connection.cursor() as cursor:\n cursor.executemany(query, chunk)\n connection.commit()", "title": "" }, { "docid": "31c0a815429794a2fe0831611664ede5", "score": "0.4685855", "text": "def sendall(self, *args, **kwargs):\n id = self.send(*args, **kwargs)\n while True:\n if not id in self.__send_buffer_ids:\n return True\n if not self.connected():\n return False\n time.sleep(0.01)", "title": "" } ]
08b1a4d5ce1065b6a12128ff7982476e
Calculates the hyper_period of the network
[ { "docid": "5f1a2b49e80ae1417065d8a0744fd9a3", "score": "0.6522371", "text": "def calculate_hyper_period(periods):\n return lcm_multiple(*periods)", "title": "" } ]
[ { "docid": "51df2fd3ebacba7e4edc7b7815f617d9", "score": "0.6120404", "text": "def __get_hyperperiod(self):\r\n for task in self.taskset:\r\n pass", "title": "" }, { "docid": "1b2923e8789a715024dae5ad1d39142d", "score": "0.58434445", "text": "def period( self, treq):\r\n nu = self.nu\r\n epoch = self.epoch\r\n nudot = self.nudot\r\n# p = self.p ## period in seconds\r\n# pdot = self.pdot ## period derivative in s/s (dimensionless)\r\n nuddot = self.nuddot ## second derivative of frequency in Hz/s\r\n \r\n ti = (treq-epoch)*86400 ## difference from ephemeris epoch in seconds\r\n\r\n ##print('DBug: ti, p:', ti, p)\r\n ##print('DBug: pdot, nuddot:', pdot, nuddot)\r\n freq = nu + ti*nudot + (ti*ti/2)*nuddot ## freqency at required time\r\n preq = 1.0/freq ## period at required time\r\n ##print('DBug: freq, preq:', freq, preq)\r\n return preq", "title": "" }, { "docid": "ca65e34d7bf2d5829df2f07b6de15b40", "score": "0.5675337", "text": "def period(self):\n\n pass", "title": "" }, { "docid": "34179d7e72643ceb98ecdc1680e1565e", "score": "0.542539", "text": "def learning_rate(epochs):", "title": "" }, { "docid": "c39eb019a9745b7880b532f1a5c41d2b", "score": "0.54125965", "text": "def period(self):\n return period(self.attractor.k, self.to_classical().a)", "title": "" }, { "docid": "dbd2414a72f7a6f2252b2e03d55c47bb", "score": "0.5368704", "text": "def _get_period(self, n_iter):\n if self._update_period is None:\n return None\n if isinstance(self._update_period, float):\n return max(int(self._update_period * n_iter), 1)\n return self._update_period", "title": "" }, { "docid": "573a9ada77ae1aa8c46da3987d6088b2", "score": "0.53571016", "text": "def period(self):\n return timedelta(seconds=np.pi * 2 / self.n)", "title": "" }, { "docid": "4b30e16b5ff983e5163ab8886df3a943", "score": "0.5304899", "text": "def calc_max_edge_flowrate(thermal_network, processes=1):\n\n # create empty DataFrames to store results\n if thermal_network.use_representative_week_per_month:\n thermal_network.edge_mass_flow_df = pd.DataFrame(\n data=np.zeros((2016, len(thermal_network.edge_node_df.columns.values))),\n columns=thermal_network.edge_node_df.columns.values) # stores values for 2016 timesteps\n\n thermal_network.node_mass_flow_df = pd.DataFrame(\n data=np.zeros((2016, len(thermal_network.edge_node_df.index))),\n columns=thermal_network.edge_node_df.index.values) # stores values for 2016 timestep\n\n thermal_network.thermal_demand = pd.DataFrame(\n data=np.zeros((2016, len(thermal_network.building_names))),\n columns=thermal_network.building_names.values) # stores values for 8760 timesteps\n\n else:\n thermal_network.edge_mass_flow_df = pd.DataFrame(\n data=np.zeros((HOURS_IN_YEAR, len(thermal_network.edge_node_df.columns.values))),\n columns=thermal_network.edge_node_df.columns.values) # stores values for 8760 timesteps\n\n thermal_network.node_mass_flow_df = pd.DataFrame(\n data=np.zeros((HOURS_IN_YEAR, len(thermal_network.edge_node_df.index))),\n columns=thermal_network.edge_node_df.index.values) # stores values for 8760 timesteps\n\n thermal_network.thermal_demand = pd.DataFrame(\n data=np.zeros((HOURS_IN_YEAR, len(thermal_network.building_names))),\n columns=thermal_network.building_names.values) # stores values for 8760 timesteps\n\n loops, graph = thermal_network.find_loops()\n\n if loops:\n print('Fundamental loops in network: ', loops)\n # initial guess of pipe diameter\n diameter_guess = initial_diameter_guess(thermal_network)\n else:\n # no iteration necessary\n # read in diameters from shp file\n diameter_guess = 
read_in_diameters_from_shapefile(thermal_network)\n\n print('start calculating mass flows in edges...')\n iterations = 0\n # t0 = time.perf_counter()\n converged = False\n # Iterate over diameter of pipes since m = f(delta_p), delta_p = f(diameter) and diameter = f(m)\n while not converged:\n print('\\n Diameter iteration number ', iterations)\n diameter_guess_old = diameter_guess\n\n # hourly_mass_flow_calculation\n time_step_slice = range(thermal_network.start_t, thermal_network.stop_t)\n nhours = thermal_network.stop_t - thermal_network.start_t\n\n mass_flows = cea.utilities.parallel.vectorize(hourly_mass_flow_calculation, processes)(\n time_step_slice,\n repeat(diameter_guess, nhours),\n repeat(thermal_network, nhours))\n\n # write mass flows to the dataframes\n thermal_network.edge_mass_flow_df.iloc[time_step_slice] = [mfe[0] for mfe in mass_flows]\n thermal_network.node_mass_flow_df.iloc[time_step_slice] = [mfe[1] for mfe in mass_flows]\n thermal_network.thermal_demand.iloc[time_step_slice] = [mfe[2] for mfe in mass_flows]\n\n # update diameter guess for iteration\n pipe_properties_df = assign_pipes_to_edges(thermal_network)\n diameter_guess = pipe_properties_df[:]['D_int_m':'D_int_m'].values[0]\n\n # exit condition for diameter iteration while statement\n if not loops: # no loops, so no iteration necessary\n converged = True\n thermal_network.no_convergence_flag = False\n elif iterations == thermal_network.diameter_iteration_limit: # Too many iterations\n converged = True\n print(\n '\\n No convergence of pipe diameters in loop calculation, possibly due to large amounts of low mass flows. '\n '\\n Please retry with alternate network design.')\n thermal_network.no_convergence_flag = True\n elif (abs(diameter_guess_old - diameter_guess) > 0.005).any():\n # 0.005 is the smallest diameter change of the catalogue, so at least one diameter value has changed\n converged = False\n # we are half way through the total amount of iterations without convergence\n # the flag below triggers a reduction in the acceptable minimum mass flow to (hopefully) allow for convergence\n if iterations == int(\n thermal_network.diameter_iteration_limit / 2): # int() cast necessary because iterations variable takes int values\n thermal_network.no_convergence_flag = True\n\n # reset all minimum mass flow calculation values\n thermal_network.delta_cap_mass_flow = {}\n thermal_network.nodes = {}\n thermal_network.cc_old = {}\n thermal_network.ch_old = {}\n thermal_network.cc_value = {}\n thermal_network.ch_value = {}\n\n else: # no change of diameters\n converged = True\n thermal_network.no_convergence_flag = False\n\n iterations += 1\n\n # output csv files with node mass flows\n if thermal_network.use_representative_week_per_month:\n # we need to extrapolate 8760 datapoints from 2016 points from our representative weeks.\n # To do this, the initial dataset is repeated 4 times, the remaining values are filled with the average values of all above.\n\n # Nominal node mass flow\n node_mass_flow_for_csv = extrapolate_datapoints_for_representative_weeks(thermal_network.node_mass_flow_df)\n node_mass_flow_for_csv.to_csv(\n thermal_network.locator.get_nominal_node_mass_flow_csv_file(thermal_network.network_type,\n thermal_network.network_name),\n index=False)\n\n # output csv files with aggregated demand\n thermal_demand_for_csv = extrapolate_datapoints_for_representative_weeks(thermal_network.thermal_demand)\n thermal_demand_for_csv.to_csv(\n 
thermal_network.locator.get_thermal_demand_csv_file(thermal_network.network_type,\n thermal_network.network_name),\n columns=thermal_network.building_names)\n\n else:\n # Nominal node mass flow\n thermal_network.node_mass_flow_df.to_csv(\n thermal_network.locator.get_nominal_node_mass_flow_csv_file(thermal_network.network_type,\n thermal_network.network_name),\n index=False)\n\n # output csv files with aggregated demand\n thermal_network.thermal_demand.to_csv(\n thermal_network.locator.get_thermal_demand_csv_file(thermal_network.network_type,\n thermal_network.network_name),\n columns=thermal_network.building_names, index=False)\n\n return thermal_network.edge_mass_flow_df", "title": "" }, { "docid": "9db6ff0120566cbd2393987450e7db48", "score": "0.52705896", "text": "def learning_rate(self): # pragma: no cover\n return self._optimizer._get_hyper(\"learning_rate\")", "title": "" }, { "docid": "7e05de375b3e031f38a4e88eca38efa0", "score": "0.5243052", "text": "def calc(last_date, rest_traffic):\n now = date.today()\n\n print('Approx. traffic per day = ', rest_traffic / (last_date - now).days)\n\n return rest_traffic / (last_date - now).days", "title": "" }, { "docid": "119a2d73c201baeef516a39859007f6f", "score": "0.5242679", "text": "def _create_learning_rate(hyperparams, step_var):\n if hyperparams.learning_method != 'composite':\n base_rate = hyperparams.learning_rate\n else:\n spec = hyperparams.composite_optimizer_spec\n switch = tf.less(step_var, spec.switch_after_steps)\n base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate),\n lambda: tf.constant(spec.method2.learning_rate))\n return tf.train.exponential_decay(\n base_rate,\n step_var,\n hyperparams.decay_steps,\n hyperparams.decay_base,\n staircase=hyperparams.decay_staircase)", "title": "" }, { "docid": "8b94d08b1f56593baee91cf2e6f319a5", "score": "0.5240136", "text": "def period(g):\n\n def val(a, b):\n return a - b + 1\n\n lvls = {0 : 0} # node 0 is on lvl 0\n Q = deque([0]) # queue of nodes to be processed\n per = 0 # no actual period is set yet \n\n while Q:\n w = Q.pop()\n trans = g[w] > 0\n for v in range(len(trans)): \n if trans[v]:\n if (v in lvls.keys()): # v has already been visited\n p = val(lvls[w],lvls[v])\n if (per == 0):\n # set initial period\n per = p\n if (p == 1): \n return 1\n if (p > 1): \n per = frac.gcd(per,p)\n else:\n Q.appendleft(v)\n lvls[v] = lvls[w] + 1\n\n return per", "title": "" }, { "docid": "ce6f6c36794b1d994f1544cd616322d1", "score": "0.5216118", "text": "def calculate_utilization(self, hyper_period, replicas, sensing_period, sensing_time):\n # Init the list where the utilization will be saved for every link\n link_utilization = []\n for _ in self.__links:\n link_utilization.append(0)\n\n # For all frames in the network\n for frame in self.__frames:\n # Get all the unique links in the paths of the frame\n unique_links = []\n for receiver in frame.get_receivers():\n # For all links in the path\n for link in self.__paths[frame.get_sender()][receiver]:\n if link not in unique_links: # If is unique, add it\n unique_links.append(link)\n\n # Once we have all the links in the path, calculate the ns to transmit for all links\n for link in unique_links:\n # First calculate the time occupied by normal transmissions and its period instances\n link_utilization[link] += int(((frame.get_size() * 1000) /\n self.__links_object_container[link].get_speed()) *\n (hyper_period / frame.get_period()))\n\n # Then, add the retransmissions if the link has any\n if 
self.__links_object_container[link].get_type() == LinkType.wireless:\n for index, collision_domain in enumerate(self.__collision_domains):\n if link in collision_domain:\n link_utilization[link] += int((((frame.get_size() * 1000) /\n self.__links_object_container[link].get_speed()) *\n (hyper_period / frame.get_period())) * replicas[index])\n\n # Last, add the time occupied by sensing and control\n if sensing_period is not None:\n for index, link in enumerate(self.__links_object_container):\n if link.get_type() == LinkType.wireless:\n link_utilization[index] += int((hyper_period / sensing_period) * sensing_time)\n\n # Now calculate the utilization in float for every link and calculate the total utilization of the network\n utilization = 0.0\n possible = True\n for index, link in enumerate(link_utilization):\n link_utilization[index] /= hyper_period\n if link_utilization[index] > 1.0: # Check if is possible to schedule all of its links\n possible = False\n utilization += link_utilization[index]\n return utilization / len(link_utilization), max(link_utilization), possible", "title": "" }, { "docid": "4c88a11b1049dce131538249608e73bf", "score": "0.52041906", "text": "def learning_rate_scheduler(epoch):\n return args.learning_rate * (2 ** -epoch)", "title": "" }, { "docid": "d60c9ecf085c3b073d8473a05e32de71", "score": "0.5191515", "text": "def _cluster_time_estimator(self, sim, args, **kwargs):\n size = args['network size']\n if size == 11:\n return datetime.timedelta(hours=8) #For 2000 runs\n elif size == 15:\n return datetime.timedelta(hours=10) #For 2000 runs\n elif size == 21:\n return datetime.timedelta(hours=1)\n elif size == 25:\n return datetime.timedelta(hours=1)\n else:\n raise RuntimeError(\"No time estimate for network sizes other than 11, 15, 21 or 25\")", "title": "" }, { "docid": "fea411227ff60ad64d9b444a44fe0411", "score": "0.5185473", "text": "def system_equations_period(self):\n return self._system_equations_period", "title": "" }, { "docid": "b7f1991de3c15b9c02e530d5a17b2021", "score": "0.5176294", "text": "def scheduler_period(self) -> ConfigNodePropertyInteger:\n return self._scheduler_period", "title": "" }, { "docid": "b9547bb7b9507fd02d99829aa98011d3", "score": "0.51404774", "text": "def _decay(self):\n costs = []\n for var in tf.trainable_variables():\n if var.op.name.find(r'weights') > 0:\n costs.append(tf.nn.l2_loss(var))\n # tf.histogram_summary(var.op.name, var)\n\n return tf.mul(self.hps.weight_decay_rate, tf.add_n(costs))", "title": "" }, { "docid": "25e4f9cf1be1ce414b6d785bb0378c88", "score": "0.51315457", "text": "def _decay(self):\n costs = []\n for var in tf.trainable_variables():\n if var.op.name.find(r'DW') > 0:\n costs.append(tf.nn.l2_loss(var))\n # tf.summary.histogram(var.op.name, var)\n\n return tf.multiply(self.hps.weight_decay_rate, tf.add_n(costs))", "title": "" }, { "docid": "ed7b679e761591c3718cd952de6d753b", "score": "0.5118514", "text": "def initilize_hyperparams(self):\n self.iteration_count = 0\n self.running_min = 0\n self.running_max = 0\n self.running_mean = 0\n self.running_var = 0", "title": "" }, { "docid": "97ca308d7df73e11767fc1736bbb4953", "score": "0.5113784", "text": "def get_period(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "eafb894e923e6054af77bcee5e2abb1f", "score": "0.5088353", "text": "def calculate_period(self):\n sum_mass = self.first_mass + self.second_mass\n self.period = (sqrt(4*pow(pi, 2)*pow(self.sum_semi_major_axes, 3)\n / 
(self.G*(self.convert_sun_mass_to_kg(sum_mass)))))\n\n return self.period", "title": "" }, { "docid": "617e37e6030712df830b0c3ffc59749d", "score": "0.5087948", "text": "def period(self) -> Parameter:\n return self._period", "title": "" }, { "docid": "e493058c8d3158e2ac84a187bcafe31d", "score": "0.507456", "text": "def period(self, u):\n return (self.xmax - self.xmin)/u", "title": "" }, { "docid": "a87f1a498dfff5e33c3745a5c80630e0", "score": "0.505109", "text": "def period(self):\n raise NotImplemented()", "title": "" }, { "docid": "e28a921e21db7eb5f7c933a55bda9f58", "score": "0.50409764", "text": "def hyper_parameters(self):\n return {\n \"early_stopping\": \"True\",\n \"epochs\": \"20\",\n \"learning_rate\": \"0.05\",\n \"min_count\": \"5\",\n \"min_epochs\": \"1\",\n \"mode\": \"supervised\",\n \"patience\": \"5\",\n \"vector_dim\": \"20\",\n \"word_ngrams\": \"2\",\n }", "title": "" }, { "docid": "acf8a9e25134647a54ee63a44a640563", "score": "0.5036689", "text": "def activation_time(L_b, Kr_b, p, a):\n t_ac = (L_b / Kr_b / p ** a) ** (1. / (a + 1)) / 60\n return t_ac", "title": "" }, { "docid": "2092a760ca15487e17259f590f736e03", "score": "0.50074404", "text": "def learning_rate_schedule(global_step):\n step = np.float32(global_step+1)\n \n return hparams.d_model ** -0.5 * min(step * hparams.warmup_steps ** -1.5, step ** -0.5)", "title": "" }, { "docid": "83ed17b30b609d084b3834cde0182e79", "score": "0.49679837", "text": "def period(self):\n return self._period", "title": "" }, { "docid": "83ed17b30b609d084b3834cde0182e79", "score": "0.49679837", "text": "def period(self):\n return self._period", "title": "" }, { "docid": "71cb6adf9caf052a4b2cfc937324b3d2", "score": "0.49678785", "text": "def __init__(self, params, net):\n self.momentum_decay = params['momentum_decay']\n self.lr = params['learning_rate']\n self.max_epochs = params['max_epochs']", "title": "" }, { "docid": "2e53addeac0eed4f8ae61d9bd3bd470a", "score": "0.49609005", "text": "def get_learning_rate_decay(self):\n\n hparams = self.hparams\n\n if hparams.decay_scheme in [\"luong5\", \"luong10\", \"luong234\"]:\n decay_factor = 0.5\n if hparams.decay_scheme == \"luong5\":\n start_decay_step = int(hparams.num_train_steps / 2)\n decay_times = 5\n elif hparams.decay_scheme == \"luong10\":\n start_decay_step = int(hparams.num_train_steps / 2)\n decay_times = 10\n elif hparams.decay_scheme == \"luong234\":\n start_decay_step = int(hparams.num_train_steps * 2 / 3)\n decay_times = 4\n remain_steps = hparams.num_train_steps - start_decay_step\n decay_steps = int(remain_steps / decay_times)\n elif not hparams.decay_scheme: # no decay\n start_decay_step = hparams.num_train_steps\n decay_steps = 0\n decay_factor = 1.0\n elif hparams.decay_scheme:\n raise ValueError(\"Unknown decay scheme %s\" % hparams.decay_scheme)\n \n utils.log(\"decay_scheme=%s, start_decay_step=%d, decay_steps %d, \"\n \"decay_factor %g\" % (hparams.decay_scheme,\n start_decay_step,\n decay_steps,\n decay_factor))\n\n if hparams.decay_scheme in [\"luong5\", \"luong10\", \"luong234\"]:\n return tf.cond(\n self.global_step < start_decay_step,\n lambda: self.learning_rate,\n lambda: tf.train.exponential_decay(\n self.learning_rate,\n (self.global_step - start_decay_step),\n decay_steps, decay_factor, staircase=True),\n name=\"learning_rate_decay_cond\")\n elif not hparams.decay_scheme:\n return self.learning_rate", "title": "" }, { "docid": "a6da779e3e96ab10734dc3ae5234f236", "score": "0.49212915", "text": "def get_learning_curve(self, filter_t=1.):\n latencies = 
np.array(self.latency_list)\n # calculate a running average over the latencies with a averaging time 'filter_t'\n for i in range(1,latencies.shape[0]):\n latencies[i] = latencies[i-1] + (latencies[i] - latencies[i-1])/float(filter_t)\n\n return self.latencies", "title": "" }, { "docid": "adc6e5084b593509b7a082b9c73e8e86", "score": "0.49123508", "text": "def duty_cycle(self):\n if self._high is not None:\n return 100.0 * self._high / self._period\n else:\n return 0.0", "title": "" }, { "docid": "adc6e5084b593509b7a082b9c73e8e86", "score": "0.49123508", "text": "def duty_cycle(self):\n if self._high is not None:\n return 100.0 * self._high / self._period\n else:\n return 0.0", "title": "" }, { "docid": "3acdc3f6829331d9dc1548f14c7e3f80", "score": "0.49082023", "text": "def get_period(self, data=None):\n return 5", "title": "" }, { "docid": "7c2a665e491518b00eb41d763a0966e4", "score": "0.49076515", "text": "def period(self):\n return self._task_info.period", "title": "" }, { "docid": "cf6a4287af0ade2ad935f2b359b17256", "score": "0.48960057", "text": "async def period(self) -> int:\n response = await self._adguard._request(\"stats_info\")\n return response[\"interval\"]", "title": "" }, { "docid": "be2e2ec5b17249b1182e953f9e906930", "score": "0.48945913", "text": "def get_learning_rate_fn(self):\n return lr_schedulers.get_learning_rate_fn(self.hparams)", "title": "" }, { "docid": "829486a3c7ba3cd4d83124c4b53e9a2f", "score": "0.48899275", "text": "def cq_dam_webdav_version_linking_scheduler_period(self) -> ConfigNodePropertyInteger:\n return self._cq_dam_webdav_version_linking_scheduler_period", "title": "" }, { "docid": "23b1d43753c17a433c67068c2f0ab35e", "score": "0.48891845", "text": "def compute(self, dss_instance:dss):\n timestep = dss_instance.Solution.StepSize()/(3600)\n sub_losses = dss_instance.Circuit.Losses()\n \n self.total_loss['active_power'] += (sub_losses[0])*timestep/1000000\n self.total_loss['reactive_power'] += (sub_losses[1])*timestep/1000000", "title": "" }, { "docid": "767622222f81979365ebc738d0ca8432", "score": "0.48887688", "text": "def compute_cost(self, params, seq, is_prob=False):\n blank = 0\n seqLen = len(seq) # Length of label sequence (# phones)\n L = 2 * seqLen + 1 # Length of label sequence with blanks\n T = len(params) # Length of utterance (time)\n numLabels = len(params[0].value())\n #print \"T=\", T\n \n if not is_prob:\n for t in range (T):\n max_elem = dy.max_dim(params[t], 0)\n #print \"max_elem=\", max_elem.value()\n pmax = max_elem\n params[t] = dy.exp(params[t]-pmax)\n params[t] = params[t] * dy.pow(dy.sum_elems(params[t]), dy.scalarInput(-1))\n #params = params - dy.max_dim(params,d=0)\n #params = dy.exp(params)\n #params = params / dy.sum(params,axis=0)\n \n \n alphas = [[dy.scalarInput(0)] * L for xx in range(T)]\n betas = [[dy.scalarInput(0)] * L for xx in range(T)]\n #alphas = np.zeros((L,T))\n #betas = np.zeros((L,T))\n\n # Initialize alphas and forward pass \n alphas[0][0] = dy.pick(params[0], blank)#params[blank,0]\n alphas[0][1] = dy.pick(params[0], seq[0])#params[seq[0],0]\n c = alphas[0][0] + alphas[0][1]#np.sum(alphas[:,0])\n c_n = dy.pow(c, dy.scalarInput(-1))\n alphas[0][0] = alphas[0][0] * c_n\n alphas[0][1] = alphas[0][1] * c_n\n \n llForward = dy.log(c)#np.log(c)\n #print llForward.value()\n for t in xrange(1, T):\n start = 2 * (T-t)\n if L <= start:\n start = 0\n else:\n start = L-start\n end = min(2 * t + 2, L)\n for s in xrange(start, L):\n l = (s-1) / 2\n # blank\n if s % 2 == 0:\n if s == 0:\n alphas[t][s] = alphas[t-1][s] * 
dy.pick(params[t], blank)\n else:\n alphas[t][s] = (alphas[t-1][s] + alphas[t-1][s-1]) * dy.pick(params[t], blank)\n # same label twice\n elif s == 1 or seq[l] == seq[l-1]:\n alphas[t][s] = (alphas[t-1][s] + alphas[t-1][s-1]) * dy.pick(params[t], seq[l])\n else:\n alphas[t][s] = (alphas[t-1][s] + alphas[t-1][s-1] + alphas[t-1][s-2]) * dy.pick(params[t], seq[l])\n\n c = dy.esum(alphas[t])\n c_n = dy.pow(c, dy.scalarInput(-1))\n for tt in range(start, end):\n alphas[t][tt] = alphas[t][tt] * c_n\n llForward += dy.log(c)\n #print llForward.value()\n #print \"t=\", t, \"llForward=\", llForward.value()\n print \"Debug:\"\n print \"\\tFORWARD LOSS=\", llForward.value()\n # Initialize betas and backwards pass\n betas[T-1][L-1] = dy.pick(params[T-1], blank)\n betas[T-1][L-2] = dy.pick(params[T-1], seq[seqLen-1])\n c = betas[T-1][L-1] + betas[T-1][L-2]\n c_n = dy.pow(c, dy.scalarInput(-1))\n betas[T-1][L-1] *= c_n\n betas[T-1][L-2] *= c_n\n llBackward = dy.log(c)\n #print \"BACKWARD pass:\"\n for t in xrange(T-1, 0, -1):\n t = t-1\n start = 2 * (T-t)\n if L <= start:\n start = 0\n else:\n start = L-start\n end = min(2 * t + 2, L)\n for s in xrange(end, 0, -1):\n s = s-1\n l = (s-1) / 2\n if s % 2 == 0:\n if s == L-1:\n betas[t][s] = betas[t + 1][s] * dy.pick(params[t], blank)\n else:\n betas[t][s] = (betas[t + 1][s] + betas[t + 1][s + 1]) * dy.pick(params[t], blank)\n # same label twice\n elif s == L-2 or seq[l] == seq[l + 1]:\n betas[t][s] = (betas[t + 1][s] + betas[t + 1][s + 1]) * dy.pick(params[t], seq[l])\n else:\n betas[t][s] = (betas[t + 1][s] + betas[t + 1][s + 1] + betas[t + 1][s + 2]) * dy.pick(params[t], seq[l])\n\n #c = np.sum(betas[start:end, t])\n c = dy.esum(betas[t])\n c_n = dy.pow(c, dy.scalarInput(-1))\n for tt in range (L):\n betas[t][tt] = betas[t][tt] * c_n\n #betas[start:end, t] = betas[start:end, t] / c\n llBackward += dy.log(c)\n #print \"t=\", t, \"llBackward=\", llBackward.value()\n #alpha-beta\n print \"\\tBACKWARD LOSS=\", llBackward.value()\n ab = []\n for tt in range (T):\n ab_row = []\n for ll in range(L):\n ab_row.append(alphas[tt][ll] * betas[tt][ll])#*dy.pow(params[tt][ll],dy.scalarInput(-1)))\n ab.append(ab_row)\n ##PHASE 1\n grad_v = [[dy.scalarInput(0)] * numLabels for xx in range (T)]\n for s in xrange(L):\n # blank\n if s % 2 == 0:\n for t in xrange(T):\n grad_v[t][blank] += ab[t][s]\n if ab[t][s] != 0:\n ab[t][s] = ab[t][s] * dy.pow(params[t][blank], dy.scalarInput(-1))\n else:\n for t in xrange(T):\n grad_v[t][seq[(s-1) / 2]] += ab[t][s]\n if ab[t][s] != 0:\n ab[t][s] = ab[t][s] * dy.pow((params[t][seq[(s-1) / 2]]), dy.scalarInput(-1))\n #PHASE 2\n absum = [dy.scalarInput(0)] * T\n for t in xrange(T):\n for s in xrange(L):\n absum[t] += ab[t][s]\n #phase 3\n eps = dy.scalarInput(0.00001)\n for t in xrange(T):\n for s in xrange(numLabels):\n tmp = params[t][s] * absum[t]\n #print tmp.value()\n if tmp > 0:\n grad_v[t][s] = params[t][s] - grad_v[t][s] * dy.pow(tmp + eps, dy.scalarInput(-1))\n else:\n grad_v[t][s] = params[t][s]\n #for dynet backprop\n losses = []\n last_proc = 0\n for t in range (T):\n proc = t * 100 / T\n if proc % 5 == 0 and proc != last_proc:\n last_proc = proc\n sys.stdout.write(\" \" + str(proc))\n sys.stdout.flush()\n \n #print \"P=\", params[t].value()\n dp = [0] * numLabels\n for l in range (numLabels):\n dp[l] = params[t].value()[l] - grad[t][l].value()\n \n target = dy.inputVector(dp)\n #target.set(dp)\n #targets.append(target)\n losses.append(dy.squared_distance(params[t], target))\n #losses.append(-dy.esum(grad_v[t]))\n #print 
\"DP=\", dp\n ctc_loss = dy.esum(losses)\n print \"\\tCTC=\", ctc_loss.value()\n return ctc_loss", "title": "" }, { "docid": "0975dea03d7207b1f7e561f43a60e34c", "score": "0.48856983", "text": "def compute_wave_period(Un, Fn, dn):\r\n # This equation may not be suitable for wind speed values < 1 m/s\r\n # The WWIII database tends to include some 0s, otherwise values > 2\r\n if Un < 1:\r\n LOGGER.warning(f'Found wind velocity of {Un:.2f}, using 1m/s in wave '\r\n 'height calculation instead')\r\n Un = 1\r\n g = 9.81\r\n dn = -dn\r\n ds = g*dn/Un**2\r\n Fs = g*Fn/Un**2\r\n A = numpy.tanh(0.1*ds**2.01)\r\n B = numpy.tanh(2.77e-7*Fs**1.45/A)\r\n T_n = 7.69*Un/g*(A*B)**0.187\r\n return T_n", "title": "" }, { "docid": "ff64444cc12a5b9788059668093a7015", "score": "0.48733956", "text": "def get_time(self):\n if self.estimated_time <= 0:\n return self.estimated_time\n\n return self.estimated_time / (1 - self.beta_pow)", "title": "" }, { "docid": "7e320a020f16de61902101b079772fb3", "score": "0.48569837", "text": "def period(self) -> float:\n return self.__uhfli.scope_duration.get()", "title": "" }, { "docid": "fac24321d3adcfae0e6290ae775bb66a", "score": "0.48502827", "text": "def calculate_accumulated_task_runtime(workflow):\n\n runtime = 0\n for tid,task in workflow.nodes.items():\n session = task.get('session')\n if session:\n runtime += (session.get('utime',0) - session.get('itime',0))\n\n return runtime", "title": "" }, { "docid": "a33f63ec624a622f5d83028fc605909f", "score": "0.48479798", "text": "def getPeriod(self):\n\t\tif not self.groups:\n\t\t\treturn 0\n\t\treturn self.groups[1][0].getPeriod()", "title": "" }, { "docid": "1e54124818a0da07cd222a439ee0733c", "score": "0.48212546", "text": "def time_period(self):\n return self.args[3]", "title": "" }, { "docid": "c9a5e1e4604bc87908b7a6f70af89658", "score": "0.48183092", "text": "def get_network_speed(self, a=None):\n\n if a is None:\n a = self.a\n delta_a = self.activation.f(self.W_rec.dot(a) + self.b_rec) - a\n\n return (self.alpha**2 / 2) * np.square(delta_a).sum()", "title": "" }, { "docid": "909210a79774313d6d3efd774f291743", "score": "0.48173952", "text": "def make_periods(self):\n period_base = self.make_period_base()\n return np.kron(np.ones(self.n_total), period_base)", "title": "" }, { "docid": "cdf8a2a95598beaf54891374a4e05809", "score": "0.4814189", "text": "def exploration_rate(epoch):\n # start_eps = 1.0\n start_eps = 0.3\n end_eps = 0.1\n const_eps_epochs = 0.1 * epochs # 10% of learning time\n eps_decay_epochs = 0.6 * epochs # 60% of learning time\n\n if epoch < const_eps_epochs:\n return start_eps\n elif epoch < eps_decay_epochs:\n # Linear decay\n return start_eps - (epoch - const_eps_epochs) / \\\n (eps_decay_epochs - const_eps_epochs) * (start_eps - end_eps)\n else:\n return end_eps", "title": "" }, { "docid": "b44b8487f1037ebb982fa61027c4f2a6", "score": "0.4801111", "text": "def dL_gw_dt(sp, a, e):\n return -32./5. * sp.m_reduced()**2 * sp.m_total()**(5./2.) / a**(7./2.) / (1. - e**2)**2 * (1. 
+ 7./8.*e**2)", "title": "" }, { "docid": "24b9e45431b68016a7fc75fa35a014a3", "score": "0.4800037", "text": "def exploration_rate(epoch):\n start_eps = 1.0\n end_eps = 0.1\n const_eps_epochs = 0.1 * self.epochs # 10% of learning time\n eps_decay_epochs = 0.6 * self.epochs # 60% of learning time\n\n if epoch < const_eps_epochs:\n return start_eps\n elif epoch < eps_decay_epochs:\n # Linear decay\n return start_eps - (epoch - const_eps_epochs) / \\\n (eps_decay_epochs - const_eps_epochs) * (start_eps - end_eps)\n else:\n return end_eps", "title": "" }, { "docid": "0302b701c0d1dd3b26c2b80c14403bdd", "score": "0.47953922", "text": "def oht_by_net(self):\n if self._oht_by_net is None:\n self._oht_by_net = self.net_transport * self.avg_t * self.rhocp\n return self._oht_by_net", "title": "" }, { "docid": "62ee587b4e0eafce4aa82b899a9c1180", "score": "0.4794951", "text": "def getPeriod(self):\n return self.period", "title": "" }, { "docid": "4411cf656420e57ea8d2721d8373a54f", "score": "0.47948888", "text": "def exploration_rate(epoch):\n start_eps = 1.0\n end_eps = 0.10\n const_eps_epochs = 0.1 * epochs # 10% of learning time\n eps_decay_epochs = 0.6 * epochs # 60% of learning time\n\n if epoch < const_eps_epochs:\n return start_eps\n elif epoch < eps_decay_epochs:\n # Linear decay\n return start_eps - (epoch - const_eps_epochs) / \\\n (eps_decay_epochs - const_eps_epochs) * (start_eps - end_eps)\n else:\n return end_eps", "title": "" }, { "docid": "76cc2d6a45ee02cb039b35a09adb5a76", "score": "0.47907448", "text": "def _lr_scheduler(self, epoch):\n\n n_decays = epoch // self.learning_rate['decay_frequency']\n lr = self.learning_rate['initial_value'] * (self.learning_rate['decay_factor'] ** n_decays)\n # no lr below minimum control 10e-7\n return max(1e-7, lr)", "title": "" }, { "docid": "42546732bbddb95e8d7fcaa025c65aaa", "score": "0.4788969", "text": "def calc_return_period(self):\n\t\trtn_prd = 0\n\t\tif self.ground_rent != 0 and self.purchase_price != 0:\n\t\t\trtn_prd = self.purchase_price / self.ground_rent\n\t\tself.return_period = rtn_prd", "title": "" }, { "docid": "35d9ecc7f0102b90db19e9838a316ea8", "score": "0.47878933", "text": "def getPeriod(self,header=False):\n if self.isCalibrator():\n return 1.0/self.header['CAL_FREQ']\n if self.params is None:\n return None\n if header or self.polyco is None:\n return self.params.getPeriod()\n else:\n P0 = self.polyco.calculatePeriod()\n #print P0,self.params.getPeriod()\n if np.abs(P0)<1e-5: #Problem with large DT POLYCO values?\n return self.params.getPeriod()\n else:\n ratio = (P0-self.params.getPeriod())/self.params.getPeriod()\n if ratio < 0.5 or ratio > 2:\n return self.params.getPeriod()\n return P0\n return self.polyco.calculatePeriod()", "title": "" }, { "docid": "61ababa67dd362b341fcda32483c3959", "score": "0.47846147", "text": "def exploration_rate(epoch):\n start_eps = 1.0\n end_eps = 0.1\n const_eps_epochs = 0.1 * epochs # 10% of learning time\n eps_decay_epochs = 0.6 * epochs # 60% of learning time\n\n if epoch < const_eps_epochs:\n return start_eps\n elif epoch < eps_decay_epochs:\n # Linear decay\n return start_eps - (epoch - const_eps_epochs) / \\\n (eps_decay_epochs - const_eps_epochs) * (start_eps - end_eps)\n else:\n return end_eps", "title": "" }, { "docid": "e1aba3d1f0ae32d10515e62ced1faecd", "score": "0.47809008", "text": "def _get_iter_per_cycle(self):\n return self.steps_per_epoch * self.cycle_len", "title": "" }, { "docid": "a0da5e2b58f964efbfef0402f9146105", "score": "0.4780063", "text": "def 
calculate_gains_over_time(amount_inv=0.0, period=6):\r\n\r\n return # return total amount invested with gains over period\r", "title": "" }, { "docid": "27902467cd7afb38cf40082cf72f5ee3", "score": "0.4765387", "text": "def get_period(self):\n return self.get_abstract().period", "title": "" }, { "docid": "6b53ba6a441b147ac01c6572c973c4d9", "score": "0.47633883", "text": "def calculateHyperProbs(self, hypers, slopes):\n \n rate=tf.maximum(hypers[0],0.01)\n slopes=tf.math.abs(slopes[0])\n prob=0\n\n #Calculate probability of new hypers\n val=self.exponentialLogProb(self.hyperRate, hypers[0])\n prob+=tf.reduce_sum(input_tensor=val)\n\n #Calculate probability of weights and biases given new hypers\n val=self.exponentialLogProb(hypers[0],slopes)\n prob+=tf.reduce_sum(input_tensor=val)\n\n return(prob)", "title": "" }, { "docid": "59d47f2893d6ac968e27d9ec9d28c307", "score": "0.4759497", "text": "def compute(self, dss_instance:dss):\n timestep = dss_instance.Solution.StepSize()/(3600)\n sub_losses = dss_instance.Circuit.Losses()\n \n self.active_power.append((sub_losses[0])*timestep/1000)\n self.reactive_power.append((sub_losses[1])*timestep/1000)", "title": "" }, { "docid": "b04b48e7ebb16cfdbe747dfb3c975412", "score": "0.47509256", "text": "def _get_timeperiod(self):\n return self.__timeperiod", "title": "" }, { "docid": "2043278a08794000aa8a5175e1875df7", "score": "0.4747372", "text": "def init_hyperparams(self):\n self.lambda_user = 0.081\n self.lambda_item = 0.081\n self.num_epochs = 25", "title": "" }, { "docid": "4fc19a78efc6d251cf5c69e15b0ca199", "score": "0.47469765", "text": "def calc_period(start, end) -> int:\n sd, sh = start\n ed, eh = end\n\n hours = int(ed - sd) * 24 + (eh - sh) // 100 + 1\n\n return hours", "title": "" }, { "docid": "86b244f9b26e7fdad521e6f64c12ce6f", "score": "0.47431767", "text": "def business_days_in_period(self, date_tensor, period_tensor):\n return self.business_days_between(date_tensor, date_tensor + period_tensor)", "title": "" }, { "docid": "69a4ea3c991324c6281ddbb05c058d86", "score": "0.47420564", "text": "def period(self):\n rate = tofloat(self.base_rate.value)\n if rate == 0: return nan\n period = self.sequence_length/rate\n return period", "title": "" }, { "docid": "7ee88467eeb44cd0427d2ddfc8e00b7d", "score": "0.47404128", "text": "def degeneracy(self):\n return len(self)//periodicity(self)", "title": "" }, { "docid": "037fbdb4235d1803b2c4f019b4ec7342", "score": "0.47401607", "text": "def probability_by_time(self, t):\n pass", "title": "" }, { "docid": "cf3d3e6ae2798c04b0bf534683d26437", "score": "0.47329992", "text": "def _get_cycle_ratio(self, time, phase, period):\n phase_casted = tf.cast(phase, dtype=time.dtype)\n period_casted = tf.cast(period, dtype=time.dtype)\n time = tf.reshape(time, [tf.shape(time)[0],1]) #This change\n shifted_time = time - phase_casted\n cycle_ratio = (shifted_time%period_casted) / period_casted\n return tf.cast(cycle_ratio, dtype=tf.float32)", "title": "" }, { "docid": "ca38bab9eaaebd254e8bcc159d3763cd", "score": "0.47325802", "text": "def compute_period(x_sol, hamiltonian, parameters, hbar, index, state):\n\ts = np.linspace(0, 1, 2 ** 15 + 1) # Compute the s parameter with the correct number of element for doing the romb algorithm for the integral\n\tns = len(s) # Number of element in s\n\n\tx_sol_list = [] # Empty list to save the independent variables\n\tif type(x_sol) is list: # If there are more than one independent variable\n\t\tfor i in range(0, len(x_sol)): # Iterate over all the 
variables\n\t\t\tx_sol_list.append(x_sol(s)) # Save all the variables in a list\n\telse: # If there is only one independent variable\n\t\tx_sol_list = [x_sol(s)] # Save the variable in a list\n\t\tindex = [index] # Make the index a list with only one element\n\n\tfor i in range(0, len(index)): # Iterate over all the independent variables to include in the list of parameters\n\t\tparameters[index[i]] = x_sol_list[i] # Include the vec of independent variables\n\n\th_matrix = create_hypermatrix(parameters, hamiltonian) # Construct the hypermatrix of the hamiltonian\n\tenergies = np.linalg.eigvalsh(h_matrix) # Extract only the instant eigenenergies of the Hamiltonian\n\n\tn = np.shape(energies)[1] # Extract the dimension of the Hamiltonian\n\n\te_g = np.zeros([n - 1, ns]) # Array in which rows will be saved the gaps between the energies\n\tcounter = 0\n\tfor i in range(0, n): # Iterate over all the gaps\n\t\tif i != state:\n\t\t\te_g[counter, :] = np.abs(energies[:, state] - energies[:, i]) # Compute the gaps, always a positive value\n\t\t\tcounter += 1\n\n\tphi = romb(e_g, dx=(s[1] - s[0]), axis=1) / hbar # Compute the integral of the gaps\n\n\tt = 2 * np.pi / phi # Compute the period\n\n\treturn t", "title": "" }, { "docid": "86c122eb16199e0ca889150607bf8cf9", "score": "0.47302592", "text": "def periodicty(self):\n return self._periodicty", "title": "" }, { "docid": "6a8d362af49f80ed48c40952c94ccb3f", "score": "0.4723686", "text": "def _get_rapid_retrans_decay(self):\n return self.__rapid_retrans_decay", "title": "" }, { "docid": "e46743a98d8ef9ba74ce8ac023439a07", "score": "0.47232622", "text": "def get_learning_rate(self, epoch):\n return", "title": "" }, { "docid": "1594ef116a5fb6b44c15c95ec93944ac", "score": "0.47195083", "text": "def computingH(start,end,num):\n\n return float((end - start)/num)", "title": "" }, { "docid": "101221ec18f1348ffd961c24d4cf3225", "score": "0.4719434", "text": "def compute_inference_time(self):\n print ()\n r=requests.post(self.url,json={'connect':'yes',}) \n duration= json.loads(r.json()) \n return duration[\"time\"]", "title": "" }, { "docid": "fe3d5e3b516333ad867185a3e0495f1d", "score": "0.47152457", "text": "def period(start, end, year, proxy, savein, site, mask, verbose, config):\n # LOAD CONFIG FILE\n configname = config # change variable name\n if not configname:\n configname = 'config.json'\n\n config = load_config(configname)\n if not config: return None\n\n # SITE PARAMS\n site_params = config['site']\n asset_path = site_params['assetPath']\n property_name = site_params['propertyName']\n usersite = site # change variable name\n\n # SAVE PARAMS\n destination = savein or config['saveTo']\n save_params = config[destination]\n soptions = ['drive', 'asset', 'local']\n\n # MIN AREA\n limit = config['minArea']\n\n # RUN COMMAND AND HASH\n command = 'glad period {} {} --proxy {} -s {} -m {} -v {}'.format(\n start, end, proxy, savein, mask, verbose)\n if usersite:\n command += ' --site {}'.format(usersite)\n\n config_str = json.dumps(config, indent=2)\n tohash = '{} {}'.format(config_str, command)\n tohash = tohash.encode('utf-8')\n import hashlib\n h = hashlib.sha256()\n h.update(tohash)\n hexcode = h.hexdigest()\n logname = 'period {} to {} {}'.format(start, end, hexcode)\n\n header = HEADER.format(config_str, command)\n\n # LOGGER\n from geepyGLAD.logger import Logger\n logdir = 'logs'\n logger = Logger(logname, logdir)\n\n logger.header(header)\n\n if destination not in soptions:\n msg = 'savein parameter must be one of {}'.format(soptions)\n 
logger.log(msg)\n print(msg)\n return None\n\n # INITIALIZE EE\n import ee\n initEE(logger)\n try:\n from geepyGLAD import utils, alerts, batch\n except Exception as e:\n msg = 'ERROR while importing geepyGLAD - {}'.format(e)\n logger.log(msg)\n raise e\n\n site = ee.FeatureCollection(asset_path)\n\n if usersite:\n site = site.filterMetadata(property_name, 'equals', usersite)\n site = ee.Feature(site.first())\n\n args = dict(\n start=start,\n end=end,\n year=int(year),\n proxy=bool(proxy),\n site=site,\n limit=limit,\n property_name=property_name,\n verbose=verbose,\n folder=save_params['folder'],\n logger=logger\n )\n\n raster_mask_id = config['rasterMask']\n if raster_mask_id and mask:\n raster_mask = ee.Image(raster_mask_id)\n args['raster_mask'] = raster_mask\n\n # COMPUTE ALERTS\n try:\n batch.period(**args, destination=destination)\n except Exception as e:\n msg = 'ERROR: {}'.format(str(e))\n logger.log(msg)\n raise e", "title": "" }, { "docid": "a48b02aa64369334ab01603ae285fab8", "score": "0.47055587", "text": "def computingH(start,end,total):\n \n return float((end - start)/total)", "title": "" }, { "docid": "b0be4a56de6f2110b888ccfba689d9b3", "score": "0.46994677", "text": "def get_hps(self) -> Dict:\n return self._hyper_params", "title": "" }, { "docid": "424c661896a72c40249e0e2feae36699", "score": "0.4696818", "text": "def _period_grid(time):\n number_obs = len(time)\n length_lc = np.max(time) - np.min(time)\n\n dt = 2 * length_lc / number_obs\n max_period = np.rint(length_lc / dt) * dt\n min_period = dt\n\n periods = np.arange(min_period, max_period + dt, dt)\n\n return periods", "title": "" }, { "docid": "424c661896a72c40249e0e2feae36699", "score": "0.4696818", "text": "def _period_grid(time):\n number_obs = len(time)\n length_lc = np.max(time) - np.min(time)\n\n dt = 2 * length_lc / number_obs\n max_period = np.rint(length_lc / dt) * dt\n min_period = dt\n\n periods = np.arange(min_period, max_period + dt, dt)\n\n return periods", "title": "" }, { "docid": "a3670cd4558c3c4dd6927ad56b4e1a39", "score": "0.46840978", "text": "def HT_DCPERIOD(Real):\n return None, 1", "title": "" }, { "docid": "1425651c64b75ad22ff8f1977f0c1567", "score": "0.468399", "text": "def period_dot(self, t):\n t = Quantity(t, \"yr\")\n return self.B**2 / (self.period(t) * B_CONST**2)", "title": "" }, { "docid": "44ee6f1ea749047ed18bf0159211be32", "score": "0.46780792", "text": "def requests_per_day(self):\n return self._rh.requests_per_day", "title": "" }, { "docid": "1a7ee361bf0218507820fb56b63d8166", "score": "0.46544233", "text": "def estimate_period(x, y, period_low=1, period_high=100, res=200):\n periods, pgram = periodogram(x, y, period_low=period_low,\n period_high=period_high, res=res)\n return periods[pgram.argmax()]", "title": "" }, { "docid": "1dbde1a793fc91e81e53333398096baf", "score": "0.46356735", "text": "def compute_hitrate(self):\n HR = (self.a + self.d)/self.n\n return HR", "title": "" }, { "docid": "83e20de752f6bb3dbd647feaf9c075d0", "score": "0.46353546", "text": "def getHyperParameterDistribution(self, t, name, plot=False, **kwargs):\n if not self.storeHistory:\n raise PostProcessingError('To get past hyper-parameter distributions, Online Study must be called with '\n 'flag \"storeHistory=True\". 
Use \"getCurrentHyperParameterDistribution\" instead.')\n\n # determine indices of transition model and hyper-parameter\n hpIndex = -1\n for i, tm in enumerate(self.transitionModels):\n try:\n hpIndex = self._getHyperParameterIndex(tm, name)\n tmIndex = i\n except PostProcessingError:\n pass\n if hpIndex == -1:\n raise PostProcessingError('No hyper-parameter \"{}\" found. Check hyper-parameter names.'.format(name))\n\n # access hyper-parameter distribution\n if t == 'avg':\n # compute time-averaged distribution\n hyperParameterDistribution = np.sum(self.hyperParameterSequence, axis=0)/len(self.hyperParameterSequence)\n else:\n # try to access distribution of specified time step\n if t not in self.formattedTimestamps:\n raise PostProcessingError('Supplied time ({}) does not exist in data or is out of range.'.format(t))\n timeIndex = list(self.formattedTimestamps).index(t) # to select corresponding posterior distribution\n\n hyperParameterDistribution = self.hyperParameterSequence[timeIndex]\n\n hyperParameterDistribution = hyperParameterDistribution[tmIndex]\n axesToMarginalize = list(range(len(self.hyperParameterNames[tmIndex])))\n axesToMarginalize.remove(hpIndex)\n\n # reshape hyper-parameter grid for easy marginalization\n hyperGridSteps = [len(x) for x in self.allFlatHyperParameterValues[tmIndex]]\n distribution = hyperParameterDistribution.reshape(hyperGridSteps, order='C')\n marginalDistribution = np.squeeze(np.apply_over_axes(np.sum, distribution, axesToMarginalize))\n\n x = self.allFlatHyperParameterValues[tmIndex][hpIndex]\n if plot:\n # check if categorical\n if np.any(np.abs(np.diff(np.diff(x))) > 10 ** -10):\n plt.bar(np.arange(len(x)), marginalDistribution, align='center', width=1., **kwargs)\n plt.xticks(np.arange(len(x)), x)\n plt.ylabel('probability')\n # regular spacing\n else:\n plt.bar(x, marginalDistribution, align='center',\n width=self.hyperGridConstants[tmIndex][hpIndex],\n **kwargs)\n plt.ylabel('probability')\n\n plt.xlabel(self.hyperParameterNames[tmIndex][hpIndex])\n\n return x, marginalDistribution", "title": "" }, { "docid": "d1b3b495f409400cc97f00b0bd705b53", "score": "0.46346396", "text": "def lr_scheduler(epoch):\n return alpha / (1 + decay_rate * epoch)", "title": "" }, { "docid": "4a284493d9af4447e679ef9476d49e3f", "score": "0.46257442", "text": "def histogram_belmont_time(winning_times):", "title": "" }, { "docid": "bd99a39b1eb0606cbb301345157f464f", "score": "0.46247214", "text": "def set_hyperparameter_dict():\n param_dict = {\n 'max_seq_len': 384, \n 'train_batch_size': 128,\n 'test_batch_size': 32,\n 'num_epochs': 5,\n 'learning_rate': 2e-5,\n }\n return param_dict", "title": "" }, { "docid": "dba855507c7101c6b5a82115ddfd72f0", "score": "0.4622924", "text": "def periodicity(self) -> int:\n return _lib.ls_get_periodicity(self._payload)", "title": "" }, { "docid": "d9d2d155ed01cd52f9a35885cfddbe35", "score": "0.46173906", "text": "def get_growth_rate(self,doubling_time):\n if doubling_time is None or doubling_time == 0.0:\n return 0.0\n return (2.0 ** (1.0 / doubling_time) - 1.0)", "title": "" }, { "docid": "34767798f1214c44cd5a2bb607898aa0", "score": "0.4613837", "text": "def trickle_traffic(self) -> float:\n return pulumi.get(self, \"trickle_traffic\")", "title": "" }, { "docid": "c9cd97a28414d63e02ad3a667b36acc8", "score": "0.4603625", "text": "def pc_work_time_avg(self):\n return _op25_repeater_swig.gardner_costas_cc_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "e2d3f9a2872cc1ee3d48e784328cfc4a", "score": "0.46029702", "text": "def 
get_hyperparams(self):\n return self.hyperparams", "title": "" }, { "docid": "a645fce4be6bb4c062b1e7dedb80067a", "score": "0.459808", "text": "def figure_number_of_periods(number_of_days, mode) :\n\n #print \"periods(\", number_of_days,\", \",mode,\") called ...\"\n nm = 'periods'\n msg = \"called w/ (number_of_days=\"+str(number_of_days)\n msg += \", mode=\"+mode+\") \"\n\n # set upper limits for period lengths\n MIN_DAYS_IN_WK = 6\n MAX_DAYS_IN_WK = 9\n MIN_DAYS_IN_BIWK = 11\n MAX_DAYS_IN_BIWK = 22\n\n DELTA_WK = 9\n DELTA_BIWK = 18\n PERIOD_NUMBER_START = 1\n # Boundary condition catcher\n number_of_periods = -1\n\n # WK mode\n if mode == \"WK\":\n for n in range(2, number_of_days) :\n if number_of_days < 12 :\n if (number_of_days > MAX_DAYS_IN_WK or\n number_of_days < MIN_DAYS_IN_WK) :\n # raises an ERROR\n number_of_periods = -1\n break\n else :\n number_of_periods = PERIOD_NUMBER_START\n break\n\n elif number_of_days <= DELTA_WK * n :\n number_of_periods = PERIOD_NUMBER_START + ( n - 1 )\n # For easier comparison to periods.py\n if number_of_days > 72 :\n msg = \"length of anneal month = \" +str(number_of_days)+ \"; For real? \"\n\n # Do we really need to set this to 0? What's wrong with a bigger number?\n msg = \"I give it \"+str(number_of_periods)+\" (WKs). \"\n msg += \"Is this bad? \"\n msg += \"Code would usually give such a large difference \"\n msg += \"number_of_periods = 0, but why? \"\n\n break\n\n # BIWK mode\n elif mode == 'BIWK':\n for n in range(2, number_of_days) :\n if number_of_days < MAX_DAYS_IN_BIWK :\n # raises an ERROR if < MIN_DAYS_IN_BIWK1\n if number_of_days < MIN_DAYS_IN_BIWK : number_of_periods = -1\n else : number_of_periods = PERIOD_NUMBER_START\n break\n\n elif number_of_days <= DELTA_BIWK * n :\n number_of_periods = PERIOD_NUMBER_START + ( n - 1 )\n # For easier comparison to periods.py\n if number_of_days > 72 :\n msg = \"length of anneal month = \" +str(number_of_days)+ \"; For real? \"\n print((\"E\", msg, nm))\n # Do we really need to set this to 0? What's wrong with a bigger number?\n msg = \"I give it \"+str(number_of_periods)+\" (BIWKs). \"\n msg += \"Is this bad? \"\n msg += \"Code would usually give such a large difference \"\n msg += \"number_of_periods = 0, but why? \"\n print((\"W\", msg, nm))\n break\n else:\n sys.exit('what mode did you put in?')\n\n # return value\n #print \"return number_of_periods =\", number_of_periods\n return number_of_periods", "title": "" }, { "docid": "fc0970307d5053c16a78e0f7873bb92c", "score": "0.4595098", "text": "def Hyperparameters():\n best_acc = 0 # best test accuracy\n start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n Epochs = 50\n learning_rate = 0.01\n batch_size = 32\n # 定义两个数组\n Loss_list = []\n Train_Accuracy_list = []\n Test_Accuracy_list = []\n Loss_list.append(3)\n Test_Accuracy_list.append(0)\n Train_Accuracy_list.append(0)\n return best_acc, start_epoch, Epochs, learning_rate, batch_size, Loss_list, Train_Accuracy_list, Test_Accuracy_list", "title": "" } ]
b81cda7c44c9a95a958a4ec21e97f111
Logic for handling sub dictionaries in table_output (hide, reparse)
[ { "docid": "8777b65c057a3b1fc26fb145ec0e0bc0", "score": "0.5488281", "text": "def check_sub_dict(parsed_dict, key, value):\n # check for specific well-known keys we want to format and show differently.\n if key == 'address':\n # street city state post_code country\n pretty_address = \"{0} {1} {2} {3} {4}\".format(str(value.get('street', '')).strip(), str(value.get('city',\n '')).strip(),\n str(value.get('state', '')).strip(), str(value.get('post_code',\n '')).strip(),\n str(value.get('country', '')).strip())\n parsed_dict[get_pretty_name(key)] = pretty_address\n status = True\n\n elif key == 'peer_config':\n # routing peer configuration, add multiple lines.\n parsed_dict[get_pretty_name('protocol')] = value.get('protocol', None)\n parsed_dict[get_pretty_name('local_as_num')] = value['bgp_config'].get('local_as_num', None)\n parsed_dict[get_pretty_name('remote_as_num')] = value['bgp_config'].get('remote_as_num', None)\n parsed_dict[get_pretty_name('peer_ip')] = value.get('peer_ip', None)\n status = True\n\n elif key == 'ethernet_port':\n\n # Ethernet port info\n speed = value.get('speed', 'Unknown')\n duplex = value.get('full_duplex', 'Unknown')\n if duplex == 'Unknown':\n pretty_duplex = 'Unknown'\n elif duplex:\n pretty_duplex = \"Full Duplex\"\n else:\n pretty_duplex = \"Half Duplex\"\n\n if speed == 'Unknown':\n # speed was non retrievable\n parsed_dict[get_pretty_name('ethernet_port')] = pretty_duplex + ', ' + speed\n elif speed == 0:\n # port is down\n parsed_dict[get_pretty_name('ethernet_port')] = '(Auto)'\n else:\n # port is up\n parsed_dict[get_pretty_name('ethernet_port')] = pretty_duplex + ', ' + str(speed) + 'Mbps'\n status = True\n\n else:\n # not well known, do not display dict.\n status = False\n\n return status", "title": "" } ]
[ { "docid": "166e72bbf193ac96653fef6836052aed", "score": "0.5735426", "text": "def printptable(typec, t=\"adv\", at=typedict, ad=typedefdict):\n typec = typec[0].upper() + typec[1:].lower() ## This line capitalizes the type correctly for future use with the dictionaries.\n if t == \"adv\":\n typedict = at\n if t == \"def\":\n typedict = ad\n else:\n typedict = at\n if typedict == at:\n tabletype = \"advantages\"\n else:\n tabletype = \"defenses\" ## Previous lines set the type of table to be printed - advantages or defenses.\n print \"2.0x\\t1.0x\\t0.5x\\t0.0x\\t\" + \"<<\" + typec.upper(), tabletype + \">>\" ## Labels the columns.\n print \"\\t\\t\\t\\t\\t|\"\n if len(typedict[typec][2.0]) > 0:\n se = 1\n else:\n se = 0\n if len(typedict[typec][1.0]) > 0:\n e = 1\n else:\n e = 0\n if len(typedict[typec][0.5]) > 0:\n nve = 1\n else:\n nve = 0\n if len(typedict[typec][0.0]) > 0:\n ne = 1\n else:\n ne = 0 ## Previous lines set boring, uneffective ways to tell if a column is empty.\n al = 0\n cur1 = 0\n cur2 = 0\n cur3 = 0\n cur4 = 0\n while al < 4: # al is a variable that stores how many columns are empty - and since there are only 3 columns, 4 is where it stops printing.\n al = 0\n if se == 1:\n try:\n print typedict[typec][2.0][cur1][0:7] + \"\\t\",\n cur1 += 1\n except IndexError:\n se = 0\n print \"\\t\",\n al += 1\n else:\n print \"\\t\",\n al += 1\n if e == 1:\n try:\n print typedict[typec][1.0][cur2][0:7] + \"\\t\",\n cur2 += 1\n except IndexError:\n e = 0\n print \"\\t\",\n al += 1\n else:\n print \"\\t\",\n al += 1\n if nve == 1:\n try:\n print typedict[typec][0.5][cur3][0:7] + \"\\t\",\n cur3 += 1\n except IndexError:\n nve = 0\n print \"\\t\",\n al += 1\n else:\n print \"\\t\",\n al += 1\n if ne == 1:\n try:\n print typedict[typec][0.0][cur4][0:7] + \"\\t\\t|\",\n cur4 += 1\n except IndexError:\n ne = 0\n print \"\\t\\t|\",\n al += 1\n else:\n al += 1\n if al == 4:\n break\n print \"\\t\\t|\",\n \n print ## The long part before this just prints every type in the table in a \"SE\\tE\\t\\NVE\\tNE\\t\" format.", "title": "" }, { "docid": "474b007087e4b5462f0e6cf8af189143", "score": "0.5694981", "text": "def print_table(data, col_ids = [] ):\n\t\n\n\tfor row in data['response']['data']:\n\t\t\n\t\tfor i in range( len(row) if col_ids==[] else min(len(row), len(col_ids))):\n\t\t\tsys.stdout.write('-------------------------------')\n\t\tsys.stdout.write(\"\\n\")\n\t\t\n\t\tfor i in range(1, len(row) ): #don't print the subject key\n\t\t\tcell = row[i]\n\t\t\t\n \t\t\t# handle the case when only certain col ids\n \t\t\t# were selected\n \t\t\t# NOTE: adjusting for subject id\n \t\t\tif( len(col_ids) == 0 or (i-1) in col_ids):\n\t \t\t\tvalue = smart_str(cell)\n \t\t\t\tif( len(value) > 30):\n \t\t\t\t\tvalue = value[:26] + '...'\n \t\t\t\tsys.stdout.write( \"|{0:29}\".format(value))\n\t\tsys.stdout.write(\"|\\n\")", "title": "" }, { "docid": "fa30c4391fc18b9dcbf3c7dac41e24e6", "score": "0.55789304", "text": "def _make_table_subsections(dict_of_variables, plural):\n s = \"s\" if plural else \"\"\n html = []\n for key, section in dict_of_variables.items():\n for name, val in section.items():\n html.append(f\"<th class='sc-subheader'>Value{s}</th>\")\n if val.variances is not None:\n html.append(f\"<th class='sc-subheader'>Variance{s}</th>\")\n return \"\".join(html)", "title": "" }, { "docid": "71ad320ad756590562b4504b03afbb5b", "score": "0.5575455", "text": "def table_output(data, excludelist=None, orderlist=None, remove_sub_dict=True, remove_sub_list=True,\n space_indent=4, 
trailing_newline=True, filters_enabled=True):\n\n # handle empty list\n if not data:\n logger.debug('sub_table_output - got empty list')\n return_str = u\"\\tNo results found.\"\n if trailing_newline:\n return return_str + u'\\n'\n else:\n return return_str\n\n output_list = []\n\n # Check for debug override of excluding variables in output.\n if not filters_enabled:\n excludelist = None\n\n # Assume outer list wrapper\n for listline in data:\n parsed_dict = OrderedDict({})\n\n if orderlist is not None:\n # ordering list exists\n for priority_key in orderlist:\n # for each priority key, insert if exists.\n priority_key_value = listline.get(priority_key, None)\n if priority_key in listline:\n logger.debug('got priority key: {0}'.format(priority_key))\n\n if excludelist is not None:\n # there is a regex exclude list, check if key matches\n is_in_excludelist = False\n\n for regexstr in excludelist:\n if re.search(regexstr, priority_key):\n # matched exclude list\n is_in_excludelist = True\n\n if not is_in_excludelist:\n # Not in exclude list, add to output, 'prettyfiying' data as needed.\n\n # is this a list\n if (type(priority_key_value) is list) and (remove_sub_list is True):\n check_sub_list(parsed_dict, priority_key, priority_key_value)\n\n # is this a dict\n elif (type(priority_key_value) is dict) and (remove_sub_dict is True):\n check_sub_dict(parsed_dict, priority_key, priority_key_value)\n\n else:\n # other key, just add\n parsed_dict[get_pretty_name(priority_key)] = get_pretty_name(\n get_pretty_data(priority_key, priority_key_value))\n\n else:\n # no exclude list and not dict or list\n # is this a list\n if (type(priority_key_value) is list) and (remove_sub_list is True):\n check_sub_list(parsed_dict, priority_key, priority_key_value)\n\n # is this a dict\n elif (type(priority_key_value) is dict) and (remove_sub_dict is True):\n check_sub_dict(parsed_dict, priority_key, priority_key_value)\n\n else:\n # other key, just add\n parsed_dict[get_pretty_name(priority_key)] = get_pretty_name(\n get_pretty_data(priority_key, priority_key_value))\n\n # Pop the key out of the dictionary. Remaining keys will be sorted after priority ones.\n listline.pop(priority_key)\n else:\n # There is no value for this key. Add a blank to preserve spot in the desplay list\n # (cover case of blank spot in first value):\n parsed_dict[get_pretty_name(priority_key)] = ' '\n\n # Sort remaining keys not in priority orderlist (or all if orderlist is none). 
Inserted as found.\n for k, v in listline.items():\n\n if excludelist is not None:\n # there is a regex exclude list, check if key matches\n is_in_excludelist = False\n\n for regexstr in excludelist:\n if re.search(regexstr, k):\n # matched exclude list\n is_in_excludelist = True\n\n if not is_in_excludelist:\n # Not in exclude list, add to output, 'prettyfiying' data as needed.\n # logger.debug('name = {0}, pretty_name = {1}'.format(k, get_pretty_name(k)))\n\n # is this a list\n if (type(v) is list) and (remove_sub_list is True):\n check_sub_list(parsed_dict, k, v)\n\n # is this a dict\n elif (type(v) is dict) and (remove_sub_dict is True):\n check_sub_dict(parsed_dict, k, v)\n\n else:\n # other key, just add.\n parsed_dict[get_pretty_name(k)] = get_pretty_name(get_pretty_data(k, v))\n else:\n # no exclude list and not dict or list\n # logger.debug('name = {0}, pretty_name = {1}'.format(k, get_pretty_name(k)))\n\n # is this a list\n if (type(v) is list) and (remove_sub_list is True):\n check_sub_list(parsed_dict, k, v)\n\n # is this a dict\n elif (type(v) is dict) and (remove_sub_dict is True):\n check_sub_dict(parsed_dict, k, v)\n\n # other key, just add\n else:\n parsed_dict[get_pretty_name(k)] = get_pretty_name(get_pretty_data(k, v))\n # add parsed dictionary to output\n output_list.append(parsed_dict)\n\n # Make initial output string\n data_str = str(tabulate.tabulate(output_list, headers='keys'))\n\n # Indent string for output\n return_str = u\"\\n\".join((space_indent * u\" \") + i for i in data_str.splitlines())\n\n if trailing_newline:\n return return_str + u'\\n'\n else:\n return return_str", "title": "" }, { "docid": "dbc77cd912bebb6f681e21aaca55e55a", "score": "0.5528583", "text": "def print_dict(input_dict):\n if innotebook():\n outstr = tabulate({\n 'name': list(input_dict.keys()), \n 'description': list(input_dict.values()),\n }, headers=\"keys\", tablefmt='html')\n display(HTML(outstr))\n else:\n print(tabulate({\n 'name': list(input_dict.keys()), \n 'description': list(input_dict.values()),\n }, headers=\"keys\"))", "title": "" }, { "docid": "dab2272895513005ea640caf4a428344", "score": "0.5462363", "text": "def output(data):\n pt = PrettyTable()\n pt.field_names = [\"Algorithm\", \"DNA\", \"Protein\"]\n for key in set().union(*(ds.keys() for ds in [data[\"DNA\"], data[\"Protein\"]])):\n pt.add_row([key, str(data[\"DNA\"].get(key, 0)), str(data[\"Protein\"].get(key, 0))])\n print(pt)", "title": "" }, { "docid": "d6214ac3a40e80e9969dc8c2940cf191", "score": "0.54371494", "text": "def build_table(hdr1, hdr2, out_dict, hdr3=None, hdr4=None, hdr5=None):\n # Build two tables, one for the all vols which has slice balance info\n # and one that has the selected account info with more columns\n out_tbl = PrettyTable()\n if hdr3 is None:\n out_tbl.field_names = [hdr1, hdr2]\n for key, val in out_dict.items():\n out_tbl.add_row([key, val])\n else:\n out_tbl.field_names = [hdr1, hdr2, hdr3, hdr4, hdr5]\n for key, val in out_dict.items():\n vol_name = key\n vol_id = val[0]\n node_id = val[1]\n node_name = val[2]\n service_id = val[3]\n out_tbl.add_row([vol_id, vol_name, node_id, node_name, service_id])\n print(f\"{out_tbl}\")", "title": "" }, { "docid": "b26b8623442bb0ab00720feb85bd538d", "score": "0.53679067", "text": "def process_tables(self, _list):\r\n # Phase 1 \r\n _newlist = []\r\n for _i in _list:\r\n if _i.__contains__('table'):\r\n # - Group \"table-row\"\r\n _tmp = self.group_children( _i['table'], \r\n tag = 'table-row', \r\n clean= True)\r\n\r\n _tmp1 = []\r\n for _j in 
_tmp:\r\n if _j.__contains__('table-row'):\r\n _tmp2 = self.group_children( _j['table-row'], \r\n tag = 'table-cell', \r\n clean= True)\r\n _tmp3 = _j.copy()\r\n _tmp3['table-row'] = _tmp2\r\n _tmp1.append( _tmp3)\r\n else: \r\n _tmp1.append( _j)\r\n\r\n _tmp4 = _i.copy()\r\n _tmp4['table'] = _tmp1\r\n _newlist.append( _tmp4)\r\n else:\r\n _newlist.append( _i)\r\n _list = _newlist\r\n \r\n # Phase 2: convert into \"text\"\r\n _newlist = []\r\n for _i in _list:\r\n if _i.__contains__('table'):\r\n _table = _i['table']\r\n # Get rows.\r\n _nrow = 0\r\n for _j in _table:\r\n if _j.__contains__('table-row'):\r\n _row = _j['table-row']\r\n # Get cells\r\n for _k in _row:\r\n if _k.__contains__('table-cell'):\r\n _cell = _k['table-cell']\r\n _txt = u''\r\n for _m in _cell:\r\n _txt += _m['text'] + ' '\r\n _txt = _txt[0:-1]\r\n _tmp = _cell[0].copy()\r\n _tmp['text'] = _txt\r\n _tmp.pop('nesting')\r\n _tmp['tags']['type'] = 'table-cell'\r\n _tmp['tags'].update( { 'nrow' : _nrow })\r\n _newlist.append( _tmp)\r\n _nrow +=1\r\n else:\r\n _newlist.append( _i ) \r\n return _newlist", "title": "" }, { "docid": "84c8c80baa496f2e4d4ca12c7cde5af1", "score": "0.53593355", "text": "def print_structure(self, max_rows=20, output=sys.stdout):\n max_length = min(len(self.items()), max_rows)\n\n name_column = self.keys()[0:max_length]\n type_column = [str(len(table.rows)) for key, table in self.items()[0:max_length]]\n rows = zip(name_column, type_column)\n column_names = ['table', 'rows']\n text = Text()\n column_types = [text, text]\n\n table = Table(rows, column_names, column_types)\n\n return table.print_table(output=output, max_column_width=None)", "title": "" }, { "docid": "ef07abf31cc2ca938453e50d02844bec", "score": "0.5311365", "text": "def param_sweep_table(self, per_tagtype_results, fontsize=r\"\\small\", two_cols=True):\n self.outstring_list.append(r\"\\begin{table*}[h]\")\n self.outstring_list.append(r\"\\centering\")\n self.outstring_list.append(fontsize)\n if two_cols:\n self.outstring_list.append(r\"\\begin{tabular}{%s}\" % (\"l|\" + \"c\" * (N_TABLE_COLS-1) + \"|\" + \"c\" * (N_TABLE_COLS-1)))\n else:\n self.outstring_list.append(r\"\\begin{tabular}{%s}\" % (\"l\" + \"c\" * (N_TABLE_COLS-1)))\n self.outstring_list.append(r\"\\toprule\")\n firstrow = [\"\"]\n firstrow.extend(STATS_TO_SHOW) \n if two_cols:\n firstrow.extend(STATS_TO_SHOW) # Second time....\n self._write_row(firstrow)\n if two_cols:\n (tagtype1, tagtype2) = per_tagtype_results.keys()\n results1 = per_tagtype_results[tagtype1]\n results2 = per_tagtype_results[tagtype2] \n self._write_two_col_tagtype_subtable(tagtype1, tagtype2, results1, results2)\n else:\n for (tagtype, per_tagtype_results) in per_tagtype_results.iteritems():\n self._write_tagtype_subtable(tagtype, per_tagtype_results)\n self.outstring_list.append(r\"\\bottomrule\")\n self.outstring_list.append(r\"\\end{tabular}\")\n self.outstring_list.append(r\"\\caption{Results for ??? 
param sweep.}\")\n self.outstring_list.append(r\"\\label{???}\")\n self.outstring_list.append(r\"\\end{table*}\")\n self.outstring_list.append(\"\")\n return \"\\n\".join(self.outstring_list)", "title": "" }, { "docid": "130f669331e960dc555a0c927545d447", "score": "0.5188038", "text": "def parse_tb(tb, returnable, row_):\n temp = {\n 'tb_id': row_['identifier'],\n 'board': row_['board'],\n 'channel': row_['channel'],\n 'medium': row_['medium'],\n 'gradeLevel': row_['gradeLevel'],\n 'subject': row_['subject'],\n 'tb_name': row_['name'],\n 'status': row_['status']\n }\n try:\n temp['identifier'] = tb['identifier']\n except KeyError:\n pass\n try:\n temp['name'] = tb['name']\n except KeyError:\n pass\n try:\n temp['contentType'] = tb['contentType']\n except KeyError:\n pass\n try:\n temp['dialcodes'] = tb['dialcodes'][0]\n except KeyError:\n pass\n try:\n temp['leafNodesCount'] = tb['leafNodesCount']\n except KeyError:\n pass\n returnable.append(temp)\n if ('children' in tb.keys()) and tb['children']:\n for child in tb['children']:\n parse_tb(child, returnable, row_)", "title": "" }, { "docid": "52808bcb59dfbec2788e9a6fbc3322c5", "score": "0.51846856", "text": "def _PrintDictAsTable(self, title, src_dict):\n print(f'### {title:s}')\n print('')\n print('Identifier | Number')\n print('--- | ---')\n\n for key, value in sorted(src_dict.items()):\n print(f'{key:s} | {value:d}')\n\n print('')", "title": "" }, { "docid": "63e166342915a5fecabed72c71245eba", "score": "0.515415", "text": "def populateTable(self):\n\n if len(self.documentsDict) > 0:\n for key in self.documentsDict:\n data = []\n docData = self.documentsDict[key]\n if docData.gotMS == True:\n data = docData.massSpectrum\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"MS\", \"color\":(200, 236, 236)}\n self.append_to_table(data, key, \"\", \"MS\", **kwargs)\n\n if 'unidec' in docData.massSpectrum:\n for innerKey in docData.massSpectrum['unidec']:\n if innerKey in ['Charge information']: continue\n data = docData.massSpectrum['unidec'][innerKey]\n kwargs = {\"color\":(176, 202, 220), \"toolset\":innerKey}\n self.append_to_table(data, key, innerKey, \"UniDec\", **kwargs)\n\n if hasattr(docData, \"gotSmoothMS\"):\n if docData.gotSmoothMS == True:\n data = docData.smoothMS\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"MS\", \"color\":(116, 185, 255)}\n self.append_to_table(data, key, \"\", \"Processed MS\", **kwargs)\n\n\n if 'unidec' in docData.smoothMS:\n for innerKey in docData.smoothMS['unidec']:\n if innerKey in ['Charge information']: continue\n data = docData.smoothMS['unidec'][innerKey]\n kwargs = {\"color\":(176, 202, 220), \"toolset\":innerKey}\n self.append_to_table(data, key, innerKey, \"UniDec, processed\", **kwargs)\n\n if docData.got1RT == True:\n data = docData.RT\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"1D\", \"color\":(219, 209, 255)}\n self.append_to_table(data, key, \"\", \"RT\", **kwargs)\n\n if docData.got1DT == True:\n data = docData.DT\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"1D\", \"color\":(255, 118, 117)}\n self.append_to_table(data, key, \"\", \"1D\", **kwargs)\n\n if docData.got2DIMS == True:\n data = docData.IMS2D\n kwargs = {\"toolset\":\"2D\", \"color\":(255, 206, 252)}\n self.append_to_table(data, key, \"\", \"2D\", **kwargs)\n\n if 
docData.got2Dprocess == True:\n data = docData.IMS2Dprocess\n kwargs = {\"toolset\":\"2D\", \"color\":(99, 110, 114)}\n self.append_to_table(data, key, \"\", \"2D, processed\", **kwargs)\n\n if docData.gotExtractedIons == True:\n for innerKey in docData.IMS2Dions:\n data = docData.IMS2Dions[innerKey]\n kwargs = {\"toolset\":\"2D\", \"color\":(179, 180, 180)}\n self.append_to_table(data, key, innerKey, \"2D\", **kwargs)\n\n if docData.gotMultipleMS == True:\n for innerKey in docData.multipleMassSpectrum:\n data = docData.multipleMassSpectrum[innerKey]\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"MS\", \"color\":(200, 236, 236)}\n self.append_to_table(data, key, innerKey, \"MS, multiple\", **kwargs)\n\n if 'unidec' in docData.multipleMassSpectrum[innerKey]:\n for innerInnerKey in docData.multipleMassSpectrum[innerKey]['unidec']:\n if innerInnerKey in ['Charge information']: continue\n data = docData.multipleMassSpectrum[innerKey]['unidec'][innerInnerKey]\n kwargs = {\"color\":(176, 202, 220), \"toolset\":innerInnerKey}\n innerInnerKeyLabel = \"{} | {}\".format(innerInnerKey, innerKey)\n self.append_to_table(data, key, innerInnerKeyLabel, \"UniDec, multiple\", **kwargs)\n\n if hasattr(docData, 'gotMultipleRT'):\n for innerKey in docData.multipleRT:\n data = docData.multipleRT[innerKey]\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"1D\", \"color\":(219, 209, 255)}\n self.append_to_table(data, key, innerKey, \"RT, multiple\", **kwargs)\n\n if hasattr(docData, 'gotMultipleDT'):\n for innerKey in docData.multipleDT:\n data = docData.multipleDT[innerKey]\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"1D\", \"color\":(255, 118, 117)}\n self.append_to_table(data, key, innerKey, \"1D, multiple\", **kwargs)\n\n\n if docData.gotExtractedDriftTimes == True:\n for innerKey in docData.IMS1DdriftTimes:\n if docData.dataType == 'Type: MANUAL': tableKey = '1D'\n else: tableKey = 'DT-IMS'\n data = docData.IMS1DdriftTimes[innerKey]\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"1D\", \"color\":(154, 236, 219)}\n self.append_to_table(data, key, innerKey, tableKey, **kwargs)\n\n if docData.gotCombinedExtractedIonsRT == True:\n for innerKey in docData.IMSRTCombIons:\n data = docData.IMSRTCombIons[innerKey]\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"RT\", \"color\":(219, 209, 255)}\n self.append_to_table(data, key, innerKey, \"RT, combined\", **kwargs)\n\n if docData.gotCombinedExtractedIons == True:\n for innerKey in docData.IMS2DCombIons:\n data = docData.IMS2DCombIons[innerKey]\n kwargs = {\"toolset\":\"2D\", \"color\":(255, 206, 252)}\n self.append_to_table(data, key, innerKey, \"2D, combined\", **kwargs)\n\n if docData.got2DprocessIons == True:\n for innerKey in docData.IMS2DionsProcess:\n data = docData.IMS2DionsProcess[innerKey]\n kwargs = {\"toolset\":\"2D\", \"color\":(255, 206, 252)}\n self.append_to_table(data, key, innerKey, \"2D, processed\", **kwargs)\n\n # Overlay data\n if docData.gotOverlay == True:\n for innerKey in docData.IMS2DoverlayData:\n data = docData.IMS2DoverlayData[innerKey]\n overlayMethod = re.split('-|,|:|__', innerKey)\n if overlayMethod[0] in ['Mask', 'Transparent']: color_label = \"{}/{}\".format(data['cmap1'], data['cmap2'])\n else: color_label = 
data.get('cmap', \"\")\n kwargs = {\"toolset\":\"2D\", \"color_label\":color_label, \"color\":(214, 220, 198)}\n self.append_to_table(data, key, innerKey, \"Overlay\", **kwargs)\n\n if docData.gotStatsData == True:\n for innerKey in docData.IMS2DstatsData:\n data = docData.IMS2DstatsData[innerKey]\n overlayMethod = re.split('-|,|:|__', innerKey)\n kwargs = {\"color\":(222, 215, 255), \"toolset\":\"2D\"}\n self.append_to_table(data, key, innerKey, \"Statistical\", **kwargs)\n\n if len(docData.other_data) > 0:\n for innerKey in docData.other_data:\n data = docData.other_data[innerKey]\n kwargs = {\"color\":(215, 224, 184)}\n self.append_to_table(data, key, innerKey, \"Annotated data\", **kwargs)\n \n # Tandem dat a\n if len(docData.tandem_spectra) > 0:\n data = docData.tandem_spectra\n if data.get('cmap', \"\") == \"\": data['cmap'] = self.config.interactive_line_color\n kwargs = {\"toolset\":\"1D\", \"color\":(219, 209, 255)}\n self.append_to_table(data, key, \"\", \"MS/MS\", **kwargs)\n\n self.onAddPageChoices(evt=None)\n else:\n msg = 'Document list is empty'\n self.presenter.onThreading(None, (msg, 4), action='updateStatusbar')\n self.onAddPageChoices(evt=None)", "title": "" }, { "docid": "b83b466cc3bd5e01c8d2c456879a9ccf", "score": "0.5150255", "text": "def printTable(self, title, entry):\n print(title + \": \")\n anz = 0\n tmp = StringBuilder()\n i = 0\n while i < entry.index:\n if not entry.isStrong(i):\n tmp.append(printTableEntry(entry.entries[i]))\n j = 0\n while j < entry.getRetIndexAnz(i):\n retIndex = entry.getRetIndex(i, j)\n tmp.append(\" (\")\n if entry.isExpanded(i):\n tmp.append(\"EX:\").append(retIndex).append(\":\").append(entry.isExtendedTable(i)).append(\"/\").append(entry.isOnTable(i)).append(\"/\")\n else:\n tmp.append(retIndex).append(\"/\").append(printTableEntry(entry.entries[retIndex])).append(\")\")\n j += 1\n tmp.append(\" \")\n anz += 1\n if (anz % 5) == 0:\n tmp.append(\"\\r\\n\")\n i += 1\n print(tmp.__str__())", "title": "" }, { "docid": "ca7341123066a011d9883d9a4ef1b06d", "score": "0.51325935", "text": "def mageck_printdict(dict0,args,sgdict,sampledict,sampleids):\n # print header\n # print items\n dfmt=\"{:.5g}\"\n ofile=open(args.output_prefix+'.normalized.txt','w')\n # headers\n mapres_list=['']*len(sampledict)\n for (k,v) in sampledict.items():\n mapres_list[v]=k\n if len(sampledict)>0:\n cntheader=[mapres_list[x] for x in sampleids]\n else:\n cntheader=None\n logging.info('Writing normalized read counts to '+args.output_prefix+'.normalized.txt')\n if cntheader !=None:\n print('sgRNA\\tGene\\t'+'\\t'.join(cntheader),file=ofile)\n if len(sgdict)==0:\n for (k,v) in dict0.items():\n print(k+'\\t'+'None'+'\\t'+'\\t'.join([str(x) for x in v]),file=ofile)\n else:\n for (k,v) in dict0.items():\n if k not in sgdict: # only print those in the genedict\n logging.warning(k+' not in the sgRNA list')\n continue\n print('\\t'.join([k,sgdict[k]])+'\\t'+'\\t'.join([str(x) for x in v]),file=ofile)\n # print the remaining counts, fill with 0\n ofile.close()", "title": "" }, { "docid": "d9910a061eebc5b48eb6bd035ffdaea4", "score": "0.5129878", "text": "def normalize_tables(self, filing_date, input_dict, visited_data_names) -> (dict, dict):\n master_dict = {}\n for normalized_category, pattern_string in flatten_dict(self.regex_patterns).items():\n master_dict[normalized_category] = np.nan\n\n for title, table in input_dict.items():\n for scraped_name, scraped_value in flatten_dict(table).items():\n for normalized_category, pattern_string in 
flatten_dict(self.regex_patterns).items():\n if re.search(pattern_string, scraped_name, re.IGNORECASE):\n master_dict[normalized_category] = scraped_value\n break\n pprint(master_dict)\n return {}, master_dict", "title": "" }, { "docid": "a0e439e305afb9e89abba582971d517c", "score": "0.511949", "text": "def writeTableRow( dict, keys=None ):\n if not keys:\n keys = list(dict.keys())\n keys.sort()\n s = \"\"\n # Write a row of the HTML table for every key in keys. Handle some key\n # values specially.\n for key in keys:\n if key == \"anNames\":\n # Print the number of antennas when printing the list of ant names\n s += '<tr><th>' + key + '</th>' + \\\n '<td> Number = ' + str( len( dict[key] ) ) + ', ' + \\\n 'Names = ' + str( dict[key] ) + \\\n '</td></tr>\\n'\n elif key == 'freqCov':\n # Print frequencies with limited precision\n fs = \"\"\n for freqs in dict[key]:\n fs += '(%.3e, %.3e) ' % ( freqs[0], freqs[1] )\n s += '<tr><th>' + key + '</th><td>' + fs + '</td></tr>\\n'\n elif (key == 'RA') or (key == 'RAPnt'):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + UVDesc.PRA2HMS(dict[key]) + '</td></tr>\\n'\n elif (key == 'Dec') or (key == 'DecPnt'):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + UVDesc.PDec2DMS(dict[key]) + '</td></tr>\\n'\n elif (key == 'timeRange'):\n s += '<tr><th> Time Range </th>' + \\\n '<td>' + day2dhms(dict['timeRange'][0]) + ' - ' + \\\n day2dhms(dict['timeRange'][1]) + ' </td></tr>\\n'\n elif (key == 'Freq'):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%6.3f\"%(dict[key]*1.0e-9) + ' GHz </td></tr>\\n'\n elif (key == 'BW'):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%6.3f\"%(dict[key]*1.0e-6) + ' MHz </td></tr>\\n'\n elif (key == 'SNR'):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%6.1f\"%(dict[key]) + ' </td></tr>\\n'\n elif (key == 'Exposure'):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%6.3f\"%(dict[key]*24.0) + ' Hours </td></tr>\\n'\n elif (key == 'Size') or (key == \"Cells\"):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%8.5f\"%(dict[key]*3.6e3) + ' asec </td></tr>\\n'\n elif (key == 'ISum') or (key == \"QSum\") or (key == \"USum\"):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%8.3f\"%(dict[key]*1.0e3) + ' mJy </td></tr>\\n'\n elif (key == 'IPeak') or (key == \"QPeak\") or (key == \"UPeak\"):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%8.3f\"%(dict[key]*1.0e3) + ' mJy </td></tr>\\n'\n elif (key == 'IRMS') or (key == \"QRMS\") or (key == \"URMS\"):\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + \"%8.5f\"%(dict[key]*1.0e3) + ' mJy </td></tr>\\n'\n elif (key == 'IBeam'):\n s += '<tr><th> Clean Beam </th>' + \\\n '<td>' + \\\n \" %6.4f, %6.4f, %6.1f\"%(dict[key][0]*3.6e3, dict[key][1]*3.6e3, dict[key][2]) + \\\n ' (asec,asec,deg) </td></tr>\\n'\n elif (key == 'FailProc'):\n s += '<tr><th> Failing process </th>' + \\\n '<td>' + \" %s\"%(dict[key])+' </td></tr>\\n'\n else:\n # Everything else\n s += '<tr><th>' + key + '</th>' + \\\n '<td>' + str( dict[key] ) + '</td></tr>\\n'\n return s", "title": "" }, { "docid": "cbb0b66a31373c95f4ac28142fa5b56b", "score": "0.5102154", "text": "def format_data(\n result: Dict[str, List[Dict[str, Any]]],\n field: str,\n output_prefix: str,\n data_key: str,\n) -> Dict[str, Any]:\n data = result.get(field, [])\n output = {f\"{output_prefix}_raw\": data if data else None}\n\n for count, item in enumerate(data, start=1):\n output[f\"{output_prefix}_{count}\"] = item.get(data_key)\n\n return output", "title": "" }, { "docid": "04597fe57f8e3ab35749d62f9dbb501b", "score": 
"0.5093385", "text": "def exoDataClean(data, output):\n\tkeys = data.keys()\n\tcleanData = dict()\n\tfor i, key in enumerate(keys):\n\t\tif i == len(keys)-1:\n\t\t\toutput.write(str(key)+\",\\n\")\n\t\telse:\n\t\t\toutput.write(str(key)+\", \")\n\t\tcleanData[key] = list()\n\n\tfor i in xrange(len(data[keys[0]])):\n\t\tflag = True\n\t\tfor key in keys:\n\t\t\tif data[key][i].strip() == \"\":\n\t\t\t\tflag = False\n\t\t\t\tbreak\n\t\tif flag == True:\n\t\t\tfor j, key in enumerate(keys):\n\t\t\t\tif j == len(keys)-1:\n\t\t\t\t\toutput.write(str(data[key][i])+\",\\n\")\n\t\t\t\telse:\n\t\t\t\t\toutput.write(str(data[key][i])+\", \")\n\t\t\t\tcleanData[key].append(data[key][i])\n\treturn cleanData", "title": "" }, { "docid": "c76d61e30849953ba6c19354201e3178", "score": "0.5062532", "text": "def tablize_dict_of_dicts(data, include=None, outer_key_name=\"index\"):\n\n if include is None:\n # using a set is fast but doesn't preserve order\n # include = set(chain(*[ld.keys() for ld in listofdicts]))\n\n # this approach is slower but preserves order\n include = []\n for outerkey, rowdict in data.items():\n for k in rowdict.keys():\n if k not in include:\n include.append(k)\n\n table = []\n table.append(include)\n for key, innerdict in data.items():\n row = [key]\n for k in include:\n row.append(innerdict.get(k, \" \"))\n\n table.append(row)\n\n table[0].insert(0, outer_key_name)\n\n return table", "title": "" }, { "docid": "7c4773fce1918a4c1f2d3233cc923fac", "score": "0.5061999", "text": "def check_sub_list(parsed_dict, key, value):\n # check for specific well-known lists we want to format and show differently.\n if key == 'ipv4_addrs':\n # add IPs\n pretty_ips = \", \".join(value)\n parsed_dict[get_pretty_name(key)] = pretty_ips\n\n status = True\n elif key == 'bound_interfaces':\n # add bound_interfaces\n pretty_interfaces = \", \".join(value)\n parsed_dict[get_pretty_name(key)] = pretty_interfaces\n\n status = True\n elif key == 'lan_network_ids':\n # add bound_interfaces\n pretty_lannets = \", \".join(value)\n parsed_dict[get_pretty_name(key)] = pretty_lannets\n\n status = True\n\n elif key == 'site_paths_allowed':\n # WAN Paths\n path_list = []\n # list of dicts with wn_name and wn_path_type\n for wn_entry in value:\n # verify dict\n if type(wn_entry) is dict:\n # get wan_path_type, prettify and added to path_list\n path_list.append(get_pretty_name(wn_entry.get('wan_path_type', \"Unknown\")))\n # Create a path string\n parsed_dict['Allowed WAN Paths'] = \", \".join(path_list)\n status = True\n\n elif key == 'roles':\n # list of dicts - roles.\n role_list = []\n # list of roles\n for role_entry in value:\n # verify dict\n if type(role_entry) is dict:\n # get the roles\n role_list.append(get_pretty_name(role_entry.get('name', 'Unknown')))\n # Create role string\n parsed_dict['Roles'] = \", \".join(role_list)\n status = True\n\n else:\n # don't add it to output.\n status = False\n\n return status", "title": "" }, { "docid": "97d6da1cbde5674b7defd5b58cdb1f2d", "score": "0.505427", "text": "def format(self, table):\n response = {}\n response['rows'] = rows = []\n response['cols'] = cols = []\n for column in table.columns:\n cols.append({\"id\": column.name, \"label\": column.name, \"type\": column.data_type})\n for row in table.data:\n row_list = []\n for e in row:\n row_list.append({\"v\": e})\n rows.append({\"c\": row_list})\n return response", "title": "" }, { "docid": "ef0e28f785eb25fafb525fc3bcf7e8da", "score": "0.50422555", "text": "def _display_table(self):\n\n keys = list(self.colnames)\n 
if self.hide_keys:\n # Index only the keys not in hide keys in order\n [keys.remove(key) for key in self.hide_keys if key in keys]\n\n if self.display_keys != slice(None):\n keys = [dk for dk in self.display_keys if dk in keys]\n\n table = self[keys]\n # The slicing operation resets display and hide keys to default, but we\n # have already applied it\n table.unhide_columns()\n\n return table", "title": "" }, { "docid": "9e4cf820dbe7a120c6585505a5bee94a", "score": "0.50422037", "text": "def dump_inline_table(self, section):\r\n retval = \"\"\r\n if isinstance(section, dict):\r\n val_list = []\r\n for k, v in section.items():\r\n val = self.dump_inline_table(v)\r\n val_list.append(k + \" = \" + val)\r\n retval += \"{ \" + \", \".join(val_list) + \" }\\n\"\r\n return retval\r\n else:\r\n return unicode(self.dump_value(section))", "title": "" }, { "docid": "444bcad4294495bec9d46c8ef13d7d70", "score": "0.5035515", "text": "def json2table(json):\n filter_terms = ['ResponseMetadata']\n table = []\n try:\n for k, v in filter(lambda (k, v): k not in filter_terms,\n json.iteritems()):\n table.append([k.encode('ascii', 'ignore'),\n str(v).encode('ascii', 'ignore')])\n return tabulate(table, tablefmt='fancy_grid')\n except Exception as e:\n print(e)\n return json", "title": "" }, { "docid": "9905a42efa9ef08b7e43af7f456e2ca5", "score": "0.5033793", "text": "def convert_object(self, json_input):\n if not json_input:\n return \"\" #avoid empty tables\n return \"%s<tr>%s</tr></table>\" %(\n self.table_init_markup,\n \"</tr><tr>\".join([\n \"<th>%s</th><td>%s</td>\" %(\n self.convert_json_node(k),\n self.convert_json_node(v)\n )\n for k, v in json_input.items()\n ])\n )", "title": "" }, { "docid": "1b3d17fee97385b286d08067d78502cf", "score": "0.50268096", "text": "def make_json_dumper(wfp):\n last = 0\n if subtables:\n for (name, lines) in subtables:\n wfp.write(\" const char *%s_vocabulary[] = {\\n\" % name)\n for ln in lines:\n value = ln[1]\n if value.endswith(\" (default)\"):\n value = value[:-10]\n wfp.write(' \"%s\",\\n' % value)\n wfp.write(\" };\\n\")\n wfp.write('#define DISPLAY_%s(n) (((n) < '\n '(unsigned int)NITEMS(%s_vocabulary)) ? '\n '%s_vocabulary[n] : \"INVALID %s\")\\n' %\n (name.upper(), name, name, name.upper()))\n wfp.write(\"\\n\")\n record = after is None\n # Elements of each tuple type except 'a':\n # 1. variable name,\n # 2. unscaled printf format\n # 3. wrapper for unscaled variable reference\n # 4. scaled printf format\n # 5. wrapper for scaled variable reference\n # Elements of 'a' tuple:\n # 1. Name of array field\n # 2. None\n # 3. None\n # 4. None\n # 5. 
Name of length field\n tuples = []\n vocabularies = [x[0] for x in subtables]\n for (i, t) in enumerate(table):\n if '|' in t:\n fields = [s.strip() for s in t.split('|')]\n name = fields[4]\n ftype = fields[5]\n if after == name:\n record = True\n continue\n if before == name:\n record = False\n continue\n if ftype == 'x' or not record:\n continue\n fmt = r'\\\"%s\\\":' % name\n fmt_text = r'\\\"%s_text\\\":' % name\n if ftype == 'u':\n tuples.append((name,\n fmt+\"%u\", \"%s\",\n None, None))\n elif ftype == 'e':\n tuples.append((name,\n fmt+\"%u\", \"%s\",\n None, None))\n if vocabularies:\n this = vocabularies.pop(0)\n ref = \"DISPLAY_%s(%%s)\" % (this.upper())\n else:\n ref = 'FOO[%s]'\n tuples.append((name,\n fmt_text+r\"\\\"%s\\\"\", ref,\n None, None))\n elif ftype == 'i':\n tuples.append((name,\n fmt+\"%d\", \"%s\",\n None, None))\n elif ftype == 't':\n tuples.append((name,\n fmt+r'\\\"%s\\\"', \"%s\",\n None, None))\n elif ftype == 'b':\n tuples.append((name,\n fmt+r'\\\"%s\\\"', \"JSON_BOOL(%s)\",\n None, None))\n elif ftype[0] == 'd':\n print(\"Cannot generate code for data members\", file=sys.stderr)\n sys.exit(1)\n elif ftype[0] == 'U':\n tuples.append((name,\n fmt+\"%u\", \"%s\",\n fmt+\"%%.%sf\" % ftype[1], '%s / SCALE'))\n elif ftype[0] == 'I':\n tuples.append((name,\n fmt+\"%d\", \"%s\",\n fmt+\"%%.%sf\" % ftype[1], '%s / SCALE'))\n elif ftype[0] == 'a':\n ftype = ftype[1:]\n if ftype[0] == '^':\n lengthfield = last\n else:\n lengthfield = \"n\" + name\n tuples.append((name, None, None, None, lengthfield))\n else:\n print(\"Unknown type code\", ftype, file=sys.stderr)\n sys.exit(1)\n last = name\n startspan = 0\n\n def scaled(idx):\n \"\"\"Check if scaled.\"\"\"\n return tuples[idx][3] is not None\n\n def tslice(e, idx):\n \"\"\"Missing docstring.\"\"\"\n return [x[idx] for x in tuples[startspan:e+1]]\n\n lbase = \" \" * 8\n step = \" \" * 4\n inarray = None\n header = \"(void)snprintf(buf + strlen(buf), buflen - strlen(buf),\"\n for (i, (var, uf, uv, sf, sv)) in enumerate(tuples):\n if uf is not None:\n print(lbase + \"for (i = 0; i < %s.%s; i++) {\" % (structname, sv),\n file=wfp)\n inarray = var\n lbase = \" \" * 12\n startspan = i+1\n continue\n # At end of tuples, or if scaled flag changes, or if next op is array,\n # flush out dump code for a span of fields.\n if i+1 == len(tuples):\n endit = '}\",'\n elif tuples[i+1][1] is not None:\n endit = r',\\\"%s\\\":[\",' % tuples[i+1][0]\n elif scaled(i) != scaled(i + 1):\n endit = ',\",'\n else:\n endit = None\n if endit:\n if not scaled(i):\n print(lbase + header, file=wfp)\n if inarray:\n prefix = '{\"'\n else:\n prefix = '\"'\n print(lbase + step + prefix + ','.join(tslice(i, 1)) + endit,\n file=wfp)\n for (j, t) in enumerate(tuples[startspan:i+1]):\n if inarray:\n ref = structname + \".\" + inarray + \"[i].\" + t[0]\n else:\n ref = structname + \".\" + t[0]\n wfp.write(lbase + step + t[2] % ref)\n if j == i - startspan:\n wfp.write(\");\\n\")\n else:\n wfp.write(\",\\n\")\n else:\n print(lbase + \"if (scaled)\", file=wfp)\n print(lbase + step + header, file=wfp)\n print(lbase + step * 2 + '\"' + ','.join(tslice(i, 3)) + endit,\n file=wfp)\n for (j, t) in enumerate(tuples[startspan:i+1]):\n if inarray:\n ref = structname + \".\" + inarray + \"[i].\" + t[0]\n else:\n ref = structname + \".\" + t[0]\n wfp.write(lbase + step*2 + t[4] % ref)\n if j == i - startspan:\n wfp.write(\");\\n\")\n else:\n wfp.write(\",\\n\")\n print(lbase + \"else\", file=wfp)\n print(lbase + step + header, file=wfp)\n print(lbase + step * 2 + 
'\"' + ','.join(tslice(i, 1)) + endit,\n file=wfp)\n for (j, t) in enumerate(tuples[startspan:i+1]):\n if inarray:\n ref = structname + \".\" + inarray + \"[i].\" + t[0]\n else:\n ref = structname + \".\" + t[0]\n wfp.write(lbase + step*2 + t[2] % ref)\n if j == i - startspan:\n wfp.write(\");\\n\")\n else:\n wfp.write(\",\\n\")\n startspan = i+1\n # If we were looking at a trailing array, close scope\n if inarray:\n lbase = \" \" * 8\n print(lbase + \"}\", file=wfp)\n print(lbase + \"if (buf[strlen(buf)-1] == ',')\", file=wfp)\n print(lbase + step + r\"buf[strlen(buf)-1] = '\\0';\", file=wfp)\n print(lbase + \"(void)strlcat(buf, \\\"]}\\\", buflen - strlen(buf));\",\n file=wfp)", "title": "" }, { "docid": "a974a0f229751ebb2c87085fde6b1249", "score": "0.50166523", "text": "def adapt_info_for_output(self):\n for key in self.finalized_data_dictionary:\n if key in self.dict_of_data_to_be_formatted:\n self.finalized_data_dictionary[key] = self.dict_of_data_to_be_formatted[key]\n self.finalized_data_list_for_single_entry = [value for value in self.finalized_data_dictionary.values()]\n return self.finalized_data_list_for_single_entry", "title": "" }, { "docid": "6db443ebba0467aff5e8d9638e9e6c7a", "score": "0.5010922", "text": "def descend_obj(obj,sep='\\t'):\n if type(obj) in [h5py._hl.group.Group,h5py._hl.files.File]:\n for key in obj.keys():\n print (sep,'-',key,':',obj[key])\n descend_obj(obj[key],sep=sep+'\\t')\n elif type(obj)==h5py._hl.dataset.Dataset:\n for key in obj.attrs.keys():\n print (sep+'\\t','-',key,':',obj.attrs[key])", "title": "" }, { "docid": "af7a23dea74badf539418ccc0bb3fbe3", "score": "0.50062406", "text": "def print_data(self):\n for row in self.table_data:\n print('Row: %s' % row.rowid)\n for k in row.keys():\n v = row[k]\n print('|- %s: %s' % (k, v))", "title": "" }, { "docid": "46294d497018faa1b65a40bf421fbeb3", "score": "0.50003624", "text": "def test_make_sidecar_table(schema_obj):\n # mri.MRISpatialEncoding selected for having some level and description addenda\n rendered_table = render.make_sidecar_table(schema_obj, \"mri.MRISpatialEncoding\").split(\"\\n\")\n\n assert rendered_table[0].startswith(\"| **Key name**\")\n assert rendered_table[1].startswith(\"|-------------\")\n\n fields = schema_obj.rules.sidecars.mri.MRISpatialEncoding.fields\n assert len(rendered_table) == len(fields) + 2\n\n for field, render_row in zip(fields, rendered_table[2:]):\n assert render_row.startswith(f\"| {field}\")\n spec = fields[field]\n if isinstance(spec, str):\n level = spec\n level_addendum = \"\"\n description_addendum = \"\"\n else:\n level = spec[\"level\"]\n level_addendum = spec.get(\"level_addendum\", \"\").replace(\"required\", \"REQUIRED\")\n description_addendum = spec.get(\"description_addendum\", \"\")\n\n assert level.upper() in render_row\n assert level_addendum.split(\"\\n\")[0] in render_row\n assert description_addendum.split(\"\\n\")[0] in render_row", "title": "" }, { "docid": "34a9b1483b26a6ce424a6aad59b1c95c", "score": "0.49867263", "text": "def print_tabel(tabel):\n if tabel['kop']:\n print(tabel['kop'])\n for regel in tabel['inhoud']:\n print(regel)", "title": "" }, { "docid": "0a57604553930ff3fbb0a9a4fd3cd3a3", "score": "0.4965026", "text": "def get_table(init_dict):\n init_dict[\"replication\"][\"binsize\"] = 5000\n init_dict[\"replication\"][\"states\"] = 90\n init_dict[\"replication\"][\"beta\"] = 0.9999\n result_trans_5000, result_fixp_5000 = get_repl_result(init_dict[\"replication\"])\n init_dict[\"replication\"][\"beta\"] = 0\n result_trans_5000_0, 
result_fixp_5000_0 = get_repl_result(init_dict[\"replication\"])\n init_dict[\"replication\"][\"binsize\"] = 2571\n init_dict[\"replication\"][\"states\"] = 175\n result_trans_2571_0, result_fixp_2571_0 = get_repl_result(init_dict[\"replication\"])\n init_dict[\"replication\"][\"beta\"] = 0.999\n result_trans_2571, result_fixp_2571 = get_repl_result(init_dict[\"replication\"])\n table = \"\\\\begin{tabular}{lrrrrr} \\\\toprule States & 90 & 90 & 175 & 175 \\\\\\\\\"\n table += \"\\\\midrule Binsize & 5000 & 5000 & 2571 & 2571 \\\\\\\\ \\\\midrule \"\n table += \"$\\\\theta_{30}$ & \" + str(round(result_trans_5000[\"x\"][0], 4)) + \" & \"\n table += str(round(result_trans_5000_0[\"x\"][0], 4)) + \" & \"\n table += str(round(result_trans_2571[\"x\"][0], 4)) + \" & \"\n table += str(round(result_trans_2571_0[\"x\"][0], 4)) + \"\\\\\\\\\" + \"$\\\\theta_{31}$ & \"\n table += str(round(result_trans_5000[\"x\"][1], 4)) + \" & \"\n table += str(round(result_trans_5000_0[\"x\"][1], 4)) + \" & \"\n table += str(round(result_trans_2571[\"x\"][1], 4)) + \" & \"\n table += str(round(result_trans_2571_0[\"x\"][1], 4)) + \" \\\\\\\\ \"\n table += \"$\\\\theta_{32}$ & \" + str(round(result_trans_5000[\"x\"][2], 4)) + \" & \"\n table += str(round(result_trans_5000_0[\"x\"][2], 4)) + \" & \"\n table += str(round(result_trans_2571[\"x\"][2], 4)) + \" & \"\n table += str(round(result_trans_2571_0[\"x\"][2], 4)) + \" \\\\\\\\\"\n table += \"$\\\\theta_{33}$ & & & \"\n table += str(round(result_trans_2571[\"x\"][3], 4)) + \" & \"\n table += str(round(result_trans_2571_0[\"x\"][3], 4)) + \" \\\\\\\\ \"\n table += \"RC & \" + str(round(result_fixp_5000[\"x\"][0], 4)) + \" & \"\n table += str(round(result_fixp_5000_0[\"x\"][0], 4))\n table += \" & \" + str(round(result_fixp_2571[\"x\"][0], 4)) + \" & \"\n table += str(round(result_fixp_2571_0[\"x\"][0], 4))\n table += \" \\\\\\\\ $\\\\theta_{11}$ & \" + str(round(result_fixp_5000[\"x\"][1], 4)) + \" & \"\n table += str(round(result_fixp_5000_0[\"x\"][1], 4)) + \" & \"\n table += str(round(result_fixp_2571[\"x\"][1], 4)) + \" & \"\n table += str(round(result_fixp_2571_0[\"x\"][1], 4)) + \" \\\\\\\\ LL & \"\n table += str(round(result_trans_5000[\"fun\"] + result_fixp_5000[\"fun\"], 4)) + \" & \"\n table += str(round(result_trans_5000_0[\"fun\"] + result_fixp_5000_0[\"fun\"], 4))\n table += \" & \" + str(round(result_trans_2571[\"fun\"] + result_fixp_2571[\"fun\"], 4))\n table += \" & \"\n table += str(round(result_trans_2571_0[\"fun\"] + result_fixp_2571_0[\"fun\"], 4))\n table += \" \\\\\\\\ \\\\midrule $\\\\beta$ & 0.9999\"\n table += \" & 0 & 0.9999 & 0 \\\\\\\\ \\\\bottomrule \\\\end{tabular}\"\n os.makedirs(\"figures\", exist_ok=True)\n f = open(\"figures/replication.txt\", \"w+\")\n f.write(table)\n f.close()", "title": "" }, { "docid": "ff699a1e687976e6190b5003d8921e28", "score": "0.49622014", "text": "def manage_datatype(self):\n #Try to retrieve table for each section in the current page\n self.tables_by_section = {}\n for section in self.sections:\n self.tables_by_section.update(\\\n self.retrieve_data(section))\n\n def group_table(section_dict):\n \"\"\"\n Group all RadioTable of different section in a single list.\n Recusively search RadioTable in subsections.\n \"\"\"\n list_table = []\n for section_content in section_dict.values():\n if type(section_content) == OrderedDict:\n list_table.extend(group_table(section_content))\n elif type(section_content) == list:\n list_table.extend(section_content)\n return list_table\n\n self.tables = 
group_table(self.tables_by_section)", "title": "" }, { "docid": "b8a6ab6f944affd51a072d8661bdc0a8", "score": "0.4959002", "text": "def html_table_from_nested_dict(data, ordering):\n\n html = \"<table><tbody>\"\n # Add an empty first cell for the row header column\n header_row = [\"\"]\n header_row.extend(ordering)\n html += html_table_header_row(header_row)\n\n for row_header, row in data.items():\n html += html_row_with_ordered_headers(row, ordering, row_header)\n\n return html + \"\\n</tbody></table>\"", "title": "" }, { "docid": "9c62ce27834ac3494a499666b268bc0a", "score": "0.4958803", "text": "def produce_output(self, rows):\n super(TableFormatter, self).produce_output(rows)\n self.print_stash()", "title": "" }, { "docid": "d85b4da18a2d2fe5471b488ed67aa363", "score": "0.49530903", "text": "def ContentToWrite(isc_dict, num_tab, content, tokens):\n s = ''\n for key, val in isc_dict.items():\n if key in tokens:\n for childkey, childval in val.items():\n s = key + ' ' + str(childval) + ';\\n'\n content.append(s)\n s = ''\n content.append('\\n')\n elif (isinstance(val, dict)):\n for tab in range (0, num_tab):\n s += \"\\t\"\n s += key + \" {\\n\"\n content.append(s)\n num_tab += 1\n ContentToWrite(val, num_tab, content, tokens)\n if num_tab >= 1:\n num_tab -= 1\n s = ''\n for tab in range (0, num_tab):\n s += \"\\t\"\n s += \"};\\n\"\n content.append(s)\n if num_tab == 0:\n content.append(\"\\n\")\n s = ''\n else:\n for tab in range (0, num_tab):\n s += \"\\t\"\n if \"True\" in str(val):\n s += key + \";\\n\"\n else:\n s += key + \" \" + str(val) + \";\\n\"\n content.append(s)\n s = ''\n if num_tab == 0:\n return content", "title": "" }, { "docid": "711f1b15b9034d2c0900c8147c48b0a6", "score": "0.49389514", "text": "def finalize_output(output, filter_key):\n\n\tfor year in output['unicef']:\n\t\tcount_countries_donors(output['unicef'][year])\n\n\n\tfor year in output['unicef']:\n\t\toutput['unicef'][year]['value']['U'] = output['unicef'][year]['value']['B'] - output['unicef'][year]['value']['C']\n\n\t\tfor k in output['unicef'][year]['value']:\n\t\t\toutput['unicef'][year]['value'][k] = round(output['unicef'][year]['value'][k], 1)\n\toutput['unicef'] = {\n\t\t'years': sorted(output['unicef'].values(), key=itemgetter('year')),\n\t\t'invalid_transactions': output['invalid_transactions'],\n\t\t'total_transactions': output['total_transactions']\n\t}\n\toutput['unicef']['current_year_index'] = get_current_year_index(output['unicef']['years'])\n\n\tfor year in output['donors']:\n\t\tfor donor in output['donors'][year]:\n\t\t\tcount_countries_donors(output['donors'][year][donor])\n\n\tyear_donors = {} \n\tfor year in output['countries']:\n\t\tfor country in output['countries'][year]:\n\t\t\tif year not in year_donors:\n\t\t\t\tyear_donors[year] = set()\n\t\t\tfor donor in output['countries'][year][country]['donors']:\n\t\t\t\t# gets the donor count the way the map and flow views do it; i.e. 
only count donors\n\t\t\t\t# that donated to a country\n\t\t\t\tyear_donors[year].add(donor)\n\t\t\tcount_countries_donors(output['countries'][year][country])\n\n\n\tfor year_data in output['unicef']['years']:\n\t\t# recalculate donor count for each year since increment_totals doesn't\n\t\t# take any filtering into account\n\t\tyear = year_data['year']\n\t\tyear_data['num_donors'] = len(year_donors[year])\n\t\t# output['countries'][country] = sorted(output['countries'][country].values(), key=itemgetter('year'))", "title": "" }, { "docid": "13dd210e317e99f5656394b069a9c055", "score": "0.49329644", "text": "def test_nested_dict(self):\n nested = self.TEI.export(output=Mimetypes.PYTHON.NestedDict, exclude=[\"tei:note\"])\n self.assertEqual(nested[\"1\"][\"3\"][\"8\"], \"Ibis ab excusso missus in astra sago. \",\n \"Check that notes are removed \")\n self.assertEqual(nested[\"1\"][\"pr\"][\"1\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Check that dictionary path is well done\")\n self.assertEqual(nested[\"1\"][\"12\"][\"1\"], \"Itur ad Herculeas gelidi qua Tiburis arces \",\n \"Check that dictionary path works on more than one passage\")\n self.assertEqual(nested[\"2\"][\"pr\"][\"1\"], \"'Quid nobis' inquis 'cum epistula? parum enim tibi \",\n \"Check that different fist level works as well\")\n self.assertEqual(\n [list(nested.keys()), list(nested[\"1\"].keys())[:3], list(nested[\"2\"][\"pr\"].keys())[:3]],\n [[\"1\", \"2\"], [\"pr\", \"1\", \"2\"], [\"sa\", \"1\", \"2\"]],\n \"Ensure that text keeps its order\")", "title": "" }, { "docid": "2577c31493d47e8dcdd8f8f17a8bfeed", "score": "0.49302596", "text": "def group_table(section_dict):\n list_table = []\n for section_content in section_dict.values():\n if type(section_content) == OrderedDict:\n list_table.extend(group_table(section_content))\n elif type(section_content) == list:\n list_table.extend(section_content)\n return list_table", "title": "" }, { "docid": "513e879cfce6d040b4db76596c8216d2", "score": "0.49291167", "text": "def print_table():\n for i in range(len(table) * 3):\n print('-', end='')\n\n print()\n\n for row in range(len(table)):\n print('|', end=' ')\n for col in range(len(table)):\n if table[row][col] == '_':\n print(' ', end=' ')\n else:\n print(table[row][col], end=' ')\n print('|')\n\n for i in range(len(table) * 3):\n print('-', end='')\n print()", "title": "" }, { "docid": "f09195e0012deb03fde29290837206ff", "score": "0.4923641", "text": "def get_html(titles, subtitles, dbe, col_labels, col_names, col_sorting, tbl,\r\n flds, cur, first_col_as_label, val_dics, add_total_row, where_tbl_filt,\r\n css_idx, *, display_n=None, page_break_after=False):\r\n debug = False\r\n verbose = False\r\n idx_and_data = namedtuple('idx_and_data', 'sort_idx, lbl_cols') \r\n CSS_LBL = mg.CSS_SUFFIX_TEMPLATE % (mg.CSS_LBL, css_idx)\r\n CSS_ALIGN_RIGHT = mg.CSS_SUFFIX_TEMPLATE % (mg.CSS_ALIGN_RIGHT, css_idx)\r\n CSS_TOTAL_ROW = mg.CSS_SUFFIX_TEMPLATE % (mg.CSS_TOTAL_ROW, css_idx)\r\n CSS_PAGE_BREAK_BEFORE = mg.CSS_SUFFIX_TEMPLATE % (\r\n mg.CSS_PAGE_BREAK_BEFORE, css_idx)\r\n html = []\r\n title_dets_html = output.get_title_dets_html(titles, subtitles, css_idx,\r\n istable=True)\r\n html.append(title_dets_html)\r\n html.append(f\"\\n\\n{mg.REPORT_TABLE_START}<table cellspacing='0'>\\n\") ## IE6 - no support CSS borderspacing\r\n hdr_html = get_hdr_dets(col_labels, css_idx)\r\n html.append(hdr_html)\r\n ## build body\r\n body_html = ['\\n<tbody>', ]\r\n ## Prepare column level config\r\n ## pre-store val dics for each 
column where possible\r\n cols_n = len(col_names)\r\n col_val_dics = []\r\n for col_name in col_names:\r\n if val_dics.get(col_name):\r\n col_val_dic = val_dics[col_name]\r\n col_val_dics.append(col_val_dic)\r\n else:\r\n col_val_dics.append(None)\r\n ## pre-store css class(es) for each column\r\n col_class_lsts = [[] for unused in col_names]\r\n if first_col_as_label:\r\n col_class_lsts[0] = [CSS_LBL]\r\n for i, col_name in enumerate(col_names):\r\n if flds[col_name][mg.FLD_BOLNUMERIC] and not col_val_dics[i]:\r\n col_class_lsts[i].append(CSS_ALIGN_RIGHT)\r\n if add_total_row:\r\n row_tots = [0 for unused in col_names] ## init\r\n row_tots_used = set() ## some will never have anything added to them\r\n ## get data from SQL\r\n objqtr = getdata.get_obj_quoter_func(dbe)\r\n colnames_clause = ', '.join([objqtr(x) for x in col_names])\r\n \"\"\"\r\n Get data from SQL and apply labels. Collect totals along the way as is\r\n currently the case.\r\n\r\n Sort by labels if appropriate. Then generate HTML row by row and cell by\r\n cell.\r\n \"\"\"\r\n SQL_get_data = dedent(f\"\"\"\\\r\n SELECT {colnames_clause}\r\n FROM {getdata.tblname_qtr(dbe, tbl)}\r\n {where_tbl_filt}\r\n \"\"\")\r\n if debug: print(SQL_get_data)\r\n cur.execute(SQL_get_data) ## must be dd.cur\r\n ## get labelled vals and a sorting index (list of labelled values)\r\n idx_and_data_rows = []\r\n row_idx = 0\r\n while True:\r\n if display_n:\r\n if row_idx >= display_n:\r\n break ## got all we need\r\n row = cur.fetchone()\r\n if row is None:\r\n break ## run out of rows\r\n row_idx+=1\r\n sorting_lbls = []\r\n labelled_cols = []\r\n for idx_col in range(cols_n):\r\n ## process row data to display cell contents\r\n raw_val = row[idx_col]\r\n if col_val_dics[idx_col]: ## has a label dict\r\n row_val = col_val_dics[idx_col].get(\r\n raw_val,\r\n '-' if raw_val is None else raw_val) ## use if possible\r\n else:\r\n if raw_val or raw_val in ('', 0):\r\n row_val = raw_val\r\n elif raw_val is None:\r\n row_val = '-'\r\n labelled_cols.append(row_val)\r\n ## include row_val in lbl list which the data will be sorted by\r\n if col_sorting[idx_col] == mg.SORT_LBL_KEY:\r\n sorting_lbls.append(row_val)\r\n else:\r\n sorting_lbls.append(raw_val) ## no label\r\n ## process totals\r\n if add_total_row:\r\n ## Skip if first col as val and this is first col\r\n ## Skip if prev was a Null (\"-\")\r\n ## Add to running total if a number\r\n if ((first_col_as_label and idx_col == 0) \r\n or row_val == '-'):\r\n pass\r\n elif (lib.TypeLib.is_basic_num(row_val) \r\n and lib.TypeLib.is_basic_num(row_tots[idx_col])):\r\n row_tots[idx_col] += row_val\r\n row_tots_used.add(idx_col)\r\n idx_and_data_rows.append(idx_and_data(sorting_lbls, labelled_cols))\r\n if add_total_row:\r\n if debug: print(f'row_tots: {row_tots}')\r\n ## sort labelled data if appropriate\r\n if debug and verbose:\r\n print(f'Unsorted\\n\\n{idx_and_data_rows}')\r\n idx_and_data_rows.sort(key=lambda s: s.sort_idx)\r\n if debug and verbose:\r\n print(f'Sorted\\n\\n{idx_and_data_rows}')\r\n ## generate html\r\n for idx_and_data_row in idx_and_data_rows:\r\n labelled_cols = idx_and_data_row.lbl_cols\r\n row_tds = []\r\n for i, labelled_col in enumerate(labelled_cols):\r\n ## cell format\r\n col_class_names = '\"' + ' '.join(col_class_lsts[i]) + '\"'\r\n col_classes = (\r\n f'class = {col_class_names}' if col_class_names else '')\r\n row_tds.append(f'<td {col_classes}>{labelled_col}</td>')\r\n body_html.append('<tr>' + ''.join(row_tds) + '</td></tr>')\r\n if add_total_row:\r\n row_tot_vals = 
[]\r\n for i in range(cols_n):\r\n val = (str(row_tots[i]) if i in row_tots_used else '&nbsp;&nbsp;')\r\n row_tot_vals.append(val)\r\n if first_col_as_label:\r\n tot_cell = f\"<td class='{CSS_LBL}'>\" + _('TOTAL') + '</td>'\r\n row_tot_vals.pop(0)\r\n else:\r\n tot_cell = ''\r\n ## never a displayed total for strings (whether orig data or labels)\r\n joiner = f'</td><td class=\"{CSS_ALIGN_RIGHT}\">'\r\n body_html.append(f\"<tr class='{CSS_TOTAL_ROW}'>\"\r\n + tot_cell + f'<td class=\"{CSS_ALIGN_RIGHT}\">'\r\n + joiner.join(row_tot_vals) + '</td></tr>')\r\n body_html.append('</tbody>')\r\n html.append('\\n'.join(body_html))\r\n html.append(f'\\n</table>{mg.REPORT_TABLE_END}')\r\n if page_break_after:\r\n html.append(f\"<br><hr><br><div class='{CSS_PAGE_BREAK_BEFORE}'></div>\")\r\n title = (titles[0] if titles else mg.TAB_TYPE2LBL[mg.DATA_LIST])\r\n output.append_divider(html, title, indiv_title='')\r\n return '\\n'.join(html)", "title": "" }, { "docid": "a40be228b41c19da30d0f5d4d8940501", "score": "0.49146357", "text": "def build_table(self):", "title": "" }, { "docid": "d1986bcef6374d436f0b1272e1b003b4", "score": "0.49092203", "text": "def dump_remaining(fsh, struct, empty_num, key_cols, value_cols, mark_new=None):\n if \"data\" in struct:\n # end condition\n if \"used\" not in struct:\n # unused row found,\n # construct the output row, consisting of keys\n # followed by blanks, followed by values\n # XXX: this breaks when the keys aren't the first columns\n row = []\n for key in key_cols:\n row.append(struct[\"data\"][key])\n row.extend([\"\"] * empty_num)\n for value in value_cols:\n row.append(struct[\"data\"][value])\n if mark_new:\n row.append(1)\n\n # append the created row\n fsh.append(row)\n return\n else:\n # we're not fully deep in the tree, keep diving\n for item in struct:\n dump_remaining(\n fsh, struct[item], empty_num, key_cols, value_cols, mark_new=mark_new\n )", "title": "" }, { "docid": "bf8f64e1c3d89654c94f82400e42b361", "score": "0.49035808", "text": "def _post_process_output(self, output):\n cellnames = list(output.keys())\n context = self.grid['context'][cellnames].tolist()\n years = output[cellnames[0]]['years']\n Np = len(cellnames)\n Ny = len(years)\n\n keys = ['precip', 'runoff', 'evapo', 'perco',\n 'subrun1', 'subrun2', 'rechg']\n data = {key: np.zeros((Np, Ny, 12)) for key in keys}\n data['cid'] = cellnames\n data['years'] = years\n data['idx_nan'] = []\n data['lat_dd'] = self.grid.loc[cellnames]['lat_dd'].values\n data['lon_dd'] = self.grid.loc[cellnames]['lon_dd'].values\n\n for i, cellname in enumerate(cellnames):\n print(\"\\rPost-processing cell %d of %d...\" % (i+1, Np), end=' ')\n for key in keys[:-1]:\n data[key][i, :, :] = output[cellname][key]\n\n if np.any(np.isnan(output[cellname]['rechg'])):\n data['idx_nan'].append(i)\n data['rechg'][i, :, :] = output[cellname]['rechg']\n elif context[i] == 2:\n # Redistribute recharge as subsurface runoff if cell is\n # located next to a stream.\n if np.all(output[cellname]['subrun2'] == 0):\n # Convert recharge as surficial subsurface runoff.\n data['subrun1'][i, :, :] += output[cellname]['rechg']\n else:\n # Convert recharge as deep subsurface runoff.\n data['subrun2'][i, :, :] += output[cellname]['rechg']\n else:\n data['rechg'][i, :, :] = output[cellname]['rechg']\n print(\"done\")\n\n return data", "title": "" }, { "docid": "6cf7d8689f6313b8ebd63ca1aebd58f5", "score": "0.4900038", "text": "def _format_show_dictionary(dict_x):\n\n try:\n _keysNames = dict_x.keys()\n pairs = \"\"\n\n for _keyName in 
_keysNames:\n if type(dict_x[_keyName]) != list and type(dict_x[_keyName]) != dict:\n pairs += str(_keyName) + \": \" \\\n + str(dict_x[_keyName]) + '\\n'\n if type(dict_x[_keyName]) == list:\n pairs += str(_keyName) + \": \"\n pairs += _format_sublist(dict_x[_keyName], False) + '\\n'\n if type(dict_x[_keyName]) == dict:\n pairs += str(_keyName) + \": \"\n pairs += _format_subdict(dict_x[_keyName], False) + '\\n'\n return pairs[:-1]\n\n except Exception:\n return dict_x", "title": "" }, { "docid": "d3f1e73b26627e7c3fb33c25afd8b06b", "score": "0.48974314", "text": "def parse_dict(self, data, objects):\n elems = self.parse_array(data, objects, b'>>')\n return PdfDict(zip(elems[::2], elems[1::2]))", "title": "" }, { "docid": "ad7281627f1b252ce9ee91d0cc7abc6c", "score": "0.48637915", "text": "def post_process_inline_table(self):\n # print(self.execution_status_json.keys())\n\n # print(\"post_process_inline_table\")\n # print(self.execution_status_json.keys())\n self.outputs = {}\n for o in self.execution_status_json['outputAttachments']:\n attachment_name = o['name']\n category = o['category']\n attachment_type = o['type']\n # print(attachment_type)\n # print(category)\n # print(attachment_name)\n if category == 'output' and attachment_type == 'INLINE_TABLE':\n table = o['table']\n # print(attachment_name)\n self.outputs[attachment_name] = self.post_process_inline_table_get_dataframe(table)\n # elif attachment_name == 'solution.json':\n # # Note: the attachment_type == 'REST', which means the contents is a URL to get the solution, not the solution itself.\n # data_url = o['url']\n # self.post_process_retrieve_solution(data_url)\n # # elif attachment_name[-4:] == '.zip':\n # elif \"dump_\" in attachment_name:\n # data_url = o['url']\n # self.retrieve_file(attachment_name, data_url)\n # pass", "title": "" }, { "docid": "fcc0b85d87965751c957f6785f2f73f6", "score": "0.48607087", "text": "def build_one_value_parsed_output(**kwargs) -> Tuple[Dict[str, Any], str]:\n command_parse_and_output_data = kwargs[\"command_parse_and_output_data\"]\n collection_name = command_parse_and_output_data.get(\"collection_name\")\n response = kwargs[\"handled_result\"]\n output = parse_two_keys_dict(generate_list_dicts(response[collection_name]))\n output[\"DATETIME\"] = response.get(\"DATETIME\")\n output[\"TEXT\"] = response.get(\"TEXT\")\n readable_output = tableToMarkdown(name=command_parse_and_output_data[\"table_name\"], t=output)\n return output, readable_output", "title": "" }, { "docid": "7cdb5db886314d7501cac33806eeec05", "score": "0.48602575", "text": "def render_dictionary(data, headers=None):\n return IPython.core.display.HTML(_html.HtmlBuilder.render_table(data, headers))", "title": "" }, { "docid": "757010950840f2b802131112fb24abee", "score": "0.4859387", "text": "def _decodeOptionsTable(record):\n\n def _last_trade_date_transformation(string):\n \"\"\"\n Helper Function put last trade date in proper format\n Don actualy use here but keeping for now for python datetime\n transform with a pandas.DataFrame\n \"\"\"\n return datetime.strptime(string[:-4], \"%Y-%m-%d %I:%M%p\")\n\n if record is None:\n # Calls and Puts table didn't have any data\n return None, None\n record = json.loads(record)\n frame = {\"Last Trade Date\": record[\"Last Trade Date\"]}\n shape = np.frombuffer(\n base64.b64decode(record['Shape'].encode('ascii')),\n dtype=np.int32).astype(int)\n\n table = np.frombuffer(base64.b64decode(\n record['Table'].encode('ascii')), dtype=np.float16).reshape(shape)\n\n for label, data in 
zip(record['Column Labels'], table.T):\n frame[label] = data\n\n return frame, len(table)", "title": "" }, { "docid": "d5d5f6f4f73657737c4244b7d7f9968b", "score": "0.4856183", "text": "def tablize(listofdicts, include=\"*\"):\n\n table = []\n if include == \"*\":\n # using a set is fast but doesn't preserve order\n # include = set(chain(*[ld.keys() for ld in listofdicts]))\n\n # this approach is slower but preserves order\n include = list(listofdicts[0].keys())\n for rowdict in listofdicts[1:]:\n for k in rowdict.keys():\n if k not in include:\n include.append(k)\n # include = list(include)\n\n # include only specified columns\n table.append(include)\n for i in listofdicts:\n # i2 = dict{k:v for k,v in i.iteritems() if i in include}\n row = []\n for k in include:\n row.append(i.get(k, \" \"))\n\n table.append(row)\n\n return table", "title": "" }, { "docid": "1a9f6c5aff1306cc574e6d57e1991a4f", "score": "0.4855806", "text": "def convertTable(self, table):\n warnings = 0\n # this array will contain strings that will be shown in case of possible\n # errors, before the user is asked if he wants to accept the changes.\n warning_messages = []\n newTable = table\n ##################\n # bring every <tag> into one single line.\n num = 1\n while num != 0:\n newTable, num = re.subn(r'([^\\r\\n]{1})(<[tT]{1}[dDhHrR]{1})',\n r'\\1\\r\\n\\2', newTable)\n\n ##################\n # every open-tag gets a new line.\n\n ##################\n # Note that we added the ## characters in markActiveTables().\n # <table> tag with attributes, with more text on the same line\n newTable = re.sub(\n r'(?i)[\\r\\n]*?<##table## (?P<attr>[\\w\\W]*?)>(?P<more>[\\w\\W]*?)[\\r\\n ]*',\n r'\\r\\n{| \\g<attr>\\r\\n\\g<more>', newTable)\n # <table> tag without attributes, with more text on the same line\n newTable = re.sub(r'(?i)[\\r\\n]*?<##table##>(?P<more>[\\w\\W]*?)[\\r\\n ]*',\n r'\\r\\n{|\\n\\g<more>\\r\\n', newTable)\n # <table> tag with attributes, without more text on the same line\n newTable = re.sub(r'(?i)[\\r\\n]*?<##table## (?P<attr>[\\w\\W]*?)>[\\r\\n ]*',\n r'\\r\\n{| \\g<attr>\\r\\n', newTable)\n # <table> tag without attributes, without more text on the same line\n newTable = re.sub(r'(?i)[\\r\\n]*?<##table##>[\\r\\n ]*',\n '\\r\\n{|\\r\\n', newTable)\n # end </table>\n newTable = re.sub(r'(?i)[\\s]*<\\/##table##>',\n '\\r\\n|}', newTable)\n\n ##################\n # caption with attributes\n newTable = re.sub(\n r'(?i)<caption (?P<attr>[\\w\\W]*?)>(?P<caption>[\\w\\W]*?)<\\/caption>',\n r'\\r\\n|+\\g<attr> | \\g<caption>', newTable)\n # caption without attributes\n newTable = re.sub(r'(?i)<caption>(?P<caption>[\\w\\W]*?)<\\/caption>',\n r'\\r\\n|+ \\g<caption>', newTable)\n\n ##################\n # <th> often people don't write them within <tr>, be warned!\n # <th> with attributes\n newTable = re.sub(\n r\"(?i)[\\r\\n]+<th(?P<attr> [^>]*?)>(?P<header>[\\w\\W]*?)<\\/th>\",\n r\"\\r\\n!\\g<attr> | \\g<header>\\r\\n\", newTable)\n\n # <th> without attributes\n newTable = re.sub(r\"(?i)[\\r\\n]+<th>(?P<header>[\\w\\W]*?)<\\/th>\",\n r'\\r\\n! \\g<header>\\r\\n', newTable)\n\n # fail save. sometimes people forget </th>\n # <th> without attributes, without closing </th>\n newTable, n = re.subn(r'(?i)[\\r\\n]+<th>(?P<header>[\\w\\W]*?)[\\r\\n]+',\n r'\\r\\n! \\g<header>\\r\\n', newTable)\n if n > 0:\n warning_messages.append(\n u'WARNING: found <th> without </th>. 
(%d occurences)\\n' % n)\n warnings += n\n\n # <th> with attributes, without closing </th>\n newTable, n = re.subn(\n r'(?i)[\\r\\n]+<th(?P<attr> [^>]*?)>(?P<header>[\\w\\W]*?)[\\r\\n]+',\n r'\\n!\\g<attr> | \\g<header>\\r\\n', newTable)\n if n > 0:\n warning_messages.append(\n u'WARNING: found <th ...> without </th>. (%d occurences\\n)' % n)\n warnings += n\n\n ##################\n # <tr> with attributes\n newTable = re.sub(\"(?i)[\\r\\n]*<tr(?P<attr> [^>]*?)>[\\r\\n]*\",\n r\"\\r\\n|-\\g<attr>\\r\\n\", newTable)\n\n # <tr> without attributes\n newTable = re.sub(\"(?i)[\\r\\n]*<tr>[\\r\\n]*\",\n r\"\\r\\n|-\\r\\n\", newTable)\n\n ##################\n # normal <td> without arguments\n newTable = re.sub(r'(?i)[\\r\\n]+<td>(?P<cell>[\\w\\W]*?)<\\/td>',\n r'\\r\\n| \\g<cell>\\r\\n', newTable)\n\n ##################\n # normal <td> with arguments\n newTable = re.sub(\n r'(?i)[\\r\\n]+<td(?P<attr> [^>]*?)>(?P<cell>[\\w\\W]*?)<\\/td>',\n r'\\r\\n|\\g<attr> | \\g<cell>', newTable)\n\n # WARNING: this sub might eat cells of bad HTML, but most likely it\n # will correct errors\n # TODO: some more docu please\n newTable, n = re.subn(\"(?i)[\\r\\n]+<td>(?P<cell>[^\\r\\n]*?)<td>\",\n r\"\\r\\n| \\g<cell>\\r\\n\", newTable)\n if n > 0:\n warning_messages.append(\n u'<td> used where </td> was expected. (%d occurences)\\n' % n)\n warnings += n\n\n # fail save, sometimes it's a <td><td></tr>\n # newTable, n = re.subn(\"[\\r\\n]+<(td|TD)>([^<]*?)<(td|TD)><\\/(tr|TR)>\",\n # \"\\r\\n| \\\\2\\r\\n\", newTable)\n # newTable, n = re.subn(\"[\\r\\n]+<(td|TD)([^>]*?)>([^<]*?)<(td|TD)><\\/(tr|TR)>\",\n # \"\\r\\n|\\\\2| \\\\3\\r\\n\", newTable)\n # if n > 0:\n # warning_messages.append(\"WARNING: found <td><td></tr>, but no </td>.\"\n # \" (%d occurences)\\n\" % n)\n # warnings += n\n\n # what is this for?\n newTable, n = re.subn(\"[\\r\\n]+<(td|TD)([^>]+?)>([^\\r\\n]*?)<\\/(td|TD)>\",\n r\"\\r\\n|\\2 | \\3\\r\\n\", newTable)\n if n > 0:\n warning_messages.append(\n u\"WARNING: (sorry, bot code unreadable (1). I don't know why \"\n u\"this warning is given.) (%d occurences)\\n\" % n)\n\n # fail save. sometimes people forget </td>\n # <td> without arguments, with missing </td>\n newTable, n = re.subn(r'(?i)<td>(?P<cell>[^<]*?)[\\r\\n]+',\n r'\\r\\n| \\g<cell>\\r\\n', newTable)\n if n > 0:\n warning_messages.append(u\"NOTE: Found <td> without </td>. This \"\n u\"shouldn't cause problems.\\n\")\n\n # <td> with attributes, with missing </td>\n newTable, n = re.subn(\n r'(?i)[\\r\\n]*<td(?P<attr> [^>]*?)>(?P<cell>[\\w\\W]*?)[\\r\\n]+',\n r'\\r\\n|\\g<attr> | \\g<cell>\\r\\n', newTable)\n if n > 0:\n warning_messages.append(u\"NOTE: Found <td> without </td>. This \"\n u\"shouldn't cause problems.\\n\")\n\n ##################\n # Garbage collecting ;-)\n newTable = re.sub(r'(?i)<td>[\\r\\n]*<\\/tr>', '', newTable)\n # delete closing tags\n newTable = re.sub(r'(?i)[\\r\\n]*<\\/t[rdh]>', '', newTable)\n\n ##################\n # OK, that's only theory but works most times.\n # Most browsers assume that <th> gets a new row and we do the same\n # newTable, n = re.subn(\"([\\r\\n]+\\|\\ [^\\r\\n]*?)([\\r\\n]+\\!)\",\n # \"\\\\1\\r\\n|-----\\\\2\", newTable)\n # warnings = warnings + n\n # adds a |---- below for the case the new <tr> is missing\n # newTable, n = re.subn(\"([\\r\\n]+\\!\\ [^\\r\\n]*?[\\r\\n]+)(\\|\\ )\",\n # \"\\\\1|-----\\r\\n\\\\2\", newTable)\n # warnings = warnings + n\n\n ##################\n # most <th> come with '''title'''. 
Senseless in my eyes cuz\n # <th> should be bold anyways.\n newTable = re.sub(r\"[\\r\\n]+\\!([^'\\n\\r]*)'''([^'\\r\\n]*)'''\",\n r'\\r\\n!\\1\\2', newTable)\n\n ##################\n # kills indention within tables. Be warned, it might seldom bring\n # bad results.\n # True by default. Set 'deIndentTables = False' in user-config.py\n if config.deIndentTables:\n num = 1\n while num != 0:\n newTable, num = re.subn(r'(\\{\\|[\\w\\W]*?)\\n[ \\t]+([\\w\\W]*?\\|\\})',\n r'\\1\\r\\n\\2', newTable)\n\n ##################\n # kills additional spaces after | or ! or {|\n # This line was creating problems, so I commented it out --Daniel\n # newTable = re.sub(\"[\\r\\n]+\\|[\\t ]+?[\\r\\n]+\", \"\\r\\n| \", newTable)\n # kills trailing spaces and tabs\n newTable = re.sub(r'\\r\\n(.*)[\\t\\ ]+[\\r\\n]+',\n r'\\r\\n\\1\\r\\n', newTable)\n # kill extra new-lines\n newTable = re.sub(r'[\\r\\n]{4,}(\\!|\\|)',\n r'\\r\\n\\1', newTable)\n\n ##################\n # shortening if <table> had no arguments/parameters\n newTable = re.sub(r'[\\r\\n]+\\{\\|[\\ ]+\\| ', r'\\r\\n{| ', newTable)\n # shortening if <td> had no articles\n newTable = re.sub(r'[\\r\\n]+\\|[\\ ]+\\| ', '\\r\\n| ', newTable)\n # shortening if <th> had no articles\n newTable = re.sub(r'\\n\\|\\+[\\ ]+\\|', '\\n|+ ', newTable)\n # shortening of <caption> had no articles\n newTable = re.sub(r'[\\r\\n]+\\![\\ ]+\\| ', '\\r\\n! ', newTable)\n\n ##################\n # proper attributes. attribute values need to be in quotation marks.\n num = 1\n while num != 0:\n # group 1 starts with newlines, followed by a table or row tag\n # ( {| or |--- ), then zero or more attribute key - value\n # pairs where the value already has correct quotation marks, and\n # finally the key of the attribute we want to fix here.\n # group 2 is the value of the attribute we want to fix here.\n # We recognize it by searching for a string of non-whitespace\n # characters\n # - [^\\s]+? - which is not embraced by quotation marks - [^\"]\n newTable, num = re.subn(\n r'([\\r\\n]+(?:\\|-|\\{\\|)[^\\r\\n\\|]+) *= *([^\"\\s>]+)',\n r'\\1=\"\\2\"', newTable, 1)\n\n num = 1\n while num != 0:\n # The same for header and cell tags ( ! or | ), but for these tags\n # the attribute part is finished by a | character. We don't want to\n # change cell contents which accidentially contain an equal sign.\n # Group 1 and 2 are anologously to the previous regular expression,\n # group 3 are the remaining attribute key - value pairs.\n newTable, num = re.subn(\n r'([\\r\\n]+(?:!|\\|)[^\\r\\n\\|]+) *= *([^\"\\s>]+)([^\\|\\r\\n]*)\\|',\n r'\\1=\"\\2\"\\3|', newTable, 1)\n\n ##################\n # merge two short <td>s\n num = 1\n while num != 0:\n newTable, num = re.subn(\n r'[\\r\\n]+(\\|[^\\|\\-\\}]{1}[^\\n\\r]{0,35})'\n r'[\\r\\n]+(\\|[^\\|\\-\\}]{1}[^\\r\\n]{0,35})[\\r\\n]+',\n r'\\r\\n\\1 |\\2\\r\\n', newTable)\n ####\n # add a new line if first is * or #\n newTable = re.sub(r'[\\r\\n]+\\| ([*#]{1})',\n r'\\r\\n|\\r\\n\\1', newTable)\n\n ##################\n # strip <center> from <th>\n newTable = re.sub(r'([\\r\\n]+\\![^\\r\\n]+?)<center>([\\w\\W]+?)<\\/center>',\n r'\\1 \\2', newTable)\n # strip align=\"center\" from <th> because the .css does it\n # if there are no other attributes than align, we don't need\n # that | either\n newTable = re.sub(r'([\\r\\n]+\\! 
+)align\\=\\\"center\\\" +\\|',\n r'\\1', newTable)\n # if there are other attributes, simply strip the align=\"center\"\n newTable = re.sub(\n r'([\\r\\n]+\\![^\\r\\n\\|]+?)align\\=\\\"center\\\"([^\\n\\r\\|]+?\\|)',\n r'\\1 \\2', newTable)\n\n ##################\n # kill additional spaces within arguments\n num = 1\n while num != 0:\n newTable, num = re.subn(\n r'[\\r\\n]+(\\||\\!)([^|\\r\\n]*?)[ \\t]{2,}([^\\r\\n]+?)',\n r'\\r\\n\\1\\2 \\3', newTable)\n\n ##################\n # I hate those long lines because they make a wall of letters\n # Off by default, set 'splitLongParagraphs = True' in user-config.py\n if config.splitLongParagraphs:\n num = 1\n while num != 0:\n # TODO: how does this work? docu please.\n # why are only äöüß used, but not other special characters?\n newTable, num = re.subn(\n r'(\\r\\n[A-Z]{1}[^\\n\\r]{200,}?[a-zäöüß]\\.)\\ ([A-ZÄÖÜ]{1}[^\\n\\r]{200,})',\n r'\\1\\r\\n\\2', newTable)\n return newTable, warnings, warning_messages", "title": "" }, { "docid": "097f440392fdce57b956381fdf3bb250", "score": "0.48533386", "text": "def get_summary(self, show=None, show_core=None,\n sep=' ', line='-', just='l',\n table_sep=None, verb=True, return_=False):\n # # Make sure the data is accessible\n # msg = \"The data is not accessible because self.strip(2) was used !\"\n # assert self._dstrip['strip']<2, msg\n\n # -----------------------\n # Build for groups\n col0 = ['group name', 'nb. ref', 'nb. data']\n ar0 = [(k0,\n len(self._dgroup['dict'][k0]['lref']),\n len(self._dgroup['dict'][k0]['ldata']))\n for k0 in self._dgroup['lkey']]\n\n # -----------------------\n # Build for refs\n col1 = ['ref key', 'group', 'size', 'nb. data']\n ar1 = [(k0,\n self._dref['dict'][k0]['group'],\n self._dref['dict'][k0]['size'],\n len(self._dref['dict'][k0]['ldata']))\n for k0, v0 in self._dref['lkey']]\n\n # -----------------------\n # Build for ddata\n col2 = ['data key']\n if show_core is None:\n show_core = self._show_in_summary_core\n if isinstance(show_core, str):\n show_core = [show_core]\n lkcore = ['shape', 'group', 'ref']\n assert all([ss in self._lparams + lkcore for ss in show_core])\n col2 += show_core\n\n if show is None:\n show = self._show_in_summary\n if show == 'all':\n col2 += self._lparams\n else:\n if isinstance(show, str):\n show = [show]\n assert all([ss in self._lparams for ss in show])\n col2 += show\n\n ar2 = []\n for k0 in self._lkdata:\n v0 = self._ddata[k0]\n lu = [k0] + [str(v0[cc]) for cc in col2[1:]]\n ar2.append(lu)\n\n return self._get_summary(\n [ar0, ar1, ar2], [col0, col1, col2],\n sep=sep, line=line, table_sep=table_sep,\n verb=verb, return_=return_)", "title": "" }, { "docid": "75d99bf5edb82f58c056c54f02601e32", "score": "0.48527044", "text": "def dicts2table(dicts):\n return petl.wrap(petl.fromdicts(dicts)) if dicts else petl.empty()", "title": "" }, { "docid": "b2b5876b0ec75531698fc4a812d67d6e", "score": "0.48514977", "text": "def pivot_nested_dict(nested_dict):\n #TODO: Implement this function\n pass", "title": "" }, { "docid": "c2f2bcde568ec88a673028a31cafffa5", "score": "0.48490247", "text": "def nested_tables():\n\n # Create data for this example\n author_data: List[Author] = []\n author_1 = Author(\"Frank Herbert\", \"10/08/1920\", \"Tacoma, Washington\")\n author_1.books.append(Book(\"Dune\", \"1965\"))\n author_1.books.append(Book(\"Dune Messiah\", \"1969\"))\n author_1.books.append(Book(\"Children of Dune\", \"1976\"))\n author_1.books.append(Book(\"God Emperor of Dune\", \"1981\"))\n author_1.books.append(Book(\"Heretics of Dune\", \"1984\"))\n 
author_1.books.append(Book(\"Chapterhouse: Dune\", \"1985\"))\n author_1.relatives.append(Relative(\"Flora Lillian Parkinson\", \"First Wife\"))\n author_1.relatives.append(Relative(\"Beverly Ann Stuart\", \"Second Wife\"))\n author_1.relatives.append(Relative(\"Theresa Diane Shackelford\", \"Third Wife\"))\n author_1.relatives.append(Relative(\"Penelope Herbert\", \"Daughter\"))\n author_1.relatives.append(Relative(\"Brian Patrick Herbert\", \"Son\"))\n author_1.relatives.append(Relative(\"Bruce Calvin Herbert\", \"Son\"))\n\n author_2 = Author(\"Jane Austen\", \"12/16/1775\", \"Steventon, Hampshire, England\")\n author_2.books.append(Book(\"Sense and Sensibility\", \"1811\"))\n author_2.books.append(Book(\"Pride and Prejudice\", \"1813\"))\n author_2.books.append(Book(\"Mansfield Park \", \"1814\"))\n author_2.books.append(Book(\"Emma\", \"1815\"))\n author_2.books.append(Book(\"Northanger Abbey\", \"1818\"))\n author_2.books.append(Book(\"Persuasion\", \"1818\"))\n author_2.books.append(Book(\"Lady Susan\", \"1871\"))\n author_2.relatives.append(Relative(\"James Austen\", \"Brother\"))\n author_2.relatives.append(Relative(\"George Austen\", \"Brother\"))\n author_2.relatives.append(Relative(\"Edward Austen\", \"Brother\"))\n author_2.relatives.append(Relative(\"Henry Thomas Austen\", \"Brother\"))\n author_2.relatives.append(Relative(\"Cassandra Elizabeth Austen\", \"Sister\"))\n author_2.relatives.append(Relative(\"Francis William Austen\", \"Brother\"))\n author_2.relatives.append(Relative(\"Charles John Austen\", \"Brother\"))\n\n author_data.append(author_1)\n author_data.append(author_2)\n\n # Define table which presents Author data fields vertically with no header.\n # This will be nested in the parent table's first column.\n author_columns: List[Column] = list()\n author_columns.append(Column(\"\", width=14))\n author_columns.append(Column(\"\", width=20))\n\n # The text labels in this table will be bold text. They will also be aligned by the table code.\n # When styled text is aligned, a TextStyle.RESET_ALL sequence is inserted between the aligned text\n # and the fill characters. Therefore, the Author table will contain TextStyle.RESET_ALL sequences,\n # which would interfere with the background color applied by the parent table. 
To account for this,\n # we will manually color the Author tables to match the background colors of the parent AlternatingTable's\n # rows and set style_data_text to False in the Author column.\n odd_author_tbl = SimpleTable(author_columns, data_bg=EightBitBg.GRAY_0)\n even_author_tbl = SimpleTable(author_columns, data_bg=EightBitBg.GRAY_15)\n\n # Define AlternatingTable for books checked out by people in the first table.\n # This will be nested in the parent table's second column.\n books_columns: List[Column] = list()\n books_columns.append(Column(ansi.style(\"Title\", bold=True), width=25))\n books_columns.append(\n Column(\n ansi.style(\"Published\", bold=True),\n width=9,\n header_horiz_align=HorizontalAlignment.RIGHT,\n data_horiz_align=HorizontalAlignment.RIGHT,\n )\n )\n\n books_tbl = AlternatingTable(\n books_columns,\n column_borders=False,\n border_fg=EightBitFg.GRAY_15,\n header_bg=EightBitBg.GRAY_0,\n odd_bg=EightBitBg.GRAY_0,\n even_bg=EightBitBg.GRAY_15,\n )\n\n # Define BorderedTable for relatives of the author\n # This will be nested in the parent table's third column.\n relative_columns: List[Column] = list()\n relative_columns.append(Column(ansi.style(\"Name\", bold=True), width=25))\n relative_columns.append(Column(ansi.style(\"Relationship\", bold=True), width=12))\n\n # Since the header labels are bold, we have the same issue as the Author table. Therefore, we will manually\n # color Relatives tables to match the background colors of the parent AlternatingTable's rows and set style_data_text\n # to False in the Relatives column.\n odd_relatives_tbl = BorderedTable(\n relative_columns,\n border_fg=EightBitFg.GRAY_15,\n border_bg=EightBitBg.GRAY_0,\n header_bg=EightBitBg.GRAY_0,\n data_bg=EightBitBg.GRAY_0,\n )\n\n even_relatives_tbl = BorderedTable(\n relative_columns,\n border_fg=EightBitFg.GRAY_0,\n border_bg=EightBitBg.GRAY_15,\n header_bg=EightBitBg.GRAY_15,\n data_bg=EightBitBg.GRAY_15,\n )\n\n # Define parent AlternatingTable which contains Author and Book tables\n parent_tbl_columns: List[Column] = list()\n\n # All of the nested tables already have background colors. 
Set style_data_text\n # to False so the parent AlternatingTable does not apply background color to them.\n parent_tbl_columns.append(\n Column(ansi.style(\"Author\", bold=True), width=odd_author_tbl.total_width(), style_data_text=False)\n )\n parent_tbl_columns.append(Column(ansi.style(\"Books\", bold=True), width=books_tbl.total_width(), style_data_text=False))\n parent_tbl_columns.append(\n Column(ansi.style(\"Relatives\", bold=True), width=odd_relatives_tbl.total_width(), style_data_text=False)\n )\n\n parent_tbl = AlternatingTable(\n parent_tbl_columns,\n column_borders=False,\n border_fg=EightBitFg.GRAY_93,\n header_bg=EightBitBg.GRAY_0,\n odd_bg=EightBitBg.GRAY_0,\n even_bg=EightBitBg.GRAY_15,\n )\n\n # Construct the tables\n parent_table_data: List[List[Any]] = []\n for row, author in enumerate(author_data, start=1):\n # First build the author table and color it based on row number\n author_tbl = even_author_tbl if row % 2 == 0 else odd_author_tbl\n\n # This table has three rows and two columns\n table_data = [\n [ansi.style(\"Name\", bold=True), author.name],\n [ansi.style(\"Birthday\", bold=True), author.birthday],\n [ansi.style(\"Place of Birth\", bold=True), author.place_of_birth],\n ]\n\n # Build the author table string\n author_tbl_str = author_tbl.generate_table(table_data, include_header=False, row_spacing=0)\n\n # Now build this author's book table\n table_data = [[book.title, book.year_published] for book in author.books]\n book_tbl_str = books_tbl.generate_table(table_data)\n\n # Lastly build the relatives table and color it based on row number\n relatives_tbl = even_relatives_tbl if row % 2 == 0 else odd_relatives_tbl\n table_data = [[relative.name, relative.relationship] for relative in author.relatives]\n relatives_tbl_str = relatives_tbl.generate_table(table_data)\n\n # Add these tables to the parent table's data\n parent_table_data.append(['\\n' + author_tbl_str, '\\n' + book_tbl_str + '\\n\\n', '\\n' + relatives_tbl_str + '\\n\\n'])\n\n # Build the parent table\n top_table_str = parent_tbl.generate_table(parent_table_data)\n ansi_print(top_table_str)", "title": "" }, { "docid": "141225ef822bc8b95a06c03ee4c0360c", "score": "0.48480618", "text": "def handle_dictionary(self, d):\n i = 0\n self.start_object()\n for key, value in d.iteritems():\n self.currentLoc += key+'.'\n #self.stream.write(unicode(self.currentLoc))\n i += 1\n self.handle_simple(key)\n self.stream.write(u': ')\n self.handle_object(value)\n if i != len(d):\n self.stream.write(u', ')\n self.currentLoc = self.currentLoc[0:(len(self.currentLoc)-len(key)-1)]\n self.end_object()", "title": "" }, { "docid": "7eb4fa6ef3e78fea481a1703d2f12d0e", "score": "0.48447067", "text": "def dict_as_html(dict_list,debug=False):\n\n html_out = \"\"\n # assumption all keys are the same as first line of sata set\n keys = list(dict_list[0].keys())\n keys_html = wrap_multiple(keys)\n html_out += wrap(keys_html,tag=\"tr\",lf=False)\n\n for row in dict_list: \n values = []\n for key in keys:\n if row.get(key,None) is None:\n continue\n value = str(row[key][\"value\"])\n link = row[key][\"link\"]\n if link is not None:\n value = get_link(link,value)\n values.append(value)\n values_html = wrap_multiple(values)\n html_out += wrap(values_html,tag=\"tr\",lf=False)\n\n html_out = wrap(html_out,tag=\"table\",lf=False)\n html_out = wrap(html_out,tag=\"html\",lf=False)\n\n return html_out", "title": "" }, { "docid": "eb18a95f27c53986d506649e7b65226a", "score": "0.48424333", "text": "def pretty_table_inst(self)-> None:\n\n return 
[self.cwid,self.name, self.department], self.courses", "title": "" }, { "docid": "104a7f6fd1589af8b804a5e11693409d", "score": "0.48294201", "text": "def print_meta(self, regex = None, show_other_non_matching_fields= True):\n #TODO check for blank regex\n if regex is None:\n self.__print_meta_all()\n else:\n matching={}\n non_matching={}\n for meta in self.metas:\n for key in self.__get_keys_for_print(meta):\n value = meta[key]\n if re.search(regex, value) is not None:\n matching[key] = value;\n else:\n non_matching[key] = value\n\n if len(matching) > 0:\n print meta[\"path\"]\n for key in matching.keys():\n print \" \" + key + \" : \" + matching[key]\n if show_other_non_matching_fields == True:\n for key in non_matching.keys():\n print \" \" + key + \": \" + non_matching[key]", "title": "" }, { "docid": "64c10f79e13c33d4784fba7bb861d788", "score": "0.4829083", "text": "def print_individuals_data(individual_dict, run_validations):\n table = prettytable.PrettyTable()\n table.field_names = ('ID', 'Name', 'Gender', 'Birthday', 'Age', 'Alive', 'Death', 'Child', 'Spouse')\n for indi_id, individual in sorted(individual_dict.items()):\n table.add_row(\n [individual.id, individual.name, individual.gender, individual.birthday, individual.age, individual.alive,\n individual.death, individual.child, individual.spouse])\n print(\"Individuals\")\n print(table.get_string())\n if run_validations:\n validate_individuals(individual_dict)", "title": "" }, { "docid": "bfa547dce2f1924aeb9efa5a2229841b", "score": "0.48278472", "text": "def format_return(data, vertical=False):\n if vertical:\n table_data = []\n table_formatted = []\n\n if type(data) is list:\n if len(data) > 0:\n for return_dict in data:\n items = []\n for key, value in sorted(return_dict.items()):\n items.append([str(key), str(value)])\n table_data.append(items)\n else:\n return 'An empty result set was returned'\n else:\n items = []\n for key, value in sorted(data.items()):\n items.append([str(key), str(value)])\n\n table_data.append(items)\n\n for row in table_data:\n table = SingleTable(row)\n table.inner_heading_row_border = False\n title = None\n for item in row:\n # Prefer display name if it exists\n if item[0] == 'display_name':\n title = item[1]\n break\n if item[0] == 'name':\n title = item[1]\n break\n if title is not None:\n table.title = title\n\n table_formatted.append(table.table)\n\n return table_formatted\n else:\n table_data = [[]]\n\n if type(data) is list:\n if len(data) > 0:\n for key in sorted(data[0]):\n table_data[0].append(key)\n\n for return_dict in data:\n row = []\n\n for key in sorted(return_dict):\n row.append(str(return_dict[key]))\n\n table_data.append(row)\n else:\n return 'An empty result set was returned'\n else:\n row = []\n\n for key, value in sorted(data.items()):\n table_data[0].append(key)\n row.append(str(value))\n\n table_data.append(row)\n\n return [AsciiTable(\n table_data, 'Count: ' + str(len(table_data) - 1)\n ).table]", "title": "" }, { "docid": "6d4012a59a744a24558edf48328bafe9", "score": "0.48219907", "text": "def customizations(record):\n record = type(record)\n record = page_double_hyphen(record)\n record = convert_to_unicode(record)\n ## delete the following keys.\n # for val in unwanted:\n # record.pop(val, None)\n for val in list(record.keys()):\n dic = wanted[record['ENTRYTYPE']] if record['ENTRYTYPE'] in wanted else wanted['101']\n if not (val in dic) and not (val in ['ID', 'ENTRYTYPE']): record.pop(val, None)\n # for v in [k for k in record.keys() if not k in wanted]:\n # record.pop(v, 
None)\n return record", "title": "" }, { "docid": "5058926a07be862bb2d0d13e5093d0dc", "score": "0.48201543", "text": "def printDict(obj, nested_level=0, output=sys.stdout):\n spacing = ' '\n spacing2 = ' '\n if type(obj) == dict:\n print( '%s' % ((nested_level) * spacing), file=output)\n for k, v in obj.items():\n if hasattr(v, '__iter__'):\n print('%s%s:' % ( (nested_level+1) * spacing, k), file=output, end='')\n IxNetRestMain.printDict(v, nested_level+1, output)\n else:\n print('%s%s: %s' % ( (nested_level + 1) * spacing, k, v), file=output)\n\n print('%s' % (nested_level * spacing), file=output)\n elif type(obj) == list:\n print('%s[' % ((nested_level) * spacing), file=output)\n for v in obj:\n if hasattr(v, '__iter__'):\n IxNetRestMain.printDict(v, nested_level + 1, file=output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % ((nested_level) * spacing), output)\n else:\n print('%s%s' % ((nested_level * spacing2), obj), file=output)", "title": "" }, { "docid": "73aebf7bf14e6f9aad7110095c62c542", "score": "0.48171017", "text": "def _extract_output(outputs):\n ret = ''\n for dict in outputs:\n for key, val in dict.items():\n if str(key).startswith('text'):\n for content in val:\n ret += str(content)\n elif key == 'data':\n for dt_key, dt_val in val.items():\n if str(dt_key).startswith('text'):\n for dt_content in dt_val:\n if not str(dt_content).startswith('<matplotlib') and not str(dt_content).startswith('<graphviz'):\n ret += str(dt_content)\n return ret", "title": "" }, { "docid": "cffc89c6dd0d7ef8c103c41de0536780", "score": "0.48139814", "text": "def table_printer(the_dict, header_info=None):\n # header_info [\"attribute\", \"value\"]\n if (header_info is not None) or (header_info == \"\"):\n result = '<tr><th>{0}</th><th>{1}</th></tr>'\\\n .format(header_info[0], header_info[1])\n else:\n result = ''\n if isinstance(the_dict, dict):\n for name, value in the_dict.iteritems():\n result = result + \\\n '<tr><td>{0}</td><td>{1}</td></tr>'\\\n .format(name.title(), str(table_printer(value)))\n result = '<table>' + result + '</table>'\n return result\n elif isinstance(the_dict, list):\n for element in the_dict:\n try:\n for name, value in element.iteritems():\n result = result + \\\n '<tr><td>{0}</td><td>{1}</td></tr>'\\\n .format(name.title(), str(table_printer(value)))\n except:\n #If the element is not dict\n return str(element)\n result = '<table>' + result + '</table>'\n return result\n else:\n return the_dict", "title": "" }, { "docid": "effcb1a6cd0d9ef502158c318a7700ca", "score": "0.48100194", "text": "def displayTable(self, table, inner, subt):\n if subt == 0:\n \"\"\"About the category\"\"\"\n dsply = self.session.query(table) \\\n .filter(table.cat == inner)\n else:\n \"\"\"About the substituted products\"\"\"\n dsply = self.session.query(table) \\\n .filter(table.is_subtitute == 1)\n l_id = []\n for pr in dsply:\n l_id.append(pr.id)\n print(\"##########################################\")\n print(f\"{pr.id}, {pr.name}, {pr.bar_code}\")\n print(\"##########################################\")\n if subt != 0:\n input(\"Taper 'Entrée' pour sortir:\")\n else:\n choice = input(\"Tapez 00 pou revenir aux catégories ou Entrée pour continuer:\")\n if choice == \"00\":\n qut1 = \"q\" # Value for back in the previous.\n return qut1\n self.session = Session() # reinitialize the session.\n return l_id # Return a list of id products.", "title": "" }, { "docid": "61b3191e762ff1a2dbbf5eaa7c4ea02b", "score": "0.48088205", "text": "def dict_cmd(dic):\n def 
_input(name):\n return input_default(\"[%s]: \" % colored(name,'red', attrs=['bold']), \"Uknow\")\n\n m = dic\n with LogControl.jump(LogControl.SIZE[0] - 1, 0):\n print(\"=\" * LogControl.SIZE[1])\n for k in dic:\n v = _input(k)\n if v == \"*None*\":\n continue\n m[k] = v\n return m", "title": "" }, { "docid": "7d22926b60d6557d1e8f6136a9fc39cd", "score": "0.48068053", "text": "def build_table_of_content_response(self, table_of_content_data: Dict[str, List[Dict[str, str]]]) -> None:\n api_options: Dict[str, List[str]] = {}\n for key, value in table_of_content_data.items():\n api_options[key] = [next(iter(current.keys())) for current in value]\n\n self.game_response[\"API-Options\"] = api_options", "title": "" }, { "docid": "fa334330b60021ec8f87da315f419a02", "score": "0.48036337", "text": "def call36_dict(node):\n p = self.prec\n self.prec = 100\n\n self.indent_more(INDENT_PER_LEVEL)\n sep = INDENT_PER_LEVEL[:-1]\n line_number = self.line_number\n\n assert node[0].kind.startswith('kvlist')\n # Python 3.5+ style key/value list in dict\n kv_node = node[0]\n l = list(kv_node)\n i = 0\n # Respect line breaks from source\n while i < len(l):\n self.write(sep)\n name = self.traverse(l[i], indent='')\n # Strip off beginning and trailing quotes in name\n name = name[1:-1]\n if i > 0:\n line_number = self.indent_if_source_nl(line_number,\n self.indent + INDENT_PER_LEVEL[:-1])\n line_number = self.line_number\n self.write(name, '=')\n value = self.traverse(l[i+1], indent=self.indent+(len(name)+2)*' ')\n self.write(value)\n sep = \",\"\n if line_number != self.line_number:\n sep += \"\\n\" + self.indent + INDENT_PER_LEVEL[:-1]\n line_number = self.line_number\n i += 2\n pass\n self.prec = p\n self.indent_less(INDENT_PER_LEVEL)\n return", "title": "" }, { "docid": "a431410804a4e9de18b275c641492a20", "score": "0.48028067", "text": "def table_output(self, stat_table_f, gene_no, go_merge_dict, max_raw_distance=0, max_lee_distance=0, max_jasmine_distance=0):\n\t\tgene_id = self.gene_no2gene_id[gene_no]\n\t\trow = ['gene_id', 'function group', 'is_correct', 'is_correct_l1', \\\n\t\t\t'is_correct_lca', 'context']\n\t\tstat_table_f.writerow(row)\n\t\t\n\t\tfor go_no in go_merge_dict:\n\t\t\t#row is for output\n\t\t\trow = []\n\t\t\trow.append(gene_id)\n\t\t\tunit = go_merge_dict[go_no]\n\t\t\t#the go_no's merged + itself\n\t\t\tgo_no_list = unit.cluster_array\n\t\t\tgo_no_list.append(go_no)\n\t\t\t\n\t\t\tgo_name_list = []\n\t\t\tfor term_no in go_no_list:\n\t\t\t\tgo_name_list.append('%s(%s)'%(self.go_no2go_name[term_no], self.go_no2go_id[term_no]) )\n\t\t\t\n\t\t\t#row.append('%s'%('/'.join(map(repr,go_no_list) )))\n\t\t\trow.append('%s'%('/'.join(go_name_list)))\n\t\t\trow.append(unit.is_correct)\n\t\t\trow.append(unit.is_correct_l1)\n\t\t\trow.append(unit.is_correct_lca)\n\t\t\tcontext_gene_id_list = []\n\t\t\tfor gene_no in unit.context_dict:\n\t\t\t\tcontext_gene_id_list.append(self.gene_no2gene_id[gene_no])\n\t\t\t#context_gene_no_list = list(unit.context_dict)\n\t\t\t#context_gene_no_list.sort()\n\t\t\t#row.append('%s'%('/'.join( map(repr, context_gene_no_list) ) ))\n\t\t\trow.append('%s'%('/'.join(context_gene_id_list)))\n\t\t\t\"\"\"\n\t\t\t#append the three kinds of maximum distances\n\t\t\trow.append(max_raw_distance)\n\t\t\trow.append(max_lee_distance)\n\t\t\trow.append(max_jasmine_distance)\n\t\t\t\"\"\"\n\n\t\t\tstat_table_f.writerow(row)", "title": "" }, { "docid": "00881babee8ef3adb707bd691ec1eacf", "score": "0.4793923", "text": "def make_table(self, table, subs):\r\n 
clear_widgets(self.results)\r\n\r\n if table == \"hdd\":\r\n headers = [\r\n \"Brand\",\r\n \"Part Number\",\r\n \"Type\",\r\n \"Deminsions\",\r\n \"Height\",\r\n \"Connector\",\r\n \"HDD (GB)\",\r\n \"SSD (GB)\",\r\n \"Speed\",\r\n \"Subbed?\",\r\n ]\r\n elif table == \"mem\":\r\n headers = [\r\n \"Brand\",\r\n \"Part Number\",\r\n \"Connector\",\r\n \"Capacity\",\r\n \"Speed\",\r\n \"Subbed?\",\r\n ]\r\n elif table == \"cpu\":\r\n headers = [\"Brand\", \"Part Number\", \"OEM\", \"Description\", \"Subbed?\"]\r\n\r\n self.widths = {}\r\n for col_num in enumerate(subs[0]):\r\n columns = []\r\n for sub in subs:\r\n columns.append(sub[col_num[0]])\r\n self.widths[col_num[0]] = max(len(element) for element in columns)\r\n self.label_width = max(self.widths[col_num[0]], len(headers[col_num[0]]))\r\n if self.widths[col_num[0]] < self.label_width:\r\n self.widths[col_num[0]] = self.label_width + 2\r\n\r\n for col, header in enumerate(headers):\r\n tk.Label(\r\n self.results, text=header, width=self.widths[col], justify=\"center\"\r\n ).grid(column=col, row=0)\r\n\r\n for row, sub in enumerate(subs):\r\n if row % 2 == 0:\r\n bg_color = \"snow3\"\r\n else:\r\n bg_color = \"snow2\"\r\n\r\n if sub[-1] == \"TRUE\":\r\n fg_color = \"green4\"\r\n else:\r\n fg_color = \"Red2\"\r\n\r\n if sub[0] == \"CVO\":\r\n fg_color = \"steelblue\"\r\n\r\n for col, info in enumerate(sub):\r\n info_var = tk.StringVar()\r\n info_var.set(info)\r\n tk.Entry(\r\n self.results,\r\n width=self.widths[col] + 2,\r\n textvariable=info_var,\r\n readonlybackground=bg_color,\r\n foreground=fg_color,\r\n relief=\"flat\",\r\n justify=\"center\",\r\n state=\"readonly\",\r\n ).grid(column=col, row=row + 1, sticky=\"EW\")", "title": "" }, { "docid": "531ede4010156e8fd7fd61b8b1ab9877", "score": "0.47836605", "text": "def test_extract_nested_layout(self):\n pres = {'newrow_h1_1':set([NEWROW]),\n 'newcol_h1_1':set([NEWCOL]),\n 'newcol_h1_2':set([NEWCOL]),\n 'innerrow_h1_3':set([NEWINNERROW]),\n 'innercol_h1_3':set([NEWINNERCOL]),\n 'innercol_h1_4':set([NEWINNERCOL]),\n 'newcol_h1_5':set([NEWCOL]),\n 'newrow_h1_6':set([NEWROW]),\n 'newcol_h1_6':set([NEWCOL]),\n 'newcol_h1_7':set([NEWCOL]),\n 'h1_1':set([]),\n 'h1_2':set([]),\n 'h1_3':set([]),\n 'h1_4':set([]),\n 'h1_5':set([]),\n 'h1_6':set([]),\n 'h1_7':set([]),\n }\n html = ('<div class=\"row columns3\">'\n '<div class=\"column firstcolumn\">'\n '<div><h1>1</h1></div>'\n '</div>'\n '<div class=\"column\">'\n '<div>'\n '<h1>2</h1>'\n '<div class=\"row columns2\">'\n '<div class=\"column firstcolumn\">'\n '<div><h1>3</h1></div>'\n '</div>'\n '<div class=\"column lastcolumn\">'\n '<div><h1>4</h1></div>'\n '</div>'\n '</div>'\n '</div>'\n '</div>'\n '<div class=\"column lastcolumn\">'\n '<div><h1>5</h1></div>'\n '</div>'\n '</div>'\n '<div class=\"row columns2\">'\n '<div class=\"column firstcolumn\">'\n '<div><h1>6</h1></div>'\n '</div>'\n '<div class=\"column lastcolumn\">'\n '<div><h1>7</h1></div>'\n '</div>'\n '</div>'\n )\n pres2, html2 = extract_presentation(html)\n self.assertEqual(pres, pres2)", "title": "" }, { "docid": "9adb682abb81ed6708e6092ba2455507", "score": "0.47825462", "text": "def parse_turnout_table(response, data_type=\"turnout\"):\n meth_name = \"parse_turnout_table\"\n\n root = response.selector\n url = response.url\n md5 = hashlib.md5(response.body).hexdigest()\n\n if data_type == \"turnout_uik\":\n result = get_name_uik(root)\n else:\n result = get_name(root)\n\n logging.debug(\"%s: result: %r\", meth_name, result)\n\n row_headers = [\n 
join(row.xpath(\".//text()\").extract())\n for row in root.xpath(XPATHS[data_type][\"row_header\"])\n ]\n logging.debug(\"%s: row_headers: %r\", meth_name, row_headers)\n\n important_rows = range(2, len(row_headers))\n important_cols = [2, 3, 4, 5]\n\n rows = []\n for row_num in important_rows:\n xp = XPATHS[data_type][\"cell\"]\n cols = [\n myfloat(\n join(\n root.xpath(xp % (row_num + 1, col_num + 1)).extract()\n ).rstrip(\"%\")\n )\n for col_num in important_cols\n ]\n rows.append(cols)\n\n result.update(\n {\n \"md5\": md5, \"url\": url, \"data_type\": data_type,\n \"timestamp\": now().isoformat(), \"row_headers\": row_headers[2:],\n \"column_headers\": [\"10:00\", \"12:00\", \"15:00\", \"18:00\"],\n \"data\": rows\n }\n )\n return result", "title": "" }, { "docid": "c19c2049437acfcfbef63324cc94a6a8", "score": "0.47791365", "text": "def do_showDictionary(fields, gDict, args):\n# dumpclean(gDict)\n if \"listOfSites\" in gDict:\n sites = gDict[\"listOfSites\"]\n print \"List of sites: \", sites\n for site in sites:\n showSite(site, gDict)\n \n if \"listOfSensors\" in gDict:\n sensors = gDict[\"listOfSensors\"]\n print \"List of sensors: \", sensors\n for sensor in sensors:\n showSensor(sensor, gDict)\n pass", "title": "" }, { "docid": "6c42e3fdf58c4023a65b89afb8844e22", "score": "0.47731677", "text": "def CYKParserTable(gramm, word):\n pass", "title": "" }, { "docid": "2cead5b9724bf9dbd41be353c1d07ca3", "score": "0.47642455", "text": "def process_each_table(input_files, KO_dict, tax_dict, na_taxids):\n\n KO_collapsed_tabs = []\n tax_collapsed_tabs = []\n\n for i in range(len(input_files)):\n\n tab = pd.read_csv(input_files[i], sep=\"\\t\", dtype = {'taxid': str})\n\n # getting sample column names\n sample_cols = tab.columns[11:].tolist()\n\n # building dictionaries that will hold all KO terms and taxa from all input files\n KO_dict = add_to_KO_dict(tab, KO_dict)\n tax_dict, na_taxids = add_to_tax_dict(tab, tax_dict, na_taxids)\n\n # making collapsed KO and tax tables\n KO_tab = tab[['KO_ID'] + sample_cols].copy()\n # collapsing based on KO terms\n KO_tab = KO_tab.groupby(by = ['KO_ID'], dropna = False).sum()\n\n tax_tab = tab[['taxid'] + sample_cols].copy()\n # setting any taxids that are all NA at these standard ranks to \"NA\" (some have an assigned taxid, but don't have a D/P/C/O/F/G/S taxid, like 1 and 131567)\n tax_tab.replace(na_taxids, NaN, inplace = True)\n # collapsing based on tax\n tax_tab = tax_tab.groupby(by = ['taxid'], dropna = False).sum()\n\n # appending to lists of tables\n KO_collapsed_tabs.append(KO_tab)\n tax_collapsed_tabs.append(tax_tab)\n\n return(KO_collapsed_tabs, tax_collapsed_tabs, KO_dict, tax_dict)", "title": "" }, { "docid": "f0e7573fb93e129506555ed8f731d1d6", "score": "0.4764081", "text": "def _parse_tables(self, item):\n content_table = item.find(\"table\")\n\n timeinfo = []\n\n # Fetch datetime for each column\n hour_items = content_table.find(\"thead\")\n hour_items = hour_items.find(\"tr\", {\"class\": \"meteogram-times\"}).find_all(\"td\")\n for hour_item in hour_items:\n timeinfo.append(self._parse_datetime(hour_item.find(\"span\")[\"title\"]))\n\n process_fields = (\n (\"icon\", \"meteogram-weather-symbols\", self._parse_symbol),\n (\"temperature\", \"meteogram-temperatures\", self._parse_temperature),\n (\"wind\", \"meteogram-wind-symbols\", self._parse_wind),\n (\"feelslike\", \"meteogram-apparent-temperatures\", self._parse_temperature),\n (\"ppcp\", \"meteogram-probabilities-of-precipitation\", self._parse_ppcp),\n (\"rainfall\", 
\"meteogram-hourly-precipitation-values\", self._parse_rainfall),\n )\n\n # Fetch raw information for each row\n data_raw = {}\n for (fieldname, selector, callback) in process_fields:\n data_raw[fieldname] = self._process_row(content_table, selector, callback)\n\n def combine_data():\n \"\"\" Combine raw data rows with timestamps \"\"\"\n data = []\n for i, timestamp in enumerate(timeinfo):\n single_data = {\n \"timestamp\": timestamp,\n }\n for field, item_data in data_raw.items():\n single_data[field] = item_data[i]\n data.append(single_data)\n return data\n\n return {\"meta\": {\"updated_at\": self.deterministic_timestamp}, \"forecast\": combine_data()}", "title": "" }, { "docid": "7216a433baca2d16b4ea41291a3a2784", "score": "0.47629026", "text": "def remix_obs(adata, obs_config):\n\n for field_name, field_value in obs_config.items():\n\n if isinstance(field_value, dict):\n # If the value is a dict, that means we are supposed to map from an existing column to the new one\n source_column, column_map = next(iter(field_value.items()))\n\n for key in column_map:\n if key not in adata.obs[source_column].unique():\n print(f'WARNING: key {key} not in adata.obs[\"{source_column}\"]')\n\n for value in adata.obs[source_column].unique():\n if value not in column_map:\n print(f'WARNING: Value {value} in adata.obs[\"{source_column}\"] not in translation dict')\n if is_ontology_field(field_name):\n ontology_term_map, ontology_label_map = {}, {}\n print(\"\\n\", field_name, \"\\n\")\n for original_value, maybe_curie in column_map.items():\n curie, label = get_curie_and_label(maybe_curie)\n ontology_term_map[original_value] = curie\n ontology_label_map[original_value] = label\n print(\";\".join([original_value, curie, label]))\n\n ontology_column = adata.obs[source_column].replace(ontology_term_map, inplace=False)\n label_column = adata.obs[source_column].replace(ontology_label_map, inplace=False)\n\n safe_add_field(adata.obs, field_name, ontology_column)\n safe_add_field(adata.obs, get_label_field_name(field_name), label_column)\n else:\n if is_ontology_field(field_name):\n # If it's an ontology field, look it up\n label_field_name = get_label_field_name(field_name)\n ontology_term, ontology_label = get_curie_and_label(field_value)\n safe_add_field(adata.obs, field_name, ontology_term)\n safe_add_field(adata.obs, label_field_name, ontology_label)\n else:\n safe_add_field(adata.obs, field_name, field_value)", "title": "" }, { "docid": "aff9b3cae65a54992295c153d5bafcfb", "score": "0.47583693", "text": "def SpecFilters():\n return {\n 'Zbb':{'SEL':999,'LEL':1176,'NoSC':1476, 'NoCS':1019,\n 'ALG':(16,64), '0.02':(0.32,1.28), '0.035':(0.56,2.24),\n '0.05':(0.8,3.2), '0.10':(1.6,6.4)},\n 'Jbb':{'SEL':1180,'LEL':1416,'NoSC':1574, 'NoCS':1019,\n 'ALG':(16,64), '0.02':(0.32,1.28), '0.035':(0.56,2.24),\n '0.05':(0.8,3.2), '0.10':(1.6,6.4)},\n 'Hbb':{'SEL':1473,'LEL':1803,'NoSC':1651, 'NoCS':1019,\n 'ALG':(16,64), '0.02':(0.32,1.28), '0.035':(0.56,2.24),\n '0.05':(0.8,3.2), '0.10':(1.6,6.4)},\n 'Kbb':{'SEL':1965,'LEL':2381,'NoSC':1665, 'NoCS':1019,\n 'ALG':(16,64), '0.02':(0.32,1.28), '0.035':(0.56,2.24),\n '0.05':(0.8,3.2), '0.10':(1.6,6.4)},\n 'Kcb':{'SEL':1965,'LEL':2381,'NoSC':1665, 'NoCS':1019,\n 'ALG':(16,64), '0.02':None, '0.035':None,\n '0.05':None, '0.10':(1.6,6.4)},\n 'Zn4':{'SEL':1103,'LEL':1158,'NoSC':459, 'NoCS':2038,\n 'ALG':(32,64), '0.02':(0.64,1.28), '0.035':(1.12,2.24),\n '0.05':(1.6,3.2), '0.10':(3.2,6.4)},\n 'Jn1':{'SEL':1174,'LEL':1232,'NoSC':388, 'NoCS':2038,\n 'ALG':(32,64), 
'0.02':(0.64,1.28), '0.035':(1.12,2.24),\n '0.05':(1.6,3.2), '0.10':(3.2,6.4)},\n 'Jn2':{'SEL':1228,'LEL':1289,'NoSC':408, 'NoCS':2678,\n 'ALG':(42,64), '0.02':(0.84,1.28), '0.035':(1.47,2.24),\n '0.05':(2.1,3.2), '0.10':(4.2,6.4)},\n 'Jn3':{'SEL':1275,'LEL':1339,'NoSC':428, 'NoCS':3063,\n 'ALG':(48,64), '0.02':(0.96,1.28), '0.035':(1.68,2.24),\n '0.05':(2.4,3.2), '0.10':(4.8,6.4)},\n 'Jn4':{'SEL':1323,'LEL':1389,'NoSC':441, 'NoCS':2678,\n 'ALG':(42,64), '0.02':(0.84,1.28), '0.035':(1.47,2.24),\n '0.05':(2.1,3.2), '0.10':(4.2,6.4)},\n 'Hn1':{'SEL':1466,'LEL':1541,'NoSC':376, 'NoCS':2292,\n 'ALG':(36,64), '0.02':(0.72,1.28), '0.035':(1.26,2.24),\n '0.05':(1.8,3.2), '0.10':(3.6,6.4)},\n 'Hn2':{'SEL':1532,'LEL':1610,'NoSC':391, 'NoCS':2868,\n 'ALG':(45,64), '0.02':(0.90,1.28), '0.035':(1.58,2.24),\n '0.05':(2.25,3.2), '0.10':(4.5,6.4)},\n 'Hn3':{'SEL':1594,'LEL':1676,'NoSC':411, 'NoCS':3063,\n 'ALG':(48,64), '0.02':(0.96,1.28), '0.035':(1.68,2.24),\n '0.05':(2.4,3.2), '0.10':(4.8,6.4)},\n 'Hn4':{'SEL':1652,'LEL':1737,'NoSC':426, 'NoCS':2671,\n 'ALG':(42,64), '0.02':(0.84,1.28), '0.035':(1.47,2.24),\n '0.05':(2.1,3.2), '0.10':(4.2,6.4)},\n 'Hn5':{'SEL':1721,'LEL':1808,'NoSC':436, 'NoCS':2038,\n 'ALG':(32,64), '0.02':(0.64,1.28), '0.035':(1.12,2.24),\n '0.05':(1.6,3.2), '0.10':(3.2,6.4)},\n 'Kn1':{'SEL':1955,'LEL':2055,'NoSC':401, 'NoCS':2292,\n 'ALG':(36,64), '0.02':(0.72,1.28), '0.035':(1.26,2.24),\n '0.05':(1.8,3.2), '0.10':(3.6,6.4)},\n 'Kn2':{'SEL':2036,'LEL':2141,'NoSC':421, 'NoCS':2868,\n 'ALG':(45,64), '0.02':(0.90,1.28), '0.035':(1.58,2.24),\n '0.05':(2.25,3.2), '0.10':(4.5,6.4)},\n 'Kn3':{'SEL':2121,'LEL':2229,'NoSC':433, 'NoCS':3063,\n 'ALG':(48,64), '0.02':(0.96,1.28), '0.035':(1.68,2.24),\n '0.05':(2.4,3.2), '0.10':(4.8,6.4)},\n 'Kc3':{'SEL':2121,'LEL':2229,'NoSC':443, 'NoCS':3063,\n 'ALG':(48,64), '0.02':None, '0.035':None,\n '0.05':None, '0.10':(4.8,6.4)},\n 'Kn4':{'SEL':2208,'LEL':2320,'NoSC':449, 'NoCS':2671,\n 'ALG':(42,64), '0.02':(0.84,1.28), '0.035':(1.47,2.24),\n '0.05':(2.1,3.2), '0.10':(4.2,6.4)},\n 'Kc4':{'SEL':2208,'LEL':2320,'NoSC':449, 'NoCS':2671,\n 'ALG':(42,64), '0.02':None, '0.035':None,\n '0.05':None, '0.10':(4.2,6.4)},\n 'Kn5':{'SEL':2292,'LEL':2408,'NoSC':465, 'NoCS':2038,\n 'ALG':(32,64), '0.02':(0.64,1.28), '0.035':(1.12,2.24),\n '0.05':(1.6,3.2), '0.10':(3.2,6.4)},\n 'Kc5':{'SEL':2292,'LEL':2408,'NoSC':465, 'NoCS':2038,\n 'ALG':(32,64), '0.02':None, '0.035':None,\n '0.05':None, '0.10':(3.2,6.4)},\n }", "title": "" }, { "docid": "88986577890fa41d686eee3e622b5641", "score": "0.47484678", "text": "def _rewrite_dict(self, obj_as_dict: dict, original: Any):\n if isinstance(original, obograph.LogicalDefinitionAxiom):\n restrictions = original.restrictions\n obj_as_dict[\"genusIds\"] = \"|\".join(original.genusIds)\n obj_as_dict[\"restrictionsPropertyIds\"] = \"|\".join([r.propertyId for r in restrictions])\n obj_as_dict[\"restrictionsFillerIds\"] = \"|\".join([r.fillerId for r in restrictions])\n obj_as_dict[\"restrictions\"] = \"|\".join(\n [f\"{r.propertyId}={r.fillerId}\" for r in original.restrictions]\n )\n del obj_as_dict[\"meta\"]\n if isinstance(original, summary_stats.UngroupedStatistics):\n for slot in [\n \"edge_count_by_predicate\",\n \"synonym_statement_count_by_predicate\",\n \"mapping_statement_count_by_predicate\",\n \"mapping_statement_count_by_object_source\",\n \"mapping_statement_count_subject_by_object_source\",\n \"class_count_by_subset\",\n ]:\n fc = obj_as_dict[slot]\n for k, v in fc.items():\n if \"#\" in k:\n k = k.split(\"#\")[-1]\n 
elif k.startswith(\"oio:\"):\n k = k.replace(\"oio:\", \"\")\n obj_as_dict[f\"{slot}_{k}\"] = v.filtered_count\n del obj_as_dict[slot]\n for slot in [\"was_generated_by\"]:\n obj = getattr(original, slot)\n if obj:\n for k, v in vars(obj).items():\n if v is not None:\n obj_as_dict[f\"{slot}_{k}\"] = v\n del obj_as_dict[slot]\n for slot in [\"ontologies\"]:\n objs = getattr(original, slot, [])\n n = 0\n for obj in objs:\n n += 1\n for k, v in vars(obj).items():\n col_name = f\"{slot}_{k}\"\n if n > 1:\n col_name = f\"{col_name}_{n}\"\n if v is not None:\n obj_as_dict[col_name] = v\n del obj_as_dict[slot]", "title": "" }, { "docid": "7a4d68da5c4ad87d6009257422896c41", "score": "0.4743701", "text": "def parse_hive3_describe_formatted_output(self, output):\n result = {}\n for line in output.split('\\n'):\n line_elements = [s.strip() for s in line.split(',')]\n if len(line_elements) >= 2:\n result[line_elements[0]] = line_elements[1]\n return result", "title": "" }, { "docid": "8608359ed2856e9857c424dc77689bfc", "score": "0.47411913", "text": "def _process_data(data):\n if isinstance(data, Table):\n return data\n\n return Table(data, default=set)", "title": "" }, { "docid": "9de7d228f8a4d3a791f5292f49f3eab7", "score": "0.47371295", "text": "def loaditr(self, table, iter):\n fields = None\n #self.begin()\n for i in iter:\n if fields == None: fields = ','.join(['?'] * len(i.keys()))\n self.loaddict(table, i, fields)\n #self.commit()\n return", "title": "" }, { "docid": "3bf87fe4adadea708c333b71fd10244b", "score": "0.47316718", "text": "def test_format_value_dict(self):\n test_dict = {'a': 'b'}\n expected_dict_format = \"\"\"\n <table class=\"table table-striped\">\n <tbody>\n <tr>\n <th>a</th>\n <td>b</td>\n </tr>\n </tbody>\n </table>\"\"\"\n self.assertEqual(\n format_html(format_value(test_dict)),\n format_html(expected_dict_format)\n )", "title": "" }, { "docid": "bc6f7cebf83625c1cdddd867363f958d", "score": "0.47314888", "text": "def prepare_tsv_dictionary(dictionary):\n new_dictionary = {}\n for key, value in dictionary.items():\n # print(key, value)\n if type(value) in [set, list]:\n value = list(value)\n if len(value) == 0:\n print('empty values')\n print(key, value)\n continue\n if type(value[0]) != str:\n value = [json.dumps(x) for x in value]\n value = \"||\".join(value)\n elif type(value) == dict:\n value = json.dumps(value)\n value = value.replace('\\\\\"', '\"')\n new_dictionary[key] = value\n return new_dictionary", "title": "" }, { "docid": "fdead266fd6d3b739d855f445f6d8cc3", "score": "0.4731144", "text": "def format(self, table):\n response = []\n columns = table.columns_to_str()\n column_length = len(columns)\n if column_length == 1:\n for row in table.data:\n row_list = []\n for column in columns:\n row_list.append(row[0])\n response.append({'data': row_list, 'name': column})\n else:\n for i in range(column_length):\n response.append({'data': []})\n for row in table.data:\n for index, column in enumerate(columns):\n data_list = []\n if index is not 0:\n data_list.append(row[0])\n data_list.append(row[index])\n response[index-1]['data'].append(copy.deepcopy(data_list))\n response[index-1]['name'] = column\n return response", "title": "" }, { "docid": "8c6dc9158e165851db2f3c42c98426fd", "score": "0.47294933", "text": "def _browse_extract(record, fields):\n return dict([(field, \"%s-transformed\" % getattr(record, field))\n for field in fields])", "title": "" }, { "docid": "4df948dce3b72b20b3b6b48fbc3ab6ae", "score": "0.47225136", "text": "def build_out_result_dict(report_wb, 
map_dict):\n\n result = {}\n\n # all versions are in 1 place: tab Intro & cell B1\n version = get_value(report_wb, 'Intro', 'B1')\n\n # grab all the elements (data fields) based on version\n for field in map_dict[version].keys():\n if 'Location' not in field:\n\n # if value in multiple cells, and must be concat'd\n if len(map_dict[version][field][1]) > 1:\n\n concat_str = ''\n\n for cell in map_dict[version][field][1]:\n concat_str += ' {}'.format(get_value(report_wb, map_dict[version][field][0], cell))\n\n result[field] = concat_str\n\n else:\n result[field] = get_value(report_wb, map_dict[version][field][0], map_dict[version][field][1][0])\n\n return result", "title": "" }, { "docid": "9ac0fca0a26f8cff40b920701fd88881", "score": "0.47196075", "text": "def tabular_report(sample_dictionary, blast_dictionary):\n print(\"Writing the report...\")\n sample_dict = sample_dictionary.copy()\n blast_dict = blast_dictionary.copy()\n samples = []\n for sequenceID in sample_dict:\n samples.append(sequenceID[1:])\n records = []\n for record in blast_dict.keys():\n records.append(blast_dict[record]['SeqID'])\n columns = [\"SeqID\", \"Sequence\", \"SeqLength\", \"Description\", \"Accession\", \"Db\", \"Score\", \"E_value\", \"Percent_Identity\", \"Organism\", \"Source\", \"Domain\", \"Taxonomy\"]\n # columns = list(next(iter(blast_dict.values())).keys())\n OUT = open(\"blast_report.txt\", \"w\")\n OUT.write('\\t'.join(columns) + '\\n')\n for record in blast_dict.keys():\n OUT.write('\\t'.join([str(blast_dict[record][x]) for x in columns]) + '\\n')\n for sample in samples:\n if sample not in records:\n sample_stripped = sample.split(\"\\t\")[0]\n OUT.write(sample_stripped + '\\t' + sample_dict['@'+sample]['sequence'] + '\\t' + str(len(sample_dict['@'+sample]['sequence'])) + '\\t' + 'NO HIT OR SEQUENCE QUALITY BELOW THRESHOLD\\n')\n OUT.close()", "title": "" }, { "docid": "3da3e9321dc4ee7af25afbc6840ca29b", "score": "0.47189152", "text": "def clean_reflection_tables(self):\n self._initial_keys.append(\"inverse_scale_factor\")\n self._initial_keys.append(\"inverse_scale_factor_variance\")\n self._initial_keys.append(\"Ih_values\")\n self._initial_keys.append(\"intensity.scale.value\")\n self._initial_keys.append(\"intensity.scale.variance\")\n self.reflection_table[\"intensity.scale.value\"] = self.reflection_table[\n \"intensity\"\n ]\n self.reflection_table[\"intensity.scale.variance\"] = self.reflection_table[\n \"variance\"\n ]\n if \"Esq\" in self.reflection_table:\n del self.reflection_table[\"Esq\"]\n for key in self.reflection_table.keys():\n if key not in self._initial_keys:\n del self._reflection_table[key]", "title": "" }, { "docid": "61082c6db688c525bcafb911bea58548", "score": "0.47139597", "text": "def visit_Dict(self, node):\n return '{{{}}}'.format(\n ', '.join([\n '{}: {}'.format(self.visit(node.keys[idx]), self.visit(node.values[idx]))\n for idx in range(len(node.keys))\n ])\n )", "title": "" }, { "docid": "d4db3f8ed74fc7ab35f003e773061147", "score": "0.47111735", "text": "def main():\n printTable(tableData)", "title": "" }, { "docid": "4d2230b14e32cab78218e68a869b589f", "score": "0.47089976", "text": "def handle_special_keys(self, config_key, full_dict_key, output_dictionary, trimmed_value):\n def add_index_key_value(key_suffix, value_suffix):\n json_key = full_dict_key + key_suffix\n json_value = trimmed_value + \".\" + value_suffix\n output_dictionary[json_key] = json_value\n\n if config_key == \"Ref\":\n add_index_key_value(\"Amb\", \"amb\")\n add_index_key_value(\"Ann\", \"ann\")\n 
add_index_key_value(\"Bwt\", \"bwt\")\n add_index_key_value(\"Pac\", \"pac\")\n add_index_key_value(\"Sa\", \"sa\")\n\n # dict files replace the '.fasta'/'.fa' extension with '.dict'\n # 'find' returns the index of the first char in the string searched for (or -1 if the string was not found)\n extension_start_index = trimmed_value.find(\".fa\")\n if extension_start_index == -1:\n self.project_logger.log_error(\n \"E.par.REF.1\",\n \"REF key appears to not have a valid value: '\" + trimmed_value +\n \" has no '.fa' or '.fasta' extension\"\n )\n else:\n base_name = trimmed_value[:extension_start_index]\n output_dictionary[full_dict_key + \"Dict\"] = base_name + \".dict\"\n\n elif config_key == \"DBSNP\":\n output_dictionary[str(full_dict_key) + \"Idx\"] = str(trimmed_value) + \".idx\"", "title": "" }, { "docid": "ba4add1894d4f9882b533392f4627876", "score": "0.4707821", "text": "def SubLines(self):\n self.templateB = []\n for line in self.templateA:\n result = line\n # only do substitution with KEY-VALUE pairs\n if len(line)==2:\n key=line[0]\n if key in self.dic:\n result=[key,self.Get(key)]\n self.templateB.append(result)", "title": "" }, { "docid": "df74e413f9822b13a55f7e23967672d4", "score": "0.47059834", "text": "def json_to_table(repo, json_response, response_type):\n if response_type == 'traffic':\n label0 = \"Visitors\"\n label1 = \"Views\"\n label2 = \"Unique visitors\"\n if response_type == 'clones':\n label0 = \"Git clones\"\n label1 = \"Clones\"\n label2 = \"Unique cloners\"\n\n repo_name = repo\n total_label1 = str(json_response['count'])\n total_uniques = str(json_response['uniques'])\n\n # If there is granular date-level data\n dates_and_label1 = OrderedDict()\n detailed_label1 = json_response[label1.lower()] # 'views', 'clones'\n for row in detailed_label1:\n utc_date = str(row['timestamp'][0:10])\n dates_and_label1[utc_date] = (str(row['count']), str(row['uniques']))\n\n \"\"\" Table template\n repo_name\n Date label1 label2\n Totals # #\n date # #\n ... ... ...\n \"\"\"\n # Set the table\n table = '> ' + repo_name + ' - ' + label0 + '\\n' +\\\n 'Date' + '\\t\\t' + label1 + '\\t' + label2 + '\\n' +\\\n 'Totals' + '\\t\\t' + total_label1 + '\\t' + total_uniques + '\\n'\n # Add rows to the table\n for row in dates_and_label1:\n table += row + '\\t' + dates_and_label1[row][0] + '\\t' + dates_and_label1[row][1] + '\\n'\n\n return table", "title": "" }, { "docid": "df74e413f9822b13a55f7e23967672d4", "score": "0.47059834", "text": "def json_to_table(repo, json_response, response_type):\n if response_type == 'traffic':\n label0 = \"Visitors\"\n label1 = \"Views\"\n label2 = \"Unique visitors\"\n if response_type == 'clones':\n label0 = \"Git clones\"\n label1 = \"Clones\"\n label2 = \"Unique cloners\"\n\n repo_name = repo\n total_label1 = str(json_response['count'])\n total_uniques = str(json_response['uniques'])\n\n # If there is granular date-level data\n dates_and_label1 = OrderedDict()\n detailed_label1 = json_response[label1.lower()] # 'views', 'clones'\n for row in detailed_label1:\n utc_date = str(row['timestamp'][0:10])\n dates_and_label1[utc_date] = (str(row['count']), str(row['uniques']))\n\n \"\"\" Table template\n repo_name\n Date label1 label2\n Totals # #\n date # #\n ... ... 
...\n \"\"\"\n # Set the table\n table = '> ' + repo_name + ' - ' + label0 + '\\n' +\\\n 'Date' + '\\t\\t' + label1 + '\\t' + label2 + '\\n' +\\\n 'Totals' + '\\t\\t' + total_label1 + '\\t' + total_uniques + '\\n'\n # Add rows to the table\n for row in dates_and_label1:\n table += row + '\\t' + dates_and_label1[row][0] + '\\t' + dates_and_label1[row][1] + '\\n'\n\n return table", "title": "" }, { "docid": "5b0994a0321ed45efa5a371be46e8526", "score": "0.4696164", "text": "def print_wd_to_table(self):\n table = \"\"\n labels = self.wd_item[\"labels\"]\n descriptions = self.wd_item[\"descriptions\"]\n aliases = self.wd_item[\"aliases\"]\n disambiguators = self.wd_item[\"disambiguators\"]\n table = table + \"'''Labels'''\\n\\n\"\n for k, v in labels.items():\n table += \"* '''{key}''': {val}\\n\\n\".format(key=k, val=v)\n table = table + \"'''Descriptions'''\\n\\n\"\n for k, v in descriptions.items():\n table += \"* '''{key}''': {val}\\n\\n\".format(key=k, val=v)\n table = table + \"'''Aliases'''\\n\\n\"\n for k, v in aliases.items():\n for single_alias in v:\n table += \"* '''{key}''': {val}\\n\\n\".format(\n key=k, val=single_alias)\n table += \"'''Disambiguators'''\\n\\n\"\n for k, v in disambiguators.items():\n table += \"* '''{key}''': {val}\\n\\n\".format(key=k, val=v)\n if self.wd_item[\"wd-item\"] is not None:\n table = table + \"'''Possible item''': \" + \\\n utils.wd_template(\"Q\", self.wd_item[\"wd-item\"]) + \"\\n\\n\"\n else:\n table = table + \"'''Possible item''': \\n\\n\"\n table_head = \"{| class='wikitable'\\n|-\\n! Property\\n! Value\\n! Qualifiers\\n! References\\n\"\n table = table + table_head\n statements = self.wd_item[\"statements\"]\n for statement in statements:\n claims = statements[statement]\n for claim in claims:\n value = claim[\"value\"]\n if value is None:\n continue\n value_to_print = \"\"\n if utils.string_is_q_item(value):\n value = utils.wd_template(\"Q\", value)\n value_to_print += str(value)\n elif \"quantity_value\" in value:\n quantity = str(value[\"quantity_value\"])\n if \"unit\" in value:\n value_to_print += \"{quantity} {unit}\".format(\n quantity=quantity,\n unit=utils.wd_template(\"Q\", value[\"unit\"]))\n else:\n value_to_print += quantity\n elif \"time_value\" in value:\n value_to_print += utils.dict_to_iso_date(\n value[\"time_value\"])\n elif \"monolingual_value\" in value:\n value_to_print += \"({lang}) {text}\".format(\n text=value[\"monolingual_value\"],\n lang=value[\"lang\"])\n else:\n value_to_print += str(value).strip()\n quals = claim[\"quals\"]\n refs = claim[\"refs\"]\n quals_to_print = \"\"\n if len(quals) == 0:\n quals_to_print = quals_to_print\n else:\n for q in quals:\n quals_to_print = quals_to_print + \"<br>\" + utils.wd_template(\n \"P\", q) + \" : \" + json.dumps(quals[q])\n if len(refs) == 0:\n ref_to_print = \"\"\n else:\n for r in refs:\n ref_to_print = json.dumps(\n r, default=utils.datetime_convert)\n table = table + \"|-\\n\"\n table = table + \"| \" + utils.wd_template(\"P\", statement) + \"\\n\"\n table = table + \"| \" + value_to_print + \"\\n\"\n table = table + \"| \" + quals_to_print + \"\\n\"\n table = table + \"| \" + ref_to_print + \"\\n\"\n table = table + \"|}\\n\"\n table = table + \"----------\\n\"\n return table", "title": "" }, { "docid": "2a8faec719059a20305fa5467bdf5733", "score": "0.46961382", "text": "def _writeRows(self):\n\n\t\tfor record in self._list_of_dicts:\n\t\t\trow_data = []\n\t\t\tfor column_data in self.headers:\n\t\t\t\trow_data.append(str(record[column_data]))\n\t\t\tprint 
self._delimeter.join(row_data )", "title": "" } ]
fbf142af4779a30ee9ab42abf9b5eef3
Returns the value of the objective after solve.
[ { "docid": "33896780b6f57a9b89dd03d9bccfe9c4", "score": "0.84447485", "text": "def objective_value(self):\n self.__check_has_feasible_solution()\n return self.__solve_helper.objective_value()", "title": "" } ]
[ { "docid": "ac988b24dde3b60a3417a5ab21dcaaf5", "score": "0.79332906", "text": "def objective_value(self: \"Model\") -> Optional[numbers.Real]:\n return self.solver.get_objective_value()", "title": "" }, { "docid": "fd35a53ae58889ad4bf5d206e4ec6411", "score": "0.7464509", "text": "def objective_value(self):\n return self._objective_value", "title": "" }, { "docid": "5bf2cbdcba820a7edca4386a293346ac", "score": "0.7339869", "text": "def get_objective_value(self):\n return None", "title": "" }, { "docid": "04e92a877c28a716d00f67fa94006ad4", "score": "0.71732587", "text": "def objective(self) -> float:", "title": "" }, { "docid": "1506a9b2d61eb55a5280c8b7c93676b0", "score": "0.7154781", "text": "def cost(self):\r\n if self.prob.status != pulp.LpStatusOptimal: # Not solved\r\n raise Exception(\"Cannot get the cost of an unsolved problem\")\r\n return pulp.value(self.prob.objective)", "title": "" }, { "docid": "38bd2afd04f6da3227d04b83a9bd6515", "score": "0.7112217", "text": "def objective(self):\n return self._objective", "title": "" }, { "docid": "38bd2afd04f6da3227d04b83a9bd6515", "score": "0.7112217", "text": "def objective(self):\n return self._objective", "title": "" }, { "docid": "9ecb7eac3edcad6273c47109654c0e55", "score": "0.6953116", "text": "def getObjectiveValue(self):\n if self.lang == 'AMPL':\n s = self.interface.getCurrentObjective().getValues().toList()[0]\n if self.lang == 'Julia':\n s = self.interface.getobjectivevalue(self.jumpmod)\n return s", "title": "" }, { "docid": "17a044d50afad2443bd83a6c56c38954", "score": "0.69345254", "text": "def objective_const(self: \"Model\") -> float:\n return self.solver.get_objective_const()", "title": "" }, { "docid": "fe063f48f6ed64513f7547c5250a6d23", "score": "0.6717924", "text": "def objective(self, x):\n return self.f(x)", "title": "" }, { "docid": "0408b772ebc0bad361ed1b1848be116f", "score": "0.6660911", "text": "def objective(self: \"Model\") -> \"mip.LinExpr\":\n return self.solver.get_objective()", "title": "" }, { "docid": "9acbab263e51d39ece682b359df5edc1", "score": "0.6646636", "text": "def solve(self):\n self._solver_manager.solve()\n return self._solver_manager.solution", "title": "" }, { "docid": "08dc8bf5eb1153c5d6859e61670643ec", "score": "0.6542083", "text": "def solution(self):\n return self.variables.solution", "title": "" }, { "docid": "5022f11ba8862cbc8ee866c226193242", "score": "0.65129334", "text": "def cost(self) -> float:\n return self.objective()", "title": "" }, { "docid": "8c499283a807fea521e6299bb2d7e036", "score": "0.64991647", "text": "def objective_func(self, var_params: list):\n\n p = len(var_params) // 2\n\n result = self.execute_circuit(gamma=var_params[0:p], beta=var_params[p::])\n exp_val = self.compute_exp_val(result)\n\n print(var_params[0:p], var_params[p::])\n print(exp_val)\n\n # Statistics from the optimization process:\n self.all_counts.append(result.get_counts())\n self.exp_vals.append(- exp_val)\n\n return - exp_val", "title": "" }, { "docid": "76f19ce42206fb762cc0ca6f4e48245b", "score": "0.6438537", "text": "def _solve(\n self, X: ArrayLike, y: ArrayLike, solver_options: dict, *args, **kwargs\n ) -> NDArray[float]:\n self.canonicals_.problem.solve(\n solver=self.solver, warm_start=self.warm_start, **solver_options\n )\n return self.canonicals_.beta.value", "title": "" }, { "docid": "de8365f7c8f6250eaffdbde599f9a4a5", "score": "0.6423733", "text": "def cost(self):\r\n if self.prob.status != pulp.LpStatusOptimal: # Not solved\r\n raise Exception(\"Cannot get the cost of an unsolved problem\")\r\n 
res = 0\r\n for i in self.instances:\r\n res += self.vms[i].varValue * i.price\r\n return res", "title": "" }, { "docid": "9962f7d2776ced1623042be80533d2c8", "score": "0.63888776", "text": "def objective_values(self: \"Model\") -> List[numbers.Real]:\n return [self.solver.get_objective_value_i(i) for i in range(self.num_solutions)]", "title": "" }, { "docid": "9146d5c4fae6c3f6e9566437068408d2", "score": "0.6313139", "text": "def solve(self):\n self.add_variables()\n self.add_obj(self.obj)\n for i in self.constraints:\n self.add_cons(i)\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n x = self.m.getAttr('x')\n opt = self.m.objVal\n return x, opt\n elif self.m.status == GRB.INFEASIBLE:\n return 'infeasible', 0\n elif self.m.status == GRB.UNBOUNDED:\n return 'unbounded', 0\n elif self.m.status == GRB.INF_OR_UNBD:\n return 'infeasible', 'unbounded'", "title": "" }, { "docid": "34080bff5c998650c1f2f4299f8e91c6", "score": "0.6302774", "text": "def value(self, var):\n self.__check_has_feasible_solution()\n return self.__solve_helper.var_value(var.index)", "title": "" }, { "docid": "7e8246bede58d7e0f4d669f439cfa8d7", "score": "0.63021296", "text": "def solve(self):\n self._create_var_stops()\n self._create_var_violation()\n self._cst_at_least_one_violated()\n self._cst_violation_feature()\n self._cst_at_least_one_stop_selected()\n\n self.modelOptim.optimize()\n\n if self.modelOptim.Status == gurobipy.GRB.INFEASIBLE:\n return self._deal_with_infeasible()\n\n else:\n return self._retrieve_solution()", "title": "" }, { "docid": "45bf575df7aa068a4fd67058213769fd", "score": "0.62212336", "text": "def get_objective_NA (self):\n return self.objective_NA", "title": "" }, { "docid": "ba937385012ef16015761f7851a675c9", "score": "0.61787426", "text": "def do_solve(self):\n self.solve(5)\n self.show_result()", "title": "" }, { "docid": "33b7f2414e6cb9fa50c16c777869fd31", "score": "0.61577874", "text": "def objective(self) -> str:\n return self._objective", "title": "" }, { "docid": "ce8f02cb9c285b98099cf232af18498d", "score": "0.61470824", "text": "def solve(self, objective, constraints, cached_data,\n warm_start, verbose, solver_opts):\n pass", "title": "" }, { "docid": "c827b66b1830cfb258afd5837481467e", "score": "0.61452925", "text": "def objective(self):\n return sum(distances.euclidean(node[1], self.edges[node][1])\n for node in self.nodes)", "title": "" }, { "docid": "34b7c4af29d0efd744a7042409a87e60", "score": "0.612622", "text": "def get(self):\n self._get_solution()\n return self.solution_peak", "title": "" }, { "docid": "9ce4077bf63d3bc220572dcfd5808414", "score": "0.6096793", "text": "def solve(self):\n self._print_header(forPICOS = True)\n\n self._load_problem()\n self._reset_stopwatch()\n\n self._verbose(\"Solving the problem via {}.\".format(self.name))\n primals, duals, objectiveValue, meta = self._solve()\n\n # Enforce proper format of the returned solution.\n assert primals is None or type(primals) is dict\n assert duals is None or type(duals) is list\n assert type(objectiveValue) in (int, float, list) \\\n or objectiveValue is None \\\n or objectiveValue == \"toEval\"\n assert type(meta) is dict\n assert \"status\" in meta and type(meta[\"status\"]) is str\n assert \"time\" not in meta, \\\n \"Field 'time' of solution metadata is set in Solver base class.\"\n assert self.timer is not None, \\\n \"Solvers must measure search time via _stopwatch.\"\n\n meta[\"time\"] = self.timer\n\n # Output solution status.\n self._verbose(\"Solution is {} after {:.1e}s.\"\n .format(meta[\"status\"], 
meta[\"time\"]))\n\n # Warn about incomplete primals.\n if primals is not None and None in primals.values():\n if any([True for primal in primals.values() if primal is not None]):\n self._warn(\"The primal solution is incomplete.\")\n else:\n primals = None\n\n if primals is None:\n self._verbose(\"No primal solution obtained.\")\n\n # Warn about incomplete duals.\n if duals is not None and None in duals:\n if any([True for dual in duals if dual is not None]):\n self._warn(\"The dual solution is incomplete.\")\n else:\n duals = None\n\n if duals is None:\n self._verbose(\"No dual solution obtained.\")\n\n self._print_footer(forPICOS = True)\n\n return primals, duals, objectiveValue, meta", "title": "" }, { "docid": "8c8071b3d22d6e6a51c48f4cb6d26888", "score": "0.60960627", "text": "def fro_objective(self):\n R = self.V - np.dot(self.W, self.H)\n return np.multiply(R, R).sum()", "title": "" }, { "docid": "abfea6faed86b26288870767bca08b19", "score": "0.6091793", "text": "def objective(x, grad):\n #print \"==================================\"\n if grad.size > 0:\n fx, gx = ac(x[None], grad=True)\n grad[:] = gx[0][:]\n else:\n try:\n fx = ac(x,gp,y_max)\n fx=fx[0]\n #print fx\n except:\n return 0\n return fx[0]", "title": "" }, { "docid": "3bd2ae7dd2d8558202a87b44bb858358", "score": "0.60878354", "text": "def objective_function(self, position):\n objective_value = self.obj_func(*position)\n return objective_value", "title": "" }, { "docid": "a737b887a904dacb12b99b2706cf3bf5", "score": "0.6083064", "text": "def solved_reward_val(self):\n return 1000.0", "title": "" }, { "docid": "c40c240d267d07e4c66d6bd68b66a9b8", "score": "0.60800326", "text": "def _get_value(self, obs, action):\n if tuple(obs) == self.goal:\n return self.V[self.goal]\n value = 0.\n # print(obs, action, self.get_transition(obs, action).items())\n for obs_prime, transition in self.get_transition(obs, action).items():\n r = self.R[tuple(obs) + (action,)][tuple(obs_prime)]\n value += transition * (r + self.gamma * self.V[obs_prime])\n return value", "title": "" }, { "docid": "d611392ffadd39681d33e8d5df1dc325", "score": "0.60779935", "text": "def get_expr_value(self, expr):\n return self._solver.getvalue(expr)", "title": "" }, { "docid": "ba15474d7ddbbbe68e6bebf65938fd50", "score": "0.60667306", "text": "def solution(self):\n solution = pycosat.solve(self.clauses)\n\n if solution == \"UNSAT\":\n raise UnsatisfiableConstraints(\"Constraints are unsatisfiable\")\n elif solution == \"UNKNOWN\":\n raise SolutionNotFound(\"Search limits exhausted without solution\")\n else:\n return self.remap_solution(solution)", "title": "" }, { "docid": "b150027b9eeadcc401d7b76ed26b1907", "score": "0.60615265", "text": "def objective(x):\n model.evaluate(x)\n total = 0.0\n for funcname, func in preferences.items():\n if issubclass(func.__class__, classfunctions.SmoothClassFunction):\n try:\n # e.g. 
cost of this design\n param_val = getattr(model, funcname)()\n except ValueError:\n # invalid, throw a big penalty.\n return 1e3\n total += func.evaluate(param_val) # transformed cost\n return total", "title": "" }, { "docid": "e0b7802ee34acfaa81a09785c20672ba", "score": "0.6017658", "text": "def solve(self):\n q = np.zeros(self.graph._num_vertices, dtype = float)\n return self.minimize(q)", "title": "" }, { "docid": "fad814d9da3ed30cca9e7ba81f95460a", "score": "0.59884715", "text": "def get_solution(self):\n\n frac = 0\n # for i in subproblem.model.y:\n for i in range(1, 5):\n for t in self.model.t:\n val = self.model.y[i,t].value\n if min(val - floor(val) , ceil(val) - val) >= 1e-6: # tolerance on integrality violation\n frac = 1\n\n if (frac > 0):\n return (None, None)\n return (self.solution_value, deepcopy(self.solution) )", "title": "" }, { "docid": "9e7a69925b0f7a68b9fecbef0b386371", "score": "0.59778816", "text": "def fvSolution(self):\r\n return self.__fvSolution", "title": "" }, { "docid": "65c913a702858800e6ee24fa8835f5bd", "score": "0.5977236", "text": "def get_best_value(self):\r\n # Todo: implement\r\n return self.best_global.cost", "title": "" }, { "docid": "71f0b66db0517835c07fc7df0012c338", "score": "0.5970132", "text": "def soln(self):\n if not hasattr(self, \"_soln\"):\n self.fit()\n return self._soln", "title": "" }, { "docid": "f69277ae9ba796468a56c70a410d7626", "score": "0.5966226", "text": "def solve(self):\r\n solution = solve_ivp(self.logistic_growth, self.t_span, self.initial_state, t_eval=self.t)\r\n return solution.y[0]", "title": "" }, { "docid": "cd633160ff8fb031cd78edff489f95a4", "score": "0.59439915", "text": "def best_objective_bound(self):\n self.__check_has_feasible_solution()\n return self.__solve_helper.best_objective_bound()", "title": "" }, { "docid": "edbab19c1b6cf8fd7b406b6cc14574f9", "score": "0.59209156", "text": "def calc(self, **vars):\r\n # easy\r\n return self.value", "title": "" }, { "docid": "b85ca2fdf63af4a3eb6105224790cf3e", "score": "0.591304", "text": "def _objective(self, beta):\n return 1/self.n * sum(log(1 + exp(-self.Y * (self.X.T @ beta)))) + self.lamb*(beta.T @ beta)", "title": "" }, { "docid": "88348540e0c4b1815f9c68b143cc10a8", "score": "0.5912373", "text": "def get_current_value(self):\n # If the cost is negative, it accumulates until it's fixed and then returns to 0. 
If positive,\n # it continues to accumulate after it finishes\n if self.value < 0:\n if 0 < self.release_time < self.env.now:\n return 0\n else:\n return self.value * (self.env.now - self.start_time)\n else:\n if 0 < self.release_time < self.env.now:\n return self.value * (self.env.now - self.release_time)\n else:\n return 0", "title": "" }, { "docid": "9dac34e2819d400f47b431bfd7c1ff6a", "score": "0.59083873", "text": "def solve(self):\n\n # Update l and u in the solver instance\n self.solver.update(l=self.l, u=self.u)\n\n # Warm start solver with currently stored solution\n self.solver.warm_start(x=self.x, y=self.y)\n\n # Solve current problem\n results = self.solver.solve()\n\n # Store solver status\n self.status = results.info.status_val\n\n # DEBUG: Problems that hit max_iter are infeasible\n # if self.status == osqp.constant('OSQP_MAX_ITER_REACHED'):\n # self.status = osqp.constant('OSQP_PRIMAL_INFEASIBLE')\n\n # Store number of iterations\n self.num_iter = results.info.iter\n\n # Store solve time\n self.osqp_solve_time = results.info.run_time\n\n # Store solver solution\n self.x = results.x\n self.y = results.y\n\n # Enforce integer variables to be exactly within the bounds\n if self.status == osqp.constant('OSQP_SOLVED') or \\\n self.status == osqp.constant('OSQP_MAX_ITER_REACHED'):\n # import ipdb; ipdb.set_trace()\n n_int = self.data.n_int\n i_idx = self.data.i_idx\n self.x[i_idx] = \\\n np.minimum(np.maximum(self.x[i_idx],\n self.l[-n_int:]),\n self.u[-n_int:])\n # if any(self.x[i_idx] < self.l[-n_int:]):\n # import ipdb; ipdb.set_trace()\n # if any(self.x[i_idx] > self.u[-n_int:]):\n # import ipdb; ipdb.set_trace()\n\n # Update objective value of relaxed problem (lower bound)\n self.lower = self.data.compute_obj_val(self.x)\n\n # # Get lower bound (objective value of relaxed problem)\n # self.lower = results.info.obj_val", "title": "" }, { "docid": "2912a0bee37c51594ba4a46c60463363", "score": "0.5907494", "text": "def solve(self, *args, **kwargs):\r\n return self.prob.solve(*args, **kwargs)", "title": "" }, { "docid": "2912a0bee37c51594ba4a46c60463363", "score": "0.5907494", "text": "def solve(self, *args, **kwargs):\r\n return self.prob.solve(*args, **kwargs)", "title": "" }, { "docid": "419727c9d33568941c5bbd12d519117b", "score": "0.59057456", "text": "def solve(self):\n self.solution = self.tf_session(self.equations, self.initial_conditions)\n return self.solution", "title": "" }, { "docid": "375962ac0f2e45b2060303351e7520ce", "score": "0.59010744", "text": "def _compute_total_value(self):\n total = 0\n for obj, agent in self._assignments.iteritems():\n total += self._matrix[agent][obj]\n\n print 'optimal solution: {}'.format(total)\n return total", "title": "" }, { "docid": "1ff791c92ae49368437002cdb26d19c3", "score": "0.5888282", "text": "def get_objective(self):\n c, offset = self._cache_to_matrix(self.obj_cache)\n c = self.vec_intf.const_to_matrix(c.T, convert_scalars=True)\n c = intf.from_2D_to_1D(c)\n offset = self.vec_intf.scalar_value(offset)\n # Negate offset because was negated before.\n return c, -offset", "title": "" }, { "docid": "cb44f87032651f1c06741fb129d639de", "score": "0.5879185", "text": "def minimizer(self):\n return self.x", "title": "" }, { "docid": "87d97ba4427142f1e732a65e112d622c", "score": "0.5878775", "text": "def objective(self) -> float:\n return sum(route.cost() for route in self.routes)", "title": "" }, { "docid": "42dc885112b657a507386a242b5440f3", "score": "0.586209", "text": "def max(self,objective):\r\n self.sense = -1\r\n self.obj = 
array('d',len(self.var) * [0.0])\r\n if len(self.obj) == 0: return\r\n if type(objective) == ListType:\r\n coefList = objective\r\n elif type(objective) == ExpressionType:\r\n coefList = objective._expr.items()\r\n elif type(objective) == type(self.var[0]): # variable?\r\n coefList = [ (objective,1.0) ]\r\n else:\r\n print(\"ERROR: Unknown objective type\",type(objective),\"ignored\")\r\n return\r\n for (var,value) in coefList:\r\n if var: self.obj[var.id] = value\r\n if self.solverInitialised:\r\n indices = array('i',range(len(self.var))) \r\n cplex.CPXchgobj(self.Env,self.LP,len(self.var),ptr(indices),\r\n ptr(self.obj))\r\n gc.collect() # do interim garbage collection\r", "title": "" }, { "docid": "4522fe5d1a8d0fa2d528a6c3d904e25a", "score": "0.5856968", "text": "def solve(self, rhs):\n # TODO\n pass", "title": "" }, { "docid": "b7269d514aabc61a38ef7c695413c16c", "score": "0.58491343", "text": "def getValue(self):\n return self.__systolic", "title": "" }, { "docid": "06db6a105fb64e42e036d3faa4aafa8c", "score": "0.5839615", "text": "def rhs(self) -> scope.jsii_calc_lib.Value:\n return jsii.get(self, \"rhs\")", "title": "" }, { "docid": "8ec935126e69603ee12b5bcf559690e6", "score": "0.5839146", "text": "def addObjective(self) -> None:\n self.lp_problem += pulp.lpSum([self.b[(i,j)] for i in range(self.n) for j in range(self.m)]) + pulp.lpSum([self.x[i] for i in range(self.n)])", "title": "" }, { "docid": "e6921d8cf4982af51d009de6d870f2a2", "score": "0.5839005", "text": "def compute_J(self, simulation):\n\n # stores all of the fields for the objective function in self.field_arg_list\n self._solve_objfn_arg_fields(simulation)\n\n # pass these arguments to the objective function\n return self.objective.J(*self.field_arg_list)", "title": "" }, { "docid": "127d25dbe21582d7c0656c395116eb13", "score": "0.5838645", "text": "def solve_real(self):\n return self.solver(self.real_input)", "title": "" }, { "docid": "13380e7a12ea30d3ddbb51090b1d80e3", "score": "0.5820692", "text": "def result(self):\n return self.compute_ideal()", "title": "" }, { "docid": "c61c11edf6a00a36857f22a60be14370", "score": "0.5794479", "text": "def _calc_objective(n_inv, hinge_loss, softness, direction):\n\n\t\treturn n_inv * np.sum(hinge_loss) + softness * direction.dot(direction)", "title": "" }, { "docid": "cdcb4bf33af5c89a87079c241a5b53ea", "score": "0.5793604", "text": "def solve(self):\n ...", "title": "" }, { "docid": "8913785b9f736cb588fa4b173ba9f5fd", "score": "0.5789654", "text": "def solve_simple(self):\n return self.solver(self.simple_input)", "title": "" }, { "docid": "49344b20c3b0e0ad0df4b6afc60026db", "score": "0.5781801", "text": "def optimize(self):\n\n # Define objective function\n def objfunc(params):\n return self.expectation(beta=params[0:self.p], gamma=params[self.p:2 * self.p])\n\n # Optimize parameters\n optimizer = COBYLA(maxiter=5000, tol=0.0001)\n params = self.beta_val + self.gamma_val\n ret = optimizer.optimize(num_vars=2 * self.p, objective_function=objfunc, initial_point=params)\n self.beta_val = ret[0][0:self.p]\n self.gamma_val = ret[0][self.p:2 * self.p]\n self.error = ret[1]\n return", "title": "" }, { "docid": "08934ff73489cf89ef032204275ddae9", "score": "0.57783955", "text": "def f_objective(sav, cah, par, f_vfun):\n\n sav = float(sav)\n if sav < 0.0 or sav > cah:\n return np.inf\n\n # Consumption implied by savings level\n cons = cah - sav\n\n # Continuation value interpolated onto asset grid\n vcont = f_vfun(sav)\n\n # current-period utility\n if cons <= 0.0:\n u = - np.inf\n else:\n if 
par.gamma == 1.0:\n u = np.log(cons)\n else:\n u = (cons**(1.0 - par.gamma) - 1.0) / (1.0 - par.gamma)\n\n # Objective evaluated at current savings level\n obj = u + par.beta * vcont\n\n # We are running a minimiser, return negative of objective value\n return -obj", "title": "" }, { "docid": "3b7da63226f6b126bdd7b6985eb30ae6", "score": "0.57642055", "text": "def score(self, params):\n #return None\n #print(params\n jac = ndt.Jacobian(self.loglike, stepMax=1e-4)\n return jac(params)[-1]", "title": "" }, { "docid": "3b7da63226f6b126bdd7b6985eb30ae6", "score": "0.57642055", "text": "def score(self, params):\n #return None\n #print(params\n jac = ndt.Jacobian(self.loglike, stepMax=1e-4)\n return jac(params)[-1]", "title": "" }, { "docid": "0bdbaa9fd133bff26274f038437cca38", "score": "0.5759196", "text": "def solve(self, theta):", "title": "" }, { "docid": "dc192ff0161c29e1e48d2f779bc44010", "score": "0.5752301", "text": "def as_solution(self):\n try:\n solution = self.golfsolution\n except:\n solution = None\n return solution", "title": "" }, { "docid": "4b977082c6fdcb77f6526d7c615ae387", "score": "0.57517564", "text": "def solution_function(self, sol_coeffs):\n pass", "title": "" }, { "docid": "76679cb68b65da1d05068c55a2777543", "score": "0.5747292", "text": "def reduced_cost(self, var):\n self.__check_has_feasible_solution()\n return self.__solve_helper.reduced_cost(var.index)", "title": "" }, { "docid": "cd8346e5d3184c990a4efbadd1d53e55", "score": "0.57406664", "text": "def solve(self):\n pass", "title": "" }, { "docid": "5664673b6237caa459306579421e09eb", "score": "0.5740519", "text": "def get_solve_params(self):\n return self.solve_params", "title": "" }, { "docid": "5664673b6237caa459306579421e09eb", "score": "0.5740519", "text": "def get_solve_params(self):\n return self.solve_params", "title": "" }, { "docid": "5664673b6237caa459306579421e09eb", "score": "0.5740519", "text": "def get_solve_params(self):\n return self.solve_params", "title": "" }, { "docid": "5664673b6237caa459306579421e09eb", "score": "0.5740519", "text": "def get_solve_params(self):\n return self.solve_params", "title": "" }, { "docid": "a09fd94ef79aff20e24e1939af596676", "score": "0.5730252", "text": "def constraint(self, solution):\n x = solution.get_x()\n return self._k-sum(x)", "title": "" }, { "docid": "9491894d4a7f466930881b7ee250608a", "score": "0.5728073", "text": "def GetSolution(self):\n\n return self.MultiCFN.getSolution()", "title": "" }, { "docid": "822f6f3f5734d3e7ee6fd3b2106e22a1", "score": "0.5725078", "text": "def objectivefunction(self, simulation, evaluation):\n return -spotpy.objectivefunctions.rrmse(evaluation, simulation)", "title": "" }, { "docid": "e52987212e1b3e970a651f5687e35c53", "score": "0.57202935", "text": "def get_solution_and_add_constraints(self, model):\n self.log(\"Found a solution!\")\n this_solution = []\n\n # Get the ILP variables\n variables = model.getVars()\n\n # Get the objective value\n obj_value = model.objVal\n this_solution.append(\"# Objective Value = {}\".format(int(obj_value)))\n\n # Collect the variables that are set to 1\n ones_vars = []\n\n # For each variable\n for var in variables:\n # Extract the variable's name and value\n var_name = var.varName\n var_value = int(var.x)\n\n # Add this solution to the solutions list\n this_solution.append(\"{} {}\".format(var_name, var_value))\n\n # If it's a 1, remember it\n if var_value == 1:\n ones_vars.append(var)\n\n # Add a blank line to the solutions list, so it looks better\n this_solution.append(\"\")\n\n # Print the 
solution string to console\n solutions_string = \"\\n\".join(this_solution)\n print(solutions_string)\n\n # Add this solution to the list of all solutions\n self.solutions += this_solution\n\n # Compile a new constraint to get different values\n # For every variable C(i) that set to 1, do\n # sum(C(i)) <= (# Of 1's Variables) - 1\n # This ensures that we don't select all of them again,\n # but we can select a subset of them\n constraints = sum(ones_vars)\n model.addConstr(constraints <= (obj_value - 1))", "title": "" }, { "docid": "e8c5bb2270347875fe4e47b03c51a8fb", "score": "0.57177764", "text": "def value(self):\r\n if self._value is not None:\r\n return self._value\r\n\r\n # Compute the dependencies\r\n dep_values = [dep.value() for dep in self.deps]\r\n self._value = self._compute(dep_values)\r\n return self._value", "title": "" }, { "docid": "453f47aa902c82d144665edeb8aa43a3", "score": "0.57029927", "text": "def calc(self, graph):\n return self._value", "title": "" }, { "docid": "50f92c56c3ee3dda09ecb2b000481cb2", "score": "0.57004035", "text": "def value(self) -> float:\n if self.calc.num:\n return self.calc.var\n return None", "title": "" }, { "docid": "ae54111a70ba6beef70a8dec74ae611a", "score": "0.57001936", "text": "def _get_best_observed_value(\n experiment: Experiment,\n optimization_config: Optional[OptimizationConfig] = None,\n trial_indices: Optional[Iterable[int]] = None,\n ) -> Optional[float]:\n if optimization_config is None:\n optimization_config = not_none(experiment.optimization_config)\n if optimization_config.is_moo_problem:\n raise NotImplementedError(\n \"Please use `get_hypervolume` for multi-objective problems.\"\n )\n\n res = best_point_utils.get_best_by_raw_objective_with_trial_index(\n experiment=experiment,\n optimization_config=optimization_config,\n trial_indices=trial_indices,\n )\n\n predictions = res[2] if res is not None else None\n if predictions is None:\n return None\n\n means = not_none(predictions)[0]\n objective = optimization_config.objective\n if isinstance(objective, ScalarizedObjective):\n value = 0\n for metric, weight in objective.metric_weights:\n value += means[metric.name] * weight\n return value\n else:\n name = objective.metric_names[0]\n return means[name]", "title": "" }, { "docid": "93979d7f81f2ad4928fb9fb57ba3e27d", "score": "0.5688892", "text": "def error_objective_expression(self):\n # equation number 1 (first part) in Mitsos et al. 
2009\n time_start = self.midas.times[0]\n time_end = self.midas.times[1]\n measured = self.midas.df\n measured_start = measured.query(\"time==\" + str(time_start))\n measured_end = measured.query(\"time==\" + str(time_end))\n\n error_obj = None # error objective expression initialization\n for k in self.experiment:\n for j in self.midas.names_species:\n value_start = measured_start.get_value(index=measured_start.index[k], col=j)\n value_end = measured_end.get_value(index=measured_end.index[k], col=j)\n value = value_end - value_start\n if not isnan(value):\n if value < 0:\n value += 1\n error_obj += value + (1 - 2 * value) * self.x_var[j][k]\n return error_obj", "title": "" }, { "docid": "f97f61f1375c0b2aab73fa6092434de8", "score": "0.56811684", "text": "def solve(self):\n return\n self.iterate() # first iteration\n def r2z(v): # convert real to complex\n n = v.shape[0]//2\n return v[0:n] + 1j*v[n:2*n]\n def z2r(v): # convert complex to real\n return np.concatenate([v.real,v.imag])\n\n def fopt(cij): # function to return the error\n self.cij = r2z(cij) # store this vector\n self.cij2v() # update the v vectors\n self.iterate() # do an iteration\n print(self.cij)\n self.v2cij() # convert the v to cij\n return z2r(self.cij)-cij\n cij = optimize.broyden1(fopt,z2r(self.cij),f_tol=1e-8,max_rank=10)\n# cij = optimize.fsolve(fopt,z2r(self.cij),xtol=1e-8)\n# cij = optimize.anderson(fopt,z2r(self.cij),f_tol=1e-6,w0=0.1)\n# cij = optimize.newton_krylov(fopt,z2r(self.cij),f_tol=1e-6,outer_k=8)\n self.cij = cij\n self.cij2v() # update", "title": "" }, { "docid": "52666accd0d8c1f7b1061e8ec3bb4483", "score": "0.5677603", "text": "def get_value_target(self, observation):\n next_v = self.critic_target(observation.next_state).max(dim=-1)[0]\n next_v = next_v * (1 - observation.done)\n return self.get_reward(observation) + self.gamma * next_v", "title": "" }, { "docid": "16ff69687db38c38fbbc6027366a59c3", "score": "0.56728065", "text": "def __solve__(self):\n d=np.size(self.x,1)\n w0=np.zeros((d,1))\n t=100000\n n = np.size(self.x, 0)\n u, s, vh = np.linalg.svd(self.x)\n s = np.amax(s)\n eta=0.2/((1/n)*(1/np.amax([1e-5,self.gamma]))*s*s+self.lam)\n\n ww = ACCL.solve_adaptive(self, w0, eta, t)\n self.wstar= ww[:,t].transpose()\n self.fstar=self.obj(self.wstar)\n print ('norm of final gradient={:.2g}'.format(np.linalg.norm(self.grad(self.wstar),2)))\n return", "title": "" }, { "docid": "f3616f043cae5edf7d4f18294828987c", "score": "0.5660515", "text": "def get_value(self, variable, iteration):\n return self.solutions[iteration][variable.name]", "title": "" }, { "docid": "8dc1f1072b2168e0b1e8afcdabe8fe7b", "score": "0.56583613", "text": "def compute_solution(self, model, t_eval):\n timer = pybamm.Timer()\n\n solve_start_time = timer.time()\n pybamm.logger.info(\"Calling ODE solver\")\n solution = self.integrate(\n self.dydt,\n self.y0,\n t_eval,\n events=self.event_funs,\n mass_matrix=model.mass_matrix.entries,\n jacobian=self.jacobian,\n )\n solve_time = timer.time() - solve_start_time\n\n # Identify the event that caused termination\n termination = self.get_termination_reason(solution, self.events)\n\n return solution, solve_time, termination", "title": "" }, { "docid": "0652065c57bbc170854779251d91ce60", "score": "0.5652767", "text": "def _solve(self):\n pass", "title": "" }, { "docid": "6252955fc6d62b2401104d0d9096450c", "score": "0.56457096", "text": "def solve(self):\n raise NotImplementedError", "title": "" }, { "docid": "6252955fc6d62b2401104d0d9096450c", "score": "0.56457096", "text": "def 
solve(self):\n raise NotImplementedError", "title": "" }, { "docid": "4fdfc6dcc08a5e6c1d89f3e91232279d", "score": "0.56355405", "text": "def _upper_solve(self, cplex_epsilon=0.001):\n\n # Clean up the model\n self.TopModel.cleanup(cplex_epsilon)\n\n # Solve the MILP\n self.TopModel.solve()\n\n # Get the objective value\n obj = self.TopModel.solution.get_objective_value()\n\n # Get the solution vector\n defend = [False for a in self.Net.def_arcs]\n for i in range(len(self.Net.def_arcs)):\n if self.TopModel.solution.get_values(self.def_vars[i]) == 1:\n defend[i] = True\n\n return (obj, defend)", "title": "" }, { "docid": "467952e213906b61df8bed79686bd00e", "score": "0.56354064", "text": "def potential(q, params):\r\n\r\n return models.compute_model(q, params, Py2Cpp_int([0,0]))", "title": "" }, { "docid": "b274c274131a8a1fda2369df11f6cc58", "score": "0.5626949", "text": "def solve(self, theta: float = 1e-6) -> Tuple[np.ndarray, np.ndarray]:\n self.mdp.ensure_compiled()\n self.theta = theta\n return self._policy_improvement()", "title": "" }, { "docid": "b2e695f5cc335abbf0a20d64e683b540", "score": "0.5626538", "text": "def maximize(objective, variables, constraints, **kwargs):\n solution = _minimize(-1 * objective, variables, constraints, **kwargs)\n objective_value = run_expression(objective, solution)\n return {'solution': {var.name: val for var, val in solution.items()},\n 'objective_value': objective_value}", "title": "" }, { "docid": "60e1ca80a4c87a249abb86b3c3de610e", "score": "0.5626107", "text": "def _solve(self):\n start_time = clock()\n #\n # Setup parallelization\n #\n with Pyro4.locateNS() as ns:\n all_workers = ns.list(prefix=\"worker.\")\n workers = [Pyro4.Proxy(uri) for uri in all_workers.values()]\n for w in workers:\n w._pyroAsync() # set proxy in asynchronous mode\n nworkers = len(workers)\n if nworkers == 0:\n return None, None\n #\n # Initialize priority queue\n #\n sense = self.root.sense\n incumbent_value = sense*float('Inf')\n queue = PriorityQueue(sense=sense)\n queue.add(self.root)\n abs_tol = self.root.get_abs_tol()\n nbounded = 0\n #\n # Search\n #\n while len(queue) > 0:\n #\n # Send subproblems to workers\n #\n # We are sending the incumbent value, because the worker may conditionally\n # decide to generate an incumbent solution.\n #\n handles = []\n waiting = set()\n results = {}\n i=0\n while (i < nworkers) and len(queue) > 0:\n subproblem = queue.pop()\n #handles.append( workers[i].async_compute_results(subproblem, incumbent_value, sense) )\n #waiting.add(i)\n results[i] = workers[i].async_compute_results(subproblem, incumbent_value, sense)\n while True:\n if results[i].ready:\n results[i] = results[i].value\n break\n print(\"HERE\")\n i += 1\n #\n # Get results from workers\n #\n # result = (bound, value, solution, terminal_flag)\n # If value is None, then no solution is returned\n #\n while True:\n for i in list(waiting):\n if handles[i].ready:\n results[i] = handles[i].value\n waiting.discard(i)\n if len(waiting) == 0:\n break\n sleep(0.001)\n #print(\"HERE\")\n #\n # Process results\n #\n for i, result in results.items():\n bound, value, solution, subproblem = result\n #\n # Update nbounded and print diagnostics\n #\n nbounded += 1\n if True or nbounded % 1000 == 0 :\n print(\"#\" + str(nbounded) + \" pool=\" + str(len(queue)) + \" inc=\" \\\n + str(incumbent_value) + \" bnd=\" + str(bound))\n #\n # Update incumbent and prune the queue if an improving incumbent has been found\n #\n if (value is not None) and (sense*value < sense*incumbent_value):\n 
incumbent_value = value\n incumbent_solution = solution\n queue.prune(incumbent_value - sense*abs_tol)\n #\n # Generate children for non-terminal nodes\n #\n if sense*bound < sense*incumbent_value - abs_tol and not subproblem.terminal():\n numchildren = subproblem.separate()\n for j in range(numchildren):\n child = subproblem.make_child(j)\n assert(not child is None)\n queue.add( child )\n #\n # Terminate parallelization\n #\n for w in workers:\n w._pyroRelease()\n #\n # Save information and return\n #\n run_time = clock() - start_time\n print(str(nbounded) + \" subproblems bounded\")\n print(\"Run time \" + str(run_time) + \" seconds\")\n self._incumbent_value = incumbent_value\n self._incumbent_solution = incumbent_solution\n return (incumbent_value, incumbent_solution)", "title": "" } ]
177d8bb99822d9f182c26406bbdbf2c4
Puts a list of the referenced UID into the loadable for use in the node if this is loaded.
[ { "docid": "b13acc039b95bfdb1c55439a4009eb24", "score": "0.6025457", "text": "def addReferences(self, loadable):\n dcm = pydicom.read_file(loadable.files[0])\n loadable.referencedInstanceUIDs = []\n self._addReferencedSeries(loadable, dcm)\n self._addReferencedImages(loadable, dcm)\n loadable.referencedInstanceUIDs = list(set(loadable.referencedInstanceUIDs))", "title": "" } ]
[ { "docid": "d96bf21a885a86eebd587b28717baf1d", "score": "0.5601305", "text": "def put_list(self):\n self._check(pn_data_put_list(self._data))", "title": "" }, { "docid": "8f6609cd5d5501b1d2c674e337a5f338", "score": "0.54111516", "text": "def setInternalList(self, lst):\n\n self.genomeList = lst", "title": "" }, { "docid": "f02fe86657ef5106c7afe4bb54000e8e", "score": "0.53419894", "text": "def _add_dload_object(self, load: DLOAD) -> None:\n key = load.sid\n if key in self.dloads:\n self.dloads[key].append(load)\n else:\n self.dloads[key] = [load]\n self._type_to_id_map[load.type].append(key)", "title": "" }, { "docid": "7b9c5d1f7ad93788c7857f1c38dad992", "score": "0.53059477", "text": "def loadList(self, list):\n oldroot = self._root\n olddb = self._db\n oldwl = self._wl\n self._root = None\n self._db = None\n self._wl = None\n try:\n if self._type == IN_MEMORY:\n self._root = self._mem_loadList(list)\n elif self._type == ON_DISK:\n raise DawgError(\"On-disk dawg must be loaded from database.\")\n except Exception, detail:\n # If this process fails, make sure to reset the state\n self._root = oldroot\n self._db = olddb\n self._wl = oldwl\n raise DawgError(\"%s\" % detail)", "title": "" }, { "docid": "b7d3903f8671f00b899f88adc40c34aa", "score": "0.5288776", "text": "def load_list(self):\n # Load List\n # This is a list that must be created of all the incident ids you want to update. Currently,\n # the target column to be updated is the 3rd column.\n self.driver.get(self.list_url)", "title": "" }, { "docid": "a57ef5e34a0435229e395d986a3fb1d7", "score": "0.51319563", "text": "def _load_users(self):\n r = sql.abstractRequetesSQL.get_users()()\n self.users = {d[\"id\"]: dict(d) for d in r}", "title": "" }, { "docid": "97020b47a2e9b0922af44281596610b9", "score": "0.51258665", "text": "def load_users(self):\n\n self.users.load()", "title": "" }, { "docid": "27269012942703af08add676adc5da62", "score": "0.5061475", "text": "def set_managed_upids(self, upids):\n self.managed_upids = list(upids)", "title": "" }, { "docid": "fb3cf87839ac9f65354e177fccdfcc99", "score": "0.50573176", "text": "def loaded():", "title": "" }, { "docid": "a20ecc9ebd10380fd4503ce3bbc9a531", "score": "0.5026621", "text": "def loadBefore(oid, tid):", "title": "" }, { "docid": "64b0a2fbd96533e503cc71fcbd4137b4", "score": "0.50192875", "text": "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n 
tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "title": "" }, { "docid": "64b0a2fbd96533e503cc71fcbd4137b4", "score": "0.50192875", "text": "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "title": "" }, { "docid": "72d1382115962b60b47ec38dcbd491d3", "score": "0.5002378", "text": "def user_list(self, user_list):\n self._user_list = user_list", "title": "" }, { "docid": "41954601419646447c92db28ff4ae21b", "score": "0.49877876", "text": "def setRefObjLoader(self, refObjLoader):\n self.refObjLoader = refObjLoader", "title": "" }, { "docid": "d4ca2eb4baf2e3e167ad26d177ebec2b", "score": "0.4974077", "text": "def _setter(self, val):\n if isinstance(val, (list, Bundle)):\n self.members.clear()\n self.members.add(val)\n else:\n raise TypeError(\"Can only set with a list or Bundle\")", "title": "" }, { "docid": "c6cbfa50902b351e2050aa13a1b53ed5", "score": "0.49580973", "text": "def __setitem__(self, key, value):\n super(EntityList, self).__setitem__(key, value)\n try:\n self._keys.append(key)\n except AttributeError: # _keys is not set\n pass", "title": "" }, { "docid": "7c7d2155befa51c03bb44dcc59a44769", "score": "0.4877322", "text": "def add_to_list(self, type_of_key, value):\n if self.log_to_redis is False:\n return\n\n self.redis_client.lpush(self.redis_keys[type_of_key], value)", "title": "" }, { "docid": "31322fe79e603c10e5267731787547b0", "score": "0.48741788", "text": "def load_data(self, data):\n self.core_star_system_uuids = set((uuid.UUID(s) for s in data[\"core_star_system_uuids\"]))\n self.aux = auxiliary.load_data(self.aux, data[\"auxiliary\"])", "title": "" }, { "docid": "494b78dade1d3bb6ae7ea760cdb615f8", "score": "0.48735067", "text": "def on_load_updated(self, load):\n pass", "title": "" }, { "docid": "e0505bdb2923df2f02d0bcbff0fe8a7d", "score": "0.48661077", "text": "def load(self):\n super(Loader, self).load()\n self._load = True\n self.run_concurrent()", "title": "" }, { "docid": "186d6dff31b2e46cdc6cf3314cd3d586", "score": "0.4863755", "text": "def __load(self, obj, list_obj, id_col):\n selection = list_obj.get_selection()\n model, node = selection.get_selected()\n if not node:\n return\n idv = model.get_value(node, id_col)\n pdata = 
self.__preg.get_plugin(idv)\n self.__pmgr.load_plugin(pdata)\n self.__rebuild_load_list()", "title": "" }, { "docid": "9a6cd253e3a2ec6e87c203eac7000a4d", "score": "0.48578203", "text": "def register(self, on_nodes: Optional[Union[str, list]] = None) -> None:\n if on_nodes is None:\n value = [node['name'] for node in self.nodes]\n else:\n on_nodes = on_nodes if isinstance(on_nodes, list) else [on_nodes]\n value = list(set(self.load_on_startup) | set(on_nodes))\n self._register(on_nodes=value)", "title": "" }, { "docid": "354e4011ab61d70f77fc3cde2471afdc", "score": "0.4854562", "text": "def _restore_objs_from_IDs(self):\n if isinstance(self.location, str):\n self.location = Thing.ID_dict[self.location] # XXX will this work correctly for the room if it isn't loaded yet? \n if self.contents != None:\n self.contents = [Thing.ID_dict[id] for id in self.contents if (isinstance(id, str) and id in Thing.ID_dict)]", "title": "" }, { "docid": "f20f2c86906bfccc6fe0578909493bb4", "score": "0.48425776", "text": "def update_ids(self, new_id):\n assert isinstance(new_id, MultiID)\n self.uid = new_id.uid\n self.flat_repr = new_id.flat_repr", "title": "" }, { "docid": "4b1c268f9086b10169443a5b13b5cff3", "score": "0.48380515", "text": "def _add_lseq_object(self, load: LSEQ) -> None:\n key = load.sid\n if key in self.load_combinations:\n self.load_combinations[key].append(load)\n else:\n self.load_combinations[key] = [load]\n self._type_to_id_map[load.type].append(key)", "title": "" }, { "docid": "11a934cfd2d6148ec9d923409526ef0b", "score": "0.48363495", "text": "def get_load_ids(self):\n if self.load_ids_ref is None:\n return self.load_ids\n load_ids = []\n supported_loads = [\n 'FORCE', 'FORCE1', 'FORCE2', 'MOMENT', 'MOMENT1', 'MOMENT2',\n 'PLOAD', 'PLOAD1', 'PLOAD2', 'PLOAD4', 'GRAV', 'SPCD',\n # 'GMLOAD',\n 'RLOAD1', 'RLOAD2', 'TLOAD1', 'TLOAD2', 'PLOADX1', 'LOAD',\n 'RFORCE', 'RFORCE1', #'RFORCE2'\n 'ACCEL', 'ACCEL1', 'SLOAD', 'ACSRCE',\n ]\n for loads in self.load_ids_ref:\n load_idsi = []\n for load in loads:\n if isinstance(load, integer_types):\n load_ids.append(load)\n #elif load.type == 'LOAD':\n #load_ids.append(load.sid)\n elif load.type in supported_loads:\n load_idsi.append(load.sid)\n else:\n msg = ('The get_load_ids method doesnt support %s cards.\\n'\n '%s' % (load.__class__.__name__, str(load)))\n raise NotImplementedError(msg)\n\n load_idi = list(set(load_idsi))\n assert len(load_idi) == 1, load_idsi\n load_ids.append(load_idi[0])\n return load_ids", "title": "" }, { "docid": "5dd3c168898d7f8ceae0082c6d0a083f", "score": "0.4816781", "text": "def getUIDs(self):", "title": "" }, { "docid": "bd7d655b98cc866d9d6253e7e9d02c28", "score": "0.48126554", "text": "def load(self,ob):\n\n loadfunc = self.getLoader(ob)\n if loadfunc is not None:\n\n linkset = self.newSet(ob)\n newlinks = loadfunc(ob,linkset)\n\n if newlinks is not None and newlinks is not linkset:\n if hasattr(ob,'__dict__'):\n ob.__dict__[self] = newlinks\n return newlinks\n\n elif self.inverse and not isinstance(ob,self.inverse.types):\n return NullSet\n else:\n linkset = self.newSet(ob)\n\n if hasattr(ob,'__dict__'):\n return LoadEvent(ob,self,linkset).linkset\n else:\n return NullSet", "title": "" }, { "docid": "e153bc3bb18ad239ce1e00d1a087c19d", "score": "0.4805923", "text": "def _add_load_object(self, load: Union[FORCE, FORCE1, FORCE2, MOMENT, MOMENT1, MOMENT2,\n PLOAD, PLOAD1, PLOAD2, PLOAD4, PLOADX1,\n GRAV, ACCEL, ACCEL1, SPCD, SLOAD,\n QBDY1, QBDY2, QBDY3, QVOL, TEMPAX, PRESAX,\n RFORCE, RFORCE1, LOADCYN, LOADCYH, DEFORM,\n 
GMLOAD]) -> None:\n key = load.sid\n if key in self.loads:\n self.loads[key].append(load)\n else:\n self.loads[key] = [load]\n self._type_to_id_map[load.type].append(key)", "title": "" }, { "docid": "9f9015c7c9ef911f3ffcb48a9c28f934", "score": "0.4793293", "text": "def cross_reference(self, model: BDF) -> None:\n msg = ', which is required by SLOAD=%s' % (self.sid)\n self.nodes_ref = []\n for nid in self.nodes:\n self.nodes_ref.append(model.Node(nid, msg=msg))\n #self.nodes_ref = model.EmptyNodes(self.nodes, msg=msg)", "title": "" }, { "docid": "a65b3ec60eb0ba214fd88190e198103e", "score": "0.47685033", "text": "def __load_by_id__(self, uid):\n\n try:\n self.__load_entry__(self.__create_object__(pwd.getpwuid(uid)))\n except Exception as e:\n raise DatabaseError('Error loading user by UID {}: {}'.format(uid, e))", "title": "" }, { "docid": "3ef95e8e395d9495679c02218811f702", "score": "0.4757025", "text": "def referenced_elements(self, referenced_elements):\n\n self._referenced_elements = referenced_elements", "title": "" }, { "docid": "11a5cd7781c3c2ccd1e228f415d8678b", "score": "0.47536066", "text": "def put(self, key: int, value: int) -> None:\n k = key % 1000 #Get the hash of key.\n for i, x in enumerate(self.lists[k]): #Traverse the corresponding list.\n if x[0] == key: #If key exists, override its value and return.\n self.lists[k][i] = (key, value)\n return\n self.lists[k].append((key, value)) #If key does not exist, append the key value pair to list.", "title": "" }, { "docid": "3d41da7ec8dcd616430fbee0f685e877", "score": "0.47412315", "text": "def __init__(self):\n self._users = []\n self._key = 'bdc_collection_builder:users'\n self._load_from_disk()", "title": "" }, { "docid": "f574730018d96c63206fbaf8bb634556", "score": "0.47377267", "text": "def set_many(self, uid=\"\", key_value_dict=None, exp=None):\n uid = (self.uid_prefix+uid).format(exp_uid=(self.exp_uid if exp == None else exp))\n return self.timed(self.db.set_many)(self.collection, uid, key_value_dict)", "title": "" }, { "docid": "f65494158176bbc726af77b14d1df9e3", "score": "0.4736317", "text": "def set_use(self,v):\n _ldns.ldns_key_list_set_use(self,v)\n #parameters: ldns_key_list *,bool,\n #retvals: ", "title": "" }, { "docid": "a2985c48f176fae357b59bd85496dc73", "score": "0.47063372", "text": "def Load(self, *args):\n return _snap.TIntSet_Load(self, *args)", "title": "" }, { "docid": "9b886efbbef66618ae3da36fb40b21f6", "score": "0.46959683", "text": "def setProcessing(data_id):\n list_key = ndb.Key(cached_list_model.CachedList, data_id)\n cached_list = list_key.get()\n\n if not cached_list:\n raise ValueError('A cached list with data id %s does not exist' % data_id)\n\n cached_list.is_processing = True\n cached_list.put()", "title": "" }, { "docid": "681a06ca9b01daaf8a4dd09169de8563", "score": "0.4694152", "text": "def _change_objs_to_IDs(self):\n if self.location:\n self.location = self.location.id\n if self.contents:\n self.contents = [obj.id for obj in self.contents]", "title": "" }, { "docid": "c1b9eda2c065953b8e5a854733a5c583", "score": "0.4685404", "text": "def load(self):\n \n for item in filter(self.is_available, self.collection):\n self.insert(item)", "title": "" }, { "docid": "b65cd3b7a4f7d96ff60e9d17f0d36b3d", "score": "0.46822602", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n 
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "b65cd3b7a4f7d96ff60e9d17f0d36b3d", "score": "0.46822602", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "2d1e6a639d5fa18587619620f7cfc906", "score": "0.46791795", "text": "def add_known_list(self, elements: List[bytes]) -> None:\n\n for el in elements:\n self.add_known_preimage(b\"\\x00\" + el)\n\n mt = MerkleTree(element_hash(el) for el in elements)\n\n self.known_trees[mt.root] = mt", "title": "" }, { "docid": "4ef09f7cb8d593ac192ee6bfe1b6c172", "score": "0.46691102", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, 
namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "225f516413914385a628e4de0f2bf549", "score": "0.466366", "text": "def setUsers(self):\n for element in self.elements.values():\n element.setUsers(self.elements)", "title": "" }, { "docid": "b42e71f7118e87df7677d06728da2ce8", "score": "0.4656552", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/telemetry', defining_module='openconfig-telemetry', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/telemetry', defining_module='openconfig-telemetry', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "2692bbb62ca124a5a959b29aa6b0f0a9", "score": "0.46537644", "text": "def load(self, item=None):\n # do load processing there\n # 3 = 1 + 1\n # marking as loaded\n # 0 is valid\n if item is not None:\n self.items.append(item)\n self.loaded = True", "title": "" }, { "docid": "a0b54a49f9400d52b79df4124f813450", "score": "0.46535492", "text": "def mark_as_reference_data(self):\n self.reference_data = list(range(len(self.data)))\n debug(\"Data pool marked reference:\")\n for x in self.data:\n info(str(x))", "title": "" }, { "docid": "f12dc32488a818018958ce72c2bd3150", "score": "0.46515447", "text": "def __populate_lists(self):\n self.__populate_load_list()\n self.__populate_reg_list()\n self.__populate_addon_list()", "title": "" }, { "docid": "8560b6bcf41855aad9d2db9c53b1dd37", "score": "0.4639316", "text": "def _safe_cross_reference_loads(self) -> None:\n xref_errors = defaultdict(list)\n for unused_lid, load_combinations in self.load_combinations.items():\n for load_combination in load_combinations:\n try:\n load_combination.safe_cross_reference(self, xref_errors)\n except TypeError: # pragma: no cover\n print(load_combination)\n raise\n self._show_safe_xref_errors('loads', xref_errors)\n\n for unused_lid, loads in self.loads.items():\n for load in loads:\n load.safe_cross_reference(self, xref_errors)\n self._show_safe_xref_errors('loads', xref_errors)\n\n for unused_lid, sid in self.dloads.items():\n for load in sid:\n load.safe_cross_reference(self, xref_errors)\n\n for unused_lid, sid in self.dload_entries.items():\n for load in sid:\n load.safe_cross_reference(self, xref_errors)\n\n for unused_key, darea in self.dareas.items():\n darea.safe_cross_reference(self, xref_errors)\n\n for unused_key, dphase in self.dphases.items():\n dphase.safe_cross_reference(self, xref_errors)\n\n for unused_key, tic in self.tics.items():\n tic.safe_cross_reference(self, 
xref_errors)", "title": "" }, { "docid": "3d499df8dec4c9bd01e6628bd7f3f82c", "score": "0.46392226", "text": "def bundle_keys(self, bundle_keys):\n\n\n self._bundle_keys = bundle_keys", "title": "" }, { "docid": "c1f0f9c090c2d82c2645c5c4a73ced52", "score": "0.4632435", "text": "def set(self, *args):\n return _coin.SoChildList_set(self, *args)", "title": "" }, { "docid": "6baaa6640cc703096ec7a4f4ad80d2e2", "score": "0.46261927", "text": "def __set__(self, obj, val):\n if isinstance(val, (list, Bundle)):\n self.clear()\n self.add(Bundle)\n else:\n raise TypeError(\"Can only set with a list or Bundle\")", "title": "" }, { "docid": "5d7414f9dc8f9ef786297192aa61aa66", "score": "0.46256503", "text": "def SetData(self):\n\n # populate the data in the attachment list\n links = linkmgt.Get().links\n self._list.DeleteAllItems()\n for l in links:\n index = self._list.InsertStringItem(sys.maxint, l._type)\n self._list.SetStringItem(index, 1, l._name)\n self._list.SetItemData(index, l._id)\n if l._ignored:\n self._list.SetItemImage(index, 1, 1)\n else:\n self._list.SetItemImage(index, 0, 0)\n \n self.__SyncEditState()", "title": "" }, { "docid": "89f5219ef4b4ab42b08c1c1649dd74f8", "score": "0.4619927", "text": "def initList(self):\n self.items.setlist([self.collection[name].nicename for name in self.enabler.actives])\n self.local_dict = dict([(self.collection[name].nicename, self.collection[name])\n for name in self.collection])", "title": "" }, { "docid": "e0922cf473f18b55823740646fd7d1bf", "score": "0.4619784", "text": "def store(self, objs, keys, complete_sets=[]):\r\n pass", "title": "" }, { "docid": "c7a0822a81c22851a3660ae1b9f006bc", "score": "0.46192393", "text": "def preload_all_users(self):\r\n\r\n users = keystone.user_list(self._request)\r\n # Cache all users on right indexes, this is more effective than to\r\n # obtain large number of users one by one by keystone.user_get\r\n for u in users:\r\n self._users[u.id] = u", "title": "" }, { "docid": "c77c1befe7cbe706281e001acad2be85", "score": "0.4609529", "text": "def _reset(self, load):\n values = reduce(iadd, self._lists, [])\n self._clear()\n self._load = load\n self._update(values)", "title": "" }, { "docid": "2f6497c598e1123031115ad027e5b2d6", "score": "0.4609397", "text": "def init_on_load(self):\n self.warning_store = list()", "title": "" }, { "docid": "ba84fba7ee7e44ea56a9ba40eef553e4", "score": "0.46054962", "text": "def loadPrefObjects():\n pass", "title": "" }, { "docid": "54ce642512f1d6a63d84f601a5d18057", "score": "0.46040687", "text": "def load_track_list(self, track_list_filename, path_to_profiles='./.profiles'):\n \n if self.profiles_path_exists(path_to_profiles):\n if os.path.exists(os.path.join(path_to_profiles,track_list_filename)):\n f = open(os.path.join(path_to_profiles,track_list_filename),'rb')\n self.track_list = pickle.load(f)\n f.close()", "title": "" }, { "docid": "e06de141d33f81452198976b67bc39f4", "score": "0.45834938", "text": "def load_node_feat(uid):\n with open(os.path.join(SNAP_DIR, uid + '.feat'), 'rb') as fp:\n nodes = [__node_process(feat) for feat in fp.readlines()]\n return nodes", "title": "" }, { "docid": "2f22b430c10db24ee58b427c16b34f9a", "score": "0.45731816", "text": "def uid(self, uid):\n\n self._uid = uid", "title": "" }, { "docid": "2f22b430c10db24ee58b427c16b34f9a", "score": "0.45731816", "text": "def uid(self, uid):\n\n self._uid = uid", "title": "" }, { "docid": "1823e8be07885fb2dc77cbb4ab62c38d", "score": "0.45699072", "text": "def _load_pst(self):\n psts = self.persistant_storage.load()\n if 
psts:\n for name, value in psts:\n setattr(self, name, value)", "title": "" }, { "docid": "90e1523b36f385ef6c42d5ed339a886f", "score": "0.45621628", "text": "def set_attenuator_load(load):\n attenuator.write('L {0}'.format(load))", "title": "" }, { "docid": "bfea7d92aac48571d7b25d17a2944bf8", "score": "0.45563465", "text": "def _update_load_data(self):\n if not self._data_loaded:\n self.data # noqa: B018", "title": "" }, { "docid": "8efbbedd85c2258d513c1e783bcedb2c", "score": "0.45559835", "text": "def set_test_doc_ids_list(self, value):\n assert(isinstance(value, list) or value is None)\n self.__train_doc_ids = value", "title": "" }, { "docid": "d881339de61b1dfe9780f1adcde05e01", "score": "0.455329", "text": "def populateList(self):\n self.send(\"USR ,\")", "title": "" }, { "docid": "92a149b188216e9a4768800d008a8209", "score": "0.4548173", "text": "def prepare_synchronize( self, value ):\n\t\tself.real_objects_discovery = value", "title": "" }, { "docid": "72ca8d3db82d0844ea56adaafefad267", "score": "0.45466968", "text": "def update_node_list(self, node_list: List) -> None:\n self._node_list = node_list\n self._set_node_dict()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, 
'_set'):\n self._set()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', 
defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "7f20f3cf7dad2fdc675932ec427ebd4a", "score": "0.45466962", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, 
extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "e74cbee713591eb939fe300857002da4", "score": "0.4541474", "text": "def _list(self, value):\n self._list_prop = jsonutils.loads(value)\n self._fetched_time = timeutils.utcnow()\n self._signing_directory.write_file(self._FILE_NAME, value)", "title": "" }, { "docid": "5f813bd9b980d69a2c25c42c6eef4438", "score": "0.4540038", "text": "def __setNodes(self, value):\n\n self.__nodes = value", "title": "" }, { "docid": "23094060dbdd3793d37bb8608d68c181", "score": "0.45398328", "text": "def update(self, itens):\n\t\tif self._is_unique:\n\t\t\tself._list.update(itens)\n\t\telse:\n\t\t\tself._list.extend(itens)", "title": "" }, { "docid": "b90ede481af477d0e36c285522b88d12", "score": "0.45367652", "text": "def _populateExisting(self):", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def 
_set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n 
self._set()", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "5f535ba701364eeb689b1b0e368c5ab5", "score": "0.45324385", "text": "def _set_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"id must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/qos', 
defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__id = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "45a8563e524f72ed39ecb2bf2f2d0199", "score": "0.4529859", "text": "def load_saved_list(self):\r\n saved_list = self.config.dict_config[\"data\"][\"saved_list\"].strip()\r\n list_from_config = []\r\n if saved_list:\r\n list_from_config = saved_list.split(\"\\n\")\r\n #\r\n self.ordered_set_selected = ordered_set.OrderedSet(list_from_config)\r\n #\r\n self.sync_lists()", "title": "" }, { "docid": "aa5c192d063636ecfb8a0c08079b16cf", "score": "0.4528868", "text": "def _set_members_listed(self):\n self._set_member_count()\n\n if self.member_count:\n # wait until the elements are loaded\n selenium_utils.get_when_clickable(\n self._driver, locator.ObjectWidget.MEMBERS_TITLE_LIST)\n\n self.members_listed = self._driver.find_elements(\n *locator.ObjectWidget.MEMBERS_TITLE_LIST)\n else:\n self.members_listed = []", "title": "" }, { "docid": "5f544fd8f8ca7054edda22966c9421c8", "score": "0.4526888", "text": "def assign(self, v):\n self.children = [v]", "title": "" }, { "docid": "5f544fd8f8ca7054edda22966c9421c8", "score": "0.4526888", "text": "def assign(self, v):\n self.children = [v]", "title": "" }, { "docid": "5f544fd8f8ca7054edda22966c9421c8", "score": "0.4526888", "text": "def assign(self, v):\n self.children = [v]", "title": "" }, { "docid": "7a33267047ccf499ec3764e1ce1d55c8", "score": "0.45260206", "text": "def reference_files(self, value: List[str]) -> None:\n if not (isinstance(value, List)):\n raise TypeError(\"reference_files must be a List[str]\")\n self.attr_setter(\"_reference_files\", value)", "title": "" }, { "docid": "74d9c4086fb461c29dfee7e89b0f9f1c", "score": "0.45240074", "text": "def post_load(self):\n pass", "title": "" }, { "docid": "ea7220a67d461f550a269f13686f8aaf", "score": "0.45207995", "text": "def load(self):\n\n self.load_groups()\n self.load_users()", "title": "" }, { "docid": "47e5711f3dd6f7636e68be3bf06c66f8", "score": "0.45087463", "text": "def _sync_mylist(videoid, task_handler, enabled):\n operation = {\n 'export_item': 'add',\n 'remove_item': 'remove'}.get(task_handler.__name__)\n if enabled and operation and g.ADDON.getSettingBool('mylist_library_sync'):\n common.debug('Syncing my list due to change of Kodi library')\n api.update_my_list(videoid, operation)", "title": "" } ]
22c51d61993faeb008bf5a86d6f28863
Initialize new Tileset The heavy lifting is done by
[ { "docid": "23f5432f6952651e35d889a1b63d1c6f", "score": "0.0", "text": "def __init__(self, path, output='.', param_file=None, skip_file=None,\n label=None, **kwargs):\n self.basepath = os.path.dirname(__file__)\n try:\n os.makedirs(output)\n except OSError:\n pass\n self.output = output\n self.normal = True\n self.verbose = False\n self.fill = (0,0,0)\n self.text = tuple([(255 - x) for x in self.fill])\n self.grid = {}\n self.set_filename(path, label=label)\n # Does mosaic already exist?\n try:\n fp = os.path.join(self.output, self.filename + '.tif')\n open(fp, 'r')\n print('{} already exists'.format(fp))\n except FileNotFoundError:\n self.populate_tiles(path, param_file, skip_file, label, **kwargs)", "title": "" } ]
[ { "docid": "a88e6185afc685196ac28a3ff0eae075", "score": "0.796752", "text": "def __init__(self):\n self.tileset = None\n self.dim = 0\n self.size = None # dim of tileset in pixels\n self.tsize = None # dim of tileset in tiles\n self.tiles = [] # the tiles", "title": "" }, { "docid": "2612c7ac7450abf1e302e394c412d8ec", "score": "0.7033765", "text": "def initialize_tiles(self) -> None:\n cnt = 0\n for x in range(8):\n for y in range(8):\n tile = Tile(None, x, y)\n if cnt % 2 == 0:\n tile.color = TILE_COLOR_LIGHT\n tile.fill(TILE_COLOR_LIGHT)\n else:\n tile.color = TILE_COLOR_DARK\n tile.fill(TILE_COLOR_DARK)\n self.tilemap[x][y] = tile\n cnt += 1\n cnt += 1", "title": "" }, { "docid": "8f1f31e7af974979aa9d6f667081e9db", "score": "0.6846944", "text": "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.tiles = {}\n self.t = 0", "title": "" }, { "docid": "0ec102f503197a3304835cc45d477b4c", "score": "0.6788723", "text": "def _parse_tileset(self, node):\n for i, image in load_tmx_tileset(self._path, node):\n self._tileset[i] = image\n\n for tileset_index, tile_object_type in load_tmx_tile_objects(node):\n self._tile_objects[tileset_index] = tile_object_type", "title": "" }, { "docid": "b26aa5ca191ed434cd167d417b28f61d", "score": "0.67487997", "text": "def __init__(self, tiles:dict):\n self.tiles = tiles\n self.x = 0\n self.y = 0", "title": "" }, { "docid": "388d9544006aaa4f8594eaa508db3cdc", "score": "0.6681122", "text": "def __init__(self, dimX, dimY):\n self.dimX = dimX\n self.dimY = dimY\n self.GST=SurroundingTiles.get()\n self.create()\n self.update_map_tiles()", "title": "" }, { "docid": "93f520a275833b0e1409794df492e1ac", "score": "0.66566855", "text": "def __init__(self, width, height):\n self.width = width\n self.height = height\n # TODO: Your code goes here\n self.tiles ={}\n for i in range(self.width):\n for j in range(self.height):\n self.tiles[(i,j)] = 0", "title": "" }, { "docid": "ba8fb695f7259b257f0bdb6917431c80", "score": "0.6608518", "text": "def _init_fake_tile(cls):\r\n\t\t# use fixed SelectableBuildingComponent here, to make sure subclasses also read the same variable\r\n\t\tif not hasattr(SelectableBuildingComponent, \"_fake_tile_obj\"):\r\n\t\t\t# create object to create instances from\r\n\t\t\tfake_tile_obj = horizons.globals.fife.engine.getModel().createObject('fake_tile_obj', 'ground')\r\n\t\t\tSelectableBuildingComponent._fake_tile_obj = fake_tile_obj\r\n\t\t\tfife.ObjectVisual.create(SelectableBuildingComponent._fake_tile_obj)\r\n\r\n\t\t\timg_path = 'content/gfx/fake_water.png'\r\n\t\t\timg = horizons.globals.fife.imagemanager.load(img_path)\r\n\t\t\tfor rotation in [45, 135, 225, 315]:\r\n\t\t\t\tSelectableBuildingComponent._fake_tile_obj.get2dGfxVisual().addStaticImage(rotation, img.getHandle())", "title": "" }, { "docid": "b8e8cd5f54b63016b39c1adc0568ddfb", "score": "0.659178", "text": "def __init__(self, filename):\n self.tile = self.load_tile(filename)", "title": "" }, { "docid": "de6121ea3c28ccb16ec6afc8b3e9924c", "score": "0.6577811", "text": "def __init__(self):\n self.tiles = [[Tile(), Tile(), Tile()],\n [Tile(), Tile(), Tile()],\n [Tile(), Tile(), Tile()]]\n # Group by rows, columns, diagonals\n self.groups = []\n for row in self.tiles:\n self.groups.append(row)\n for col_i in range(len(self.tiles[0])):\n self.groups.append([row[col_i] for row in self.tiles])\n diag = []\n for i in range(len(self.tiles)):\n diag.append(self.tiles[i][i])\n self.groups.append(diag)\n diag = []\n for i in range(len(self.tiles)):\n 
diag.append(self.tiles[i][len(self.tiles) - i - 1])\n self.groups.append(diag)", "title": "" }, { "docid": "ecf7c613941b05f752a460053e5a6d87", "score": "0.6559751", "text": "def __init__(\n self, tile=None, features=None, schema=None, driver=None\n ):\n self.tile = tile\n self.schema = schema\n self.driver = driver\n self.features = features", "title": "" }, { "docid": "1259735da6ce28c5689d101721e40b9a", "score": "0.6548306", "text": "def __init__(self, height, width):\n\n #This creates a 2d array of Tiles, this can also add objects to the map\n self.map = [[Tile(char = ' ',blocks_movement=True,blocks_sight=True) for y in range(0,height)] for x in range(0,width)]\n\n #initialize the piece dictionary to hold all pieces\n self.piece_dict = {}", "title": "" }, { "docid": "2cbc200f619611c5ed47585f8b07c49d", "score": "0.654049", "text": "def _init_fake_tile(self):\r\n\t\t# use fixed SelectableBuildingComponent here, to make sure subclasses also read the same variable\r\n\t\tif not hasattr(CombatManager, \"_fake_range_tile_obj\"):\r\n\t\t\t# create object to create instances from\r\n\t\t\tCombatManager._fake_range_tile_obj = horizons.globals.fife.engine.getModel().createObject('_fake_range_tile_obj', 'ground')\r\n\t\t\tfife.ObjectVisual.create(CombatManager._fake_range_tile_obj)\r\n\r\n\t\t\timg_path = 'content/gfx/fake_water.png'\r\n\t\t\timg = horizons.globals.fife.imagemanager.load(img_path)\r\n\t\t\tfor rotation in [45, 135, 225, 315]:\r\n\t\t\t\tCombatManager._fake_range_tile_obj.get2dGfxVisual().addStaticImage(rotation, img.getHandle())\r\n\t\tif not hasattr(self, '_selected_fake_tiles'):\r\n\t\t\tself._selected_fake_tiles = []\r\n\t\tif not hasattr(self, '_selected_tiles'):\r\n\t\t\tself._selected_tiles = []", "title": "" }, { "docid": "28c95ede2b4bdb0a149da39457f21d4f", "score": "0.64977485", "text": "def __createTilesets(self):# dict\n tilesets = {}\n\n for cfg in self.config[\"tilesets\"]:\n split = cfg[\"source\"].split(\"/\")\n name = split[-2]\n file = split[-1]\n path = PATH[\"tilesets\"] + \"\\\\\" + name + \"\\\\\" + file\n config = loadJSON(path)\n tilesets.update({name: Tileset(config)})\n\n return tilesets", "title": "" }, { "docid": "6136f74fed36deb764e5f8cfad48ffbb", "score": "0.6459776", "text": "def __init__(self, tile=None, features=None, schema=None, driver=None):\n self.tile = tile\n self.schema = schema\n self.driver = driver\n self.features = features", "title": "" }, { "docid": "48230356704facfa6a75095cd291f58b", "score": "0.64164615", "text": "def new(self):\n\t\t\n\t\tself.all_sprites = pg.sprite.LayeredUpdates()\n\t\tself.isoTiles = pg.sprite.LayeredUpdates()\n\t\tself.isoCubes = pg.sprite.LayeredUpdates()\n\n\t\n\t\tfor y in range(0, 2*HEIGHT, TILESIZE):\n\t\t\tfor x in range(0, WIDTH, TILESIZE):\n\t\t\t\tIsoTile(self, x, 0.5 * y)\n\t\t\t\tIsoTile(self, x + 0.5 * TILESIZE, 0.5 * y + 0.25 * TILESIZE)\n\n\t\t# for y in range(1, 10, 1):\n\t\t# \tfor x in range(10, 0, -1):\n\n\t\t# \t\t\tIsoCube(self, x * TILESIZE, 0.5 * y * TILESIZE)\n\t\t# \t\t\tIsoCube(self, x * TILESIZE + 0.5 * TILESIZE, 0.5 * y * TILESIZE + 0.25 * TILESIZE)\n\t\t\t\t\t\n\n\t\tself.run()", "title": "" }, { "docid": "125c0f8893b2e9d80b61784d367bac46", "score": "0.6394867", "text": "def set_initial_tiles(self):\n if (self.BOARD_SIZE >= MIN_BOARD_SIZE):\n self.create_tile(self.BOARD_SIZE//2, self.BOARD_SIZE//2 - 1)\n self.create_tile(self.BOARD_SIZE//2 - 1, self.BOARD_SIZE//2 - 1)\n self.create_tile(self.BOARD_SIZE//2 - 1, self.BOARD_SIZE//2)\n self.create_tile(self.BOARD_SIZE//2, 
self.BOARD_SIZE//2)", "title": "" }, { "docid": "f7b9c1de9663ad9a1a564bbf2e4bd539", "score": "0.63791543", "text": "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]\n\n self.get_zero_pos()\n self._target_tile = (0,0)", "title": "" }, { "docid": "707c81712c7adf4e91768514631d9e28", "score": "0.6320834", "text": "def __init__(self, tileType):\n\n self._tileType = tileType\n self._food = {PLAINS:3, OCEAN:1, HILLS:0,\n MOUNTAIN:0, FOREST:0}[tileType]\n self._production = {FOREST:3, MOUNTAIN:1, HILLS:2,\n PLAINS:0, OCEAN:0}[tileType]", "title": "" }, { "docid": "7437b5f340bcc1ce30e85777446a7d0d", "score": "0.62998134", "text": "def __init__(self):\n self.tiles = [[self.EMPTY] * 8 for i in range(8)]\n\n self.tiles[3][3], self.tiles[3][4] = self.WHITE, self.BLACK\n self.tiles[4][3], self.tiles[4][4] = self.BLACK, self.WHITE\n\n # cache legal moves in attempt to reduce function calls\n self._legal_moves = {self.BLACK: None, self.WHITE: None}\n\n self.piece_count = {self.BLACK: 2, self.WHITE: 2, self.EMPTY: 60}", "title": "" }, { "docid": "22c218acddbdb14a7c1b7068ed637247", "score": "0.6293839", "text": "def __init__(self, gridspec: List[List[Color]]):\n self._tiles = gridspec", "title": "" }, { "docid": "b41893f3fcb8bbfe65c35b154333259a", "score": "0.62823385", "text": "def __init__(self, size):\n assert size >= MINIMAL_GRID_SIZE\n self.size = size \n self.init_tile_index()\n\n # Initialize dictionary tile_value to contain codes for ten\n # tiles in Solitaire Tantrix in one 4x4 corner of grid\n self.tile_value = {}\n for idx in range(len(SOLITAIRE_CODES)):\n self.tile_value[self.tile_index[idx]] = SOLITAIRE_CODES[idx]", "title": "" }, { "docid": "a73d09c41bcec27573879950a8218bb3", "score": "0.6270395", "text": "def load_tile_set():\n global tile_dict, current_tile, tile_map, tile_map_prop\n i = 0\n j = tile_location\n pygame.draw.rect(screen, black, pygame.Rect(i, j - 5, width, (block_size*8)))\n tile_index = 1\n for infile in glob.glob('Tiles/*.png'):\n pic = pygame.image.load(infile)\n pic = pygame.transform.scale(pic, (block_size, block_size))\n if i + block_size > width:\n i = 0\n j += block_size\n screen.blit(pic, (i, j))\n index = str(i) + ':' + str(j)\n tile_map[pic] = tile_index\n tile_map_prop[tile_index] = infile\n tile_index += 1\n tile_dict[index] = pic\n i += block_size\n pygame.display.flip()", "title": "" }, { "docid": "ca29b1d072e2372ad07e0c0656f7ccd0", "score": "0.625259", "text": "def _init(self):\n\n # Initialize vars which will contains\n # the coordinates of the guardian and macgyver\n mac_gyver_pos = None\n guardian_pos = None\n\n for y, columns in enumerate(self.grid):\n for x, tile in enumerate(columns):\n\n if tile == \"#\":\n # Add a new wall.\n Wall(x, y, self.scale)\n elif tile == \" \":\n Floor(x, y, self.scale)\n # Add every empty tiles into the array `empty_tiles`.\n # This array will be helpful for the placement of items.\n self.empty_tiles[(x, y)] = \" \"\n else:\n # Add a new floor tile.\n Floor(x, y, self.scale)\n\n if tile == \"S\":\n # The tile where MacGyver will start\n mac_gyver_pos = (x, y)\n elif tile == \"F\":\n # The tile where the guardian will stand\n guardian_pos = (x, y)\n\n # At the end, we remove the tiles adjacent to 
macgyver\n # and the guardian from the list of empty tiles.\n self._remove_adjacent_coordinates(mac_gyver_pos)\n self._remove_adjacent_coordinates(guardian_pos)", "title": "" }, { "docid": "5bd3b5b1a9a9b433e91da34dade587b1", "score": "0.6226957", "text": "def __init__(self, rows: int, cols: int, centered: bool,\n tile_weights: Dict[TileType, int],\n tiles: List[Tile]=None) -> None:\n self.grid : OctagonalGridSystem = OctagonalGridSystem(rows, cols,\n centered)\n self.rtg : RandomTileTypeGenerator = RandomTileTypeGenerator(\n tile_weights)\n\n indices : List[Tuple[int, int]] = self.grid.indices()\n\n if tiles:\n self.tile_system = TileSystem(tiles)\n else:\n ts : List[Tile] = [Tile(self.rtg.get_random_tile(),\n GridLocation(Row(j[0]), Column(j[1])), i)\n for i, j in enumerate(indices)]\n self.tile_system = TileSystem(ts)", "title": "" }, { "docid": "9b7f09a866fb60b744f8877778d4f59f", "score": "0.61959046", "text": "def __init__(self, width, height, num_mines):\n\n self.width = width\n self.height = height\n self.game_over = False # if the game is finished or not\n self.cursor = [0, 0] # cursor's initial position / [col, row]\n self.num_flags = num_mines # the number of flags left to display\n\n if num_mines > self.width * self.height:\n print('Number of mines cannot exceed number of tiles!')\n exit()\n\n # Generate mines' positions randomly and apply them to Tile objects\n self.tiles = list()\n mine_indices = random.sample(range(self.width * self.height), k=num_mines)\n for i in range(self.height): # 1st index indicates which row\n tile_row = list()\n for j in range(self.width): # 2nd index indicates which column\n tile_obj = Tile()\n if i * self.width + j in mine_indices:\n tile_obj.mine = True\n tile_obj.value = -1\n tile_row.append(tile_obj)\n self.tiles.append(tile_row)\n\n initialize_value(self.tiles) # to initialize each value of tiles", "title": "" }, { "docid": "8d99b9adc6ae7786b554aa97554dfd2d", "score": "0.61594784", "text": "def __init__(self, size):\n assert size >= MINIMAL_GRID_SIZE\n self._tiling_size = size\n # Initialize dictionary tile_value to contain codes for ten\n # tiles in Solitaire Tantrix in one 4x4 corner of grid\n self._tile_value = {}\n value_index = 0\n for i_index in range(MINIMAL_GRID_SIZE):\n for j_index in range(MINIMAL_GRID_SIZE):\n for k_index in range(size - 3, size + 1):\n if i_index + j_index + k_index == 6:\n self._tile_value[(i_index, j_index, k_index)] = SOLITAIRE_CODES[value_index]\n value_index += 1", "title": "" }, { "docid": "f83a793dff8e36fead0ce83377004adf", "score": "0.614208", "text": "def __init__(self):\n self.SC_LineTuples = SC_Linetuple()\n self.SC_RecTuples = SC_Rectuple()\n self.SC_2_mono = SC_2_Monotonicity()\n self.SC_2_big = SC_2_Biggest_tile()\n\n\n self.HW_linetuple = HW_lineTuple()\n self.HW_rectuple = HW_recTangTuple()\n self.HW_axetuple = HW_axeTuple()\n self.HW_max = HW_maxTileCount()\n self.HW_mergeable = HW_mergeableTileCount()\n self.HW_layer = HW_layerTileCount()\n self.HW_distinct = HW_distinctTileCount()\n self.HW_empty = HW_emptyTileCount()\n\n # self.featureSet = [self.SC_LineTuples, self.SC_RecTuples] # basic, comb 1\n # self.featureSet = [self.HW_max, self.HW_mergeable, self.HW_layer, self.HW_distinct, self.HW_empty] #simple, comb 2\n self.featureSet = [self.SC_2_big, self.SC_2_mono, self.HW_mergeable, self.HW_layer, self.HW_distinct, self.HW_empty] #our proposed, comb3", "title": "" }, { "docid": "5b68e93e31f55cc56cc9ea4751dc2c83", "score": "0.6114395", "text": "def __init__(self, width, height):\n # width, height, 
and make a list for tiles\n self.width = width\n self.height = height\n self.tiles = [] \n \n # make self.tiles a nested list\n for y in range(0, self.height):\n tiles_row = []\n for x in range(0, self.width):\n tiles_row.append(\"house\")\n self.tiles.append(tiles_row)", "title": "" }, { "docid": "ce12119c8bb520d20cd97e2961e49bc1", "score": "0.60968465", "text": "def init_tiles(room, layout):\n for i in range(room.length):\n for j in range(room.width):\n tile_type = layout[i][j]\n if tile_type == 0:\n new_tile = Tile(i, j, True, False, False, False)\n new_tile.in_room = True\n room.tiles[i][j] = new_tile\n elif tile_type == 1:\n new_tile = Tile(i, j, False, False, False, False)\n new_tile.in_room = True\n room.tiles[i][j] = new_tile\n else:\n new_tile = Tile(i, j, False, True, False, False)\n new_tile.in_room = True\n room.tiles[i][j] = new_tile", "title": "" }, { "docid": "a734f9ae59e43579029e91a5517b589c", "score": "0.60964465", "text": "def load(self):\n etree = ElementTree.parse(self.filename).getroot()\n self.set_properties(etree)\n\n # initialize the gid mapping\n self.imagemap[(0,0)] = 0\n\n for node in etree.findall('layer'):\n self.addTileLayer(TiledLayer(self, node))\n\n for node in etree.findall('objectgroup'):\n self.objectgroups.append(TiledObjectGroup(self, node))\n\n for node in etree.findall('tileset'):\n self.tilesets.append(TiledTileset(self, node))\n\n # \"tile objects\", objects with a GID, have need to have their\n # attributes set after the tileset is loaded\n for o in self.getObjects():\n p = self.getTilePropertiesByGID(o.gid)\n if p:\n o.name = \"TileObject\"\n o.__dict__.update(p)", "title": "" }, { "docid": "b527d13461130cb751dcb246774e17a2", "score": "0.6032166", "text": "def __init__(self):\n\n self._allStones = StoneCollection()", "title": "" }, { "docid": "38f28f72f824eaa9336065f05ebb0143", "score": "0.6031742", "text": "def initialise_map(self, height, width):\n\n self.terrain = np.zeros([1, height, width])\n self.magics = np.zeros([1, 12, height, width])", "title": "" }, { "docid": "a6ce518dba6f3fe0000b72f771ae09ea", "score": "0.6024666", "text": "def initialize(self):\r\n self.reset()\r\n self.get_temperature()\r\n self.get_setpoint()\r\n self.get_heater_range()", "title": "" }, { "docid": "8e978c621a0e230627c9d6ae5105d0fb", "score": "0.60079765", "text": "def __init__(self,\n tiles_list,\n *args,\n **kwargs):\n super(RasterTilesListViz, self).__init__(None, *args, **kwargs)\n\n self.template = 'raster_list'\n for tiles in tiles_list:\n if \"tiles_size\" not in tiles:\n tiles[\"tiles_size\"] = 256\n if \"tiles_bounds\" not in tiles:\n tiles[\"tiles_bounds\"] = \"undefined\"\n if \"tiles_maxzoom\" not in tiles:\n tiles[\"tiles_maxzoom\"] = 22\n if \"tiles_minzoom\" not in tiles:\n tiles[\"tiles_minzoom\"] = 0\n self.tiles_list = tiles_list", "title": "" }, { "docid": "0d41db20d34f5a299ce0311c4c3aebbf", "score": "0.59969175", "text": "def _set_tiles(self, tiles):\n self._tiles = tiles[:]\n \n # The image now needs to be redrawn\n self._render_base_image()", "title": "" }, { "docid": "db062e08722046078fd1ecae4b3aa318", "score": "0.59963715", "text": "def init(board):\n board.explored_nodes: set[Node] = set()\n board.inrange_nodes: set[Node] = set()\n\n board.grid = [\n [HeuristicNode(board[y][x]) for x in board.width] \n for y in board.height \n ]\n\n start_nodes: set[Node] = set([\n node\n for row in board.grid\n for node in row\n if node.state == NodeState.START\n ])\n\n for node in start_nodes:\n board.inrange_nodes.append(node)", "title": "" }, { "docid": 
"636764ed5e85b1b795972ef18b065d99", "score": "0.5994074", "text": "def new_tile(self):\n # replace with your code\n pass", "title": "" }, { "docid": "8066240996478b1d7b2061e611f304b4", "score": "0.59921074", "text": "def __init__(self):\n\t\tself.data_train = PrepareData().create_training_and_test_data_sets()[0]\n\t\tself.data_test = PrepareData().create_training_and_test_data_sets()[1]\n\t\tself.label_train = PrepareData().create_training_and_test_data_sets()[2]\n\t\tself.label_test = PrepareData().create_training_and_test_data_sets()[3]", "title": "" }, { "docid": "ce0de221c0a1a03ee7d6ca759108b324", "score": "0.59898406", "text": "def _post_init(self) -> None:\n self.tile_name = self._get_tile_name()\n self.needs_extraction = False\n\n # Post init done by the super class\n super()._post_init()", "title": "" }, { "docid": "5e4eb5773ca1698b965c70d27592be04", "score": "0.5985773", "text": "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "title": "" }, { "docid": "5e4eb5773ca1698b965c70d27592be04", "score": "0.5985773", "text": "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "title": "" }, { "docid": "5e4eb5773ca1698b965c70d27592be04", "score": "0.5985773", "text": "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "title": "" }, { "docid": "3accd3e8b1f4faf07e1c85174c66c066", "score": "0.5980938", "text": "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [ [col + puzzle_width * row for col in range(self._width)] for row in range(self._height)]\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "title": "" }, { "docid": "a96d72e41d35e9bca02636e1e4f26cfa", "score": "0.5978449", "text": "def __init__(self, maze_map: MazeMap, critter_tiles: Tuple[Surface, Rect, Rect, Rect, Rect], *groups):\n super().__init__(maze_map, *groups)\n\n self.tiles, *self.dir_rects = critter_tiles\n self.direction = 0\n self.speed = 2\n self.in_transit = False", "title": "" }, { "docid": "ba8a7070a6a244b7e7a457730c51188f", "score": "0.59724826", "text": "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid is not None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", 
"title": "" }, { "docid": "5d35ad6935e54dc00aa4834132b4a806", "score": "0.5970884", "text": "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "title": "" }, { "docid": "8a5428545efd306d1523a8392efd4daa", "score": "0.59692794", "text": "def __init__(self):\n self._map = self._MapType() # a map instance for storage\n self._n = 0", "title": "" }, { "docid": "52f691437e62d9221d75b09d3051f0be", "score": "0.5966916", "text": "def new_tile(self):\n # locate coordinates of all empty cells.\n location = []\n for idi in range(0,self.grid_width):\n for idj in range(0, self.grid_height):\n if self.cells[idi][idj] == 0:\n location.append([idi,idj])\n \n #assign appropriate numbers to an empty location\n value = random.choice([2,2,2,2,2,2,2,2,2,4])\n if location != []:\n select_key = random.choice(location)\n self.set_tile(select_key[0], select_key[1], value)", "title": "" }, { "docid": "7e20178c3a12bf7fb8779d26ce4d3780", "score": "0.59477556", "text": "def initialize(self):\n\n self.clusters = [Cluster(random() * self.limit_x, random() * self.limit_y) for i in range(self.k)]", "title": "" }, { "docid": "8b080fe8a838c56cd4de5de776147f81", "score": "0.58950484", "text": "def reset(self):\n self._grid = [[0 for _ in range(self._grid_width)]\n for _ in range(self._grid_height)]\n # Set 2 initial values to the grid \n self.new_tile()\n self.new_tile()", "title": "" }, { "docid": "a6f576e5d73a99eb3586b04d03308c8c", "score": "0.5887237", "text": "def __init__(self, args):\n self.args = args\n self.image_block_size = args.image_block_size\n self.xmap = xworld_map.XWorldMap(args.map_config, args.image_block_size)\n self.image = []\n self.inner_state = []\n self.onehot_state = []\n self.origin_image = []\n self.origin_inner_state = []\n self.origin_onehot_state = []\n self.plain_ground_image = []\n self.init_plain_ground_image()", "title": "" }, { "docid": "5449367933fbf6bace4f22dc801b506c", "score": "0.5885774", "text": "def setUp(self):\n # generate lattice\n self.lattice = lattice.Lattice()\n self.lattice.addAtom(\"Au\", [0,0,0], 0.0)\n self.lattice.addAtom(\"Au\", [1,0,0], 0.0)\n self.lattice.addAtom(\"Au\", [0,1,0], 0.0)\n self.lattice.addAtom(\"Au\", [0,0,1], 0.0)\n self.lattice.addAtom(\"Au\", [1,1,0], 0.0)\n self.lattice.addAtom(\"Au\", [0,1,1], 0.0)\n self.lattice.addAtom(\"Au\", [1,1,1], 0.0)\n self.lattice.addAtom(\"Au\", [2,0,0], 0.0)\n self.lattice.addAtom(\"Au\", [0,2,0], 0.0)\n self.lattice.addAtom(\"Au\", [0,0,2], 0.0)\n \n # filter\n self.filter = atomIdFilter.AtomIdFilter(\"Atom ID\")", "title": "" }, { "docid": "c83ad5c017137c24bb9d8f7bc19583d9", "score": "0.5882023", "text": "def __init__(self, size, visualizer: Visualizer):\n\n self.nx = size\n self.ny = size\n self.ix = random.randint(0, size-1)\n self.iy = random.randint(0, size-1)\n self.maze_map = [[Cell(x, y) for y in range(size)] for x in range(size)]\n self.exit_x = random.choice([0, size-1])\n self.exit_y = random.choice([0, size-1])\n self.visualizer = visualizer", "title": "" }, { "docid": "7042f366e4c321ad957f89acc57adf34", "score": "0.58779323", "text": "def initialise(self):", "title": "" }, { "docid": "8d199790ee49f9063d78c556f12f2a19", "score": "0.5871314", "text": "def 
init_maze(self):\n self.maze = []\n for i in range(self.maze_size[0]):\n self.maze.append([TILES[\"UNKNOWN\"]] * self.maze_size[1])\n self.maze[self.start_position[0]][self.start_position[1]] = TILES[\"START\"]\n self.maze[self.gold_position[0]][self.gold_position[1]] = TILES[\"GOLD\"]", "title": "" }, { "docid": "7298108ba96454edb77d200a8b94ebc5", "score": "0.58709615", "text": "def fill_tiles(self, tiles):\n\t\twidth, height = self.tile_dimensions()\n\t\tcoords = self.coordinates()\n\t\tfor y in range(coords[1], coords[1] + height):\n\t\t\tfor x in xrange(coords[0], coords[0] + width):\n\t\t\t\ttiles[y][x].block = self #TEMP: should probably generate doorblock here", "title": "" }, { "docid": "c4f8af473d14ba71f1ad8fdbd0e340c6", "score": "0.58638626", "text": "def __init__(self, low, high, tiling_specs, action_size, tiling_grids=None, tilings=None):\n if tiling_specs is None:\n # auto random tiling_specs\n intervals = (high-low) / tilings\n tiling_specs = []\n for i in range(tiling_grids):\n tiling_specs.append((tuple([tilings for i in range(len(low))]),\n tuple([(np.random.rand()-0.5)*intr for intr in intervals ]) ))\n\n self.tiling_specs = tiling_specs\n self.tilings = create_tilings(low, high, tiling_specs)\n self.state_sizes = [tuple(len(splits) + 1 for splits in tiling_grid) for tiling_grid in self.tilings]\n self.action_size = action_size\n self.q_tables = [QTable(state_size, self.action_size) for state_size in self.state_sizes]\n print(\"TiledQTable(): no. of internal tables = \", len(self.q_tables))", "title": "" }, { "docid": "d0a8c8f17220a606cbc307f7f3991246", "score": "0.5853465", "text": "def Initialize(self):\n for piece in self.stack:\n piece.Initialize()", "title": "" }, { "docid": "4e3f31aae61cd8c8c69eb068203136b6", "score": "0.5852029", "text": "def new_tile(self):\n #Creates list of empty tiles\n empty_tiles = [(row,col) for row in xrange(self.get_grid_height())\n for col in xrange(self.get_grid_width()) \n if not self.get_tile(row,col)]\n\n new_tile = random.choice(empty_tiles)\n self.set_tile(new_tile[0],new_tile[1],random.choice(self._two_four))", "title": "" }, { "docid": "a6c3b0f92d8c6174cdbb99fc0f1a4c97", "score": "0.5834208", "text": "def __init__(self, starting_point, num_taxis):\n self._STARTING_POINT = starting_point\n self._NUM_TAXIS = num_taxis\n self._free_taxis = SortedDict()\n self._occupied_taxis = SortedDict()\n self.reset()", "title": "" }, { "docid": "00b6bd2dba3bd3297ece6b8cfd21081e", "score": "0.58319706", "text": "def reset(self):\n # create empty grid\n self._grid = [[0 for dummy_col in range(self.get_grid_width())]\n for dummy_row in range(self.get_grid_height())]\n \n # add two initial tiles\n self.new_tile()\n self.new_tile()", "title": "" }, { "docid": "d901026eefd6c952c82c75aad77afd0c", "score": "0.58270055", "text": "def __init__(self):\n self.TILE_DIR = '/ftp/tile'\n self.NEW_SHP_SCAN_DIR = '/ftp/new/animalia/species'\n self.TMP_SHP_SCAN_DIR = '/ftp/tmp/animalia/species'\n self.TILE_DIR = \"/ftp/tile/animalia/species\"\n self.ERR_DIR = \"/ftp/error/\"\n self.SRC_DIR = \"/ftp/tmp/\"\n self.DST_DIR = \"/ftp/archive/\"\n self.MAP_XML = \"/ftp/tile/mapfile.xml\"\n #self.GAE_URL = \"http://localhost:8080/\"\n self.GAE_URL = \"http://prototype.mol-lab.appspot.com/\" \n self.VALID_ID_SERVICE_URL = \"%sapi/validkey\" % self.GAE_URL\n self.LAYER_URL = \"%slayers\" % self.GAE_URL\n #self.TILE_URL = 'http://localhost:5002/api/tiles/animalia/species/%s/zoom/x/y.png'\n self.TILE_URL = 
'http://mol.colorado.edu/layers/api/tiles/animalia/species/%s/zoom/x/y.png'\n self.NEW_RASTER_JOB_TYPE = 'newraster'\n self.NEW_SHP_JOB_TYPE = 'newshp'\n self.BULKLOAD_TILES_JOB_TYPE = 'bulkload-tiles'\n self.Q_ITEM_FULL_PATH = 'fullpath'\n self.Q_ITEM_JOB_TYPE = 'jobtype'\n self.TILE_QUEUE_THREADS = 2\n self.TILE_MAX_ZOOM = 6\n self.TILE_JOB_LIMIT = 4 #number of tiling threads\n self.QUEUED_LAYERS = {}", "title": "" }, { "docid": "0aedc4ae4b79f7b0f4baf76dc36290a1", "score": "0.58203125", "text": "def _initialize_sprites(self, lab_map):\n\n height = len(lab_map)\n width = len(lab_map[0])\n\n for y_position in range(height):\n for x_position in range(width):\n cell = lab_map[y_position][x_position]\n normalized_x = x_position * SCALE\n normalized_y = y_position * SCALE\n\n if cell == 0:\n self.floors.add(Floor(normalized_x, normalized_y))\n elif cell == 1:\n self.walls.add(Wall(normalized_x, normalized_y))\n elif cell == 2:\n self.traps.add(Trap(normalized_x, normalized_y))\n elif cell == 3:\n self.cheeses.add(Cheese(normalized_x, normalized_y))\n elif cell == 4:\n self.rat = Rat(normalized_x, normalized_y)\n self.floors.add(Floor(normalized_x, normalized_y))\n\n self.all_sprites.add(\n self.floors,\n self.walls,\n self.traps,\n self.cheeses,\n self.rat\n )", "title": "" }, { "docid": "bc577853bdc964e05aecc8584f7febca", "score": "0.5814719", "text": "def initialize(self):\n global tree, eps, all_data, datasets\n if len(all_data) == 0:\n for dataset in datasets:\n Params.DATASET = dataset\n p = Params(1000)\n data = data_readin(p)\n eps = p.Eps\n tree = Grid_adaptiveM(data, 1, p)\n tree.buildIndex()\n bounds = np.array([[p.x_min, p.y_min], [p.x_max, p.y_max]])\n all_data[dataset] = (tree, bounds, p.NDATA)", "title": "" }, { "docid": "40196888e559aeaabcf239504af1e586", "score": "0.58143824", "text": "def __init__(self, set_id=None, name=None, parent=None):\r\n self.parent = parent\r\n self.id = id # Unique id for the set\r\n self.next_id = 0 # counter for unique feature ids\r\n self.features = {} # Holds features, keyed by ID\r\n self.name = name # String describing the set\r", "title": "" }, { "docid": "5d645f98222b96cc9bbfea2a6b4e974f", "score": "0.5812734", "text": "def __init__(self):\n # set up an empty array\n self.set = []", "title": "" }, { "docid": "6832c46afe529e2f20e2f0aeada8b84e", "score": "0.58124983", "text": "def __init__(self):\n super(HashTreeSet, self).__init__()", "title": "" }, { "docid": "35058eba059d0b50418cc878d05bb976", "score": "0.5796893", "text": "def initialize(self, layers=None):\n raise NotImplementedError()", "title": "" }, { "docid": "2ea12a11a1d5d5c810a82ca18a648eb0", "score": "0.5791868", "text": "def __init__(self, tiles: str = 'Wikipedia', tools: Optional[Iterable[str]] = None,\n active_tools: Optional[Iterable[str]] = None,\n cmap: str = 'CET_L18',\n raster_resolution: float = 40,\n plot_size: Tuple[int, int] = (760, 735),\n progress_callback: Optional[Callable[[str], None]] = None,\n update_callback: Optional[Callable[[str], None]] = None,\n progress_bar_callback: Optional[Callable[[int], None]] = None):\n self.tools = ['crosshair'] if tools is None else tools\n self.active_tools = ['wheel_zoom'] if active_tools is None else active_tools\n\n import colorcet\n self.cmap = getattr(colorcet, cmap)\n\n from geoviews import tile_sources as gvts\n self._base_tiles = getattr(gvts, tiles)\n\n self._time_idx = 0\n\n self._generated_data_layers = {}\n self.data_layer_order = []\n self.data_layers = [\n # TemporalPopulationEstimateLayer('Temporal Pop. 
Est'),\n # RoadsLayer('Road Traffic Population/Hour')\n FatalityRiskLayer('Fatality Risk'),\n # ResidentialLayer('Residential Layer')\n ]\n\n self.annotation_layers = []\n\n self.plot_size = plot_size\n self._progress_callback = progress_callback if progress_callback is not None else lambda *args: None\n self._update_callback = update_callback if update_callback is not None else lambda *args: None\n self._progress_bar_callback = progress_bar_callback if progress_bar_callback is not None else lambda *args: None\n\n self._x_range, self._y_range = [-1.45, -1.35], [50.85, 50.95]\n\n self.raster_resolution_m = raster_resolution\n\n self._epsg4326_to_epsg3857_proj = None\n self._epsg3857_to_epsg4326_proj = None\n self._preload_started = False\n self._preload_complete = False\n\n from bokeh.io import curdoc\n from bokeh.server.server import Server\n\n self._current_plot = curdoc()\n self._server_thread = None\n self.server = Server({'/': self.plot}, num_procs=1)\n self.server.io_loop.spawn_callback(self._preload_layers)\n self.url = 'http://localhost:{port}/{prefix}'.format(port=self.server.port, prefix=self.server.prefix) \\\n if self.server.address is None else self.server.address", "title": "" }, { "docid": "25c47a943c68b76c5a5970e6070a02a4", "score": "0.57891583", "text": "def setup(self):\n\n map_name = \"../tmx_maps/animation.tmx\"\n\n # Read in the tiled map\n my_map = arcade.tilemap.read_tmx(map_name)\n self.end_of_map = my_map.map_size.width * GRID_PIXEL_SIZE\n\n # --- Platforms ---\n self.wall_list = arcade.tilemap.process_layer(my_map, 'Blocking Sprites', TILE_SCALING)\n\n # --- Other stuff\n # Set the background color\n if my_map.background_color:\n arcade.set_background_color(my_map.background_color)", "title": "" }, { "docid": "7a36426467a5c48ab5ac4c8f2373f919", "score": "0.5784992", "text": "def setup(self):\n # Empty map\n self.map = [[9 for x in range(map_presets[self.capacity][\"Size\"][0])] for y in range(map_presets[self.capacity][\"Size\"][1])]\n \n n = 0\n for p in self.players:\n self.players[p].setup(self.characters[n],self.roles[n])\n\n # Move player to starting location\n self.players[p].move(map_presets[self.capacity][\"Players\"][n][0],map_presets[self.capacity][\"Players\"][n][1])\n self.map[map_presets[self.capacity][\"Players\"][n][1]][map_presets[self.capacity][\"Players\"][n][0]] = n\n\n n += 1", "title": "" }, { "docid": "c466d7395b2fe85571d4f56d4f69b5a9", "score": "0.57840264", "text": "def init(self):\n super().init()\n self.log.info(__name__ + ': ' + 'def ' + self.init.__name__ + '(): ' + self.init.__doc__)\n\n for index, fort in enumerate(self.forts):\n fort.x = self.board.cols - 1\n fort.y = 3 + index * 5\n fort.show = True\n zone_cells = list(product(range(self.board.cols // 2 + 1, self.board.cols - 1), range(self.board.rows)))\n self.init_coordinates(zone_cells)", "title": "" }, { "docid": "77eadee9288d38b89fd0f35571643ddd", "score": "0.57778394", "text": "def initialiseGrid(this):\n # Iterate over number of cells to initialise\n for i in range(int(this.size*this.size*this.popSize)):\n # Get random coordinates\n row = random.randint(0, this.size-1)\n column = random.randint(0, this.size-1)\n # Set cell to 1\n this.grid[row][column] = 1", "title": "" }, { "docid": "a8f3c39610ed735b58889ff4e707acf2", "score": "0.57761365", "text": "def initialize(self):\n self._state = random.choice(self._all_molecules)\n self._target_mol_fingerprint = self.get_fingerprint(\n Chem.MolFromSmiles(self._state))\n if self.record_path:\n self._path = [self._state]\n 
self._valid_actions = self.get_valid_actions(force_rebuild=True)\n self._counter = 0", "title": "" }, { "docid": "daeea210cb631908d38a54fabee72a59", "score": "0.5774777", "text": "def __init__(self):\n self.t_start = None\n self._load_hexs()\n self._load_value()", "title": "" }, { "docid": "4d584b51d5930302ae49c3776f0ed073", "score": "0.57707953", "text": "def init(self):\r\n\t\tfor (atlas,) in self.atlases:\r\n\t\t\t# print 'creating', atlas\r\n\t\t\t# cast explicit to str because the imagemanager is not able to handle unicode strings\r\n\t\t\timg = horizons.globals.fife.imagemanager.create(str(atlas))\r\n\t\t\tself.atlaslib.append(img)\r\n\t\tself.inited = True", "title": "" }, { "docid": "24756362805e6c9a63510cbb015d884a", "score": "0.5763901", "text": "def __init__(self,num,location, colour = None):\n Tile.__init__(self,num)\n if colour is None:\n self.colour = 250, 235, 30\n else:\n self.colour = colour\n self.new_location = location", "title": "" }, { "docid": "37ea051332b50b1246f669e5f8936318", "score": "0.57626146", "text": "def reset(self):\n self._grid = [[0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n self._empties = [(x_coord,y_coord) for y_coord in range(0,self._grid_width) \n for x_coord in range(0,self._grid_height)]\n self.new_tile()\n self.new_tile()", "title": "" }, { "docid": "f8863084813650b2efd4e15b9a160b3e", "score": "0.57584435", "text": "def setup_maps(self):\n self.maps = []", "title": "" }, { "docid": "8387aba032855cb2da504e4bb7c2b089", "score": "0.57512426", "text": "def __init(self, db, island_id, preview):\r\n\t\tp_x, p_y, width, height = db(\"SELECT MIN(x), MIN(y), (1 + MAX(x) - MIN(x)), (1 + MAX(y) - MIN(y)) FROM ground WHERE island_id = ?\", island_id - 1001)[0]\r\n\r\n\t\tself.ground_map = {}\r\n\t\tfor (x, y, ground_id, action_id, rotation) in db(\"SELECT x, y, ground_id, action_id, rotation FROM ground WHERE island_id = ?\", island_id - 1001): # Load grounds\r\n\t\t\tif not preview: # actual game, need actual tiles\r\n\t\t\t\tground = Entities.grounds[str('%d-%s' % (ground_id, action_id))](self.session, x, y)\r\n\t\t\t\tground.act(rotation)\r\n\t\t\telse:\r\n\t\t\t\tground = MapPreviewTile(x, y, ground_id)\r\n\t\t\t# These are important for pathfinding and building to check if the ground tile\r\n\t\t\t# is blocked in any way.\r\n\t\t\tself.ground_map[(ground.x, ground.y)] = ground\r\n\r\n\t\tself._init_cache()\r\n\r\n\t\t# Contains references to all resource deposits (but not mines)\r\n\t\t# on the island, regardless of the owner:\r\n\t\t# {building_id: {(x, y): building_instance, ...}, ...}\r\n\t\tself.deposits = defaultdict(dict)\r\n\r\n\t\tself.settlements = []\r\n\t\tself.wild_animals = []\r\n\t\tself.num_trees = 0\r\n\r\n\t\t# define the rectangle with the smallest area that contains every island tile its position\r\n\t\tmin_x = min(zip(*self.ground_map.keys())[0])\r\n\t\tmax_x = max(zip(*self.ground_map.keys())[0])\r\n\t\tmin_y = min(zip(*self.ground_map.keys())[1])\r\n\t\tmax_y = max(zip(*self.ground_map.keys())[1])\r\n\t\tself.position = Rect.init_from_borders(min_x, min_y, max_x, max_y)\r\n\r\n\t\tif not preview:\r\n\t\t\t# This isn't needed for map previews, but it is in actual games.\r\n\t\t\tself.path_nodes = IslandPathNodes(self)\r\n\r\n\t\t\t# Repopulate wild animals every 2 mins if they die out.\r\n\t\t\tScheduler().add_new_object(self.check_wild_animal_population, self,\r\n\t\t\t run_in=Scheduler().get_ticks(120), loops=-1)\r\n\r\n\t\t\"\"\"TUTORIAL:\r\n\t\tThe next step will be an overview of 
the component system, which you will need\r\n\t\tto understand in order to see how our actual game object (buildings, units) work.\r\n\t\tPlease proceed to horizons/component/componentholder.py.\r\n\t\t\"\"\"", "title": "" }, { "docid": "ec0e76d2962ba2da44b02b546ab9c23c", "score": "0.5748012", "text": "def init_tile(self, node: PVector, tile_value: str) -> None:\n self.tile_vals[node] = int(tile_value)", "title": "" }, { "docid": "3c8a0c610cd6596c77cf013bffe35681", "score": "0.57479733", "text": "def new(self):\n self.all_sprites = pg.sprite.Group()\n self.walls = pg.sprite.Group()\n self.mobs = pg.sprite.Group()\n self.bullets = pg.sprite.Group()\n for row, tiles in enumerate(self.map.map_data):\n for col, tile in enumerate(tiles):\n self.map_row = row\n self.map_col = col\n if tile == '1':\n Wall(self, col, row)\n if tile == '2':\n Wall2(self, col, row)\n if tile == 'M':\n Mob(self, col, row)\n if tile == 'H':\n Mob2(self, col, row)\n if tile == 'T':\n Mob3(self, col, row)\n if tile == 'P':\n self.player = Player(self, col, row)\n self.camera = Camera(self.map.width, self.map.height)\n self.paused = False", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.5745529", "text": "def initialize(self):", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.5745529", "text": "def initialize(self):", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.5745529", "text": "def initialize(self):", "title": "" }, { "docid": "00784226af824d4a21a7e9d1b501dde7", "score": "0.57403183", "text": "def __init__(self, grid):\n self.grid = copy.deepcopy(grid) # No aliasing!", "title": "" }, { "docid": "855f4759c5c494d5c5ea41a02e5e7b07", "score": "0.57334626", "text": "def __init__(self, level_map, cell_size):\n\n self.next_level = False\n self.cell_size = cell_size\n self.walls = pg.sprite.Group()\n self.floors = pg.sprite.Group()\n self.enemies = pg.sprite.Group()\n self.objects = pg.sprite.Group()\n self.all_sprites = pg.sprite.Group()\n self.level_map = level_map\n self.offset_x = 0\n self.offset_y = 0\n self._initialize_sprites(level_map)", "title": "" }, { "docid": "a079d47b23f82d6abd230239741242a9", "score": "0.5730332", "text": "def __init__(self):\n self.template = Template()\n self.add_hash_keys()\n self.add_throughput()\n self.add_tables()", "title": "" }, { "docid": "31ee274dfa3f91519335328be328bcbd", "score": "0.57279897", "text": "def __init__(self,\n tiles_url,\n tiles_size=256,\n tiles_bounds=None,\n tiles_minzoom=0,\n tiles_maxzoom=22,\n legend=False,\n *args,\n **kwargs):\n super(RasterTilesViz, self).__init__(None, *args, **kwargs)\n\n if isinstance(tiles_url, list):\n self.template = 'raster_list'\n else:\n self.template = 'raster'\n self.tiles_url = tiles_url\n self.tiles_size = tiles_size\n self.tiles_bounds = tiles_bounds\n self.tiles_minzoom = tiles_minzoom\n self.tiles_maxzoom = tiles_maxzoom", "title": "" }, { "docid": "5ca87b533f30e2e3fc548663f627cc5d", "score": "0.5726615", "text": "def init(cls):\n cls._num_strands, cls._max_fixtures, cls._max_pixels_per_fixture = cls._app.scene.get_matrix_extents()\n cls._max_pixels_per_strand = cls._max_fixtures * cls._max_pixels_per_fixture\n cls._buffer_length = cls._num_strands * cls._max_pixels_per_strand\n fh = cls._app.scene.fixture_hierarchy()\n\n for strand in fh:\n cls._strand_lengths[strand] = 0\n for fixture in fh[strand]:\n fixture_length = cls._app.scene.fixture(strand, fixture).pixels\n cls._strand_lengths[strand] += fixture_length\n cls._fixture_lengths[(strand, 
fixture)] = fixture_length\n\n for strand in fh:\n for fixture in fh[strand]:\n for offset in xrange(cls._app.scene.fixture(strand, fixture).pixels):\n cls.logical_to_index((strand, fixture, offset))", "title": "" }, { "docid": "2757779d9704c5183612624255e69df1", "score": "0.5725167", "text": "def reset(self):\n self._tfe_grid = [[0 for dummy_col in range(self.get_grid_width())]\n for dummy_row in range(self.get_grid_height())]\n # create a grid_height by grid_width grid with value 0\n\n for dummy_index in range(0, 2):\n self.new_tile()\n # add 2 tiles with non-zero value", "title": "" }, { "docid": "3c800124c8c7b88d57153c2c641756b2", "score": "0.5715105", "text": "def __init__(self, engine: Engine, x: int, y: int, entities_in_tile: Entities_List):\n super().__init__(engine)\n self.x = x\n self.y = y\n self.entities_in_tile = entities_in_tile\n self.entities_length = len(entities_in_tile)\n self.cursor = self.entities_length - 1", "title": "" }, { "docid": "cff399fdbb320e11bb91016500b6340f", "score": "0.57103866", "text": "def localInitialize(self):\n self.gridEntity.initialize()\n self.limit = self.gridEntity.len()", "title": "" }, { "docid": "f598aec6efb4b2b198da18cf67da1bdf", "score": "0.5707851", "text": "def __init__(self, initial_state):\n self.current_coords = (0, 0)\n self.map = GridGraph()\n self.player = Player()\n self.parser = None\n\n for aspect in initial_state:\n self._read(aspect, initial_state[aspect])", "title": "" }, { "docid": "d33e8ec376782c630e76ef37bfc0968e", "score": "0.5701337", "text": "def loadTiles(self):\n self.tile = pygame.image.load(\"./hextile.png\").convert()\n self.tile.set_colorkey((0x80, 0x00, 0x80), RLEACCEL) \n\n self.up_cursor = pygame.image.load(\"./hexcursor.png\").convert()\n self.up_cursor.set_colorkey((0x80, 0x00, 0x80), RLEACCEL) \n self.cursorPos = self.up_cursor.get_rect()\n self.cursor = self.up_cursor\n \n self.down_cursor = pygame.image.load(\"./hexcursor_down.png\").convert()\n self.down_cursor.set_colorkey((0x80, 0x00, 0x80), RLEACCEL) \n assert(self.down_cursor.get_rect()==self.up_cursor.get_rect())", "title": "" }, { "docid": "7dc7b738c50fa65e30e5c0d6ffbceace", "score": "0.5697659", "text": "def __init__(self, url, crs, layer):\n self.url = url\n self.crs = crs\n self.tms = owslib.tms.TileMapService(url+'/tms/1.0.0')\n self.layer = layer\n # Check if layer exists\n # layers = [k.split('/')[-2] for k in self.tms.contents.keys()]\n # if layer not in layers:\n # raise ValueError('\"{}\" not in WMTS. 
Choose from: {}'.format(layer, ', '.join(layers)))\n\n # Determine interpolation scheme based on layer\n if 'kaart' in self.layer:\n self.interpolation = 'catrom'\n else:\n self.interpolation = 'none'\n\n self._load_properties_by_scale()\n\n self.level = 1\n self.tiles = {}\n self.images = {}", "title": "" }, { "docid": "575e96b7ab51a1bb9de349bb17167cd8", "score": "0.56856656", "text": "def __init__(self):\n self.tile_array = []\n\n # Create all the tiles.\n for i in range(5):\n row = []\n for j in range(5 - abs(i - 2)):\n row.append(GraphTile())\n self.tile_array.append(row)\n\n # Set directions.\n for row in range(len(self.tile_array)):\n # This denotes the shift of the adjacent row's column index based on\n # direction and location within the board (assuming a 5 row board).\n ne_shift = 1 if row < 2 else 0\n se_shift = 0 if row < 2 else -1\n nw_shift = 0 if row < 3 else 1\n sw_shift = -1 if row < 3 else 0\n for col in range(len(self.tile_array[row])):\n def GetTileOrNone(row, col):\n if (row >= 0 and row < len(self.tile_array) and\n col >= 0 and col < len(self.tile_array[row])):\n return self.tile_array[row][col]\n else:\n return None\n\n curr = self.tile_array[row][col]\n curr.neighbor_n = GetTileOrNone(row, col + 1)\n curr.neighbor_ne = GetTileOrNone(row + 1, col + ne_shift)\n curr.neighbor_se = GetTileOrNone(row + 1, col + se_shift)\n curr.neighbor_s = GetTileOrNone(row, col - 1)\n curr.neighbor_nw = GetTileOrNone(row - 1, col + nw_shift)\n curr.neighbor_sw = GetTileOrNone(row - 1, col + sw_shift)", "title": "" }, { "docid": "418bc62a0fcf7a5bcaca745dd420c1a7", "score": "0.56827533", "text": "def initialize(self, dataset : Dataset):\n pass", "title": "" }, { "docid": "a22ec4a518d9fa704f727f87c428c401", "score": "0.56745636", "text": "def new(self):\r\n self.map = TiledMap(path.join(self.map_folder, 'frogger_map.tmx'))\r\n self.map_img = self.map.make_map()\r\n self.map_rect = self.map_img.get_rect()\r\n\r\n # sprite groups\r\n self.all_sprites = pygame.sprite.Group()\r\n self.homes = pygame.sprite.Group()\r\n self.cars = pygame.sprite.Group()\r\n self.platforms = pygame.sprite.Group()\r\n\r\n # place homes and players\r\n for x in HOME_LOCATIONS:\r\n x = x * TILESIZE\r\n Home(self, x)\r\n\r\n # spawn the cars, platforms, and player\r\n self.create_cars()\r\n self.create_platforms()\r\n self.player = Player(self)", "title": "" }, { "docid": "bca2454d9c5ddd458109037e7c89f828", "score": "0.56714344", "text": "def reset(self):\n self.grid = [[0 for row in range(self.width)]\n for column in range(self.height)]\n self.new_tile()\n self.new_tile()", "title": "" } ]
41b4b6d54777344fd608b93357ba30d7
Test listing directory with client method.
[ { "docid": "d7b854ac41c3b166c486145556ed7936", "score": "0.6901601", "text": "def test_client_list_dir_index():\n # Fetch list\n result = openedgar.clients.edgar.list_path(\"/Archives/edgar/daily-index/1994/\")\n\n # Compare lists\n expected = ['/Archives/edgar/daily-index/1994//QTR3/',\n '/Archives/edgar/daily-index/1994//QTR4/']\n assert_list_equal(result, expected)", "title": "" } ]
[ { "docid": "5f0f5133fb45190a7e0195fba7f7e1d2", "score": "0.7233427", "text": "def listdir(self, path):", "title": "" }, { "docid": "226f798688c7595c07b73d3ef7349b0f", "score": "0.7106514", "text": "def list_directory(self, uri: str) -> DirectoryListing:", "title": "" }, { "docid": "2f98f90a122ea857bb204b8803388d02", "score": "0.6962282", "text": "def listdir(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "97053ecaa386e564c41bfd4c4f13df07", "score": "0.6840026", "text": "def listdir(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "eb6d78724b357128d66bf884aafbfc68", "score": "0.6788653", "text": "def test_list_files_in_dir(requests_mock):\n\n api_raw_response = util_load_json('test_files/list_files_in_dir.json')\n requests_mock.post(BASE_URL + '/api/v2/session/login', json={'data': {'session': 'session-id'}})\n requests_mock.get(\n BASE_URL + '/plugin/products/threat-response/api/v1/conns/remote:host:123:/file/list/C%3A%5CDir%5C',\n json=api_raw_response)\n\n args = {'connection_id': 'remote:host:123:',\n 'path': 'C:\\\\Dir\\\\',\n 'limit': '2', 'offset': '0'}\n human_readable, outputs, _ = TaniumThreatResponseV2.list_files_in_dir(MOCK_CLIENT, args)\n assert 'Files in directory' in human_readable\n assert outputs.get('Tanium.File(val.name === obj.name && val.connectionId === obj.connectionId)', [{}])[0].get(\n 'name') == 'file1.exe'\n assert outputs.get('Tanium.File(val.name === obj.name && val.connectionId === obj.connectionId)', [{}])[0].get(\n 'connectionId') == 'remote:host:123:'", "title": "" }, { "docid": "28db4716d121812aa24df7134ca39a4c", "score": "0.6742762", "text": "def test_client_list_dir_filing():\n # Fetch list\n result = len(openedgar.clients.edgar.list_path(\"/Archives/edgar/data/1297937/000107878205000139/\"))\n\n # Compare length\n expected = 13\n assert_equal(result, expected)", "title": "" }, { "docid": "0c0cf3eb82b4e27f264f9b9e2a4f4eca", "score": "0.66829187", "text": "def test_list_files(self):\n pass", "title": "" }, { "docid": "6a20593667b378e201852b1fe86307de", "score": "0.6682825", "text": "def test_get_folders_list(self):\r\n response = self.client.get(\r\n path=reverse('api_knowledge_folders'), **self.authentication_headers)\r\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "18f73590bdff3fac49348df01faf2261", "score": "0.6637036", "text": "def test_get_folders_list(self):\r\n response = self.client.get(\r\n path=reverse('api_documents_folders'), **self.authentication_headers)\r\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "50006c797294472c95f3d26ceffe256e", "score": "0.6624744", "text": "def test_get_dir(self):\n # delete /lev1/*\n #print \"test_get_dir\"\n app.delete('/fs/dropbox/%s/lev1/' % userid)\n # post /lev1/lev2\n resp = app.post('/fs/dropbox/%s/lev1/lev2' % userid, params=localfile.read() ).json\n self.assertEquals(resp[\"error\"], 0 )\n # Contents of /lev1/ should be the \"/lev1/lev2\" (always receives absolute paths)\n resp = app.get('/fs/dropbox/%s/lev1' % userid, params=localfile.read() ).json\n self.assertEquals(resp[\"metadata\"], [\"/lev1/lev2\",])", "title": "" }, { "docid": "51eae7569aef8934365af9bc8dafc136", "score": "0.6604995", "text": "def file_listing():\n url = SERVER_ADDRESS\n url += 'listing'\n payload = add_auth({})\n r = requests.get(url, data=payload)\n if r.content == FALSE:\n print(\"You are not logged in! 
Shutting down OneDir...\")\n quit_session()\n return r.content", "title": "" }, { "docid": "c16b6cb7af00e768a577cdc607d98868", "score": "0.6581884", "text": "def list_directory(self, path):\n try:\n list = os.listdir(path)\n except os.error:\n self.send_error(404, \"No permission to list directory\")\n return None\n list.sort(key=lambda a: a.lower())\n f = StringIO()\n displaypath = cgi.escape(urllib.unquote(self.path))\n f.write('<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">')\n f.write(\"<html>\\n<title>Directory listing for %s</title>\\n\" % displaypath)\n f.write(\"<body>\\n<h2>Directory listing for %s</h2>\\n\" % displaypath)\n f.write(\"<hr>\\n<ul>\\n\")\n for name in list:\n fullname = os.path.join(path, name)\n displayname = linkname = name\n # Append / for directories or @ for symbolic links\n if os.path.isdir(fullname):\n displayname = name + \"/\"\n linkname = name + \"/\"\n if os.path.islink(fullname):\n displayname = name + \"@\"\n # Note: a link to a directory displays with @ and links with /\n f.write('<li><a href=\"%s\">%s</a>\\n'\n % (urllib.quote(linkname), cgi.escape(displayname)))\n f.write(\"</ul>\\n<hr>\\n</body>\\n</html>\\n\")\n length = f.tell()\n f.seek(0)\n self.send_response(200)\n encoding = sys.getfilesystemencoding()\n self.send_header(\"Content-type\", \"text/html; charset=%s\" % encoding)\n self.send_header(\"Content-Length\", str(length))\n self.end_headers()\n return f", "title": "" }, { "docid": "d89fc0a248e889701e39d5642d16da90", "score": "0.6569629", "text": "def test_list_dirs(self, mgr):\n dirs = mgr.list_dirs('')\n test_names = [model['name'] for model in dirs]\n assert_items_equal(\n test_names,\n ['empty.ipynb', 'not_notebook', 'testing']\n )", "title": "" }, { "docid": "deaa507422bd28b396b59bc7004cd437", "score": "0.6557229", "text": "def test_list_files(self):\n with requests_mock.Mocker() as m:\n get_url = '{}?access_token={}'.format(\n OH_GET_URL, self.oh_member.access_token)\n m.register_uri('GET',\n get_url,\n json={'data': [\n {'id': '1',\n 'basename': 'foo',\n 'download_url': 'www.foobar.com',\n 'metadata': {\n 'description': '',\n 'tags': '[\"foo\"]',\n },\n }]\n },\n status_code=200)\n c = Client()\n c.login(username=self.user.username, password='foobar')\n data = c.get(\"/list\")\n self.assertEqual(data.status_code, 200)\n self.assertIn('<a href=\"www.foobar.com\"', str(data.content))", "title": "" }, { "docid": "1b336cf240dd03cb8c90a819e38864fa", "score": "0.6551339", "text": "def list_directory(self, path):\n try:\n list = os.listdir(path)\n except os.error:\n self.send_error(404, \"No permission to list directory\")\n return None\n list.sort(key=lambda a: a.lower())\n f = BytesIO()\n displaypath = cgi.escape(urllib.parse.unquote(self.path))\n f.write(b'<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">')\n f.write((\"<html>\\n<title>Directory listing for %s</title>\\n\" % displaypath).encode())\n f.write((\"<body>\\n<h2>Directory listing for %s</h2>\\n\" % displaypath).encode())\n f.write(b\"<hr>\\n\")\n f.write(b\"<form ENCTYPE=\\\"multipart/form-data\\\" method=\\\"post\\\">\")\n f.write(b\"<input name=\\\"file\\\" type=\\\"file\\\"/>\")\n f.write(b\"<input type=\\\"submit\\\" value=\\\"upload\\\"/></form>\\n\")\n f.write(b\"<hr>\\n<ul>\\n\")\n for name in list:\n fullname = os.path.join(path, name)\n displayname = linkname = name\n # Append / for directories or @ for symbolic links\n if os.path.isdir(fullname):\n displayname = name + \"/\"\n linkname = name + \"/\"\n if os.path.islink(fullname):\n displayname = 
name + \"@\"\n # Note: a link to a directory displays with @ and links with /\n f.write(('<li><a href=\"%s\">%s</a>\\n'\n % (urllib.parse.quote(linkname), cgi.escape(displayname))).encode())\n f.write(b\"</ul>\\n<hr>\\n</body>\\n</html>\\n\")\n length = f.tell()\n f.seek(0)\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Content-Length\", str(length))\n self.end_headers()\n return f", "title": "" }, { "docid": "142bba8ef575e2a086115400927e6686", "score": "0.6550284", "text": "def list_directory(self, path):\n try:\n list = os.listdir(path)\n except os.error:\n self.send_error(404, \"No permission to list directory\")\n return None\n list.sort(key=lambda a: a.lower())\n list.sort(key=lambda x: os.path.getmtime(os.path.join(path,x)), reverse=True)\n f = StringIO()\n displaypath = cgi.escape(urllib.parse.unquote(self.path))\n f.write('<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">')\n f.write(\"<html>\\n<title>Directory listing for %s</title>\\n\" % displaypath)\n f.write(\"<body>\\n<h1>Directory listing for %s</h1>\\n\" % displaypath)\n f.write('<table><tr><th>Name</th><th>Date Modified</th><th>Size</th></tr><tr><th colspan=\"3\"><hr></th></tr>\\n')\n for name in list:\n fullname = os.path.join(path, name)\n displayname = linkname = name\n # Append / for directories or @ for symbolic links\n if os.path.isdir(fullname):\n displayname = name + \"/\"\n linkname = name + \"/\"\n if os.path.islink(fullname):\n displayname = name + \"@\"\n # Note: a link to a directory displays with @ and links with /\n filename=os.path.join(path,name)\n modified = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(filename)))\n size = self.sizeof_fmt(self.get_size(filename))\n f.write('<tr><td><a href=\"%s\">%s</a></td><td>%s</td><td>%s</td>\\n'\n % (urllib.parse.quote(linkname), cgi.escape(displayname), cgi.escape(modified), cgi.escape(size)))\n f.write(\"</table>\\n<hr>\\n</body>\\n</html>\\n\")\n length = f.tell()\n f.seek(0)\n self.send_response(200)\n encoding = sys.getfilesystemencoding()\n self.send_header(\"Content-type\", \"text/html; charset=%s\" % encoding)\n self.send_header(\"Content-Length\", str(length))\n self.end_headers()\n return f", "title": "" }, { "docid": "0b19fbde001f79a70d05af9bb1f01227", "score": "0.6499869", "text": "def directory_list( host, path=\"/\" ):\n with ftplib.FTP( host, user=\"anonymous\" ) as connection:\n print( \"Welcome\", connection.getwelcome() )\n for name, details in connection.mlsd(path):\n print( name, details['type'], details.get('size') )", "title": "" }, { "docid": "59029331528fa148c62b2dd0e6141943", "score": "0.6445144", "text": "def ls(self, path=None):\n if path is None:\n path = '/'\n print(os.listdir(os.path.join(self.root, path)))", "title": "" }, { "docid": "152191d63fbe4f81175394c359155b71", "score": "0.64213663", "text": "def cmdDirlist(self,path):\n return \"aaaaaaaaa\\n\"", "title": "" }, { "docid": "d2c5a3de6355dbb3eacf819a08a0609c", "score": "0.6420993", "text": "def listdir(dirname):\n logging.debug(\"getting a dirlist %s \" % dirname)\n\n if dirname[0:4] == \"vos:\":\n return client.listdir(dirname, force=True)\n else:\n return os.listdir(dirname)", "title": "" }, { "docid": "8e6cec1814e3b8c3228fde309f4392b6", "score": "0.6413917", "text": "def perform_client_request(self, handler, method, path, headers, payload_type, payload_content):\n response = \"<h1>Contents of current working directory:</h1>\"\n response += '<br>'.join(os.listdir(os.curdir))\n return 
self.ServiceResponse(payload=response)", "title": "" }, { "docid": "a50a473f920f2d099f6af4b61ee60229", "score": "0.63928807", "text": "def test_folder_listing(self):\n uploader.upload(TEST_IMAGE, public_id=\"{}1/item\".format(PREFIX), tags=[UNIQUE_API_TAG])\n uploader.upload(TEST_IMAGE, public_id=\"{}2/item\".format(PREFIX), tags=[UNIQUE_API_TAG])\n uploader.upload(TEST_IMAGE, public_id=\"{}1/test_subfolder1/item\".format(PREFIX), tags=[UNIQUE_API_TAG])\n uploader.upload(TEST_IMAGE, public_id=\"{}1/test_subfolder2/item\".format(PREFIX), tags=[UNIQUE_API_TAG])\n result = api.root_folders()\n self.assertEqual(result[\"folders\"][0][\"name\"], \"{}1\".format(PREFIX))\n self.assertEqual(result[\"folders\"][1][\"name\"], \"{}2\".format(PREFIX))\n result = api.subfolders(\"{}1\".format(PREFIX))\n self.assertEqual(result[\"folders\"][0][\"path\"], \"{}1/test_subfolder1\".format(PREFIX))\n self.assertEqual(result[\"folders\"][1][\"path\"], \"{}1/test_subfolder2\".format(PREFIX))\n with six.assertRaisesRegex(self, NotFound):\n api.subfolders(PREFIX)", "title": "" }, { "docid": "4d6ab56be63151487489b6751973a687", "score": "0.6390152", "text": "def test_get_directories(self):\n test_paths = ['foo/bar','bar/bar','baz/bar']\n create_tree(bs.WATCHED_FOLDER, test_paths)\n paths = bs.get_directories(bs.WATCHED_FOLDER)\n self.assertEqual(len(paths), len(test_paths))", "title": "" }, { "docid": "d8db7237b4ee230b95f7a27a3ba3ba05", "score": "0.6378964", "text": "def testReaddir(self):\n entries = self.kfsClient.readdir(TESTNAME)\n assert len(entries) == 2\n entries = [e for e in entries if not specialDirEntry(e)]\n assert len(entries) == 0", "title": "" }, { "docid": "50dc56d55f7049f3150f370cfb7f7005", "score": "0.63282704", "text": "def listDirectory(path, skip=SKIPPED_FILES):", "title": "" }, { "docid": "96d10f52037ffdb03eb4d737da614a6c", "score": "0.62947583", "text": "def Start(self, unused_response):\n self.state.urn = self.client_id\n self.state.Register(\"out_urn\", None)\n self.CallClient(\"ListDirectory\", pathspec=self.state.pathspec,\n next_state=\"DownloadDir\",\n request_data=dict(depth=self.state.depth))", "title": "" }, { "docid": "eb4d33b504bf429f18231b48154cce4b", "score": "0.6275151", "text": "def test_listdir_wrapper(self):\n no_hidden_files = helpers.listdir_wrapper(\"tmp_testdir\", False)\n hidden_files = helpers.listdir_wrapper(\"tmp_testdir\", True)\n self.assertEqual(len(no_hidden_files), 1)\n self.assertEqual(len(hidden_files), 2)\n self.assertEqual(hidden_files, sorted(hidden_files))", "title": "" }, { "docid": "2158b22b1731da59267d488db1298f5c", "score": "0.62254333", "text": "def ls(directory):\n return os.listdir(directory)", "title": "" }, { "docid": "1c1dfc1e993b0e0234d6d55aaf537c19", "score": "0.6222988", "text": "def list(self):\r\n\r\n if not self.exists():\r\n pass\r\n raise OSError('\\'{0}\\''.format(self._path))\r\n elif not self.isdir():\r\n raise NotADirectoryError('\\'{0}\\''.format(self._path))\r\n else:\r\n for file in listdir(self._path):\r\n print(file)", "title": "" }, { "docid": "6cfbe8e2f5134f31761ea4c01b643a82", "score": "0.6204718", "text": "def command_list_files(args: Dict[str, str], settings: Settings) -> CommandResults:\n class __MappingValue:\n def __init__(self,\n readable_value: Callable[[Any], Any],\n context_value: Callable[[Any], Any]):\n self.readable_value = readable_value\n self.context_value = context_value\n\n query_params = assign_params(\n q='ls',\n dir=args.get('directory', '/'),\n recursive=args.get('recursive', 'false')\n )\n client = 
new_client(detect_service_ip_port(settings), settings)\n resp = client._http_request('GET', params=query_params, raise_on_status=True)\n ents = resp.get('data')\n if not isinstance(ents, list):\n raise ValueError('Failed to list file entries')\n\n file_ents = sorted([ent for ent in ents if ent.get('type') == 'F'], key=lambda x: x['path'])\n\n mapping = {\n 'name': __MappingValue(lambda x: x or '', lambda x: x or ''),\n 'path': __MappingValue(lambda x: x or '', lambda x: x or ''),\n 'size': __MappingValue(lambda x: pretty_size(x or 0), lambda x: x or 0),\n 'last-modified': __MappingValue(\n lambda x: datetime.fromtimestamp(int(x or 0), timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC'),\n lambda x: datetime.fromtimestamp(int(x or 0), timezone.utc).isoformat()),\n }\n\n outputs = [{camelize_string(k, '-'): v.context_value(ent[k]) for k, v in mapping.items()} for ent in file_ents]\n\n return CommandResults(\n outputs_prefix='WebFileRepository.Files',\n outputs=outputs,\n readable_output=tblToMd(\n 'File List',\n [{k: v.readable_value(ent[k]) for k, v in mapping.items()} for ent in file_ents],\n headers=mapping.keys(),\n headerTransform=lambda x: x.replace('-', ' ').title()\n ),\n raw_response=file_ents)", "title": "" }, { "docid": "4951090be4027e33afa7250fcfb9c338", "score": "0.62042993", "text": "def list_directory(self, path, environ, start_response):\n try:\n names = os.listdir(path)\n except os.error: # pragma: no cover\n return self.error(404, environ, start_response)\n names.sort(key=lambda a: a.lower())\n\n items = []\n for name in names:\n fullname = os.path.join(path, name)\n displayname = linkname = name\n # Append / for directories or @ for symbolic links\n if os.path.isdir(fullname):\n displayname = name + \"/\"\n linkname = name + \"/\"\n if os.path.islink(fullname):\n displayname = name + \"@\"\n # Note: a link to a directory displays with @ and links with /\n items.append('<li><a href=\"{}\">{}</a></li>'.format(\n urlquote(linkname), html_escape(displayname)\n ))\n\n f = io.BytesIO()\n f.write(self.directory_template.format(\n displaypath=html_escape(urlunquote(wsgiref.util.request_uri(environ))),\n items=os.linesep.join(items)\n ).encode(self.encoding))\n length = f.tell()\n f.seek(0)\n\n headers = [\n ('Content-Length', str(length)),\n ('Content-type', f'text/html; charset={self.encoding}')\n ]\n start_response(self.get_status(200), headers)\n file_wrapper = environ.get('wsgi.file_wrapper', wsgiref.util.FileWrapper)\n return file_wrapper(f)", "title": "" }, { "docid": "73ad2410afd2db2b9c63a0cf5e8b5f4c", "score": "0.6202785", "text": "def test_list(self):\n response = self.client.get(\n self.endpoint\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "34abfe03f38a02151f4edbce732af434", "score": "0.61747915", "text": "def list_folder(folder):\n folderpath = os.path.join(NOTES_DIR, folder)\n if os.path.exists(folderpath):\n content = \"\\n\".join(os.listdir(folderpath))\n else:\n content = \"\"\n return Response(content, mimetype=\"application/text\")", "title": "" }, { "docid": "ba1ae31e70b3c491a53554039041899e", "score": "0.6173804", "text": "def list(self, path):\n assert \"Not implemented\"", "title": "" }, { "docid": "53205902a8337f87ae2a3fd1e580a7fb", "score": "0.61672175", "text": "def test_list(self):\n # complain if the request is not complete\n self.req.target = None\n with self.assertRaises(RuntimeError):\n self.server.list(self.req)\n\n # test with a bad file path\n self.req.set_target(\"thisFolderDoesNotExist/TEST.txt\")\n with 
self.assertRaises(IOError):\n self.server.list(self.req)", "title": "" }, { "docid": "3a32ec190c1fe86bd0579cde90dca7fb", "score": "0.61522096", "text": "async def list(self, path: str) -> list:\n\t\traise NotImplementedError(\"{}.list()\".format(self.__class__.__name__))", "title": "" }, { "docid": "24e30e1971c37208ef14ada041125162", "score": "0.6150524", "text": "def ListFiles(self):", "title": "" }, { "docid": "649f370ebe699447ea7269a0f3600b22", "score": "0.61495507", "text": "def test_list_files_logged_out(self):\n c = Client()\n response = c.get(\"/list\")\n self.assertRedirects(response, '/')", "title": "" }, { "docid": "0bf4d15ebf46d5d2d2a3c16f78bfed88", "score": "0.61304015", "text": "def mock_list_dir(sftp:pysftp.Connection):\n\n assert isinstance(sftp, pysftp.Connection)\n print(SUCCESS_STRING)", "title": "" }, { "docid": "57312c5c0f034e873c885eadcb311ec6", "score": "0.61130285", "text": "def DeviceListDir(self, directory):\n return os.listdir(directory)", "title": "" }, { "docid": "f4dc06ef3e238f045ecc9281c053cff2", "score": "0.61070544", "text": "def listdir(self, dirName):\n protocol, abs_path, url = self.getprotocol(dirName)\n\n if protocol == 'hdfs':\n if not self.client:\n if (not url) and (not url == self.url):\n self.connectHDFS(url)\n else:\n print((\"[ERROR]\\tFS: Write Error(Please first\"\n \" build connection by using connectHDFS() method)\\n\"))\n sys.exit(self.error['client'])\n return self.client.list(abs_path)\n if protocol == 'file':\n return os.listdir(abs_path)", "title": "" }, { "docid": "dd05b1bc0aee6e16d273fb0cd3c780e6", "score": "0.61010206", "text": "def test_list(self):\n request = self.factory.get(self.list_path)\n force_authenticate(request, user=self.kive_user)\n response = self.list_view(request, pk=None)\n\n self.assertEquals(len(response.data), 3)\n self.assertEquals(response.data[2]['name'], 'github/cindy/hello')", "title": "" }, { "docid": "b5254ccddba64dce31eb3147ac3624ee", "score": "0.60686696", "text": "def test_get_files_list(self):\r\n response = self.client.get(\r\n path=reverse('api_documents_files'), **self.authentication_headers)\r\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "ef83f1cbcc9e440e31d414e3bb073f6a", "score": "0.60678655", "text": "def do_ls(self, args):\n #-- print either:\n #-- 1) everything within the directories of each argument passed\n #-- 2) everything within the current directory\n if args:\n #-- for each argument\n for a in args.split():\n #-- print contents from remote subdirectories\n RD = posixpath.normpath(posixpath.join(self.remote_directory,a))\n remote_path = posixpath.join('https://',RD)\n req = urllib2.Request(url=remote_path)\n try:\n response = urllib2.urlopen(req, timeout=self.timeout)\n except urllib2.URLError:\n #-- print an error if invalid\n print('ERROR: {0} not a valid path'.format(remote_path))\n else:\n #-- parse response for subdirectories (find column names)\n tree = lxml.etree.parse(response,self.htmlparser)\n colnames = tree.xpath('//td[@class=\"indexcolname\"]//a/@href')\n print('\\n'.join([w for w in colnames]))\n else:\n #-- print contents from remote directory\n remote_path = posixpath.join('https://',self.remote_directory)\n req = urllib2.Request(url=remote_path)\n #-- read and parse request for subdirectories (find column names)\n tree=lxml.etree.parse(urllib2.urlopen(req,timeout=self.timeout),self.htmlparser)\n colnames = tree.xpath('//td[@class=\"indexcolname\"]//a/@href')\n print('\\n'.join([w for w in colnames]))\n #-- close the request\n req = None", 
"title": "" }, { "docid": "b2824af0c69f3d8c0795c3cf180cc21d", "score": "0.6036534", "text": "def listdir(self):\n return self._accessor.listdir(self)", "title": "" }, { "docid": "20f30fa42bf453d165a13c7d58b6140d", "score": "0.6029461", "text": "def list_dir(self, remote_path):\n print(self.session.run_sftp(\"listdir\", input_file_path=remote_path))", "title": "" }, { "docid": "c21c5d597096cefb98c58b0d6ef9675e", "score": "0.60253465", "text": "def list_dir(self, directory):\n return [f for f in self.ssh.listdir(directory)]", "title": "" }, { "docid": "46552d89c29ddebda82d9e94711c24c1", "score": "0.6018211", "text": "def listdir(path):\n\treturn [i for i in os.listdir(path)]", "title": "" }, { "docid": "eb80ffae8956d2901150661ea23bd767", "score": "0.5991524", "text": "def test_list(self):\n request = self.factory.get(self.list_path)\n force_authenticate(request, user=self.kive_user)\n response = self.list_view(request, pk=None)\n\n self.assertEquals(len(response.data), 3)\n self.assertEquals(response.data[2]['name'], 'mC_name')", "title": "" }, { "docid": "e5d2180222a9f278ad2c8ff9a622825e", "score": "0.5991425", "text": "def listDir(self):\n ldir = os.listdir(os.getcwd())\n LOG.verbose(\"Base directory content:\", \"\\n\".join(ldir))", "title": "" }, { "docid": "ed95cf9b68a0e0056df56b57f563ace4", "score": "0.5984685", "text": "def list_folder(self, folderPath):\n print '# LISTING: %s' % folderPath\n resp = self.api_client.metadata(folderPath)\n\n if 'contents' in resp:\n for f in resp['contents']:\n name = os.path.basename(f['path'])\n encoding = locale.getdefaultlocale()[1]\n if f['is_dir']: \n print ('[D] %s' % name).encode(encoding)\n else:\n print ('[F] %s' % name).encode(encoding)", "title": "" }, { "docid": "9eb619cdae1a33c07a3dc7bdb3c573e6", "score": "0.59792393", "text": "def list_folder(client,folder):\n dropBoxFileList = []\n if folder:\n folder_metadata = client.metadata(folder)\n else:\n folder_metadata = client.metadata(\"/\")\n dropBoxDictionaryList = folder_metadata['contents']\n\n for fileDict in dropBoxDictionaryList:\n dropBoxFileList.append(os.path.basename(fileDict['path']))\n\n return dropBoxFileList", "title": "" }, { "docid": "6c02e866cf5e1a1c3010cd846b3320a9", "score": "0.59719163", "text": "def _listdir_local(path, kwargs):\n request = file_service_pb.ListDirRequest()\n response = file_service_pb.ListDirResponse()\n request.set_path(path)\n\n if kwargs and kwargs.has_key('marker'):\n request.set_marker(kwargs['marker'])\n if kwargs and kwargs.has_key('max-keys'):\n request.set_max_keys(kwargs['max-keys'])\n if kwargs and kwargs.has_key('prefix'):\n request.set_prefix(kwargs['prefix'])\n files._make_call('ListDir', request, response)\n return response.filenames_list()", "title": "" }, { "docid": "26d9705661bbb2c2dd6e08f6d1865d05", "score": "0.59691334", "text": "def list_dir(self):\n self.logger.debug(\"Listing directory\")\n\n QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n try:\n # first clear the list widget\n self.listWidget.clear()\n\n # add \"..\" to top of listing\n item = SFTPBrowserListWidgetItem(QtGui.QIcon(iconPath(\"oxygen/edit-undo.png\")), \"..\", True)\n self.listWidget.addItem(item)\n\n # add files and directories\n listdir = self.sftp.listdir()\n listdir.sort()\n for name in listdir:\n if name.startswith(\".\") and not self.showHidden:\n continue\n\n # file or directory\n statout = self.sftp.stat(name)\n if stat.S_ISDIR(statout.st_mode):\n # list widget item\n item = 
SFTPBrowserListWidgetItem(QtGui.QIcon(iconPath(\"oxygen/folder.png\")), name, True)\n\n elif stat.S_ISREG(statout.st_mode):\n # list widget item\n item = SFTPBrowserListWidgetItem(QtGui.QIcon(iconPath(\"oxygen/x-office-document.png\")), name, False)\n\n else:\n logging.warning(\"Item in directory listing is neither file nor directory: '%s (%s)'\", name, statout.st_mode)\n continue\n\n # add to list widget\n self.listWidget.addItem(item)\n\n # apply selected filter\n self.applyFilterToItems()\n\n finally:\n QtWidgets.QApplication.restoreOverrideCursor()", "title": "" }, { "docid": "f64b79baae99ee8a4463ec82d6e478fa", "score": "0.59468645", "text": "def do_list(self, arg):\n for wordsIndex in self.dirIndex.values():\n print(wordsIndex.directoryPath)", "title": "" }, { "docid": "655c1263dd1adb284993c6a145d15732", "score": "0.5942164", "text": "def my_readdir(path,offset=0):\n # at the user list level should never be called\n # you could have 10000s of users under / !\n yy=atpic.log.setname(xx,'my_readdir')\n atpic.log.debug(yy,'input=',path,offset)\n filelist=[b'.',b'..']\n if path==b'/':\n filelist=[b'.',b'..',b'cannot_be_listed']\n else:\n # call a zmq service\n socket=get_socket()\n atpic.log.debug(yy,'socket=',socket)\n socket.send(b'readdir '+path) # send\n filelist_string=socket.recv()\n atpic.log.debug(yy,'filelist_string=',filelist_string)\n if filelist_string==b'':\n filelist=[]\n else:\n filelist=filelist_string.split(b'\\n')\n filelist=[b'.',b'..']+filelist \n atpic.log.debug(yy,'output=',filelist)\n return filelist", "title": "" }, { "docid": "de41d5a5ac425551e038654e3bc05b4d", "score": "0.59357285", "text": "def directory_list(path):\n validate_path(path)\n for item in get_files(path):\n try:\n m_stat = os.stat(item)\n print(\"{access} {owner} {modified} {name}\".format(\n name=os.path.split(item)[1],\n owner=m_stat.st_uid,\n modified=datetime.fromtimestamp(int(m_stat.st_mtime)),\n access=stat.filemode(m_stat.st_mode)\n )\n )\n except PermissionError:\n pass", "title": "" }, { "docid": "9e4b2b67c6146e1112127eeb31a13b40", "score": "0.59298056", "text": "def listdir(self, path):\n # remove any trailing slashes if any; otherwise, iquest would fail\n path = path.strip()\n while path.endswith('/'):\n path = path[:-1]\n\n # check first whether the path is an iRODS collection/directory or not, and if not, need\n # to raise SessionException, and if yes, can proceed to get files and sub-dirs under it\n qrystr = \"select COLL_NAME where {}\".format(IrodsStorage.get_absolute_path_query(path))\n stdout = self.session.run(\"iquest\", None, \"%s\", qrystr)[0]\n if \"CAT_NO_ROWS_FOUND\" in stdout:\n raise SessionException(-1, '', 'folder {} does not exist'.format(path))\n\n fname_list = self._list_files(path)\n\n subdir_list = self._list_sub_dirs(path)\n\n listing = (subdir_list, fname_list)\n\n return listing", "title": "" }, { "docid": "3e1e1864cbf5024558078757008aefaa", "score": "0.5918044", "text": "async def test_store_get_all_items_reads_dir(\n store: ReadStore[CoolModel], store_path: PurePosixPath, mock_filesystem: AsyncMock\n) -> None:\n mock_filesystem.read_json_dir.return_value = [\n DirEntry(path=store_path / \"foo\", contents=CoolModel(foo=\"hello\", bar=0)),\n DirEntry(path=store_path / \"bar\", contents=CoolModel(foo=\"from the\", bar=1)),\n DirEntry(path=store_path / \"baz\", contents=CoolModel(foo=\"other side\", bar=2)),\n ]\n items = await store.get_all_items()\n\n assert items[0] == CoolModel(foo=\"hello\", bar=0)\n assert items[1] == CoolModel(foo=\"from the\", bar=1)\n 
assert items[2] == CoolModel(foo=\"other side\", bar=2)\n mock_filesystem.read_json_dir.assert_called_with(\n store_path, parse_json=store.parse_json, ignore_errors=False\n )", "title": "" }, { "docid": "a68f444922a0aa941d5ac8b08c15c7cd", "score": "0.59156483", "text": "def ls_command(\n login_manager: LoginManager,\n *,\n endpoint_plus_path: tuple[uuid.UUID, str | None],\n recursive_depth_limit: int,\n recursive: bool,\n long_output: bool,\n show_hidden: bool,\n orderby: tuple[\n tuple[\n Literal[\n \"group\",\n \"last_modified\",\n \"name\",\n \"permissions\",\n \"size\",\n \"type\",\n \"user\",\n ],\n Literal[\"ASC\", \"DESC\"],\n ],\n ...,\n ],\n filter_val: str | None,\n local_user: str | None,\n):\n from globus_sdk.services.transfer.response import IterableTransferResponse\n\n from globus_cli.services.transfer import (\n RecursiveLsResponse,\n autoactivate,\n iterable_response_to_dict,\n )\n\n endpoint_id, path = endpoint_plus_path\n\n # do autoactivation before the `ls` call so that recursive invocations\n # won't do this repeatedly, and won't have to instantiate new clients\n transfer_client = login_manager.get_transfer_client()\n autoactivate(transfer_client, endpoint_id, if_expires_in=60)\n\n # create the query parameters to send to operation_ls\n ls_params: dict[str, t.Any] = {\"show_hidden\": int(show_hidden)}\n if orderby:\n ls_params[\"orderby\"] = \",\".join(f\"{o[0]} {o[1]}\" for o in orderby)\n if path:\n ls_params[\"path\"] = path\n if local_user:\n ls_params[\"local_user\"] = local_user\n\n # this char has special meaning in the LS API's filter clause\n # can't be part of the pattern (but we don't support globbing across\n # dir structures anyway)\n if filter_val and \"/\" in filter_val:\n raise click.UsageError('--filter cannot contain \"/\"')\n\n # get the `ls` result\n if recursive:\n # if we are doing filtering we need to pass multiple filter params. The\n # first allows all directories, as we need them for recursive\n # expansion. 
The second then filters name by the filter_val\n if filter_val:\n ls_params[\"filter\"] = [{\"type\": \"dir\"}, {\"name\": filter_val}]\n\n res: (\n IterableTransferResponse | RecursiveLsResponse\n ) = transfer_client.recursive_operation_ls(\n endpoint_id, ls_params, depth=recursive_depth_limit\n )\n else:\n # format filter_val into a simple filter clause which operates on name\n if filter_val:\n ls_params[\"filter\"] = f\"name:{filter_val}\"\n\n res = transfer_client.operation_ls(endpoint_id, **ls_params)\n\n # and then print it, per formatting rules\n pathformatter = PathItemFormatter()\n display(\n res,\n fields=[\n Field(\"Permissions\", \"permissions\"),\n Field(\"User\", \"user\"),\n Field(\"Group\", \"group\"),\n Field(\"Size\", \"size\"),\n Field(\"Last Modified\", \"last_modified\"),\n Field(\"File Type\", \"type\"),\n Field(\"Filename\", \"@\", formatter=pathformatter),\n ],\n simple_text=(\n None\n if long_output or is_verbose() or not outformat_is_text()\n else \"\\n\".join(pathformatter.parse(x) for x in res)\n ),\n json_converter=iterable_response_to_dict,\n )", "title": "" }, { "docid": "b2c8896948a596ddf61f3bcd26960e84", "score": "0.5909343", "text": "def do_ls(self,args=None):\r\n return Cli.do_ls(self,args)", "title": "" }, { "docid": "713ee38ddf4f5ab9fcb5829357dea0f3", "score": "0.59092", "text": "def test_list_all_records(self):\n\n directory_manager = DirectoryManager()\n all_records = directory_manager.list_all_records()\n\n for record in all_records:\n print(record)\n\n # No conditions to test\n self.assertTrue(len(all_records) > 0)", "title": "" }, { "docid": "83cbce411c212b23a7764b536e778e35", "score": "0.5908724", "text": "def test_list(self):\n request = self.factory.get(self.list_path)\n force_authenticate(request, user=self.kive_user)\n response = self.list_view(request, pk=None)\n\n self.assertEquals(len(response.data), 3)\n self.assertEquals(response.data[2]['revision_name'], 'mC_name')", "title": "" }, { "docid": "c9a3a46345167718a1fa6f7927ced704", "score": "0.59021896", "text": "def do_get_all_volume_paths(client, args):\n brickclient = brick_client.Client(client)\n\n paths = brickclient.get_all_volume_paths(args.protocol, args.multipath)\n if paths:\n print('\\n'.join(paths))", "title": "" }, { "docid": "ff492401d5ce9b6e6fc5f4d76ec41f3d", "score": "0.58966756", "text": "def open_dir(path):\n client = pool.get_client()\n req_id = client.get_request_id()\n req = \"applet-opendir \" + os.path.normpath(path)\n client.send_request(req_id, req)\n if client.read_response() < 0:\n raise NetworkError(\"Read response error\")\n \n rsp = client.response\n pool.return_client(client)\n if rsp[0] != \"200\":\n raise CcnetError(\"Error received: %s %s\" % (rsp[0], rsp[1]))", "title": "" }, { "docid": "985677d118ebf7f1947c5bbcdd17ee34", "score": "0.5895355", "text": "def listdir(self, path: bytes) -> Iterable[bytes]: # os's name, so pylint: disable=g-bad-name\n with Stdout(self.adb +\n [b'shell',\n b'ls -al %s' % (self.QuoteArgument(path + b'/'),)]) as stdout:\n for line in stdout:\n if line.startswith(b'total '):\n continue\n line = line.rstrip(b'\\r\\n')\n try:\n statdata, filename = self.LsToStat(line)\n except OSError:\n continue\n if filename is None:\n logging.error('Could not parse %r.', line)\n else:\n self.stat_cache[path + b'/' + filename] = statdata\n yield filename", "title": "" }, { "docid": "45a2c63e653387cfdca53f285813523f", "score": "0.586015", "text": "def test_35_query_directory(self):\r\n try:\r\n print 
\"\\n------------------LOGOFF_TEST35-------------------------------------\"\r\n print \"TC 35 - Testing LOGOFF followed by query directory request.\"\r\n expected_status = \"STATUS_FILE_CLOSED\"\r\n print \"Expected status: \", expected_status\r\n print \"Creating session and tree connect...\"\r\n chan, tree = self.tree_connect()\r\n print \"Tree connection successful.\"\r\n print \"Create a file LOGOFF_TEST35 for testing\"\r\n dir_handle = chan.create(tree, \"LOGOFF_TEST35\", options=pike.smb2.FILE_DIRECTORY_FILE).result()\r\n print \"File created.\"\r\n print \"Sending Logoff request...\"\r\n conv_obj = utils.Convenience()\r\n logoff_req = conv_obj.logoff(chan)\r\n res = conv_obj.transceive(chan, logoff_req)\r\n print \"Session logged off successfully.\"\r\n print \"Querying directory information on LOGOFF_TEST35.\"\r\n names = map(lambda info: info.file_name, chan.query_directory(dir_handle))\r\n actual_status = \"UNEXPECTED_SUCCESS\"\r\n print \"Unexpected success. Querying directory information on LOGOFF_TEST35 done.\"\r\n except Exception as e:\r\n actual_status = str(e)\r\n print \"Actual status: \", actual_status\r\n self.assertIn(expected_status, actual_status)\r\n print \"TC 35 Passed\"", "title": "" }, { "docid": "f22fd28414ac0265ec696d9ffe79522e", "score": "0.5855314", "text": "async def test_store_get_all_entries_reads_dir(\n store: ReadStore[CoolModel], store_path: PurePosixPath, mock_filesystem: AsyncMock\n) -> None:\n mock_filesystem.read_json_dir.return_value = [\n DirEntry(path=store_path / \"foo\", contents=CoolModel(foo=\"hello\", bar=0)),\n DirEntry(path=store_path / \"bar\", contents=CoolModel(foo=\"from the\", bar=1)),\n DirEntry(path=store_path / \"baz\", contents=CoolModel(foo=\"other side\", bar=2)),\n ]\n items = await store.get_all_entries()\n\n assert items[0] == (\"foo\", CoolModel(foo=\"hello\", bar=0))\n assert items[1] == (\"bar\", CoolModel(foo=\"from the\", bar=1))\n assert items[2] == (\"baz\", CoolModel(foo=\"other side\", bar=2))\n mock_filesystem.read_json_dir.assert_called_with(\n store_path, parse_json=store.parse_json, ignore_errors=False\n )", "title": "" }, { "docid": "4df97f0a3ee908a4d3d282c0d3d456b4", "score": "0.58526284", "text": "def list(self, directory) -> None or {}:\n d = directory if directory.endswith('/') else (directory + '/')\n url = parse.urljoin(self.url_base, d)\n headers = {'Accept': 'application/json'}\n try:\n rsp = requests.get(url, headers=headers)\n if not rsp.ok:\n g_logger.error('Error listing \"%s\". [HTTP %d]' % (url, rsp.status_code))\n return None\n return rsp.json()\n except Exception as e:\n g_logger.error('Error listing \"%s\". 
e: %s' % (url, e))\n return None", "title": "" }, { "docid": "96b34dab94952f9d2ce4fff58ccf602b", "score": "0.5838937", "text": "def test_get_list(self):\n data = {}\n url = api_reverse(\"vtcuser:vl-list_create\")\n response = self.client.get(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "99ce2db3cf4a0189f81da7c5e7142ff4", "score": "0.5822957", "text": "def fixture_ls(request):\n mk_dir(TEST_DIR)\n setup_ls(request.param)\n yield request.param", "title": "" }, { "docid": "940f17aef425d973735b8ab2d6e5da0e", "score": "0.58205384", "text": "async def list_directory(srm, path, semaphore):\n fullPath = joinUrl(srm, path)\n pref = \"{0}/\".format(path.rstrip(\"/\")) ## make sure we have exactly one trailing slash\n async with semaphore:\n output = await subproc_check_output(LS_COMMAND, fullPath)\n entries = [ (int(ln.split()[0]), ln.split()[1].split(pref)[1]) for ln in output.decode().strip().split(\"\\n\") if ln.startswith(LS_L1_PREFIX) ]\n return [ ln for sz,ln in entries if ln.endswith(\"/\") ], [ (sz,ln) for sz,ln in entries if not ln.endswith(\"/\") ]", "title": "" }, { "docid": "087e98b5b58af600b8f3cccae67bd545", "score": "0.5819974", "text": "def list_folder(input_folder: str):\n\n # TODO\n logger = logging.getLogger(__name__)\n logger.debug(\"Listing content of {}\".format(input_folder))\n logger.debug(\"Not implemented\")\n\n return []", "title": "" }, { "docid": "3cf7cb71649829980c3ff5fec5b3318f", "score": "0.58119935", "text": "def listdir(self, path):\n if os.path.isdir(path):\n return os.listdir(path)\n else:\n logging.error('%s: Pool directory does not exist' % path)\n sys.exit(1)", "title": "" }, { "docid": "027d96dcd56a65c40b0e367e4eb2b0a4", "score": "0.58063066", "text": "def list_dir(self):\n return self.ftp.nlst()", "title": "" }, { "docid": "f620065d3c8ce8e224b104517938010a", "score": "0.57910466", "text": "def check_dir(dir_name):\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n val = 0\n while val < 12:\n check_dir_link = \"https://\"+test[\"guiHost\"]+\":\"+test[\"port\"] + \\\n \"/scalemgmt/v2/filesystems/\"+test[\"primaryFs\"]+\"/owner/\"+dir_name\n LOGGER.debug(check_dir_link)\n headers = {\n 'accept': 'application/json',\n }\n response = requests.get(check_dir_link, headers=headers,\n verify=False, auth=(test[\"username\"], test[\"password\"]))\n LOGGER.debug(response.text)\n if response.status_code == 200:\n LOGGER.info(f'Directory Check : directory {dir_name} created successfully')\n return\n time.sleep(15)\n LOGGER.info(f'Directory Check : Checking for directory {dir_name}')\n val += 1\n LOGGER.error(f'directory {dir_name} not created successfully')\n LOGGER.error(str(response))\n LOGGER.error(str(response.text))\n assert False", "title": "" }, { "docid": "ce1034397b71d04a71cdfe4ee23edfb7", "score": "0.57554734", "text": "def list_dir(path):\n return xbmcvfs.listdir(path)", "title": "" }, { "docid": "d428fc11848fe05f7d2a025fccaa9d10", "score": "0.57475615", "text": "def _list_dir(self, path):\n entries = os.listdir(path)\n dir_entries = [\n [['..', quote_plus(os.path.normpath(os.path.join(path, '..')))]]]\n for name in entries:\n if name.startswith('.'):\n # skip invisible files/directories\n continue\n fullname = os.path.join(path, name)\n displayname = linkname = name\n # Append / for directories or @ for symbolic links\n if os.path.isdir(fullname):\n displayname += '/'\n if os.path.islink(fullname):\n displayname += '@'\n dir_entries.append(\n [[displayname, 
quote_plus(os.path.join(path, linkname))]])\n\n self.render(\n 'dir.html', dir_name=path, dir_entries=dir_entries)", "title": "" }, { "docid": "2ee295e6a2b5a07dc10ca97f39c41130", "score": "0.5736092", "text": "def list_dir(self):\n if self.__data_dir != '':\n print(os.listdir(self.__data_dir))\n else:\n raise FileNotFoundError(\"Data directory does not exist.\")", "title": "" }, { "docid": "c32ab6878af97a7692f0648b0d20870f", "score": "0.5734562", "text": "def _ls(self, path: str) -> List[str]:\n return self._fs.ls(path)", "title": "" }, { "docid": "41b761f2011fc626415e42c19b137a3c", "score": "0.5726049", "text": "def _get_dir_listing(self, path):\n all_fnames = [f for f in os.listdir(path)\n if not f.startswith('.')]\n suffixes = self.page_options.get('suffix_whitelist', '').split(',')\n fnames = []\n for fname in all_fnames:\n for suffix in suffixes:\n if fname.endswith(suffix):\n fnames.append(fname)\n break\n elif os.path.isdir(os.path.join(path, fname)):\n fnames.append(fname + '/')\n break\n fnames.sort()\n base = self._get_safe_base_path(path)\n links = []\n for name in fnames:\n doc = Doc()\n name = os.path.join(base, name)\n with doc.tag('a'):\n doc.attr(href=url_for('RawFile:get',\n name=name.replace('/', '|')))\n doc.text(name)\n links.append(doc.getvalue())\n return list_to_html_list(links)", "title": "" }, { "docid": "09ce5f6ca004a4495a6ad52da49a9971", "score": "0.5724809", "text": "def do_get_folder_listing(self, line,opts):\n result = self.client.get_folder_listing(max_list_count=opts.max_count, \n list_startoffset=opts.start_offset)\n if result is not None:\n header, data = result\n logger.info(\"Result of get_folder_listing:\\n%s\", data)", "title": "" }, { "docid": "14c16fdacce5e434bfca1bb105d03118", "score": "0.5722268", "text": "def test_list(self):\n pass", "title": "" }, { "docid": "75593ae51f0c3a355594d6b6bc2b874f", "score": "0.57006615", "text": "def listdir(self, **kwargs):\n return [self / child\n for child in map(self._always_unicode, os.listdir(self))]", "title": "" }, { "docid": "d0af8c18b1e25ec4158a93dbb43c839d", "score": "0.56997764", "text": "def test_list_all_subdirectories(self) -> None:\n\n dirname = [secrets.token_hex(6) for _ in range(10)]\n\n for directory in dirname:\n self.helper.set_path(os.path.join(self.temp_path.name, directory)).create()\n\n self.helper.set_path(\n os.path.join(self.temp_path.name, directory, directory)\n ).create()\n\n self.helper.set_path(\n os.path.join(self.temp_path.name, directory, directory, directory)\n ).create()\n\n self.helper.set_path(self.temp_path.name)\n\n expected = (\n ListHelper(\n [os.path.join(self.temp_path.name, x) for x in dirname]\n + [os.path.join(self.temp_path.name, x, x) for x in dirname]\n + [os.path.join(self.temp_path.name, x, x, x) for x in dirname]\n )\n .remove_duplicates()\n .sort()\n .subject\n )\n\n actual = self.helper.list_all_subdirectories()\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "536c9eaf78a2f75c304cdef000d89e31", "score": "0.5682046", "text": "def ls(self, path=None):\n if path is None:\n path = '/'\n try:\n self.ftp.cwd(path)\n self.log.debug('ls {}'.format(self.ftp.pwd()))\n print(self.ftp.retrlines('LIST'))\n except ftplib.error_perm as e:\n self.log.error('invalid path: {} ({})'.format(path, e))\n except ftplib.all_errors as e:\n self.log.error('FTP error: {}'.format(e))", "title": "" }, { "docid": "f8c08d4b182a6664cdceec9ef7991283", "score": "0.5679996", "text": "def test_list_all_files(self) -> None:\n\n dirname = [secrets.token_hex(6) for _ in 
range(10)]\n filename = secrets.token_hex(6)\n\n for directory in dirname:\n self.helper.set_path(os.path.join(self.temp_path.name, directory)).create()\n\n with open(\n os.path.join(self.helper.path, filename), \"w\", encoding=\"utf-8\"\n ) as file_stream:\n file_stream.write(\"Hello\")\n\n self.helper.set_path(\n os.path.join(self.temp_path.name, directory, directory)\n ).create()\n\n with open(\n os.path.join(self.helper.path, filename), \"w\", encoding=\"utf-8\"\n ) as file_stream:\n file_stream.write(\"Hello\")\n\n self.helper.set_path(\n os.path.join(self.temp_path.name, directory, directory, directory)\n ).create()\n\n with open(\n os.path.join(self.helper.path, filename), \"w\", encoding=\"utf-8\"\n ) as file_stream:\n file_stream.write(\"Hello\")\n\n self.helper.set_path(self.temp_path.name)\n\n expected = (\n ListHelper(\n [os.path.join(self.temp_path.name, x, filename) for x in dirname]\n + [os.path.join(self.temp_path.name, x, x, filename) for x in dirname]\n + [\n os.path.join(self.temp_path.name, x, x, x, filename)\n for x in dirname\n ]\n )\n .sort()\n .subject\n )\n\n actual = self.helper.list_all_files()\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "8ad9319fd921c33244717268ff15ccd5", "score": "0.5675667", "text": "async def test_list_all_clusters(client):\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/gamecontrol/1.0.1/clusters',\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "title": "" }, { "docid": "9e02aa2049fde6c9a995ff10f083479e", "score": "0.56751925", "text": "async def test_list_all_servers(client):\n params = [('cluster', 'cluster_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/gamecontrol/1.0.1/servers',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "title": "" }, { "docid": "98c6a4af561086571323557863df851f", "score": "0.5671415", "text": "def cli(ctx):\n default_command(ctx, \"list\")", "title": "" }, { "docid": "630f69fe0a7f4cb68c0b6e90d0e2460c", "score": "0.566296", "text": "def dir_listing(request):\n # Generate random links and files\n m_types = [\n (' ' , 'unknown.gif'),\n ('TXT', 'text.gif'),\n ('DIR', 'folder.gif'),\n ('IMG', 'image2.gif'),\n ]\n\n m_urls = []\n m_urls_append = m_urls.append\n m_rand = Random()\n for x in xrange(m_rand.randint(2,50)):\n l_type = m_rand.randint(0,len(m_types) - 1)\n # [IMG, ALT, URL, DATE, SIZE]\n l_date = datetime.datetime.fromtimestamp(m_rand.randint(10000000,1350000000)).strftime(\"%Y-%m-%d %H:%M\")\n l_url = generate_random_url(\"/home/links/\", 1 , False)[0]\n\n m_urls_append(\n [\n m_types[l_type][1],\n m_types[l_type][0],\n l_url,\n l_date,\n '{:.2f}'.format(m_rand.random() * 10)\n ]\n )\n\n ctx = {'urls':m_urls}\n\n return render_to_response_random_server('home/dir_listing.html', ctx, context_instance=RequestContext(request))", "title": "" }, { "docid": "5393c604160274e8b0131d3708c5dd93", "score": "0.56516194", "text": "def list_directory(self, path):\n\n\t\t# Split the path\n\t\tsplit_path = os.path.normpath(path).split(os.path.sep)\n\t\t# Find the folder by iterating through the path into the file system\n\t\ttraversal_history = [self.filesystem]\n\t\tfor item in split_path:\n\t\t\tif item in ('', '.'):\n\t\t\t\tcontinue\n\t\t\telif item == '..':\n\t\t\t\tif len(traversal_history) == 
1:\n\t\t\t\t\treturn (31, \"Cannot traverse back from root directory.\")\n\t\t\t\t# Move back\n\t\t\t\tdel traversal_history[-1]\n\t\t\t# Traverse to the next section\n\t\t\ttry:\n\t\t\t\ttraversal_history.append(traversal_history[-1][item])\n\t\t\texcept (KeyError, TypeError):\n\t\t\t\treturn (32, \"Path is invalid.\")\n\t\t# Check the final directory\n\t\tif not type(traversal_history[-1]) == dict:\n\t\t\treturn (32, \"Path is invalid.\")\n\t\t# List the folder\n\t\tdata = '\\n'.join(traversal_history[-1])\n\t\t# Update\n\t\tself._backend_update()\n\t\treturn (0, data)", "title": "" }, { "docid": "79c0bd602e3e1bf02f9f6c07ef6a9b19", "score": "0.5644712", "text": "def ls(self, argv):\n if len(argv) > 1:\n path = argv[1]\n else:\n path = \"/\"\n self._print(\"mode size time name\")\n self._obj._controller.List(path, self._List_cb)\n self._print(\"\\n\")", "title": "" }, { "docid": "81a59e7306eacd8fffd07e2055b23d9b", "score": "0.5643067", "text": "async def list(self):\n return", "title": "" }, { "docid": "6e0e60e5b84bfbdbe1332151fb1c8ca1", "score": "0.5636709", "text": "async def test_store_get_all_items_sync_reads_dir(\n store: ReadStore[CoolModel], store_path: PurePosixPath, mock_filesystem: AsyncMock\n) -> None:\n mock_filesystem.sync.read_json_dir.return_value = [\n DirEntry(path=store_path / \"foo\", contents=CoolModel(foo=\"hello\", bar=0)),\n DirEntry(path=store_path / \"bar\", contents=CoolModel(foo=\"from the\", bar=1)),\n DirEntry(path=store_path / \"baz\", contents=CoolModel(foo=\"other side\", bar=2)),\n ]\n items = store.get_all_items_sync()\n\n assert items[0] == CoolModel(foo=\"hello\", bar=0)\n assert items[1] == CoolModel(foo=\"from the\", bar=1)\n assert items[2] == CoolModel(foo=\"other side\", bar=2)\n mock_filesystem.sync.read_json_dir.assert_called_with(\n store_path, parse_json=store.parse_json, ignore_errors=False\n )", "title": "" }, { "docid": "3b345fbeb9c9037186f21fde5fe92c08", "score": "0.5632441", "text": "def test_file_list():\n mock = MagicMock(return_value=\"\")\n with patch.dict(rpm.__salt__, {\"cmd.run\": mock}):\n assert rpm.file_list(\"httpd\") == {\"errors\": [], \"files\": []}\n assert not _called_with_root(mock)", "title": "" }, { "docid": "f883fbce64d3ef5de32911a842acc55b", "score": "0.56257087", "text": "def test_file_list_root():\n\n mock = MagicMock(return_value=\"\")\n with patch.dict(rpm.__salt__, {\"cmd.run\": mock}):\n rpm.file_list(\"httpd\", root=\"/\")\n assert _called_with_root(mock)", "title": "" }, { "docid": "4b77ea6d97f6ef6ab3f2714abfa01ba0", "score": "0.561773", "text": "def test_create_list(self):\n\n result = self.client.get(\"/create_list\")\n self.assertIn(\"Choose a location\", result.data)\n print \"completed create list route test\"", "title": "" }, { "docid": "112452be03151d41ffeb6634a5bc34ec", "score": "0.5610625", "text": "def mockedListDir(filepath):\n assert filepath[0] == \"/\"\n if filepath == \"/\":\n return list(fileSystem.keys())\n root = fileSystem\n normpath = os.path.normpath(filepath)\n split = normpath.split(os.sep)[1:]\n listdir = list(fileSystem.keys())\n for filename in split:\n if filename not in root or not isinstance(root[filename], dict):\n raise ValueError(\"invalid filepath given\")\n root = root[filename]\n listdir = list(root)\n return listdir", "title": "" }, { "docid": "d2b4be8b61f82648dc974d443dc831fb", "score": "0.5609688", "text": "def test_retrieve_own_list(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n list = mixer.blend(List, creator=user)\n list_id = 
list.pk\n\n url = self._get_url(list_id)\n response = self.client.get(url, **headers)\n\n response_list = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response_list['id'] == list_id)", "title": "" } ]
72c48f399241f9a7b82f9f34ccb5ce32
Function for reading in our full dataset, creating estimates for each city-time pair, and writing these estimates to a new dataset in addition to our old dataset.
[ { "docid": "79c372426cf4ef285186dc49f07be7da", "score": "0.0", "text": "def make_age_data():\n\n # Read in input dataframe\n dataset = pd.read_excel(os.path.join(\"..\", \"data\", \"dataset.xlsx\"))\n age_percents = []\n\n # Read in dataframe for age\n age_data = pd.read_excel(os.path.join(\"..\", \"data\", \"age_stats.xlsx\"))\n\n # Now iterate through and append each column to our aggregate dataset\n percent_vals = age_data['Percent']\n counter = 0\n city_counter = 0\n for percent_val in percent_vals:\n city_counter += 1\n vals_estimate = fit_exp_reg(percent_val) # Fit exponential model\n for year in range(1982, 2018): # Iterate through years\n counter += 1\n if year < 2008: # Don't add significant value before 2008\n age_percents.append(np.nan)\n else: # Add for significant values\n age_percents.append(vals_estimate[year - 2008])\n\n # Write output to dataset\n new_list = [\"\", \"\", \"\", \"\"] + age_percents\n dataset['age_65_perc'] = new_list\n dataset.to_excel(os.path.join(\"..\", \"data\",\n \"dataset_with_age_sigma002.xlsx\"))", "title": "" } ]
[ { "docid": "45a16e3c5f171aa393bb3e0b4a7b0e73", "score": "0.679058", "text": "def load_data(city, month, day):\n \n #Washington data have 8 columns not 10 like others. Gender and Birth Year left.\n \n c_path = 'chicago.csv'\n ny_path = 'new_york_city.csv'\n w_path = 'washington.csv'\n \n c_data = pd.read_csv(c_path)\n ny_data = pd.read_csv(ny_path)\n w_data = pd.read_csv(w_path)\n \n c_data['city'] = pd.Series(['C' for i in range(len(c_data.index))])\n ny_data['city'] = pd.Series(['NY' for i in range(len(ny_data.index))])\n w_data['city'] = pd.Series(['W' for i in range(len(w_data.index))])\n \n data = [c_data, ny_data, w_data]\n \n df = pd.concat(data)\n \n \n #Set Start Time and End Time as datetime column in pandas\n df['Start Time'] = pd.to_datetime(df['Start Time'], format='%Y%m%d %H:%M:%S')\n df['End Time'] = pd.to_datetime(df['End Time'], format='%Y%m%d %H:%M:%S')\n \n #Get rid of july because there are only 40 values\n df = df[(df['End Time'].dt.month <= 6)]\n \n #Add columns for times\n df['Month Start'] = df['Start Time'].dt.month_name()\n df['Month End'] = df['End Time'].dt.month_name()\n df['Day Start'] = df['Start Time'].dt.day_name()\n df['Day End'] = df['End Time'].dt.day_name()\n df['Hour Start'] = df['Start Time'].dt.hour\n df['Hour End'] = df['End Time'].dt.hour\n df['Minute Start'] = df['Start Time'].dt.minute\n df['Minute End'] = df['End Time'].dt.minute\n \n \n #Get another DataFrame for answers with all data\n #df2 = df.copy(deep=True) #----> can't use without changing the variable\n #inputs in the other function\n\n \n if city == 'ALL':\n if (month <= 6) and (day <= 7):\n df = df[(df['End Time'].dt.month == month) &\\\n (df['End Time'].dt.dayofweek == day-1)]\n elif (month <= 6):\n df = df[(df['End Time'].dt.month == month)]\n elif (day <= 7):\n df = df[(df['End Time'].dt.dayofweek == day-1)]\n else:\n df\n else:\n if (month <= 6) and (day <= 7):\n df = df[(df['End Time'].dt.month == month) &\\\n (df['End Time'].dt.dayofweek == day-1) &\\\n (df['city'] == city)]\n elif (month <= 6):\n df = df[(df['End Time'].dt.month == month) &\\\n (df['city'] == city)]\n elif (day <= 7):\n df = df[(df['End Time'].dt.dayofweek == day-1) &\\\n (df['city'] == city)]\n else:\n df = df[df['city'] == city]\n\n return df", "title": "" }, { "docid": "cdbe8528dd1ea2f5cd21457b19b36360", "score": "0.6661183", "text": "def condense_data(in_file, out_file, city): \n with open(out_file, 'w') as f_out, open(in_file, 'r') as f_in:\n # set up csv DictWriter object - writer requires column names for the\n # first row as the \"fieldnames\" argument\n out_colnames = ['duration', 'month', 'hour', 'day_of_week', 'user_type'] \n trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames)\n trip_writer.writeheader()\n \n ## TODO: set up csv DictReader object ##\n trip_reader = csv.DictReader(f_in)\n\n # collect data from and process each row\n for row in trip_reader:\n # set up a dictionary to hold the values for the cleaned and trimmed\n # data point\n new_point = {}\n ## TODO: use the helper functions to get the cleaned data from ##\n ## the original data dictionaries. ##\n ## Note that the keys for the new_point dictionary should match ##\n ## the column names set in the DictWriter object above. 
##\n new_point['duration'] = duration_in_mins(row, city)\n new_point['month']= time_of_trip(row, city)[0]\n new_point['hour'] = time_of_trip(row, city)[1]\n new_point['day_of_week'] = time_of_trip(row, city)[2]\n new_point['user_type'] = type_of_user(row, city)\n \n \n ## TODO: write the processed information to the output file. ##\n ## see https://docs.python.org/3/library/csv.html#writer-objects ##\n \n trip_writer.writerow(new_point)", "title": "" }, { "docid": "9c2834da44a88aa83e507c59d898f19f", "score": "0.6259828", "text": "def old_parse_data():\n\n filename = \"data/api/\" + get_todays_date() + \"-external.json\"\n if os.path.exists(filename):\n file_time = datetime.fromtimestamp(os.path.getmtime(filename))\n now = datetime.now()\n file_lifetime = now - file_time\n if (file_lifetime.total_seconds() / 60) / 60 < 1:\n return filename\n\n data = download_dataset()\n if data == None:\n with open(\"data/external/latest.json\", \"r\") as f:\n data = json.load(f)\n\n stats = {\n \"confirmed\": data['latest']['confirmed'],\n \"deaths\": data['latest']['deaths'],\n \"recovered\": data['latest']['recovered'],\n \"countries\": len(list(set([country['country'] for country in data['confirmed']['locations']]))),\n \"updated\": data['confirmed']['last_updated'].split(\"T\")[0]\n }\n\n cities = []\n for c, d, r in zip(data['confirmed']['locations'], data['deaths']['locations'], data['recovered']['locations']):\n if c['country'] == d['country'] == r['country']:\n if not (c['latest'] == d['latest'] == r['latest'] == 0):\n parsed_city = {\n \"type\": \"Feature\",\n \"properties\": {\n \"city\": c['country'],\n \"count\": c['latest'],\n \"deaths\": d['latest'],\n \"recovered\": r['latest'],\n \"icon\": \"theatre\"\n },\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [float(c['coordinates']['long']), float(c['coordinates']['lat'])]\n }\n }\n cities.append(parsed_city)\n\n with open(filename, \"+w\") as json_file:\n json.dump({\"cities\": cities, \"stats\": stats}, json_file, sort_keys=True, indent=2, separators=(\",\", \":\"))\n\n return filename", "title": "" }, { "docid": "cb0261429241753cde32d6903aa5d9a4", "score": "0.61876017", "text": "def load_data(city, month, day):\n\n \"\"\"create a new dataframe to pull apart day, month and hour because\n they are mixed in the same columns: \"Start Time\" and \"End Time\" from\n the csv files.\"\"\"\n \n # create a datatime object from Start and End Time columns\n df = pd.read_csv(CITY_DATA[city])\n\n # create a datatime object from \"Start\" and \"End Time\" columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # new column for hours\n hours_list = [\"12 AM\", \"1 AM\", \"2 AM\", \"3 AM\", \"4 AM\", \"5 AM\", \"6 AM\", \"7 AM\", \"8 AM\", \"9 AM\", \"10 AM\", \"11 AM\",\n \"12 PM\", \"1 PM\", \"2 PM\", \"3 PM\", \"4 PM\", \"5 PM\", \"6 PM\", \"7 PM\", \"8 PM\", \"9 PM\", \"10 PM\", \"11 PM\"]\n df['Hour'] = pd.to_datetime(df['Start Time']).dt.hour\n df['Hour'] = df['Hour'].apply(lambda x: hours_list[x])\n\n # new column for days\n df['Day'] = df['Start Time'].dt.weekday_name\n\n # extract month from Start Time to create new columns\n months_list = ['January', 'February', 'March', 'April', 'May', 'June']\n df['Month'] = df['Start Time'].dt.month - 1\n df['Month'] = df['Month'].apply(lambda x: months_list[x])\n\n # Add new columns: start station and end station\n df['Start and End'] = '(Start) ' + df['Start Station'] + ' (End) ' + df['End Station'] # returns a deltaTime object\n\n # 
filter by month\n if month != 'all':\n # new dataframe - month\n df = df[df['Month'] == month.title()]\n\n # filter by day\n if day != 'all':\n # new dataframe -day\n df = df[df['Day'] == day.title()]\n\n return df\n\n # Add the functionality of displaying the raw data that was missing", "title": "" }, { "docid": "99835ea1034322e07e70bb49ba2539b6", "score": "0.6162779", "text": "def get_city_data(city, api):\n _, body = api.measurements(city=city, parameter='pm25')\n city_data = api_json_to_tuples(body)\n for cur_line in city_data:\n db_record = Record(\n city=city, datetime=cur_line[0], value=cur_line[1])\n DB.session.add(db_record)", "title": "" }, { "docid": "2f10149f14de50dd4c36f853d447dd2d", "score": "0.6107502", "text": "def summarise_data(trip_in, station_data, trip_out):\n # generate dictionary of station - city mapping\n station_map = create_station_mapping(station_data)\n\n \"\"\"Below implementation is using csv read write module. But this can be very easily implemented \n using Pandas Dataframe\"\"\"\n\n with open(trip_out, 'w') as f_out:\n # set up csv writer object\n out_colnames = ['duration', 'start_date', 'start_year',\n 'start_month', 'start_hour', 'weekday',\n 'start_city', 'end_city', 'subscription_type']\n\n #DictWriter// Dictreader write / read as complete object, the content of / to a complete row\n #If\n trip_writer = csv.DictWriter(f_out, fieldnames=out_colnames) # Writes Python dictionary\n trip_writer.writeheader()\n\n for data_file in trip_in:\n with open(data_file, 'r') as f_in:\n # set up csv reader object\n trip_reader = csv.DictReader(f_in)\n\n # collect data from and process each row\n for row in trip_reader:\n new_point = {}\n\n # convert duration units from seconds to minutes\n ### Question 3a: Add a mathematical operation below ###\n ### to convert durations from seconds to minutes. ###\n new_point['duration'] = float(row['Duration']) / 60\n\n # reformat datestrings into multiple columns\n ### Question 3b: Fill in the blanks below to generate ###\n ### the expected time values. 
###\n trip_date = datetime.strptime(row['Start Date'], '%m/%d/%Y %H:%M')\n new_point['start_date'] = trip_date.strftime('%Y-%m-%d')\n new_point['start_year'] = trip_date.strftime('%Y')\n new_point['start_month'] = trip_date.strftime('%m')\n new_point['start_hour'] = trip_date.strftime('%H')\n new_point['weekday'] = trip_date.strftime('%A')\n\n # remap start and end terminal with start and end city\n new_point['start_city'] = station_map[row['Start Terminal']]\n new_point['end_city'] = station_map[row['End Terminal']]\n # two different column names for subscribers depending on file\n if 'Subscription Type' in row:\n new_point['subscription_type'] = row['Subscription Type']\n else:\n new_point['subscription_type'] = row['Subscriber Type']\n\n # write the processed information to the output file.\n trip_writer.writerow(new_point)", "title": "" }, { "docid": "4ee093ddb26ef4099c1e49fe287c9bf6", "score": "0.5994244", "text": "def download_city_bike(source=\"oslobysykkel.no\", years=(2020, 2021)):\n end_data = []\n\n lat = {}\n lon = {}\n name = {}\n for year in years:\n print(f\"Loading {year}\", flush=True)\n for month in range(12):\n month += 1\n df = pd.read_csv(f\"https://data.urbansharing.com/{source}/trips/v1/{year}/{month:02d}.csv\")\n # station_names = sorted(set(df[\"start_station_name\"]) | set(df[\"end_station_name\"]))\n\n df[\"ended_at\"] = df[\"ended_at\"].map(lambda x: x if \".\" in x else \".000000+\".join(x.split(\"+\")))\n df[\"ended_at\"] = pd.to_datetime(df[\"ended_at\"], format=\"%Y-%m-%d %H:%M:%S.%f+00:00\")\n df[\"ended_at\"] = df[\"ended_at\"].dt.tz_localize(\"UTC\")\n df[\"ended_at\"] = df[\"ended_at\"].dt.tz_convert(\"CET\")\n\n end_time = df[\"ended_at\"].dt\n\n df[\"trip\"] = 1\n df[\"Day\"] = end_time.day\n df[\"Day of week\"] = end_time.dayofweek\n df[\"Hour\"] = end_time.hour\n df[\"Month\"] = end_time.month\n df[\"Year\"] = end_time.year\n df = df.rename({\"end_station_id\": \"End station ID\"}, axis=1)\n\n for _, row in df.iterrows():\n lat[row[\"End station ID\"]] = row[\"end_station_latitude\"]\n lon[row[\"End station ID\"]] = row[\"end_station_longitude\"]\n name[row[\"End station ID\"]] = row[\"end_station_name\"]\n\n end = df.groupby([\"End station ID\", \"Year\", \"Month\", \"Day of week\", \"Hour\"]).sum()[[\"trip\"]]\n end_data.append(end)\n\n grouped = pd.concat(end_data).groupby(level=(0, 1, 2, 3, 4)).sum()\n\n dataset = xr.Dataset.from_dataframe(grouped).to_array().squeeze()\n\n # Drop trips that started in the last year of interest and ended the year after\n unwanted_years = set(dataset.coords[\"Year\"].values) - set(years)\n for year in unwanted_years:\n dataset = dataset.drop(year, \"Year\")\n\n # Set equal to 0 for all hours with no trips\n dataset.values[np.isnan(dataset.values)] = 0\n\n # Add station metadata\n lat = [lat[station_id.item()] for station_id in dataset.coords[\"End station ID\"]]\n lon = [lon[station_id.item()] for station_id in dataset.coords[\"End station ID\"]]\n name = [name[station_id.item()] for station_id in dataset.coords[\"End station ID\"]]\n dataset = xr.DataArray(\n dataset.data,\n coords={\n **{key: value for key, value in dataset.coords.items() if key != \"variable\"},\n \"lat\": ((\"End station ID\",), lat),\n \"lon\": ((\"End station ID\",), lon),\n \"name\": ((\"End station ID\",), name),\n },\n dims=dataset.dims,\n name=\"Bike trips\",\n )\n return dataset", "title": "" }, { "docid": "8b711131d54a2ee1cd144146c21e0839", "score": "0.59884346", "text": "def load_data(city, month, day):\n \n # Reading CSV File \n df = 
pd.read_csv(CITY_DATA[city])\n \n # Change Start Time colomn to datetime in Pandas\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # Create a new coloumns 'month' , 'day_of_week' and 'hour' then get the data from 'Start Time' colomn\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n \n # Add a new coloumn 'Start to End Station' to save each trip from start to end\n df['Trip Stations'] = 'From ' + df['Start Station'] + ' to ' + df['End Station']\n \n return df", "title": "" }, { "docid": "6d7d5cb78b329a8bf59268ebb885857c", "score": "0.59709436", "text": "def parse_data():\n filename = \"data/api/\" + get_todays_date() + \"-external.json\"\n if os.path.exists(filename):\n file_time = datetime.fromtimestamp(os.path.getmtime(filename))\n now = datetime.now()\n file_lifetime = now - file_time\n if (file_lifetime.total_seconds() / 60) / 60 < 1:\n return filename\n \n data = download_dataset()\n if data == None:\n with open(\"data/external/latest.json\", \"r\") as f:\n data = json.load(f)\n\n stats = {\n \"confirmed\": data['latest']['confirmed'],\n \"deaths\": data['latest']['deaths'],\n \"recovered\": data['latest']['recovered'],\n \"countries\": len(data['locations']),\n \"updated\": data['locations'][0]['last_updated'].split(\"T\")[0]\n }\n print(stats)\n\n locations = []\n for location in data['locations']:\n parsed_city = {\n \"type\": \"Feature\",\n \"properties\": {\n \"city\": location['country'],\n \"count\": location['latest']['confirmed'],\n \"deaths\": location['latest']['deaths'],\n \"recovered\": location['latest']['recovered'],\n \"icon\": \"theatre\"\n },\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [float(location['coordinates']['longitude']), float(location['coordinates']['latitude'])]\n }\n }\n locations.append(parsed_city)\n\n with open(filename, \"+w\") as json_file:\n json.dump({\"cities\": locations, \"stats\": stats}, json_file, sort_keys=True, indent=2, separators=(\",\", \":\"))\n\n return filename", "title": "" }, { "docid": "d4bf28d53d652e8d0bfaa0407518b34e", "score": "0.59460115", "text": "def load_data(city, month, day):\n # read city data from CITY_DATA\n df = pd.read_csv(CITY_DATA[city])\n city = city.lower()\n if city == 'Chicago':\n \treturn 'chicago.csv'\n elif city == 'New York':\n \treturn 'new_york_city.csv'\n elif city == 'Washington':\n \treturn 'washington.csv'\n \n # get month filter\n # convert the Start Time column to datetime\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n \n # filter by day if 
applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day'] == day.title()]\n \n return df", "title": "" }, { "docid": "7626898be710d977fa805af736628a01", "score": "0.59440815", "text": "def load_data(city, month, day):\n original_df = pd.read_csv(CITY_DATA[city])\n # change values in start time column to datetime format\n original_df['Start Time'] = pd.to_datetime(original_df['Start Time'])\n # make a new column titled month with month value taken from start time of corresponding trip\n original_df['month'] = original_df['Start Time'].dt.month\n # make a new column titled day_of_week with week-day value taken from start time of corresponding trip\n original_df['day_of_week'] = original_df['Start Time'].dt.weekday_name\n # change values in End Time column to datetime format\n original_df['End Time'] = pd.to_datetime(original_df['End Time'])\n filtered_df = original_df.copy(deep=True)\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n filtered_df = filtered_df[filtered_df['month'] == month]\n \n if day != 'all':\n # filter by day of week to create the new dataframe\n filtered_df = filtered_df[filtered_df['day_of_week'] == day.title()]\n \n return filtered_df, original_df", "title": "" }, { "docid": "c05b6333fd165ce04902a38aa0a4cecb", "score": "0.5940116", "text": "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n files = os.listdir('data/raw/060718')\n# files.remove('.gitkeep')\n df = pd.DataFrame()\n for f in files:\n df = df.append(pd.read_json(f'data/raw/060718/{f}'))\n df['region'] = [x[-1] for x in df.location.str.split(', ').tolist()]\n df['province'] = [x[-2] for x in df.location.str.split(', ').tolist()]\n df.date = pd.to_datetime(df.date)\n df['hour'] = pd.to_datetime(df['time'], format='%H:%M:%S').dt.hour\n df.to_csv('data/processed/crime_incidences.csv', index=False)", "title": "" }, { "docid": "05127570fc748efb0061049e2d6fbc74", "score": "0.591074", "text": "def load_data(city, month, day):\n city_csv_df= pd.read_csv(CITY_DATA[city])\n \"\"\"converting start time column to datetime form readable by python\"\"\"\n city_csv_df['Start Time']= pd.to_datetime(city_csv_df['Start Time'])\n \"\"\"creating new columns with data extracted from start time column, such as month,day,hour\n these new columns ease the filtering process according to user inputs\"\"\"\n city_csv_df['month']= city_csv_df['Start Time'].dt.month\n city_csv_df['day']= city_csv_df['Start Time'].dt.day_name()\n city_csv_df['hour']= city_csv_df['Start Time'].dt.hour\n city_csv_df['trip']= city_csv_df[['Start Station','End Station']].agg(' to '.join, axis=1)\n return city_csv_df", "title": "" }, { "docid": "1543e1773d58c67f4e776665f8422de1", "score": "0.58874846", "text": "def load_data(city, month, day):\n# start of added code\n\n print(\"\\nPythong is loading the data considering your desired filters.\")\n start_time = time.time()\n\n # filter data concerning the entered city filters\n if isinstance(city, list):\n df = pd.concat(map(lambda city: pd.read_csv(CITY_DATA[city]), city),\n sort=True)\n # reorganize data frame\n try:\n df = df.reindex(columns=['Unnamed: 0', 'Start Time', 'End Time',\n 'Trip Duration', 'Start Station',\n 'End Station', 'User Type', 'Gender',\n 'Birth Year'])\n except:\n pass\n else:\n df = pd.read_csv(CITY_DATA[city])\n\n # create columns for time statistics\n df['Start Time'] 
= pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Weekday'] = df['Start Time'].dt.weekday_name\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # filter data concerning the desired month and weekday into new data frames\n if isinstance(month, list):\n df = pd.concat(map(lambda month: df[df['Month'] ==\n (months.index(month)+1)], month))\n else:\n df = df[df['Month'] == (months.index(month)+1)]\n\n if isinstance(day, list):\n df = pd.concat(map(lambda day: df[df['Weekday'] ==\n (day.title())], day))\n else:\n df = df[df['Weekday'] == day.title()]\n\n print(\"\\nThanks to the power of panda, this only took {} seconds.\".format((time.time() - start_time)))\n print('-'*40)\n\n# end of added code\n\n return df", "title": "" }, { "docid": "b8f86c250317966bd266860fab2be2d1", "score": "0.58751136", "text": "def readdata(datetime_to_read, dataset_options_dict, verbose=None):\n\n ## Manage verbose\n if verbose is None:\n verbose_actual = dataset_options_dict['verbose']\n else:\n verbose_actual = verbose\n\n if dataset_options_dict['raw_data_format'] == 'generic_netcdf':\n variable_names = (dataset_options_dict['longitude_variable_name']\n , dataset_options_dict['latitude_variable_name']\n , dataset_options_dict['field_variable_name'])\n\n DATA = read_generic_netcdf_at_datetime(datetime_to_read\n , variable_names = variable_names\n , data_dir = dataset_options_dict['raw_data_parent_dir']\n , fmt = dataset_options_dict['file_name_format']\n , verbose = verbose_actual)\n\n elif dataset_options_dict['raw_data_format'] == 'generic_netcdf_with_multiple_times':\n variable_names = (dataset_options_dict['longitude_variable_name']\n , dataset_options_dict['latitude_variable_name']\n , dataset_options_dict['time_variable_name']\n , dataset_options_dict['field_variable_name'])\n\n DATA = read_generic_netcdf_at_datetime(datetime_to_read\n , variable_names = variable_names\n , dt_to_use = datetime_to_read\n , data_dir = dataset_options_dict['raw_data_parent_dir']\n , fmt = dataset_options_dict['file_name_format']\n , verbose = verbose_actual)\n\n\n elif dataset_options_dict['raw_data_format'] == 'cmorph':\n DATA = read_cmorph_at_datetime(datetime_to_read\n , area = dataset_options_dict['area']\n , data_dir = dataset_options_dict['raw_data_parent_dir']\n , fmt = dataset_options_dict['file_name_format']\n , verbose = verbose_actual)\n\n elif dataset_options_dict['raw_data_format'] == 'imerg_hdf5':\n DATA = read_imerg_hdf5_at_datetime(datetime_to_read\n , area = dataset_options_dict['area']\n , data_dir = dataset_options_dict['raw_data_parent_dir']\n , fmt = dataset_options_dict['file_name_format']\n , verbose = verbose_actual)\n\n elif dataset_options_dict['raw_data_format'] == 'cfs_forecast':\n fcst_hour = int((datetime_to_read - dataset_options_dict['datetime_init']).total_seconds()/3600)\n fcst_resolution_hours = dataset_options_dict['data_time_interval']\n if fcst_hour < 1: # There is no data in the file for fcst = 0. Use 6h fcst values.\n records = [1,]\n else:\n records = [int(fcst_hour/fcst_resolution_hours),]\n\n DATA = read_cfs_rt_at_datetime(dataset_options_dict['datetime_init'] # datetime_to_read\n , data_dir = dataset_options_dict['raw_data_parent_dir']\n , fmt = dataset_options_dict['file_name_format']\n , records = records\n , verbose = verbose_actual)\n DATA['data'] = ma.masked_array(DATA['precip'][0])\n\n ## -- Add an elif block here for new datasets. --\n\n else:\n print(('ERROR! 
'+dataset_options_dict['raw_data_format'] + ' is not a valid raw_data_format!'), flush=True)\n DATA = None\n\n return DATA", "title": "" }, { "docid": "5ce9316a8d57d2848fe4f2279c2042dd", "score": "0.5873593", "text": "def load_data(city, month, day):\n\n #Loads data from all cities if user chooses no filter\n if city == 'all':\n df = pd.concat([pd.read_csv(f) for f in glob.glob('*.csv')], ignore_index = True, sort=False)\n #Loads data from specified city\n else:\n df = pd.read_csv(CITY_DATA[city])\n\n #Converts the Start & End Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n #Extracts month, day of the week and hour from Start Time df to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n #Filter by month if applicable\n if month != 'all':\n #Use the index of the months list to get the corresponding int\n #Need to fix this in order to get the str instead of int\n #Refactoring may be required\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #Filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n #Filter by day of week if applicable\n if day != 'all':\n #Filter by day of week to create the new dataframe\n df = df[df['day'] == day.title()]\n\n return df", "title": "" }, { "docid": "329eeed3f733129692e1b2a122846704", "score": "0.5853707", "text": "def combine_data(census_data, covid_data):\n covid_data = covid_data.query(\"day < 30\")\n data_dict = {\"geoid\": []}\n for i in range(60):\n data_dict[str(i)] = []\n for geoid, df in tqdm(covid_data.groupby(\"geoid\")):\n data_dict[\"geoid\"].append(geoid)\n df = df.drop(columns=[\"geoid\"])\n df = df.set_index(\"day\")\n for i in range(30):\n data_dict[str(i)].append(df.loc[i][\"cases\"])\n data_dict[str(i + 30)].append(df.loc[i][\"deaths\"])\n covid_data = pd.DataFrame(data_dict)\n data = census_data.merge(covid_data, how=\"inner\", on=\"geoid\")\n x = data.iloc[:, : 77 + 24]\n y = data.iloc[:, 77 + 24 : 77 + 30]\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)\n return x_train, x_test, y_train, y_test", "title": "" }, { "docid": "189ac73ed0d36d6e372cfe7cf1362b8f", "score": "0.5818552", "text": "def load_data(city, month, day):\n print(\" \".join(['Filters applied: ',city, month, day]))\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # Before starting to filter, fill the NaN of the Gender and Year of Birth with data\n # to avoid losing both columns\n if 'Gender' in df.columns:\n df['Gender'].fillna('Not specified')\n # Notice that by ffilling the Birth year, we will not lose the youngest or oldes, but it may\n # affect the most frequent value obtained, removing these users would affect other maybe \n #more important stats\n if 'Birth Year' in df.columns:\n df['Birth Year'].fillna(method='ffill', axis=0)\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = list_of_months.index(month) \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week 
if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n # Create a column for the trip (from - to)\n df['Trip'] = df[['Start Station', 'End Station']].apply(' - '.join, axis=1) \n\n return df", "title": "" }, { "docid": "e7d8d90f3e51435a76fc7986363f522b", "score": "0.5804492", "text": "def load_city_data():\r\n\r\n global data_frame\r\n\r\n data_frame = pd.read_csv(\"resources/bikeshare-2/\" + CITY_DATA[city.lower()])\r\n\r\n # Drop all rows whose 'Start Time' or \"End Time' or 'Start Station' or 'End Station' have no values\r\n data_frame.dropna(subset=['Start Time', 'End Time', 'Start Station', 'End Station'], inplace=True)\r\n\r\n # print(data_frame.columns)\r\n\r\n data_frame['Start Time'] = pd.to_datetime(data_frame['Start Time'])\r\n # data_frame['End Time'] = pd.to_datetime(data_frame['End Time'])\r\n data_frame['month'] = data_frame['Start Time'].dt.month\r\n data_frame['day_of_week'] = data_frame['Start Time'].dt.weekday_name\r\n data_frame['hour'] = data_frame['Start Time'].dt.hour", "title": "" }, { "docid": "6f3c94182e0cf7299f27064998a1285f", "score": "0.57925105", "text": "def load_data(city, month, day):\n path = ''\n #path = 'Sources/bikeshare-2/'\n filepath = path + CITY_DATA[city]\n df = pd.read_csv(filepath)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'],format='%Y-%m-%d',exact=False )\n df['End Time'] = pd.to_datetime(df['End Time'],format='%Y-%m-%d',exact=False )\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n df['hour'] = df['Start Time'].dt.hour\n\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = {'january':1, 'february':2, 'march':3, 'april':4, 'may':5, 'june':6}\n # filter by month to create the new dataframe\n df = df[df['month'] == months[month]]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n days = {'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4, 'saturday':5, 'sunday':6}\n df = df[df['day_of_week'] == days[day]]\n\n return df", "title": "" }, { "docid": "3872b237ca53c4da736f29206decec9b", "score": "0.57828265", "text": "def load_data(city, month, day):\n month = month.title()\n day = day.title()\n city = CITY_DATA.get(city)\n if month == 'All' and day =='All':\n df = pd.read_csv(city)\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n elif month != 'All' and day =='All':\n df = pd.read_csv(city)\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df =df.loc[df['Start Time'].dt.month == MONTH_DATA.get(month.lower()),:]\n elif month =='All' and day !='All':\n df = pd.read_csv(city)\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df =df.loc[df['Start Time'].dt.weekday_name == day,:]\n elif month !='All' and day !='All':\n df = pd.read_csv(city)\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df =df.loc[df['Start Time'].dt.month == MONTH_DATA.get(month.lower()),:]\n df =df.loc[df['Start Time'].dt.weekday_name == day,:]\n \n return df", "title": "" }, { "docid": "4ee953f64e06938ca3aef52002aa70e5", "score": "0.5782737", "text": 
"def __process_data(self):\n # read the files in as data frame\n self.daily_report = pd.read_csv(self.data_path + \"/\" + file_daily_report)\n self.times_series_confirmed = pd.read_csv(\n self.data_path + \"/\" + file_timeseries_confirmed\n )\n self.times_series_death = pd.read_csv(\n self.data_path + \"/\" + file_timeseries_death\n )\n self.times_series_recovered = pd.read_csv(\n self.data_path + \"/\" + file_timeseries_recovered\n )\n\n # clean up data for timeseries_confirmed\n ## copy data\n confirmed_df = self.times_series_confirmed.copy()\n confirmed_df_2 = self.times_series_confirmed.copy()\n ## summation\n confirmed_concat = confirmed_df[confirmed_df[\"Province/State\"].notna()][\n \"Country/Region\"\n ].unique()\n for country in confirmed_concat:\n new_row = confirmed_df[confirmed_df[\"Country/Region\"] == country].sum()\n new_row[\"Country/Region\"] = country\n new_row[\"Province/State\"] = np.NaN\n new_row[\"Lat\"] = confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == country)\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ].mean()\n new_row[\"Long\"] = confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == country)\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ].mean()\n confirmed_df = confirmed_df.drop(\n confirmed_df[confirmed_df[\"Country/Region\"] == country].index\n )\n confirmed_df = confirmed_df.append(new_row, ignore_index=True)\n ## add Long and Lat manually\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"China\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"China\"][\"Lat\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"China\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"China\"][\"Long\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Canada\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Canada\"][\"Lat\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Canada\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Canada\"][\"Long\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Australia\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Australia\"][\n \"Lat\"\n ].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Australia\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Australia\"][\n \"Long\"\n ].mean()\n ## make tidy table\n confirmed_df = confirmed_df[confirmed_df[\"Province/State\"].isna()].drop(\n columns=[\"Province/State\", \"Unnamed: 0\"], axis=1\n )\n confirmed_tidy = confirmed_df.melt(id_vars=[\"Country/Region\", \"Lat\", \"Long\"])\n confirmed_tidy[\"variable\"] = pd.to_datetime(confirmed_tidy[\"variable\"])\n self.times_series_confirmed_tidy = confirmed_tidy\n\n # clean up data for timeseries_death\n ## copy data\n death_df = self.times_series_death.copy()\n death_df_2 = self.times_series_death.copy()\n ## summation\n death_concat = death_df[death_df[\"Province/State\"].notna()][\n \"Country/Region\"\n ].unique()\n for country in death_concat:\n new_row = death_df[death_df[\"Country/Region\"] == country].sum()\n new_row[\"Country/Region\"] = country\n 
new_row[\"Province/State\"] = np.NaN\n new_row[\"Lat\"] = death_df.loc[\n (death_df[\"Country/Region\"] == country)\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ].mean()\n new_row[\"Long\"] = death_df.loc[\n (death_df[\"Country/Region\"] == country)\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ].mean()\n death_df = death_df.drop(\n death_df[death_df[\"Country/Region\"] == country].index\n )\n death_df = death_df.append(new_row, ignore_index=True)\n ## add Long and Lat manually\n death_df.loc[\n (death_df[\"Country/Region\"] == \"China\")\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"China\"][\"Lat\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"China\")\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"China\"][\"Long\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Canada\")\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Canada\"][\"Lat\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Canada\")\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Canada\"][\"Long\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Australia\")\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Australia\"][\"Lat\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Australia\")\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Australia\"][\"Long\"].mean()\n ## make tidy table\n death_df = death_df[death_df[\"Province/State\"].isna()].drop(\n columns=[\"Province/State\", \"Unnamed: 0\"], axis=1\n )\n death_tidy = death_df.melt(id_vars=[\"Country/Region\", \"Lat\", \"Long\"])\n death_tidy[\"variable\"] = pd.to_datetime(death_tidy[\"variable\"])\n self.times_series_death_tidy = death_tidy\n\n # clean up data for timeseries_recovered\n ## copy data\n recovered_df = self.times_series_recovered.copy()\n recovered_df_2 = self.times_series_recovered.copy()\n ## summation\n recovered_concat = recovered_df[recovered_df[\"Province/State\"].notna()][\n \"Country/Region\"\n ].unique()\n for country in recovered_concat:\n new_row = recovered_df[recovered_df[\"Country/Region\"] == country].sum()\n new_row[\"Country/Region\"] = country\n new_row[\"Province/State\"] = np.NaN\n new_row[\"Lat\"] = recovered_df.loc[\n (recovered_df[\"Country/Region\"] == country)\n & (recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ].mean()\n new_row[\"Long\"] = recovered_df.loc[\n (recovered_df[\"Country/Region\"] == country)\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ].mean()\n recovered_df = recovered_df.drop(\n recovered_df[recovered_df[\"Country/Region\"] == country].index\n )\n recovered_df = recovered_df.append(new_row, ignore_index=True)\n ## add Long and Lat manually\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"China\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"China\"][\"Lat\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"China\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"China\"][\"Long\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Canada\")\n & 
(recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"Canada\"][\"Lat\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Canada\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"Canada\"][\"Long\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Australia\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"Australia\"][\n \"Lat\"\n ].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Australia\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"Australia\"][\n \"Long\"\n ].mean()\n ## make tidy table\n recovered_df = recovered_df[recovered_df[\"Province/State\"].isna()].drop(\n columns=[\"Province/State\", \"Unnamed: 0\"], axis=1\n )\n recovered_tidy = recovered_df.melt(id_vars=[\"Country/Region\", \"Lat\", \"Long\"])\n recovered_tidy[\"variable\"] = pd.to_datetime(recovered_tidy[\"variable\"])\n self.times_series_recovered_tidy = recovered_tidy\n\n return self.times_series_death_tidy", "title": "" }, { "docid": "d8cf42d8dace8f4308ebb3dbf58062dc", "score": "0.5757113", "text": "def load_data(city, month, day):\n if city.lower()=='washington':\n df=pd.read_csv('washington.csv')\n elif city.lower()=='chicago':\n df=pd.read_csv('chicago.csv')\n else :\n df=pd.read_csv('new_york_city.csv')\n count=0\n \n \n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.weekday_name\n \n if(month!='all'):\n months=['january','february','march','april','may','june']\n month=months.index(month)+1\n df=df[df['month']==month] \n if(day!='all'):\n df=df[df['day_of_week']==day.title()]\n \n \n \n \n return df", "title": "" }, { "docid": "0b0a5a06363f5bf8421bc0298f87a821", "score": "0.5712035", "text": "def load_data(city, month, day):\n if city == 'new york':\n city_file = CITY_DATA['new york city']\n else:\n city_file = CITY_DATA[city]\n df = pd.read_csv(city_file)\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday\n df['hour'] = df['Start Time'].dt.hour\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n df = df[df['month'] == months.index(month) + 1]\n if day != 'all':\n weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n df = df[df['day'] == weekdays.index(day)]\n return df", "title": "" }, { "docid": "683fea2e9e3dd4ff10472337e475b2e3", "score": "0.56988937", "text": "def load_data(city, month, day):\n\n #Load data of the selectd city\n df = pd.read_csv(CITY_DATA[city])\n\n #Convert the Start Time column and End Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n\n #Extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n #Filter by month if applicable\n if month != 'all' :\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n #Filter by day of week if applicable\n if day != 'all':\n #Filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": 
"167bf937cf8ec364ddce4cfc5a996111", "score": "0.56900483", "text": "def prepare_dataset(dataset_path=\"../daedalus/persistent_data/ssm_E08000032_MSOA11_ppp_2011.csv\",\n output_path=\"./persistent_data/test_ssm_E08000032_MSOA11_ppp_2011.csv\",\n columns_map={\"Area\": \"location\",\n \"DC1117EW_C_SEX\": \"sex\",\n \"DC1117EW_C_AGE\": \"age\",\n \"DC2101EW_C_ETHPUK11\": \"ethnicity\"},\n location_code=None,\n lookup_ethnicity=\"./persistent_data/ethnic_lookup.csv\",\n loopup_location_code=\"./persistent_data/Middle_Layer_Super_Output_Area__2011__to_Ward__2016__Lookup_in_England_and_Wales.csv\"):\n # read the dataset\n dataset = pd.read_csv(dataset_path)\n if columns_map:\n # rename columns\n dataset = dataset.rename(columns=columns_map)\n if lookup_ethnicity:\n # map ethnicity from digits to strings as specified in the lookup_ethnicity file\n lookup = pd.read_csv(lookup_ethnicity)\n code_ethnicity = dict(zip(lookup['Base population file (persistent data) From \"C_ETHPUK11\"'],\n lookup['Rate to use (from NewEthpop outputs) Code']))\n dataset.replace({\"ethnicity\": code_ethnicity}, inplace=True)\n if location_code:\n if location_code == 'E09000001' or location_code == 'E09000033':\n location_code = 'E09000001+E09000033'\n if location_code == 'E06000052' or location_code == 'E06000053':\n location_code = 'E06000052+E06000053'\n\n dataset['MSOA'] = dataset['location']\n dataset['location'] = location_code\n else:\n dataset['MSOA'] = dataset['location']\n lookup = pd.read_csv(loopup_location_code)\n code_LAD = dict(zip(lookup['MSOA11CD'],\n lookup['LAD16CD']))\n dataset.replace({\"location\": code_LAD}, inplace=True)\n\n\n dataset.to_csv(output_path, index=False)\n print(f\"\\nWrite the dataset at: {output_path}\")", "title": "" }, { "docid": "314f98d3ce3965eb263d45ea4e22620e", "score": "0.5680046", "text": "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city], index_col = 0)\n\n # converts the Start Time column to datetime and creates new month, day of the week, and hour of the day columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['week_day'] = df['Start Time'].dt.day_name\n df['start_hour'] = df['Start Time'].dt.hour\n\n # combines start and end station into one column\n df['start_end'] = df['Start Station'].astype(str) + ' to ' + df['End Station']\n\n # filters by month if applicable and creates new dataframe\n if month != 'all':\n month_index = data_months.index(month) + 1\n df = df[df[\"month\"] == month_index ]\n\n # filters by day of week if applicable and creates new dataframe\n if day != 'all':\n df = df[df[\"week_day\"] == day.title() ]\n\n return df", "title": "" }, { "docid": "b9e7d968641a7c14b2a0c487f0ccfed0", "score": "0.5675025", "text": "def Save_City_Data(cities_data):\n cities_data.to_csv(cities_file_loc)", "title": "" }, { "docid": "704bbc8badb8bfbb09618e2d37ed552b", "score": "0.56743604", "text": "def load_data(city, month, day):\n\n # Should be able to find always the city, given it had to be picked from\n # a list, but it's safer to check on it.\n if CITY_DATA.get(city) is None:\n return None, None\n\n city_df = pd.read_csv(CITY_DATA.get(city))\n raw_df = city_df.copy()\n\n # Convert Start Time column to date/datetime\n city_df['Start Time'] = pd.to_datetime(city_df['Start Time'])\n\n # Create a column with the name of the month\n city_df['Month'] = city_df['Start Time'].map(lambda x:\n x.strftime('%b').lower())\n\n # Create a column with the day of the week\n city_df['DOW'] = city_df['Start 
Time'].map(lambda x:\n x.strftime('%a').lower())\n\n # Create a column with hour\n city_df['Hour'] = city_df['Start Time'].map(lambda x: x.strftime('%H'))\n\n # Filter the resulting table according to user's choices of month and day\n if (month is not None) and (day is not None):\n df = city_df[(city_df['Month'] == month) & (city_df['DOW'] == day)]\n elif month is not None:\n df = city_df[city_df['Month'] == month]\n elif day is not None:\n df = city_df[city_df['DOW'] == day]\n else:\n df = city_df\n\n return df, raw_df", "title": "" }, { "docid": "066af61cb31a5f4507bb73d82c3eb14b", "score": "0.5659826", "text": "def load_data(city, month, day):\n\n # load data file of specified city into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month_name().apply(lambda x: x.lower())\n df['day_of_week'] = df['Start Time'].dt.day_name().apply(lambda x: x.lower())\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day]\n \n return df", "title": "" }, { "docid": "70f30ca449b6ad9d7d35cf0556e8ca86", "score": "0.56500524", "text": "def load_data(city, month, day):\n\n # Load city-specific data file into dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "939a5c641248a982b457cd577b8a5a02", "score": "0.56320304", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n\n # create a new column that combines start and end stations\n df['start_and_end_stations'] = df['Start Station'] + ' to ' + df['End Station']\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month)\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month + 1]\n\n # filter by day of week if applicable\n if day != 'all':\n day = days.index(day)\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day]\n\n return df", "title": "" 
}, { "docid": "989f7a0c428c3eab30ca70fb6cef54e3", "score": "0.56156164", "text": "def estimates_to_data(origin_lat, origin_lng, dest_lat, dest_lng):\n print \"Getting estimates\"\n uber_est = uber_client.get_price_estimates(\n start_latitude= origin_lat,\n start_longitude= origin_lng,\n end_latitude= dest_lat,\n end_longitude= dest_lng\n ) \n\n uber_estimate = uber_est.json \n\n lyft_est = lyft_client.get_cost_estimates(origin_lat, origin_lng, \n dest_lat, dest_lng)\n lyft_estimate = lyft_est.json \n\n ############ Enter estimates into database\n\n time_requested = datetime.now()\n\n uber_ests = []\n\n i = 0\n for i in range(len(uber_estimate[\"prices\"])):\n uber_ests.append(uber_estimate[\"prices\"][i])\n i += 1\n\n\n for uber in uber_ests:\n\n if uber[\"display_name\"] == \"TAXI\":\n continue\n\n price_min = uber[\"low_estimate\"] * 100\n price_max = uber[\"high_estimate\"] * 100\n\n if uber[\"display_name\"] == \"POOL\":\n ridetype_id = 1\n elif uber[\"display_name\"] == \"uberX\":\n ridetype_id = 2\n elif uber[\"display_name\"] == \"uberXL\":\n ridetype_id = 3\n else:\n continue\n\n distance = uber[\"distance\"]\n time = uber[\"duration\"]\n surge = float(uber[\"surge_multiplier\"])\n\n sql = \"\"\"INSERT INTO estimates (origin_lat, origin_long, dest_lat, \n dest_long, distance, time, time_requested, \n surge, price_min, price_max, ridetype_id)\n VALUES (:origin_lat, :origin_lng, :dest_lat, :dest_lng, \n :distance, :time, :time_requested, \n :surge, :price_min, :price_max, :ridetype_id);\"\"\"\n\n db.session.execute(sql, {\n \"origin_lat\" : origin_lat,\n \"origin_lng\" : origin_lng, \n \"dest_lat\" : dest_lat, \n \"dest_lng\" : dest_lng, \n \"distance\" : distance, \n \"time\" : time, \n \"time_requested\" : time_requested, \n \"surge\" : surge, \n \"price_min\" : price_min, \n \"price_max\" : price_max, \n \"ridetype_id\" : ridetype_id\n })\n\n print \"Uber added\"\n\n lyft_ests = []\n\n i = 0\n for i in range(len(lyft_estimate[\"cost_estimates\"])):\n lyft_ests.append(lyft_estimate[\"cost_estimates\"][i])\n i += 1\n\n\n for lyft in lyft_ests:\n\n price_min = lyft[\"estimated_cost_cents_min\"]\n price_max = lyft[\"estimated_cost_cents_max\"]\n\n if lyft[\"ride_type\"] == \"lyft_line\":\n ridetype_id = 4\n base_min = 475\n base_max = 475\n elif lyft[\"ride_type\"] == \"lyft\":\n ridetype_id = 5\n base_min = 775\n base_max = 1475\n elif lyft[\"ride_type\"] == \"lyft_plus\":\n ridetype_id = 6\n base_min = 1175\n base_max = 1975\n else:\n continue\n\n distance = lyft[\"estimated_distance_miles\"]\n time = lyft[\"estimated_duration_seconds\"]\n\n if price_min <= base_min:\n min_surge = 1\n else:\n min_surge = float(price_min / base_min)\n\n if price_max <= base_max:\n max_surge = 1\n else:\n max_surge = float(price_max / base_max)\n\n total = min_surge + max_surge\n\n surge = round((total / 2.0), 2)\n\n sql = \"\"\"INSERT INTO estimates (origin_lat, origin_long, dest_lat, \n dest_long, distance, time, time_requested, \n surge, price_min, price_max, ridetype_id)\n VALUES (:origin_lat, :origin_lng, :dest_lat, :dest_lng, \n :distance, :time, :time_requested, \n :surge, :price_min, :price_max, :ridetype_id);\"\"\"\n\n db.session.execute(sql, {\n \"origin_lat\" : origin_lat,\n \"origin_lng\" : origin_lng, \n \"dest_lat\" : dest_lat, \n \"dest_lng\" : dest_lng, \n \"distance\" : distance, \n \"time\" : time, \n \"time_requested\" : time_requested, \n \"surge\" : surge, \n \"price_min\" : price_min, \n \"price_max\" : price_max, \n \"ridetype_id\" : ridetype_id\n })\n\n print \"Lyft added\"\n 
\n db.session.commit()", "title": "" }, { "docid": "172736b7a9a619dfce6ecf1091d4bcef", "score": "0.5612747", "text": "def _read_cweeds_data(metadata):\n # Parse the weather data to a frame of series.\n # Unfortunately, seems pd can't handle appending, so we build lists.\n times = []\n ghi = []\n dni = []\n dhi = []\n dni_extra = []\n wind_speed = []\n temp_air = []\n pressure = []\n albedo = []\n\n # we use these a lot, save some lookups\n albedo_soil = pvlib.irradiance.SURFACE_ALBEDOS['soil']\n albedo_snow = pvlib.irradiance.SURFACE_ALBEDOS['snow']\n\n zipname = '../data/{territory}.zip'.format(**metadata)\n # the station name we use here is the ugly name, not the pretty name in metadata['name']\n # most stations have the territory name but some don't.\n wy2name = '{wban}.WY2'.format(**metadata).format(metadata.name)\n\n latitude = metadata['latitude']\n longitude = metadata['longitude']\n timezone = datetime.timezone(datetime.timedelta(hours=metadata['timezone']))\n\n with zipfile.ZipFile(zipname) as zipf:\n def openwy2():\n # Find the wy2name in the archive. The names aren't consistent enough\n # to just get the right one in one or two tries.\n for zipitem in zipf.infolist():\n if wy2name in zipitem.filename:\n return zipf.open(zipitem)\n raise KeyError(\"Could not find {} in {}\".format(wy2name, zipname))\n with openwy2() as f:\n for line in f:\n # yyyymmddhh but hh is 01-24; shift to 00-23\n times.append(datetime.datetime(int(line[6:10]), int(line[10:12]),\n int(line[12:14]), int(line[14:16]) - 1, tzinfo=timezone))\n\n # values in kJ/m^2 for the entire hour; later we divide by 3.6 to get W/m^2\n dni_extra.append(int(line[16:20])) # extraterrestrial irradiance (sun at ToA)\n ghi.append(int(line[20:24])) # global horizontal irradiance\n dni.append(int(line[26:30])) # direct normal irradiance\n dhi.append(int(line[32:36])) # diffuse horizontal irradiance (ghi - dni)\n\n # pressure in 10 Pa ; divide by 100 to get kPa\n pressure.append(int(line[85:90]))\n # value in 0.1 C ; divide by 10 to get C\n temp_air.append(int(line[91:95]))\n # value in 0.1 m/s ; divide by 10 to get m/s.\n wind_speed.append(int(line[105:109]))\n\n # 0 => no snow; 1 => snow; 9 => missing\n str_snow = chr(line[116])\n if str_snow == '0':\n albedo.append(albedo_soil)\n elif str_snow == '1':\n albedo.append(albedo_snow)\n else:\n # Missing. Shitty guess: assume it's snowy if temp < -3 (bad guess!)\n # we probably should guess based on a model that includes precip data and\n # recent temps, which we have access to\n if temp_air[-1] < -3:\n albedo.append(albedo_snow)\n else:\n albedo.append(albedo_soil)\n\n # Pack the data now, before using it below. Also convert to the units we\n # expect (W/m^2 rather than J/(m^2 h), m/s rather than dm/s, etc)\n # And convert the times to np.datetime64 so pandas can run faster.\n times = np.asarray(times, dtype=np.datetime64)\n ghi = np.asarray(ghi, dtype=np.float32) * (1 / 3.6)\n dni = np.asarray(dni, dtype=np.float32) * (1 / 3.6)\n dhi = np.asarray(dhi, dtype=np.float32) * (1 / 3.6)\n dni_extra = np.asarray(dni_extra, dtype=np.float32) * (1 / 3.6)\n wind_speed = np.asarray(wind_speed, dtype=np.float32) * 0.1\n temp_air = np.asarray(temp_air, dtype=np.float32) * 0.1\n pressure = np.asarray(pressure, dtype=np.float32) * 0.01\n albedo = np.asarray(albedo, dtype=np.float32)\n\n # We don't get zenith/azimuth from the data. 
Calculate it.\n solpos = pvlib.solarposition.get_solarposition(times, latitude, longitude)\n solar_zenith = np.asarray(solpos['apparent_zenith'], dtype=np.float32)\n solar_azimuth = np.asarray(solpos['azimuth'], dtype=np.float32)\n\n # Get the air mass (?)\n airmass = pvlib.atmosphere.relativeairmass(solar_zenith)\n am_abs = pvlib.atmosphere.absoluteairmass(airmass, pressure)\n am_abs = np.asarray(am_abs, dtype=np.float32)\n\n return pd.DataFrame({\n 'ghi' : ghi,\n 'dni' : dni,\n 'dhi' : dhi,\n 'dni_extra': dni_extra,\n 'wind_speed': wind_speed,\n 'temp_air' : temp_air,\n 'pressure' : pressure,\n 'albedo' : albedo,\n 'solar_zenith' : solar_zenith,\n 'solar_azimuth' : solar_azimuth,\n 'absolute_airmass': am_abs\n }, index = times)", "title": "" }, { "docid": "0cd69c5b91d25df7081d4c255511cc06", "score": "0.55938244", "text": "def load_data(city, month, day):\n # load data into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the 'Start Time' and 'End Time' column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n #Extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # Filter by month if applicable\n if month != 'all':\n # Use the index of the months list to get the corresponding data\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n #Filter by month to create a new dataframe\n df = df[df['month'] == month]\n\n # Filter by day of week if applicable\n if day != 'all':\n #Filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n # Returns the selected file as a dataframe (df) with relevant columns\n return df", "title": "" }, { "docid": "81083d2074ee55a22fd61acc43ce24b1", "score": "0.55818015", "text": "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n return df", "title": "" }, { "docid": "300231fa3fb4769c6175d03292a74bd5", "score": "0.55812967", "text": "def _load_data():\n for row in _get_csv_reader('https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'):\n code = row['CountryCode']\n del row['CountryCode']\n date = row['Date']\n del row['Date']\n name = row['CountryName']\n del row['CountryName']\n\n _add(code, date, name, row)\n\n for row in _get_csv_reader('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'):\n code = row['iso_code']\n del row['iso_code']\n date = ''.join(row['date'].split('-'))\n del row['date']\n name = row['location']\n del row['location']\n del row['continent']\n\n _add(code, date, name, row)", "title": "" }, { "docid": "92e8d37dd31825cb8ded7bb73ae83eb9", "score": "0.55613256", "text": "def load_data(city, month, day):\n #Load data for city\n print(\"\\nLoading data...\")\n df = pd.read_csv(CITY_DATA[city])\n\n #Convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #Extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n #Filter by month if applicable\n if month != 'all':\n #Use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n #Filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n 
#Filter by day of week if applicable\n if day != 'all':\n #Filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n #Returns the selected file as a dataframe (df) with relevant columns\n return df", "title": "" }, { "docid": "979aa2805fc8cddbe12c1626e4d5676f", "score": "0.55583286", "text": "def loading_datasts(city, month, day):\n\n city = city.lower()\n\n month = month.lower()\n\n day = day.lower()\n\n\n\n # load data file into a dataframe\n\n df = pd.read_csv(CITY_DATA[city])\n\n\n\n # convert the Start Time column to datetime\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n\n\n # extract month and day of week from Start Time to create new columns\n\n df['month'] = df['Start Time'].dt.month\n\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n\n # filter by month if applicable\n\n if month != 'all':\n\n # use the index of the months list to get the corresponding int\n\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n\n month = months.index(month) + 1\n\n\n\n # filter by month to create the new dataframe\n\n df = df[df['month'] == month]\n\n\n\n # filter by day of week if applicable\n\n if day != 'all':\n\n # filter by day of week to create the new dataframe\n\n df = df[df['day_of_week'] == day.title()]\n\n\n\n return df", "title": "" }, { "docid": "89191c5e57f26dfc32ab362284801244", "score": "0.5556858", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df[\"Start Time\"])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df[\"Start Time\"].dt.month\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n #months = ['january', 'february', 'march', 'april', 'may', 'june']\n #month = months.index(month.lower())+1\n #print(month)\n # filter by month to create the new dataframe\n\n df = df[df[\"month\"] == int(month)]\n\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df[\"day_of_week\"] == day.title()]\n\n # Get a column with the start time\n df['Start Time'] = pd.to_datetime(df[\"Start Time\"], dayfirst = True)\n # extract hour from the Start Time column to create an hour column\n df['hour'] =df[\"Start Time\"].dt.hour\n\n return df", "title": "" }, { "docid": "3c33576e9089f69b557fb93c58a8b2fa", "score": "0.55553037", "text": "def process_city_file(cur, filepath, conn):\n # Read cityies information data file\n df3 = pd.read_csv('us-cities-demographics.csv',sep = ';')\n df3.fillna(0,inplace=True)\n \n # using dictionary to convert specific columns \n convert_dict = {'Male Population': int, \n 'Female Population': int\n } \n df3 = df3.astype(convert_dict)\n\n #Fetching required columns from the data\n cities_data = df3.iloc[:,[0,1,9,2,3,4,5]].values.tolist()\n \n # insert city data records\n for i in range(len(cities_data)):\n try:\n cur.execute(dimcities_table_insert, cities_data[i])\n except psycopg2.IntegrityError as e:\n conn.rollback()\n print('insert failure:',e)\n #continue\n except ValueError as e:\n conn.rollback()\n print('insert failure:',e)\n #continue\n else:\n conn.commit()", "title": "" }, { "docid": "49529f06333ca14d6c1ca4b91dcf4d16", "score": "0.5549042", "text": "def load_(self):\n\n if 
os.path.exists(self.data_folder + 'poll.csv'):\n self.poll_df = pd.read_csv(self.data_folder + 'poll.csv')\n self.poll_df['datetime'] = pd.to_datetime(self.poll_df['datetime'])\n self.poll_df.set_index('datetime', inplace=True)\n # add pollution list\n self.gas_list = self.poll_df.columns.to_list()\n\n if (self.city_name == 'Chiang Mai') :\n # for Thailand, delete all PM2.5 record before 2010\n self.poll_df.loc[:'2010', 'PM2.5'] = np.nan\n\n elif (self.city_name == 'Bangkok'):\n # for Thailand, delete all PM2.5 record before 2014\n self.poll_df.loc[:'2013', 'PM2.5'] = np.nan\n \n elif (self.city_name == 'Bangkok'):\n # for Thailand, delete all PM2.5 record before 2014\n self.poll_df.loc[:'2012', 'NO2'] = np.nan\n\n elif (self.city_name == 'Hat Yai'):\n # for Thailand, delete all PM2.5 record before 2014\n self.poll_df.loc[:'2015', 'PM2.5'] = np.nan\n #pass\n\n else:\n print('no pollution data. Call self.build_pollution first')\n\n\n if os.path.exists(self.data_folder + 'weather.csv'):\n self.wea = pd.read_csv(self.data_folder + 'weather.csv')\n try:\n self.wea.drop(['Time'],\n axis=1,\n inplace=True)\n except BaseException:\n pass\n self.wea['datetime'] = pd.to_datetime(self.wea['datetime'])\n self.wea.set_index('datetime', inplace=True)\n else:\n print('no weather data. Call self.build_weather first')\n\n if os.path.exists(self.data_folder + 'imp_data.csv'):\n self.data = pd.read_csv(\n self.data_folder + 'imp_data.csv')\n self.data['datetime'] = pd.to_datetime(\n self.data['datetime'])\n self.data.set_index('datetime', inplace=True)", "title": "" }, { "docid": "a0a46c43a376a239a7480242251866ce", "score": "0.5533267", "text": "def load_data(city, month, day):\r\n #Load data for city\r\n print(\"\\nPlease wait data is loading...\")\r\n df = pd.read_csv(CITY_DATA[city])\r\n\r\n #Convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n #Extract month and day of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n\r\n #Filter by month if applicable\r\n if month != 'all':\r\n #Use the index of the months list to get the corresponding int\r\n months = ['january', 'february', 'march', 'april', 'may', 'june']\r\n month = months.index(month) + 1\r\n\r\n #Filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n #Filter by day of week if applicable\r\n if day != 'all':\r\n #Filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n #Returns the selected file as a dataframe (df) with relevant columns\r\n return df", "title": "" }, { "docid": "1f4c40c40056262cb822f651ecf1797c", "score": "0.5530101", "text": "def load_data(city, month, day):\n try:\n file = CITY_DATA[city.lower()]\n\n # load data file into a dataframe\n df = pd.read_csv(file)\n\n # extract month and day of week from Start Time to create new columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month)+1\n # print(month)\n\n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == 
day.title()]\n\n return df\n except Exception as e:\n print('Exception occured: {}'.format(e))\n return None", "title": "" }, { "docid": "759b1ee46ae34f63668d97e6c5dd99db", "score": "0.5529535", "text": "def getData(file, test=False, perc=0.75, rand=False):\n\n date = []\n year = []\n month = []\n category = []\n day = []\n district = [] \n address = []\n lng = []\n lat = []\n with open(file, 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n date.append(row['Dates'])\n if not test:\n category.append(row['Category'])\n day.append(row['DayOfWeek'])\n district.append(row['PdDistrict'])\n address.append(row['Address'])\n lng.append(row['X'])\n lat.append(row['Y'])\n\n\n # format date\n for i in range(len(date)):\n d = date[i].split(' ')[0]\n t = date[i].split(' ')[1]\n hour = int(t.split(':')[0])\n minute = int(t.split(':')[1])\n # set date to 0 or 1 (5am to 5pm) morning otw night\n if hour >= 17 or hour < 5:\n date[i] = 0\n else:\n date[i] = 1\n #date[i] = hour * 60 + minute\n year.append(int(d.split('-')[0]))\n month.append(int(d.split('-')[1]))\n\n # format category\n crimes = list(set(category))\n crimes = sorted(crimes)\n c = {}\n for i in range(len(crimes)):\n c[crimes[i]] = i\n for i in range(len(category)):\n category[i] = c[category[i]]\n\n # format day\n d = {'Sunday':0,'Monday':1,'Tuesday':2,'Wednesday':3,'Thursday':4,\n 'Friday':5,'Saturday':6}\n for i in range(len(day)):\n day[i] = d[day[i]]\n\n # format district\n a = list(set(district))\n d = {}\n for i in range(len(a)):\n d[a[i]] = i\n for i in range(len(district)):\n district[i] = d[district[i]]\n\n # format address\n a = list(set(address))\n d = {}\n for i in range(len(a)):\n d[a[i]] = i\n for i in range(len(address)):\n address[i] = d[address[i]]\n\n # format lng/lat\n lng = [float(x) for x in lng]\n lat = [float(y) for y in lat]\n\n date_un = np.array(date)\n year_un = np.array(year)\n month_un = np.array(month)\n category_un = np.array(category)\n day_un = np.array(day)\n district_un = np.array(district)\n address_un = np.array(address)\n lng_un = np.array(lng)\n lat_un = np.array(lat)\n\n date = np.array(date)\n year = np.array(year)\n month = np.array(month)\n category = np.array(category)\n day = np.array(day)\n district = np.array(district)\n address = np.array(address)\n lng = np.array(lng)\n lat = np.array(lat)\n\n # select train/test set\n if test: \n perc = 1.0\n m = len(date)\n indexes = list(range(m))\n if rand:\n shuffle(indexes)\n train_part = indexes[0 : int(perc * m)]\n test_part = indexes[int(perc * m) :]\n date = date[train_part]\n year = year[train_part]\n month = month[train_part]\n if not test:\n category = category[train_part]\n day = day[train_part]\n district = district[train_part]\n address = address[train_part]\n lng = lng[train_part]\n lat = lat[train_part]\n\n # form X and Y matrices\n #X = np.concatenate(([date], [year], [month], [day], [district], [address], \n # [lng], [lat]), axis=0).T\n \n X = np.concatenate(([date], [year], [month], [day], [district],\n ), axis=0).T\n Y = []\n for i in range(len(category_un)):\n temp = [0] * len(crimes)\n temp[category_un[i]] = 1\n Y.append(temp)\n Y = np.array(Y)\n\n #X_test = np.concatenate(([date_un], [year_un], [month_un], [day_un], [district_un], [address_un], \n # [lng_un], [lat_un]), axis=0).T\n\n X_test = np.concatenate(([date_un], [year_un], [month_un], [day_un],\n [district_un]), axis=0).T\n\n if not test:\n X_test = X_test[test_part]\n else:\n X_test = []\n if not test:\n Y_test = Y[test_part]\n else:\n Y_test = []\n if 
not test:\n Y = Y[train_part]\n else:\n Y = []\n\n\n return {'X' : X, 'Y' : Y, 'X_test' : X_test, 'Y_test' : Y_test, 'crimes' : crimes}", "title": "" }, { "docid": "a7b2bc5eaaa1ccb52153ad5cd9382cd6", "score": "0.5523657", "text": "def load_data(city, month, day):\n #Load data for city\n print(\"\\nLoading data...\")\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #Extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n #Filter by month if applicable\n if month != 'all':\n #Use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n #Filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n #Filter by day of week if applicable\n if day != 'all':\n #Filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n\n return df", "title": "" }, { "docid": "bcb9daf4c7d4b78f69a29316879929d9", "score": "0.55187654", "text": "def load_data(city, month, day):\r\n cityfile = city[1]\r\n\r\n df = pd.read_csv(cityfile)\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['hour'] = df['Start Time'].dt.hour\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n \r\n \r\n \r\n if (day != 'all'):\r\n df = df[df['day_of_week'] == day]\r\n \r\n if (month != 0):\r\n df = df[df['month'] == month]\r\n\r\n if df.shape[0] == 0:\r\n df = None\r\n\r\n return df", "title": "" }, { "docid": "d18b0cdbc46c1bdc1f38df4f0047cbf6", "score": "0.55033207", "text": "def load_data(city, month, day):\n start_time = time.time()\n \n df = pd.read_csv(CITY_DATA.get(city), parse_dates=['Start Time', 'End Time'])\n df['Start Month'] = df['Start Time'].dt.month_name()\n df['Start Day'] = df['Start Time'].dt.day_name()\n df['Start hour'] =df['Start Time'].dt.hour\n\n if month != \"All\":\n df = df[df['Start Month'] == month]\n\n if day != \"All\":\n df = df[df['Start Day'] == day]\n\n\n return df", "title": "" }, { "docid": "8686a6f1f26091c4ff26132c5241adfc", "score": "0.549777", "text": "def load_data(city, month, day):\n # Select csv file based on city specified by user\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # Create additional columns for month, weekday name, hour and journey(start station to end station)\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['start_hour'] = df['Start Time'].dt.hour\n df['journey'] = df['Start Station'] + ' to ' + df['End Station']\n\n # Apply month filter if specified\n if month != 'All':\n months = ['January','February','March','April','May','June']\n month = months.index(month) + 1\n\n df = df[df['month'] == month]\n\n # Apply day filter if specified\n if day != 'All':\n\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "8aa2149684ee610ff4dab35aa9a15a49", "score": "0.5496692", "text": "def load_data(city, month, day):\n #read city csv\n df = pd.read_csv(CITY_DATA[city])\n\n #convert start_times to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #extracting hours to new columns\n df['hour'] = df['Start Time'].dt.hour\n\n #extracting months and days to new columns in dataframe\n df['month'] = df['Start Time'].dt.month\n 
df['day_of_week'] = df['Start Time'].dt.weekday_name\n #filtering by month if possible\n if month != 'all':\n months = ['january', 'february', 'march', 'april','may','june','all']\n month = months.index(month) + 1\n\n df = df[df['month'] == month]\n\n #filtering by day if possible\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "1eb061dfe87dfb78698715497e349551", "score": "0.5487498", "text": "def load_data(city, month, day):\n # load data based on the city input\n df = pd.read_csv(CITY_DATA[city])\n\n # Filter by month and day (copied from my solution to Practice 3)\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun']\n month = months.index(month)+1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n days = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']\n day = days.index(day)\n df = df[df['day_of_week'] == day]\n\n\n return df", "title": "" }, { "docid": "3873956f3386155d718b4e1dfab4a3e5", "score": "0.54829913", "text": "def handle_filethree(self, geofile_name, year, state, geoids_by_record):\n file3_name = geofile_name[:-11] + \"000032010.sf1\"\n datafile = open(file3_name, 'r')\n race, hispanic, stats = [], [], []\n skip_race = Census2010Race.objects.filter(\n geoid__state=state, geoid__year=year).exists()\n skip_hisp = Census2010HispanicOrigin.objects.filter(\n geoid__state=state, geoid__year=year).exists()\n skip_stats = Census2010RaceStats.objects.filter(\n geoid__state=state, geoid__year=year).exists()\n\n if not skip_race or not skip_hisp or not skip_stats:\n for row in reader(datafile):\n recordnum = row[4]\n if recordnum in geoids_by_record:\n data = Census2010Race(\n total_pop=int(row[5]), white_alone=int(row[6]),\n black_alone=int(row[7]), amind_alone=int(row[8]),\n asian_alone=int(row[9]), pacis_alone=int(row[10]),\n other_alone=int(row[11]), two_or_more=int(row[12]))\n # Save geoid separately so we don't need to load the\n # Tracts\n data.geoid_id = geoids_by_record[recordnum]\n race.append(data)\n\n data = Census2010HispanicOrigin(\n total_pop=int(row[13]), non_hispanic=int(row[14]),\n hispanic=int(row[15]))\n data.geoid_id = geoids_by_record[recordnum]\n hispanic.append(data)\n\n data = Census2010RaceStats(\n total_pop=int(row[16]), hispanic=int(row[25]),\n non_hisp_white_only=int(row[18]),\n non_hisp_black_only=int(row[19]),\n non_hisp_asian_only=int(row[21]))\n data.geoid_id = geoids_by_record[recordnum]\n data.auto_fields()\n stats.append(data)\n datafile.close()\n\n if not skip_race:\n Census2010Race.objects.bulk_create(race)\n if not skip_hisp:\n Census2010HispanicOrigin.objects.bulk_create(hispanic)\n if not skip_stats:\n Census2010RaceStats.objects.bulk_create(stats)", "title": "" }, { "docid": "7f5c9c6d4d0a887cf9138e936a32aa9e", "score": "0.5481812", "text": "def scrap_initial_training_data():\n\tdata_interest_obj = np.zeros((50,3)) #for objective functions\n\tfor i in range(50):\n\t\tdata = pd.read_csv('Meshes/gen_1/random_design'+str(i)+'/history.csv',\n\t\t\theader=0, 
usecols=[' \"CD\" ',' \"CL\" ',' \"CMz\" '])\n\t\tinterest = np.array([data.iloc[len(data)-1]])\n\t\tdata_interest_obj[i] = interest \n\n\t\"\"\"Scraping the data of interest from random designs as design variables\"\"\"\n\tdata_interest_dv = np.zeros((1,28)) #for design variables\n\tfor i in range(50):\n\t\tdata_upper = np.genfromtxt('Designs/initial_samples/control_points/random_design'\n\t\t\t+ str(i) + '.dat', skip_header=1, skip_footer=17,\n\t\t\tusecols=(1), delimiter=' ')\n\t\tdata_lower = np.genfromtxt('Designs/initial_samples/control_points/random_design'\n\t\t\t+ str(i) + '.dat', skip_header=17, skip_footer=1,\n\t\t\tusecols=(1), delimiter=' ')\n\t\tdata_upper = np.array([data_upper])\n\t\tdata_lower = np.array([data_lower])\n\t\tinterest = np.append(data_upper, data_lower, axis=1)\n\t\tdata_interest_dv = np.append(data_interest_dv, interest, axis=0)\n\tdata_interest_dv = np.delete(data_interest_dv, 0, 0)\n\n\t\"\"\"Saving to dat files\"\"\"\n\tnp.savetxt('Data/Training/X.dat',\n\t\t\t\tdata_interest_dv,\n\t \t\t\tdelimiter=' ',\n\t \t\t\theader='',\n\t \t\t\tfooter='')\n\tnp.savetxt('Data/Training/OUT.dat',\n\t\t\t\tdata_interest_obj,\n\t \t\t\tdelimiter=' ',\n\t \t\t\theader='',\n\t \t\t\tfooter='')", "title": "" }, { "docid": "0e13aad3477e5e088ea41582f194d285", "score": "0.5479693", "text": "def readTMY(filepath=os.path.join(\"TMY\", \"Germany DEU Koln (INTL).csv\")):\n # get data\n data = pd.read_csv(\n os.path.join(tsib.data.PATH, \"weatherdata\", filepath),\n skiprows=([0, 1]),\n sep=\",\",\n )\n data.index = pd.date_range(\n \"2010-01-01 00:30:00\", periods=8760, freq=\"H\", tz=\"Europe/Berlin\"\n )\n data = data.rename(\n columns={\"Beam\": \"DNI\", \"Diffuse\": \"DHI\", \"Tdry\": \"T\", \"Wspd\": \"WS\"}\n )\n location_data = pd.read_csv(\n os.path.join(tsib.data.PATH, \"profiles\", filepath), nrows=1, sep=\",\"\n )\n\n location = {\n \"name\": location_data[\"City\"].values[0],\n \"latitude\": location_data[\"Latitude\"].values[0],\n \"longitude\": location_data[\"Longitude\"].values[0],\n }\n return data, location", "title": "" }, { "docid": "e162f1f2e18d93d24280214a6419e7e7", "score": "0.5476033", "text": "def load_data(city, month, day):\n #loads data into df pandasdata frame for city\n df = pd.read_csv(CITY_DATA[city])\n #converts start time to cloumn to datetime column\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #for extracting month and day of week from start time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n #filtering by month if applicable\n if month != 'all':\n #we are using the index of the months list to get corresponding int\n months = ['january','february','march','april','may','june']\n month = months.index(month) + 1\n\n #filter data for the entered month where new df contains this Data\n df = df[df['month'] == month]\n\n #filter by day of week if applicable\n if day != 'all':\n #filtering data by day of week to create new data DataFrame\n df = df[df['day_of_week'] == day.title()]\n return df", "title": "" }, { "docid": "8eeca80059581c4b916c2991c4c132e8", "score": "0.5472", "text": "def load_data(city, month, day):\n file_name = CITY_DATA[city]\n print(\"The data for the city of {} are in the file: {} \".format(city,file_name))\n\n # load data file into a DataFrame\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month month and day of week from Start Time to create 
new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n\n # get the numerical index for the corresponding month\n month = int(getKeyByValue(month_options,month))\n\n # filter by month to create the new dataframe\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week']==day.title()]\n\n print('-'*40)\n\n return df", "title": "" }, { "docid": "fdf20bfe877a490b3186a68756a7db92", "score": "0.5471478", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to add new columns to dataframe\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month to create new dataframe\n if month != 'all':\n MONTHS = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week to create new dataframe\n if day != 'all':\n df = df[ df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "dab7216e8f68ad12dda595c462365c12", "score": "0.54665864", "text": "def load_data(city, month, day):\n \n # Load data file into dataframe\n df = pd.read_csv(CITY_DATA[city], index_col = 0)\n \n # Convert start times into date times and extract month and day\n df['start time'] = pd.to_datetime(df['Start Time'])\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.weekday_name\n df['start_hour'] = pd.to_datetime(df['Start Time']).dt.hour\n \n # Filter by month and week\n if month != 'all':\n allmonths = ['january', 'february', 'march', 'april', 'may', 'june']\n month = allmonths.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n \n return df", "title": "" }, { "docid": "314dc05b2c1f1f6c0f2c789eec24db5c", "score": "0.54602337", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "e323bc3320622da8db869c0f90b8d609", "score": "0.5453136", "text": "def load_data():\n # Load data from source\n print(\"Dataset is loading...\")\n if not isdir('Data/'):\n mkdir('Data/')\n update_data()\n\n dataset = pd.read_csv('Data/time-series-19-covid-combined.csv')\n \n\n\n # Austria\n austria = 
WebDataReader(dataset, 'Austria', custom_threshold=1).Region\n austria.N = 8994481 # Worldometer.info\n LOCATIONS[austria.name] = austria\n\n # Italy\n italy = WebDataReader(dataset, 'Italy', custom_threshold=1).Region\n italy.N = 60483054 # Worldometer.info\n LOCATIONS[italy.name] = italy # 30\n\n # Spain\n spain = WebDataReader(dataset, 'Spain', custom_threshold=1).Region\n spain.N = 46754778 # Worldometer.info\n LOCATIONS[spain.name] = spain # 24\n\n # Hubei\n hubei = WebDataReader(dataset, 'China', 'Hubei', custom_threshold=1).Region\n hubei.N = 59170000 # statista.com\n LOCATIONS[hubei.name] = hubei\n\n print(list(LOCATIONS.keys()))\n\n return", "title": "" }, { "docid": "ff0a014dbbed493d72adf84a004e5d6b", "score": "0.5448266", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n file_path = 'data/' + CITY_DATA[city]\n df = pd.read_csv(file_path)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week, and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n int_month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[(df['month'] == int_month)]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "4c1bf2e20c501d6e347947d85a8d0545", "score": "0.5440694", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday_name\n df['Start hour'] = df['Start Time'].dt.hour\n df['End hour'] = df['End Time'].dt.hour\n\n # filter by month\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february','march', 'april','may','june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n # filter by day of week\n if day != 'all':\n df = df[df['day'] == day.title()]\n\n\n\n return df", "title": "" }, { "docid": "614dd5293e1cf0768204edabdf6c14c2", "score": "0.54390883", "text": "def get_day_data_interior(file):\r\n timestamps = []\r\n interior_data = {}\r\n\r\n with open(file, 'r') as csv_file:\r\n csv_reader = csv.DictReader(csv_file)\r\n\r\n for row in csv_reader:\r\n if row['timestamp'] not in timestamps:\r\n timestamps.append(row['timestamp'])\r\n\r\n for timestamp in timestamps:\r\n interior_data[timestamp] = {}\r\n interior_data[timestamp]['totalTradedWatts'] = []\r\n interior_data[timestamp]['averagePrice'] = []\r\n interior_data[timestamp]['maximumPrice'] = []\r\n interior_data[timestamp]['minimumPrice'] = []\r\n interior_data[timestamp]['runningTime'] = []\r\n interior_data[timestamp]['numberOfProducers'] = []\r\n interior_data[timestamp]['numberOfConsumers'] = []\r\n interior_data[timestamp]['dataPrepTime'] = []\r\n interior_data[timestamp]['usedReductions'] = 
[]\r\n\r\n with open(file, 'r') as csv_file:\r\n csv_reader = csv.DictReader(csv_file)\r\n\r\n for row in csv_reader:\r\n for timestamp in interior_data.keys():\r\n if row['timestamp'] == timestamp:\r\n interior_data[timestamp]['totalTradedWatts'].append(float(row['totalTradedWatts']))\r\n interior_data[timestamp]['averagePrice'].append(float(row['averagePrice']))\r\n interior_data[timestamp]['maximumPrice'].append(float(row['maximumPrice']))\r\n interior_data[timestamp]['minimumPrice'].append(float(row['minimumPrice']))\r\n interior_data[timestamp]['runningTime'].append(float(row['runningTime']))\r\n interior_data[timestamp]['dataPrepTime'].append(float(row['dataPrepTime']))\r\n interior_data[timestamp]['numberOfProducers'].append(float(row['numberOfProducers']))\r\n interior_data[timestamp]['numberOfConsumers'].append(float(row['numberOfConsumers']))\r\n interior_data[timestamp]['usedReductions'].append(row['usedReductions'])\r\n\r\n return interior_data", "title": "" }, { "docid": "9508a124801aa24251191f8794ce92ab", "score": "0.54381436", "text": "def load_data(city, month, day):\n CITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\n filename = CITY_DATA[city]\n df = pd.read_csv(filename)\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['year'] = df['Start Time'].dt.year\n df['day_of_week'] = df['Start Time'].dt.weekday\n\n# filter by month if applicable\n month2='all'\n if month != 'all':\n\n try:\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n #month = months.index(month) + 1\n\n monthnum = get_month_num(month)\n # filter by month to create the new dataframe\n df = df[df['month'] == monthnum]\n\n except KeyError as e:\n print(\"Invalid Key in Months {} \".format(e))\n\n\n\n if day != 'all':\n try:\n # filter by day of week to create the new dataframe\n # filter by day of week if applicable\n # The day of the week with Monday=0, Sunday=6.\n weekday_num = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n daynum = weekday_num.index(day)\n\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == daynum]\n\n\n except KeyError as e:\n print(\"Invalid Key in days {} \".format(e))\n print('\\nFilter used are: City: {} Month: {} Day: {}'.format(city,month,day))\n return df", "title": "" }, { "docid": "0e9a41f945587a023ab0ba5ce1a66ec5", "score": "0.54352516", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['hour'] = df['Start Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['month_name'] = df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month if applicable\n if month != 'All':\n # use the index of the months list to get the corresponding int\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n \n # filter by day of week if applicable\n if day != 'All':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": 
"da5e06c9801ef392d983e79a15552223", "score": "0.542036", "text": "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time']) \n df['End Time'] = pd.to_datetime(df['End Time']) \n df['month'] = df['Start Time'].dt.month_name() \n df['day'] = df['Start Time'].dt.day_name()\n if month != \"all\":\n df =df[df.month == month]\n if day !=\"all\":\n df =df[df.day == day]\n return df", "title": "" }, { "docid": "0b5a8a0a5f0909813086369a22daaff4", "score": "0.54166776", "text": "def pass_to_daily():\n df = pd.read_csv('../data/hourly_predictions.csv')\n df2 = pd.read_csv(r'../data/weather_forecast.csv')\n\n df = df.groupby(np.arange(len(df)) // 24).sum()\n df['timestamp'] = df.index.map(lambda t: pd.Timestamp(year=2017, month=1, day=1) + pd.Timedelta(days=t))\n df.set_index('timestamp').to_csv('data/daily_predictions.csv')\n\n df2 = df2.drop(['h', 'timestamp'], axis=1)\n header = df2.columns.tolist()\n df2['h_day'] = df2['G_Dh'].groupby(np.arange(len(df2)) // 24).apply(np.count_nonzero)\n for head in header:\n df2[head + '_min'] = df2[head].groupby(np.arange(len(df2)) // 24).min()\n df2[head + '_mean'] = df2[head].groupby(np.arange(len(df2)) // 24).mean()\n df2[head + '_var'] = df2[head].groupby(np.arange(len(df2)) // 24).std()\n df2 = df2.drop(head, axis=1)\n df2 = df2.dropna()\n df2['timestamp'] = df2.index.map(lambda t: pd.Timestamp(year=2017, month=1, day=1) + pd.Timedelta(days=t))\n\n df2.set_index('timestamp').to_csv('data/daily_forecast.csv')", "title": "" }, { "docid": "d722264f734db58c8063a1ed49f18f24", "score": "0.5412758", "text": "def import_from_file(self, filename):\n \n self.data = [] # Initialize the array,\n self.long_data = []\n self.long_time = []\n self.locs = []\n \n f = open(filename, 'r') # open the file,\n line = f.readline() # read the first line to initiate\n while line: # the while loop.\n loc = emergeDatum()\n # read off the metadata for the location\n loc.lat, loc.lon, loc.year, num = np.array(line.split('\\t')[:4]).astype(np.float)\n unit, sign, typ, age, tect, tectup, loc.recnbr = np.array(\n f.readline().split('\\t')[:7]).astype(np.float)\n loc.auth = f.readline().split('\\t')[0]\n loc.desc = f.readline().split('\\t')[0]\n loc.comm = f.readline().split('\\t')[0]\n \n if 100 <= recnbr < 400: # apply a correction for misentered\n loc.comm, loc.desc = loc.desc, loc.comm # data.\n \n times = [] # initiate the data_dict\n emerg = []\n if loc.recnbr < 400 or loc.recnbr >= 500:\n for i in range(int(num)):\n line = np.array(f.readline().split('\\t')[:2]).astype(np.float)\n times.append(line[0])\n self.long_time.append(line[0])\n emerg.append(line[1])\n self.long_data.append(line[1])\n else:\n # Newer data were given time bounds to read in as well (we're \n # skipping them for now and reading in the most-likely time).\n for i in range(int(num)):\n line = np.array(f.readline().split('\\t')[:5]).astype(np.float)\n times.append(line[0])\n self.long_time.append(line[0])\n emerg.append(line[3])\n self.long_data.append(line[3])\n \n # Post processing of data based on metadata\n if unit == 2: emerg = np.array(emerg)*0.3048 # ft -> m\n if sign >=10: \n times = np.array(times)/1000.\n sign = sign/10\n if sign == 1: times = -1*np.array(times)\n \n loc.ts=np.array(times)\n loc.ys=np.array(emerg)\n loc.es=[] \n \n # Form the dictionary entry for this location\n self.data.append(loc)\n self.locs.append([lon, lat])\n line = f.readline() # step on.\n \n f.close()", "title": "" }, { "docid": "7eb89f4c23fcb62e183f30b39feae5b0", 
"score": "0.54104745", "text": "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df = pd.read_csv(CITY_DATA[city])\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n\n # extract month and day of week from Start Time to create new columns\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday_name\n\n if month == \"all\":\n df[\"month\"] = df[\"Start Time\"].dt.month\n else:\n months = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\"]\n month = months.index(month) + 1\n df = df[df[\"month\"] == month]\n\n if day == \"all\":\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday_name\n else:\n # days=[\"sunday\", \"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\"]\n df = df[df[\"day_of_week\"] == day.title()]\n\n return df", "title": "" }, { "docid": "4e679d21985297f98c2cf6745ccd5e6a", "score": "0.54054165", "text": "def load_data(city, month, day):\n\n # get the city csv file\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # Filter by month\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'febuary', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "af37289d9f6e1dda4d7f5202bf67a17f", "score": "0.5402068", "text": "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n data = pd.read_csv(input_filepath)\n data.datetime = pd.to_datetime(data.datetime)\n data['date'] = data['datetime'].dt.date\n\n data['hour'] = data['datetime'].dt.hour\n each_day = data.pivot(index='date', columns='hour',\n values='capacity_factor')\n\n each_day = each_day.dropna()\n logger.info(each_day.head())\n each_day.to_csv(output_filepath)", "title": "" }, { "docid": "8548f1d0e58a1e43653a5b76e1adb47f", "score": "0.5400565", "text": "def read_hourly_ims_climate_database(path=ims_path / 'ground',\n savepath=None):\n import pandas as pd\n import xarray as xr\n from aux_gps import print_saved_file\n da_list = []\n for file in sorted(path.glob('*.csv')):\n name = file.as_posix().split('/')[-1].split('_')[0]\n sid = file.as_posix().split('/')[-1].split('_')[1]\n array_name = '_'.join([name, sid])\n print('reading {} station...'.format(array_name))\n df = pd.read_csv(file, index_col='time')\n df.index = pd.to_datetime(df.index)\n df.drop(labels=['Unnamed: 0', 'name'], axis=1, inplace=True)\n lat = df.loc[:, 'lat'][0]\n lon = df.loc[:, 'lon'][0]\n height = df.loc[:, 'height'][0]\n df.drop(labels=['lat', 'lon', 'height'], axis=1, inplace=True)\n da = df.to_xarray().to_array(dim='var')\n da.name = array_name\n da.attrs['station_id'] = sid\n da.attrs['lat'] = lat\n da.attrs['lon'] = lon\n da.attrs['height'] = height\n da_list.append(da)\n ds = xr.merge(da_list)\n print('Done!')\n if savepath is not None:\n comp = dict(zlib=True, complevel=9) # best compression\n 
encoding = {var: comp for var in ds.data_vars}\n ds.to_netcdf(savepath / 'hourly_ims.nc', 'w', encoding=encoding)\n print_saved_file('hourly_ims.nc', savepath)\n return ds", "title": "" }, { "docid": "873705a445c2864199b3f77dd6c661a6", "score": "0.5399236", "text": "def loaddata(self):\n # Update the name of the text file here\n text_file_doc = self.files.find_one({\"file_name\": \"plot_final.txt\"})\n test_file_doc = text_file_doc['contents']\n self.data = pd.concat([pd.read_csv(StringIO(d), sep=\",\") for d in [test_file_doc]])\n self.client.close()\n\n self.data = self.data.astype('float')\n self.data.to_csv('cloud_plot_data.txt')", "title": "" }, { "docid": "46452751e7c8273278395343681dfe4b", "score": "0.539747", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n try:\n df = pd.read_csv(CITY_DATA[city])\n except Exception as e:\n print('Load data failed. {}'.format(e))\n\n # ignore the first unnamed column\n df = df.iloc[:, 1:] \n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name() ## in pandas version >= 0.23.0\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the MONTHS list to get the corresponding int\n month = MONTHS.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "eebc0f80c0985b30fb214c46084ca2c5", "score": "0.5388394", "text": "def load_data(city, month, day):\n #load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n #convert the Start Time column to datrtime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #extract month, day of week, hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n #use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n #filter by month to create the new dateframe\n df = df[df['month'] == month] \n\n # filter by day of week if applicale\n if day != 'all':\n #filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "6937dc84cc03756f5c6c1f0a42395273", "score": "0.5387639", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n\n\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month_name()\n df['week_day'] = df['Start Time'].dt.day_name()\n\n # filter by month if applicable\n if month != 'all':\n # filter by month to create the new dataframe\n df = df[df['month'] == month.title()]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['week_day'] == day.title()]\n\n return df", "title": "" }, { "docid": "9be40385ebe17085fe2dbc8eff21e4c9", "score": 
"0.5380968", "text": "def Load_City_Data():\n cities_df = pd.read_csv(cities_file_loc)\n return cities_df", "title": "" }, { "docid": "4afa8e75f1e0993534dcf1df5c194a18", "score": "0.5380228", "text": "def load_data(city, month, day):\n print('Loading data, please wait','.'*20)\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n df['month'] = df['Start Time'].dt.month\n df['Day_Of_Week'] = df['Start Time'].dt.weekday_name\n \n # filter by month if applicable\n if month != '0':\n # use the index of the months list to get the corresponding int\n #months = ['January', 'February', 'March', 'April', 'May', 'June']\n #month = months.index(month)\n\n # filter by month to create the new dataframe\n df = df[df['month'] == int(month)]\n\n # filter by day of week if applicable\n if day != '0':\n # filter by day of week to create the new dataframe\n str_day = DAY_SELECTION[day]\n df = df[df['Day_Of_Week'] == str_day]\n print('Data loading completed','.'*20)\n print('-'*40)\n return df", "title": "" }, { "docid": "d7e8aa070b15fc80d807d0e400e4b794", "score": "0.5377513", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] =df['Start Time'].dt.weekday_name\n df['hour'] =df['Start Time'].dt.hour\n # filter by month\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month = months.index(month)+1\n\n # filter by month to create the new dataframe\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week']==day.title()]\n return df", "title": "" }, { "docid": "cbd251d612e32fa2ba24ee1a7e11454a", "score": "0.5370446", "text": "def load_data(city, month, day):\n \n # Clear the screen and display loading message\n clear()\n print(\"Loading data set based on selected conditions...\")\n\n # Load the appropriate csv based on city provided by user\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # Extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # Filter by month if applicable\n if month != 'all':\n # Use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n \n # Filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n\n # Filter by day of week if applicable\n if day != 'all':\n # Filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n\n\n return df", "title": "" }, { "docid": "8d9ad2d092b76be32be304fba2755a01", "score": "0.53700346", "text": "def load_data(city, month, day):\n \n input_file = CITY_DATA[city] #define input file for city and select from the dictionary the corresponding csv file\n df = pd.read_csv(input_file) #dataframe variable to read the file csv\n 
\n df[\"start_time_dt\"] = pd.to_datetime(arg = df['Start Time'], format = '%Y-%m-%d %H:%M:%S') # convert the string start time in a data format\n df[\"month\"] = df['start_time_dt'].dt.month # extract the month from the date column\n df[\"day_of_week\"] = df['start_time_dt'].dt.day_name() # extract the day of the week from the date column\n \n if month != 'all': #if you select different from all you have to filter according the different months\n months_map = { \"january\":1,\"february\":2,\"march\":3,\"april\":4,\"may\":5,\"june\":6} #create a map where each month has an associated number\n month_id = months_map[month]\n df = df.loc[df['month'] == month_id] # dataframe becomes filter by month = to month_id\n \n if day != 'all':\n df = df.loc[df['day_of_week'] == day.title()]\n \n return df", "title": "" }, { "docid": "9a535d845ada9d82f9f16d65cfeefa59", "score": "0.5369693", "text": "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['city'] = city\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month \n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1 \n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n\n except:\n print('\\nInvalid data entered. 
Please enter data that matches given criteria\\n')\n get_filters()", "title": "" }, { "docid": "3879fb293034f3947ff96c6f2469bd7b", "score": "0.5367309", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month)\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # use the index of the days list to get the corresponding int\n day = days.index(day)\n\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day]\n\n return df", "title": "" }, { "docid": "14ba5b0d4c215400ef10a1c5b2fd9ac6", "score": "0.53661793", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n\n # extract month and day of week and hour by adding new columns\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday_name\n df[\"hour\"] = df[\"Start Time\"].dt.hour\n\n # filter by month if asked\n if month != \"all\":\n month = MONTHS_LIST.index(month) + 1\n df = df[df[\"month\"] == month]\n\n # filter by day of week if asked\n if day != \"all\":\n df = df[df[\"day_of_week\"] == day.title()]\n\n return df", "title": "" }, { "docid": "03df49d5c2eb477444999db8d538ea5c", "score": "0.5363263", "text": "def main():\n # import the CSV into Pandas dataframe\n print('Open file...')\n df = pd.read_csv('csv/DailyWeather.csv')\n print('Split dates into multiple columns...')\n split_dates_into_multiple_cols(df)\n\n stations = get_stations()\n\n for start_year, end_year in RANGES:\n year_range = list(range(start_year, end_year + 1))\n year_df = grab_data_in_particular_year_range(df, year_range)\n\n folder_name = get_output_foldername(start_year, end_year)\n\n for station in stations:\n station_code = int(station['code'])\n station_df = year_df[year_df['station_code'] == station_code]\n\n if station_df.empty:\n print('Data not available for {} creating empty summary...'.format(\n station_code))\n empty_summary = create_null_summary(station, year_range)\n dump_summary_in_json(folder_name, station_code, empty_summary)\n continue\n\n season = get_core_fire_season(station)\n remove_data_outside_fire_season(station_df, season)\n percentiles = calculate_percentile(station_df, PERCENTILE)\n\n years = get_years_for_valid_fwi_values(station_df)\n\n summary = {\n 'ffmc': percentiles['ffmc'],\n 'isi': percentiles['isi'],\n 'bui': percentiles['bui'],\n 'years': years,\n 'station': station\n }\n\n dump_summary_in_json(folder_name, station_code, summary)\n\n print('--- Done creating data under {} folder ---'.format(folder_name))", "title": "" }, { "docid": "ebc9bfb393bba8ab5b91974b88503f8a", "score": "0.53535694", "text": "def load_data(city_file, month, day):\r\n\r\n # load data file into a dataframe\r\n df = pd.read_csv(city_file)\r\n\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # 
extract month and day of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['start_hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months = ['January', 'February', 'March', 'April', 'May', 'June']\r\n month = months.index(month) + 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n return df", "title": "" }, { "docid": "0ed5a3068531e51e2938874df0bfd885", "score": "0.53383714", "text": "def datasets():\n\n\n\tdata= OrderedDict()\n\n\tdata[\"GIMMS3gv1.1\"] = ({\n\t\t'fname':\"./data/veg/GIMMS31g/GIMMS31v1/timecorrected/ndvi3g_geo_v1_1_1982to2017_annualmax.nc\",\n\t\t'var':\"ndvi\", \"gridres\":\"GIMMS\", \"region\":\"Global\", \"Periods\":[\"AnnualMax\"]\n\t\t})\n\tdata[\"GIMMS3gv1.0\"] = ({\n\t\t'fname':\"./data/veg/GIMMS31g/3.GLOBAL.GIMMS31.1982_2015_AnnualMax.nc\",\n\t\t'var':\"ndvi\", \"gridres\":\"GIMMS\", \"region\":\"Global\", \"Periods\":[\"AnnualMax\"]\n\t\t})\n\tdata[\"COPERN\"] = ({\n\t\t'fname':\"./data/veg/COPERN/NDVI_AnnualMax_1999to2018_global_at_1km_compressed.nc\",\n\t\t'var':\"NDVI\", \"gridres\":\"COPERN\", \"region\":\"Global\", \"Periods\":[\"AnnualMax\"]\n\t\t})\n\tdata[\"MOD13C1\"] = ({\n\t\t\"fname\":\"/media/ubuntu/Seagate Backup Plus Drive/Data51/NDVI/5.MODIS/terra/processed/MODIS_terra_MOD13C1_5kmCMG_anmax.nc\",\n\t\t'var':\"ndvi\", \"gridres\":\"MODIS_CMG\", \"region\":\"Global\", \"Periods\":[\"AnnualMax\"], \n\t\t\"start\":2000, \"end\":2018\n\t\t})\n\tdata[\"MYD13C1\"] = ({\n\t\t\"fname\":\"/media/ubuntu/Seagate Backup Plus Drive/Data51/NDVI/5.MODIS/aqua/5km/processed/MODIS_aqua_MYD13C1_5kmCMG_anmax.nc\",\n\t\t'var':\"ndvi\", \"gridres\":\"MODIS_CMG\", \"region\":\"Global\", \"Periods\":[\"AnnualMax\"], \n\t\t\"start\":2002, \"end\":2018\n\t\t})\n\t# ========== DO NOT REMOVE I MAY NEED THIS LATER =\n\t# data[\"MODISaqua\"] = ({\n\t# \t'fname': sorted(glob.glob(\"./data/veg/MODIS/aqua/processed/MYD13Q1_A*_final.nc\"))[1:],\n\t# \t'var':\"ndvi\", \"gridres\":\"MODIS\", \"region\":\"Siberia\", \"Periods\":[\"All\"]\n\t# \t})\n\treturn data", "title": "" }, { "docid": "e8c1d1c5292a3a7a7afd3e0cc5b5c3a3", "score": "0.53348386", "text": "def get_data(year: Union[str, int], population) -> pd.DataFrame:\n\n def map_f_scale(item):\n fscale_patt = re.compile(\"(\\d)\")\n match = fscale_patt.search(str(item))\n if match:\n return int(match.group(1))\n return np.nan\n\n def map_damage(item):\n item = (\n str(item)\n .upper()\n .replace(\"?\", \"\")\n .replace(\"T\", \"K\" * 4)\n .replace(\"B\", \"K\" * 3)\n .replace(\"M\", \"K\" * 3)\n .replace(\"K\", \"000\")\n .replace(\"H\", \"00\")\n )\n try:\n item = float(item)\n except ValueError:\n item = np.nan\n return item\n\n def split_mag(row):\n # split magnitude into wind speeds and hail sizes\n mag_type = row[\"MAGNITUDE_TYPE\"]\n mag = row[\"MAGNITUDE\"]\n if mag_type:\n row[\"WIND_SPEED\"] = mag\n row[\"HAIL_SIZE\"] = 0.0\n else:\n row[\"WIND_SPEED\"] = 0.0\n row[\"HAIL_SIZE\"] = mag\n return row\n\n year = str(year)\n for filename in os.listdir(\"data\"):\n filename = os.path.join(\"data\", filename)\n if YEAR_PATT.search(filename).group(1) == year:\n break\n print(\"Cleaning\", filename)\n with 
gzip.open(filename) as f:\n data = pd.read_csv(f, usecols=GET_COLUMNS, low_memory=False)\n\n # exclude maritime event types\n data = data[data[\"CZ_TYPE\"] != \"M\"]\n # combine type and fips to single column\n data = join_columns(\n data,\n lambda y, z: \"{:02d}{:03d}\".format(int(y), int(z)),\n (\"FIPS\", [\"STATE_FIPS\", \"CZ_FIPS\"]),\n )\n # combine city/state into same column\n data = join_columns(\n data,\n lambda x, y: \"{}, {}\".format(x, y),\n (\"LOC_NAME\", [\"CZ_NAME\", \"STATE\"]),\n )\n for col in [\"DAMAGE_PROPERTY\", \"DAMAGE_CROPS\"]:\n data[col] = data[col].map(map_damage)\n\n # scale by population\n pop = population[[\"FIPS\", \"POPESTIMATE{}\".format(year)]]\n pop = pop.set_index(\"FIPS\")[\"POPESTIMATE{}\".format(year)]\n pop = pop / pop.mean()\n\n def div_pop(row):\n for col in [\n \"DAMAGE_PROPERTY\",\n \"DAMAGE_CROPS\",\n \"INJURIES_DIRECT\",\n \"INJURIES_INDIRECT\",\n \"DEATHS_DIRECT\",\n \"DEATHS_INDIRECT\",\n ]:\n if row[\"FIPS\"] not in pop:\n row[col] = np.nan\n else:\n row[col] = row[col] / pop[row[\"FIPS\"]]\n return row\n\n data = data.apply(div_pop, axis=1)\n\n # combine lat/lng into single column\n data = join_columns(\n data,\n lambda x, y: \"{},{}\".format(x, y),\n (\"BEGIN_LOC\", [\"BEGIN_LAT\", \"BEGIN_LON\"]),\n (\"END_LOC\", [\"END_LAT\", \"END_LON\"]),\n )\n\n # combine date/time related columns into single datetime column\n data = join_columns(\n data,\n lambda x, y, z: \"{}{}:{}\".format(\n str(x).rjust(6, \"0\"), str(y).rjust(2, \"0\"), str(z).rjust(4, \"0\")\n ),\n (\"BEGIN_DATE_TIME\", [\"BEGIN_YEARMONTH\", \"BEGIN_DAY\", \"BEGIN_TIME\"]),\n (\"END_DATE_TIME\", [\"END_YEARMONTH\", \"END_DAY\", \"END_TIME\"]),\n )\n # format datetime columns as such\n date_fmt = \"%Y%m%d:%H%M\"\n data[\"BEGIN_DATE_TIME\"] = pd.to_datetime(\n data[\"BEGIN_DATE_TIME\"], format=date_fmt\n )\n data[\"END_DATE_TIME\"] = pd.to_datetime(\n data[\"END_DATE_TIME\"], format=date_fmt\n )\n # calculate duration\n data = join_columns(\n data,\n lambda x, y: (y - x).total_seconds() / 3600,\n (\"DURATION\", [\"BEGIN_DATE_TIME\", \"END_DATE_TIME\"]),\n keep_old=True,\n )\n\n # fill NaN with 0 for columns that it makes sense for\n for col in [\n \"MAGNITUDE\",\n \"TOR_LENGTH\",\n \"TOR_WIDTH\",\n ]:\n data[col] = data[col].fillna(0)\n # remove \"EF\" or \"F\" from tornado scale entries\n data[\"TOR_F_SCALE\"] = data[\"TOR_F_SCALE\"].map(map_f_scale)\n # split MAGNITUDE according to MAGNITUDE_TYPE\n data = data.apply(split_mag, axis=1)\n # remove unneeded columns\n data = data.drop(columns=[\"MAGNITUDE\", \"MAGNITUDE_TYPE\"])\n return data", "title": "" }, { "docid": "6f181c6c4e6b434c7aa146760cd5485d", "score": "0.533464", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # convert the Start Time column to datetime\n #print(df) \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n \n 
return df", "title": "" }, { "docid": "4097dd8a8641d25730ce95ea4cbbc3f4", "score": "0.5326769", "text": "def extract_city_data(city, out_path, do_plot=False, use_german=False,\n save_tikz=False, save_as_xlsx=True):\n\n if use_german:\n\n city_path = os.path.join(out_path, 'Stadt')\n gen_path_if_not_existent(city_path)\n\n city_out = 'Stadt_Daten.txt'\n data_file = os.path.join(city_path, city_out)\n\n else:\n\n city_path = os.path.join(out_path, 'city')\n gen_path_if_not_existent(city_path)\n\n city_out = 'city_data.txt'\n data_file = os.path.join(city_path, city_out)\n\n # Extract city base data\n extract_city_base_data(city=city, out_file_path=data_file, do_plot=do_plot,\n use_german=use_german, save_tikz=save_tikz)\n\n # Extract data into single file\n if use_german:\n save_path = os.path.join(city_path, 'Stadt_Gebaeudedaten.txt')\n x_label = 'X-Koordinate in m'\n y_label = 'Y-Koordinate in m'\n\n else:\n save_path = os.path.join(city_path, 'city_data_buildings.txt')\n x_label = 'x-coordinate in m'\n y_label = 'y-coordinate in m'\n\n savcit.save_city_data_to_file(city=city, save_path=save_path,\n use_german=use_german,\n save_as_xlsx=save_as_xlsx)\n\n # Generate plot with ids and save it to out_path\n citvis.plot_city_district(city=city,\n city_list=None,\n plot_buildings=True,\n plot_street=True,\n plot_lhn=False, plot_deg=False,\n plot_esys=False,\n offset=7,\n plot_build_labels=True, plot_str_labels=False,\n plot_heat_labels=False,\n equal_axis=False, font_size=16, plt_title=None,\n x_label=x_label,\n y_label=y_label,\n show_plot=False,\n fig_adjust=None,\n plot_elec_labels=False, save_plot=True,\n save_path=city_path, dpi=300, plot_color=True,\n plot_engl=not use_german,\n auto_close=True, plot_str_dist=150,\n node_size=50)\n\n # Extract and save city profiles\n extract_city_profiles(city=city, city_path=city_path, do_plot=do_plot,\n use_german=use_german, save_tikz=save_tikz,\n save_as_xlsx=save_as_xlsx)", "title": "" }, { "docid": "69d89d5950c0d804eb68bfabb759a302", "score": "0.5324713", "text": "def modify_travel_data(self):\n \n # using default cbd \n cbd = 129\n # set the preferred zone\n study_zone = 908\n # set travel times for the preferered zone and other zones\n min_travel_time = '0.40' # time in minutes\n min_travel_cost = '3.47' # travel const in ???\n \n logger.log_status(\"Set the following travel time and cost between cbd and study zone:\")\n logger.log_status(\"Zone ID cbd = %s\" %cbd)\n logger.log_status(\"Zone ID study zone = %s\" %study_zone)\n logger.log_status(\"Travel time = %s\" %min_travel_time)\n logger.log_status(\"Travel cost = %s\" %min_travel_cost)\n \n travel_data = paths.get_opus_home_path( \"opus_matsim\", \"tmp\", \"travel_data.csv\" )\n if not self.travel_data_exsists(travel_data):\n raise StandardError('Travel data not found! 
%s' % travel_data)\n \n in_file = open(travel_data, 'r')\n str_list = []\n # read header of travel data to get the indices of the colums (from_zone, to_zone, single_vehicle_travel_time)\n line = in_file.readline()\n # init indices\n get_indices = GetIndices(line)\n index_from_zone = get_indices.get_from_zone_index()\n index_to_zone = get_indices.get_to_zone_index()\n index_travel_times = get_indices.get_am_single_vehicle_to_work_travel_time_index()\n index_travel_costs = get_indices.get_single_vehicle_to_work_travel_cost_index()\n number_of_colums = get_indices.get_number_of_colums()\n \n # prepare header line for the output file\n row = line.split(',')\n str_list.append( row[index_from_zone].strip('\\r\\n') +','+ row[index_to_zone].strip('\\r\\n') +','+ row[index_travel_times].strip('\\r\\n') + ',' + row[index_travel_costs].strip('\\r\\n') +'\\r\\n')\n \n # get first line of the table content\n line = in_file.readline()\n \n # replaces the travel times as decribed above...\n while line:\n row = line.split(',')\n # consistency check\n if len(row) != number_of_colums:\n raise StandardError('Error in number of colums: %s' %row)\n \n from_zone_id = int(row[index_from_zone].strip('\\r\\n'))\n to_zone_id = int(row[index_to_zone].strip('\\r\\n'))\n \n \n # just sets the travel time and cost from cbd2studyzone and \n # from stuyzone2cbd to the defined values above\n if (from_zone_id == cbd and to_zone_id == study_zone):\n row[index_travel_times] = min_travel_time\n row[index_travel_costs] = min_travel_cost\n \n elif (from_zone_id == study_zone and to_zone_id == cbd):\n row[index_travel_times] = min_travel_time\n row[index_travel_costs] = min_travel_cost\n \n # append modified row to the new travel data content\n str_list.append( row[index_from_zone].strip('\\r\\n') +','+ row[index_to_zone].strip('\\r\\n') +','+ row[index_travel_times].strip('\\r\\n') + ',' + row[index_travel_costs].strip('\\r\\n') +'\\r\\n')\n\n line = in_file.readline()\n \n # finished modifying traval data\n in_file.close()\n # now write new travel data onto disc\n out_file = open(travel_data, 'w')\n logger.log_status(\"Copying modified travel data onto disc.\")\n for row in str_list:\n out_file.write(row)\n out_file.close();\n logger.log_status(\"Finished copy process.\")", "title": "" }, { "docid": "e7b5f1cb0c67f96b1532e30570bf691c", "score": "0.53191125", "text": "def main(savefile):\n stations = ['yvr', 'yyj', 'yxs', 'yxy', 'yzf', 'yfb', 'yeg', 'yyc', 'yxe',\n 'yqr', 'ywg', 'yqt', 'yyz', 'yow', 'yul', 'yqb', 'yfc', 'yhz',\n 'yyg', 'yyt']\n df_list = []\n for station in stations:\n df_in = get_data(station)\n df_list.append(df_in)\n data = pd.concat(df_list, axis=0)\n columns = sorted(list(data.columns))\n first_cols = ['Timezone', 'Hour of Day', 'Station ID', 'Station Name',\n 'Latitude', 'Longitude', 'Conditions', 'Temperature (C)',\n 'Relativehumidity(%)']\n for col in first_cols:\n columns.remove(col)\n columns = first_cols + columns\n\n data = data[columns]\n print('Saving to ' + savefile)\n data.to_csv(savefile)\n return data", "title": "" }, { "docid": "12be7578214407a6ec570679a334cc5c", "score": "0.5316142", "text": "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by 
month if applicable\n if month != 'all':\n # filter by month to create the new dataframe\n df = df[df['month'] == month.title()]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "e2bad828e58e94fc762ab6c67ab8021c", "score": "0.5314856", "text": "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n days = {0: 'monday', 1: 'tuesday', 2: 'wednesday', 3: 'thursday', 4: 'friday', 5: 'saturday', 6: 'sunday'}\n df['day_of_week'] = df[\"day_of_week\"].apply(lambda x: days[x])\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[(df['day_of_week'] == day)]\n\n return df", "title": "" }, { "docid": "5361e8053446d9b438cbd83042623545", "score": "0.5312348", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n df['month_name'] = df['Start Time'].dt.strftime('%B')\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "80df8a63a8a3d97640c104d850581407", "score": "0.53089315", "text": "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n\n print(city)\n # add city name as a column\n df['city'] = city.lower()\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df", "title": "" }, { "docid": "9f8705c4c0d90d0c9fc945da3f5412a6", "score": "0.530764", "text": "def load_data(city, month, day):\n df = pd.read_csv(city)\n 
df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n if day != 'all':\n df = df[df['day_of_week'] == day]\n if month != 'all':\n df = df[df['month'] == month]\n df.drop('day_of_week', axis = 1, inplace=True)\n df.drop('month', axis = 1, inplace=True)\n return df", "title": "" }, { "docid": "01bb049f55f076c11c20be085eff591d", "score": "0.5307264", "text": "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA.get(city))\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n df['start_hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df.loc[(df['month'] == month)]\n\n # filter by day of week if applicable\n if day != 'all':\n days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n day = days.index(day.title())\n # filter by day of week to create the new dataframe\n df = df.loc[(df['day_of_week'] == day)]\n\n return df", "title": "" }, { "docid": "ee3d5256d53366db0b023dd3e98c7849", "score": "0.5306839", "text": "def load_data(city, month, day):\n\n # Load Data File into Dataframe ( Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime (Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # Extract month and day of week from Start Time to create new columns (Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable (Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n if month != 'all':\n\n # use the index of the months list to get the corresponding int (Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe (Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n df = df[df['month'] == month]\n\n # filter by day of week if applicable(Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n if day != 'all':\n\n # filter by day of week to create the new dataframe(Information on this steps in Udacity Bikeshare Project Practice quiz 3)\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "612340920e20fa7b412b5084e5ef2be6", "score": "0.53023326", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n\n # filter by month if applicable\n if month != 'all':\n\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = 
months.index(month) + 1\n\n\n # month is returned in integers where January=1, December=12\n df['month'] = df['Start Time'].dt.month\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n\n # filter by day of week if applicable\n if day != 'all':\n # weekday_name is returned in strings e.g. Monday, Tuesday, etc\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" }, { "docid": "beb33ab1d79d2a536f029a76a05537c1", "score": "0.5300624", "text": "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n \t \t# use the index of the months list to get the corresponding int\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n month = months.index(month) + 1\n\n \t# filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "title": "" } ]
dc6c2135517ccd8025af10f5c0c7967e
Return the reference ID for the current command section.
[ { "docid": "60f7faf63c367a3e1cdf197b3fb8a03f", "score": "0.7987671", "text": "def get_current_command_section_ref_id(env, for_subcommand=False):\n if for_subcommand:\n key = 'rbt-subcommand'\n else:\n key = 'rbt-command'\n\n return env.temp_data['%s:doc-prefix' % key]", "title": "" } ]
[ { "docid": "b94e34f76c06201a2311a9fa0ae4c714", "score": "0.7150189", "text": "def get_current_command_usage_ref_id(env, for_subcommand=False):\n return ('%s-usage'\n % get_current_command_section_ref_id(\n env,\n for_subcommand=for_subcommand))", "title": "" }, { "docid": "20b37d0f4cebacb3154db6af91a04b53", "score": "0.69801944", "text": "def make_command_section_ref_id(command_name, subcommand_name=None):\n ref_id = 'rbt-%s' % command_name\n\n if subcommand_name:\n ref_id = '%s-%s' % (ref_id, subcommand_name)\n\n return ref_id", "title": "" }, { "docid": "eca4f2bf90b48e03378c4ced4482354f", "score": "0.69737726", "text": "def reference_id(self) -> str:\r\n return self._reference_id", "title": "" }, { "docid": "e45b6db4b36328835bd82501c02195b4", "score": "0.69715697", "text": "def get_current_command_options_ref_id(env, for_subcommand=False):\n return ('%s-options'\n % get_current_command_section_ref_id(\n env,\n for_subcommand=for_subcommand))", "title": "" }, { "docid": "4c05f3ef8ae9c184b21ca6ff0e65b908", "score": "0.6969557", "text": "def get_current_command_option_ref_id(env, option_name,\n for_subcommand=False):\n return ('%s-%s'\n % (get_current_command_section_ref_id(\n env,\n for_subcommand=for_subcommand),\n option_name.lower().replace(' ', '-')))", "title": "" }, { "docid": "5badd2ac4d01016718dc248ebb6f98ab", "score": "0.6890261", "text": "def reference(self):\n id_match = self.REFERENCE_RE.match(self.path)\n if id_match:\n return id_match.group(1)", "title": "" }, { "docid": "36d98a225f056b711bea6fa307167812", "score": "0.66051316", "text": "def reference_id(self):\n return self._sdk_task.id", "title": "" }, { "docid": "5caa08ba5cc784d3de2e1b9b2e63db45", "score": "0.609923", "text": "def identifier(self):\n return self.__id", "title": "" }, { "docid": "73c19fc1c987ffa97fa720f5e096c34f", "score": "0.6081845", "text": "def key_vault_reference_identity(self) -> str:\n return pulumi.get(self, \"key_vault_reference_identity\")", "title": "" }, { "docid": "83b2c5fb27c31971c85f21d93678405c", "score": "0.603807", "text": "def getIdentifier(self):\n return self._config['identifier']", "title": "" }, { "docid": "70ad80529bd9d216f13047af521bb0b8", "score": "0.6007338", "text": "def get_new_reference_id(self) -> int:\n return next(self.reference_id_generator)", "title": "" }, { "docid": "e4e90caf9880c607f712c2f77e7a4337", "score": "0.60012484", "text": "def id(self) -> str:\n return self.__getitem__('id')", "title": "" }, { "docid": "e4e90caf9880c607f712c2f77e7a4337", "score": "0.60012484", "text": "def id(self) -> str:\n return self.__getitem__('id')", "title": "" }, { "docid": "a10dd5cf0d9b5dcd869fced5194dee62", "score": "0.5992599", "text": "def getReferenceSelect(self):\n return int(self.sendCommand('x?'))", "title": "" }, { "docid": "f938857aef8b082399d876e80236be43", "score": "0.5965478", "text": "def getID(self):\n return self.identifier", "title": "" }, { "docid": "35bd49483e2270ab55305c2ff0896a1f", "score": "0.5919608", "text": "def get_id_from_ref(cls, ref):\n ref_id = None\n if ref is not None and len(ref) > 0:\n ref_id = path.split(ref)[1]\n return ref_id", "title": "" }, { "docid": "cc3910eb3919633801ad686629aa90ce", "score": "0.5890677", "text": "def identifier(self):\n return self.__identifier", "title": "" }, { "docid": "cc3910eb3919633801ad686629aa90ce", "score": "0.5890677", "text": "def identifier(self):\n return self.__identifier", "title": "" }, { "docid": "cc3910eb3919633801ad686629aa90ce", "score": "0.5890677", "text": "def identifier(self):\n return 
self.__identifier", "title": "" }, { "docid": "cc3910eb3919633801ad686629aa90ce", "score": "0.5890677", "text": "def identifier(self):\n return self.__identifier", "title": "" }, { "docid": "fcd7e2ae8bab1dc648c283a8f71f5595", "score": "0.58814985", "text": "def get_id(self):\n return \"{}#{}\".format(self.rule.name, self.id_number)", "title": "" }, { "docid": "f6052021d4933c7595473f7bb076b015", "score": "0.58508974", "text": "def identifier(self):\n return self.identifiers[0]", "title": "" }, { "docid": "ce5b95c0699c8e0ec435ba72d6af7061", "score": "0.58437026", "text": "def __get_id(self):\n return self.get_id()", "title": "" }, { "docid": "5b30e6b916dc558cc2587f6ee007e953", "score": "0.5836853", "text": "def id(self) -> str:\n return self._target_id", "title": "" }, { "docid": "0c6a7c34307db57029eac0dbd0a38866", "score": "0.58240646", "text": "def _id(self):\n return self.__id", "title": "" }, { "docid": "2d5ff5134d404f300c6cc2c0db3b3f76", "score": "0.58146495", "text": "def get_id(self):\n return self.fetch_interface().id()", "title": "" }, { "docid": "35af848ddecc4b0320a318d230624205", "score": "0.5804678", "text": "def id(self):\n return self.getattr('id')", "title": "" }, { "docid": "875d056ae9d5a140fabcc8df5e607100", "score": "0.5803586", "text": "def current_id(self):\n cmd = ['hg', '-q', 'id', '-i']\n return self.sh(cmd, shell=False).rstrip().rstrip('+') # TODO", "title": "" }, { "docid": "784f35bfe5d4af01ae39dd14d0a01766", "score": "0.57778066", "text": "def key_ref(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key_ref\")", "title": "" }, { "docid": "b1ad433d654521b2cd6b094150da0ae9", "score": "0.5771636", "text": "def identifier(self):\n return self._identifier", "title": "" }, { "docid": "b1ad433d654521b2cd6b094150da0ae9", "score": "0.5771636", "text": "def identifier(self):\n return self._identifier", "title": "" }, { "docid": "b1ad433d654521b2cd6b094150da0ae9", "score": "0.5771636", "text": "def identifier(self):\n return self._identifier", "title": "" }, { "docid": "6e786744c773c0e3ce6f2f1dd2ed82a1", "score": "0.5767258", "text": "def current_id(self):\n pass", "title": "" }, { "docid": "f2622e12045ca507ecce59d4815ac137", "score": "0.5763572", "text": "def id(self):\n return self._path", "title": "" }, { "docid": "f4286112f60f8e02ce73a0c1a5b7028a", "score": "0.57581115", "text": "def identifier(self):\n if self._identifier is None:\n self._identifier = self.metadata['id']\n return self._identifier", "title": "" }, { "docid": "bf3b26da25bc3e72adb6db8b04564b98", "score": "0.57561576", "text": "def target_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_id\")", "title": "" }, { "docid": "bf3b26da25bc3e72adb6db8b04564b98", "score": "0.57561576", "text": "def target_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_id\")", "title": "" }, { "docid": "1bdfc200d710700f86589f606ba4de65", "score": "0.57403207", "text": "def _get_id(self):\n return self._id", "title": "" }, { "docid": "fb089b1e18159b03398d86078781705b", "score": "0.5735704", "text": "def link_id(self) -> int:\n return pulumi.get(self, \"link_id\")", "title": "" }, { "docid": "973b72a1fe0d858ff0a6ae4d610f240e", "score": "0.5731833", "text": "def get_id(self):\n\n\t\treturn self.__id", "title": "" }, { "docid": "973b72a1fe0d858ff0a6ae4d610f240e", "score": "0.5731833", "text": "def get_id(self):\n\n\t\treturn self.__id", "title": "" }, { "docid": "7ebff6b311665bb04b280d3feb287881", "score": "0.5725361", "text": "def instance_identifier(self):\n return 
self.c_instance.instance_identifier()", "title": "" }, { "docid": "971915621f0f1403e2b95787bf9f1f94", "score": "0.5719465", "text": "def ident(self):\n return self._ident", "title": "" }, { "docid": "54b8ba49de9f22734566d19e9968e850", "score": "0.5714078", "text": "def retrieval_reference_number(self):\n return self._retrieval_reference_number", "title": "" }, { "docid": "6aa94915a5ffec9d723cc450b7285408", "score": "0.5707676", "text": "def get_id(self):\n\t\treturn self.id", "title": "" }, { "docid": "6aa94915a5ffec9d723cc450b7285408", "score": "0.5707676", "text": "def get_id(self):\n\t\treturn self.id", "title": "" }, { "docid": "8cb95d46307d678e1de3ab78b2a005fb", "score": "0.5696584", "text": "def identifier(self) -> int:\n return self._identifier", "title": "" }, { "docid": "8cb95d46307d678e1de3ab78b2a005fb", "score": "0.5696584", "text": "def identifier(self) -> int:\n return self._identifier", "title": "" }, { "docid": "b3d692638dddc6c0635a58a18487a8bb", "score": "0.5690155", "text": "def get_id(self):\n return self.key.id()", "title": "" }, { "docid": "143db2e9b2ecd06b63b8be9d4c9248f6", "score": "0.56859505", "text": "def id(self):\n return self._impl.id", "title": "" }, { "docid": "143db2e9b2ecd06b63b8be9d4c9248f6", "score": "0.56859505", "text": "def id(self):\n return self._impl.id", "title": "" }, { "docid": "143db2e9b2ecd06b63b8be9d4c9248f6", "score": "0.56859505", "text": "def id(self):\n return self._impl.id", "title": "" }, { "docid": "bc576340bf9652adf232034622b867d9", "score": "0.56859374", "text": "def current_id(self):\n # from xml.etree import ElementTree as ET\n # info = ET.fromstringlist(self.sh('svn info --xml'))\n # return info.find('entry').get('revision')\n return (\n self.sh('svn info | grep \"^Revision: \"',\n shell=True)\n .split(': ', 1)[1].strip())", "title": "" }, { "docid": "a5e029014cdeb1319ef653e0a2ec19d5", "score": "0.56689274", "text": "def get_id(self):\n return self._read(REG_ID, 2)", "title": "" }, { "docid": "e8d775916572c5425eea2ed5d868a4ed", "score": "0.56642103", "text": "def get_ID(self):\n self.Gauge.write(b\"*IDN?\")\n return self.Gauge.readline(4).strip()", "title": "" }, { "docid": "994a26d22b475fa66db2e69b2e28a6ec", "score": "0.56574315", "text": "def target_id(self) -> str:\n return pulumi.get(self, \"target_id\")", "title": "" }, { "docid": "994a26d22b475fa66db2e69b2e28a6ec", "score": "0.56574315", "text": "def target_id(self) -> str:\n return pulumi.get(self, \"target_id\")", "title": "" }, { "docid": "0587904014550a819c14a7486b1c4a99", "score": "0.5656773", "text": "def id(self):\n return self.__id", "title": "" }, { "docid": "fb0121ef126100e4c1c790badfe62f55", "score": "0.5650478", "text": "def get_ident():\n return id(greenthread.getcurrent())", "title": "" }, { "docid": "b9167ee72c9fb113d68529c97c8590b7", "score": "0.56487095", "text": "def get_id(self):\n\t\treturn self._id", "title": "" }, { "docid": "4618abfeefe8411b8a2c566ed2d581b6", "score": "0.5647836", "text": "def id(self):\n return self.__id", "title": "" }, { "docid": "4618abfeefe8411b8a2c566ed2d581b6", "score": "0.5647836", "text": "def id(self):\n return self.__id", "title": "" }, { "docid": "4618abfeefe8411b8a2c566ed2d581b6", "score": "0.5647836", "text": "def id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.56477296", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.56477296", "text": "def _get_id(self):\n return self.__id", "title": "" }, { 
"docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.56477296", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "ab07dcca5ad0b1c5092479172174bc3d", "score": "0.5647186", "text": "def _get_id(self):\n return self.__id", "title": "" }, { "docid": "75879b5a83390166af4cd4b01be7d291", "score": "0.5641143", "text": "def association_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"association_id\")", "title": "" }, { "docid": "eb863c30089045bf864f4b88495cc13c", "score": "0.562554", "text": "def id(self) -> str:\n return self.__id__", "title": "" }, { "docid": "0a6cb1b409896b754db9e409da6cb8a7", "score": "0.56189096", "text": "def reference_number_prefix(self):\n return self.position[-1]", "title": "" }, { "docid": "95f84bcb4bfa277e2bc619a2d14cc996", "score": "0.5610314", "text": "def ref(reference):\n settings = context.config.util_properties(\"ref\")\n records = context.data.get(settings.get(\"source\", \"references\")).shape(\"dict\")\n for entry in records:\n if entry.get(\"ID\") == reference:\n return \"[[\" + reference + \"](#\" + _anchor_name(reference) + \")]\"", "title": "" }, { "docid": "e417cd781f5d328ebba4fd9f4d48c495", "score": "0.5608579", "text": "def get_id(self):\r\n return self.id", "title": "" }, { "docid": "e417cd781f5d328ebba4fd9f4d48c495", "score": "0.5608579", "text": "def get_id(self):\r\n return self.id", "title": "" }, { "docid": "ebeb4f13f6592525b4d681c44478f83f", "score": "0.56051224", "text": "def getId(self):\n return self.__name__.replace('.', '').replace(' ', '')", "title": "" }, { "docid": "3489c052f1920f84c8c442fdef7aa60d", "score": "0.5601353", "text": "def reference(self):\n return self._reference", "title": "" }, { "docid": "3489c052f1920f84c8c442fdef7aa60d", "score": "0.5601353", "text": "def reference(self):\n return self._reference", "title": "" }, { "docid": "3489c052f1920f84c8c442fdef7aa60d", "score": "0.5601353", "text": "def reference(self):\n return self._reference", "title": "" }, { "docid": "3489c052f1920f84c8c442fdef7aa60d", "score": "0.5601353", "text": "def reference(self):\n return self._reference", "title": "" }, { "docid": "6c92667771283d73d973ed0bf5131ed9", "score": "0.56013155", "text": "def getID(self):\n return self.__ID", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def 
get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.5594009", "text": "def get_id(self):\n return self.id", "title": "" } ]
e3f2ebcfdf18e31720df7eb11cb19c61
Return a custom 404 error.
[ { "docid": "1af86d53d490245475ae70338cc85d7b", "score": "0.72552013", "text": "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "title": "" } ]
[ { "docid": "6105f813d803e8846ed5b1e4e8e3cdd2", "score": "0.8263737", "text": "def not_found(error):\n return \"404\"", "title": "" }, { "docid": "1013b6ae3377c6fd3298615cea6c209e", "score": "0.79933435", "text": "def notFound404Error():\n return Response (HTTP_STATUS_NOT_FOUND,\n '<h1>404 Error : Some crap went down, so just <a href=\"/\">HEAD HOME</a>.</h1>',\n [])", "title": "" }, { "docid": "0186d4d0869f9e10a50d7d16c2825999", "score": "0.79192984", "text": "def custom404handler(err):\n return dict(err=err)", "title": "" }, { "docid": "a4b4fb40801caf6fef9bb5301dec43b2", "score": "0.78825426", "text": "def not_found(error):\n return create_error_response('Not Found', 404);", "title": "" }, { "docid": "349e253b0560331166ed92e9d2597d99", "score": "0.7845269", "text": "def error_404(error):\n return 'Sorry, Nothing at this URL.'", "title": "" }, { "docid": "4a1864cee639b94ad96bf6a21814451a", "score": "0.78424567", "text": "def not_found_error_handler(error):\n return \"This object does not exist\", 404", "title": "" }, { "docid": "2541857d4f34a6173a5af6d93b92272e", "score": "0.7818006", "text": "def error_404(error):\n return 'Sorry, nothing at this URL.'", "title": "" }, { "docid": "2541857d4f34a6173a5af6d93b92272e", "score": "0.7818006", "text": "def error_404(error):\n return 'Sorry, nothing at this URL.'", "title": "" }, { "docid": "2541857d4f34a6173a5af6d93b92272e", "score": "0.7818006", "text": "def error_404(error):\n return 'Sorry, nothing at this URL.'", "title": "" }, { "docid": "2a0debdbe3dd51112b422fa233c42f0b", "score": "0.7816168", "text": "def _custom404(_: HTTPException) -> Response:\n response = make_response(\"\")\n response.mimetype = \"application/json\"\n response.status_code = 404\n\n return response", "title": "" }, { "docid": "5a317ebbf4a601229122291f60be1a94", "score": "0.78139937", "text": "def not_found():\n errors = {\n '_resource': ['not found']\n }\n return error(404, errors)", "title": "" }, { "docid": "508729385c5feda1248a3b0d2fda7866", "score": "0.78001934", "text": "def error_404(error):\n return 'Sorry, nothing at this URL. 
(404)'", "title": "" }, { "docid": "7e5a1756d1a20bfa4078a61a85baeff8", "score": "0.77720594", "text": "def error_404(e):\n return render_template(\"404.html\", exception=e), 404", "title": "" }, { "docid": "6336ebf451b58f0597aedeb0b589bb50", "score": "0.77618265", "text": "def not_found(e):\n return {\"code\": 404, \"msg\": \"not found\"}, 404", "title": "" }, { "docid": "6f4521bb436fe1f114ee98dc96547b0c", "score": "0.7702134", "text": "def route_error_404(error):\n return 'Not found', 404", "title": "" }, { "docid": "59ed669445727cc98cc51f0f534204bd", "score": "0.76248384", "text": "def error_404(request):\n return error_view(request, 404, _(\"Not found\"),\n _(\"Could not found the resource %(res)s.\") % {\"res\": request.path})", "title": "" }, { "docid": "1fb6314415568fc2153627e712991e6b", "score": "0.7620647", "text": "def not_found_404(error):\n return make_response(jsonify({'message': 'Not found, Invalid Parameters or URL'}), 401)", "title": "" }, { "docid": "e02c3301b852fbba67ce4f1ffeb00ba2", "score": "0.76148254", "text": "def error_404(error):\n return render_template(\"404.html\"), 404", "title": "" }, { "docid": "5260266d8691d6aa9b4fdde4e64bb40a", "score": "0.75642604", "text": "def error_404(error):\n \n return render_template('404.html'), 404", "title": "" }, { "docid": "5906bd31e4eda7a68f3cb74fbf998642", "score": "0.75578934", "text": "def error_404(error):\n return render_template('error/404.html'), 404", "title": "" }, { "docid": "91dce3472fc590d4490481c2580041b8", "score": "0.75420976", "text": "def not_found(error):\r\n return make_response(jsonify({'error': 'Not found',\r\n 'status': 'failed'}), 404)", "title": "" }, { "docid": "ddd576c4a99e3a9a9b1dbe9707f8b600", "score": "0.7541908", "text": "def not_found_error(error):\n return render_template('404.html', error=error), 404", "title": "" }, { "docid": "ddd576c4a99e3a9a9b1dbe9707f8b600", "score": "0.7541908", "text": "def not_found_error(error):\n return render_template('404.html', error=error), 404", "title": "" }, { "docid": "c2f735aab0cc843f26a4b84da3d655a4", "score": "0.7540676", "text": "def not_found(err):\n response = {\n 'error_code': 404,\n 'error_message': str(err)\n }\n \n return make_response(jsonify(response), 404)", "title": "" }, { "docid": "31bdf40535a31aac5acdd883a8f3196f", "score": "0.75351083", "text": "def PageNotFound(e): # pylint: disable=unused-argument\n return 'Sorry, Nothing at this URL.', 404", "title": "" }, { "docid": "81effcf22d1bea12c8eafedeb29f6dc0", "score": "0.7529275", "text": "def error_404(err):\n return make_response(jsonify(error=\"oops, I did it again...\"), 404)", "title": "" }, { "docid": "cd1516cff0b9270049bf139d8581e0b9", "score": "0.7517956", "text": "def page_not_found(error):\n return '404 ' + ver_name + \"\\n\", 404", "title": "" }, { "docid": "eae07d644d4b6baf3dfd48451a2331ac", "score": "0.7515539", "text": "def not_found_error(error):\n return render_template(\"404.html\"), 404", "title": "" }, { "docid": "b4fa5157e529b0e020fff409f2ec99aa", "score": "0.75024813", "text": "def error404(error):\n\treturn render_template('error.html',error=error,title=\"Not found\",message=\"Sorry, I couldn't find what you were after.\"), 404", "title": "" }, { "docid": "6084d07b1a17fc281eb8465be95fc502", "score": "0.74954927", "text": "def not_found(error):\n return render_template('404.html'), 404", "title": "" }, { "docid": "6084d07b1a17fc281eb8465be95fc502", "score": "0.74954927", "text": "def not_found(error):\n return render_template('404.html'), 404", "title": "" }, { "docid": 
"088987b0cbdb18537a9c72d5e6f20981", "score": "0.74828064", "text": "def not_found(error):\n return jsonify(error=\"Not found\"), 404", "title": "" }, { "docid": "ef4af437717a7e3e80cd6343c18d5d88", "score": "0.7479199", "text": "def not_found(error): # pylint: disable=unused-argument\n response = jsonify(\n {\n \"success\": False,\n \"error_code\": \"not_found\",\n \"description\": \"The resource could not be found on the server\",\n }\n )\n return response, 404", "title": "" }, { "docid": "3a9ebab27586d53ce4f8db6167a350c0", "score": "0.7451121", "text": "def not_found(error):\n return make_response(jsonify({\"error\": \"Not found\"}), 404)", "title": "" }, { "docid": "ef51e817238f4b751af2777d63e44fb3", "score": "0.744812", "text": "def notFoundErrorHandler(e):\n return getErrorObject(e), 404", "title": "" }, { "docid": "835c24bffd4617ce68b0995e12fdec14", "score": "0.7442597", "text": "def page_not_found(error):\n return 'Esta Pagina no existe', 404", "title": "" }, { "docid": "28ec5bda6f05390a9b11faab7e9ec5fa", "score": "0.7439076", "text": "def not_found(request, error=None):\n logger.warning(\"404 return\")\n message = {\n 'status': 404,\n 'error': str(error),\n 'message': 'Not Found: ' + request.url,\n }\n resp = jsonify(message, status=404)\n # resp.status_code = 404\n return resp", "title": "" }, { "docid": "43ea16e13260de3a4712e86c7d03e554", "score": "0.74300945", "text": "def not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "43ea16e13260de3a4712e86c7d03e554", "score": "0.74300945", "text": "def not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "43ea16e13260de3a4712e86c7d03e554", "score": "0.74300945", "text": "def not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "3b018f6e0355769289b5be93f1563606", "score": "0.7404607", "text": "def not_found():\r\n return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "3c71fc8ce9586a709f73d96d65937ff0", "score": "0.74022955", "text": "def page_not_found(e):\n return 'Nothing at this URL.', 404", "title": "" }, { "docid": "68acbdf3c6da03a2f71734042b788ccc", "score": "0.7397737", "text": "def custom_404(request):\n featured_rep = utils.latest_object_or_none(FeaturedRep)\n t = loader.get_template('404.html')\n return http.HttpResponseNotFound(\n t.render(RequestContext(request, {'request_path': request.path,\n 'featuredrep': featured_rep})))", "title": "" }, { "docid": "e6fc9c0037e787ce1d3dcac97f9c29c7", "score": "0.73958933", "text": "def not_found(error):\n response = {'error': 'Not found'}\n return make_response(jsonify(response), 404)", "title": "" }, { "docid": "b420d7e34c3ffb23576c46bf9a17790f", "score": "0.7385746", "text": "def not_found(error):\n return make_response(jsonify({'error': 'Not found'}, 404))", "title": "" }, { "docid": "1c012ffd5cdf07bc0d972912432ac4d5", "score": "0.73732454", "text": "def not_found(e):\n return render_template(\"error_404.html\")", "title": "" }, { "docid": "6d1b34008e06d6bfe5d2d20d60b4a2d7", "score": "0.73703396", "text": "def not_found_error(error:str) -> Response:\n if wants_json_response():\n return api_error_response(404)\n return render_template('errors/404.html'), 404", "title": "" }, { "docid": "fe19d992e55b65681f5d66e0e0f851e3", "score": "0.73695695", "text": "def page_not_found(e):\n return render_template('errors/404.html'), 404", "title": "" }, { "docid": "3163eda80693a11529797fa303b684a5", "score": 
"0.7366952", "text": "def error_handler(error):\n response = jsonify({\"error\": \"Not found\"})\n return response, 404", "title": "" }, { "docid": "b8412a7706cb20773319ea3ca73bb8fc", "score": "0.73636734", "text": "def handle_error_code_404(error):\n return handle_error_code(404, error)", "title": "" }, { "docid": "74a38674f0df1c5d34176e0cbaa6ed63", "score": "0.73582596", "text": "def error404(request):\n\n\n response = render_to_response('404.html', {},\n context_instance = RC(request))\n response.status_code = 404\n return response", "title": "" }, { "docid": "eadf91a54747ca3b2884a46593e1ec9e", "score": "0.7349892", "text": "def page_not_found(e):\n return 'Nothing to see, please go away.', 404", "title": "" }, { "docid": "74c6dac64c5f14dd5cfd45db53d19e70", "score": "0.73372906", "text": "def not_found(e):\n print('attempt to access missing: {}'.format(e))\n return render_template('404.html'), 404", "title": "" }, { "docid": "ff317d4b870b246c2365d1386e6b25f1", "score": "0.7328981", "text": "def page_not_found(e):\n return '<h1>404</h1><p>The resource could not be found.</p><p>{e}</p>', 404", "title": "" }, { "docid": "b2e9a6afcda9ff23f7c0f99df7d0ef4c", "score": "0.7322948", "text": "def not_found():\n return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "b7152b2bf72fe0b770e3165f5f536c70", "score": "0.73181015", "text": "def page_not_found(e):\n return 'Sorry, Nothing at this URL.'.format(e), 404", "title": "" }, { "docid": "f5776eb4472880be956b4c948e7be8d3", "score": "0.73140246", "text": "def page_404(request):\n return standard_error_page(request, 404, \"404.html\")", "title": "" }, { "docid": "c92af562773003a1dbbc65ceb4d9ca0a", "score": "0.7299257", "text": "def page_not_found(e):\n return \"Sorry, Nothing at this URL.\", 404", "title": "" }, { "docid": "6e505803b97c608f6f74839fde82029c", "score": "0.7298491", "text": "def handle_404_error(_error):\r\n return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "52dd6ff797ca24f4831f1ae714fe4d06", "score": "0.7284485", "text": "def page_not_found(e):\n # note that we set the 404 status explicitly\n return render_template('404.html'), 404", "title": "" }, { "docid": "ce1362965f0cd8a58c76265e1b02dc46", "score": "0.72806025", "text": "def not_found(error):\n\n return make_response(jsonify({\n 'status': 'error',\n 'message': 'not Found'\n }), 404)", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": 
"" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "58f5967ac6a682a182acc675701eb634", "score": "0.72681904", "text": "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "title": "" }, { "docid": "6da670c5ad3b329cadebddfc4f74e522", "score": "0.72626674", "text": "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "title": "" }, { "docid": "6833e1e33c8c162767c57193e43c1b86", "score": "0.7249924", "text": "def not_found(error):\n return render_template('404.html')", "title": "" }, { "docid": "fa2ee317290730f6cc5d23453d8f4945", "score": "0.7247093", "text": "def handle_404_error(_error):\n return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "5ec2e1b8e7e29e0c5ee36ee01c5213dd", "score": "0.72469175", "text": "def page_not_found(e):\n return render_template('404.html'), 404", "title": "" }, { "docid": "5ec2e1b8e7e29e0c5ee36ee01c5213dd", "score": "0.72469175", "text": "def page_not_found(e):\n return render_template('404.html'), 404", "title": "" }, { "docid": "5ec2e1b8e7e29e0c5ee36ee01c5213dd", "score": "0.72469175", "text": "def page_not_found(e):\n return render_template('404.html'), 404", "title": "" }, { "docid": "5ec2e1b8e7e29e0c5ee36ee01c5213dd", "score": "0.72469175", "text": "def page_not_found(e):\n return render_template('404.html'), 404", "title": "" }, { "docid": "0e9ea659385d978e6c351ad0a9bf3245", "score": "0.7244242", "text": "def not_found(error):\n return jsonify(dict(error='The Resource requested is not' +\n ' available')), 404", "title": "" }, { "docid": "b6c1cae23588038bcd515364e32f755f", "score": "0.72424537", "text": "def page_not_found(e):\n return render_template(\"404.html\"), 404", "title": "" }, { "docid": "b6c1cae23588038bcd515364e32f755f", "score": "0.72424537", "text": "def page_not_found(e):\n return render_template(\"404.html\"), 404", "title": "" }, { "docid": "b6c1cae23588038bcd515364e32f755f", "score": "0.72424537", "text": "def page_not_found(e):\n return render_template(\"404.html\"), 404", "title": "" }, { "docid": "17d17ccd48450f84b5e62513fbe9872a", "score": "0.7240599", "text": "def page_not_found(error):\n return jsonify({\"error\": \"Not found\"}), 404", "title": "" }, { "docid": "4c7c6c71a00ac8377c7ff701de897b85", "score": "0.7238573", "text": "def page_not_found(e):\n return jsonify({\"error\": \"Not found\"}), 404", "title": "" } ]
af97e0480981b7f93e21e2b4b8b745de
Copy the configuration directory from keycloak into the temporary storage directory. If an error is raised because the copied directory already exists at the destination, abort the copy and ignore the error.
[ { "docid": "d6f701fa44752d860340bd53e4c4c5bd", "score": "0.64012563", "text": "def copy_base_files(keycloak_path, temp_dir):\n config_src = keycloak_path / \"standalone/configuration\"\n\n if not os.path.exists(config_src):\n raise FileNotFoundError(f\"The configuration file '{config_src}' does not exist\")\n\n keycloak_config = get_keycloak_config_path(temp_dir)\n with suppress(FileExistsError):\n shutil.copytree(config_src, keycloak_config)", "title": "" } ]
[ { "docid": "a96c21963a51716355afa845ded38cad", "score": "0.63141984", "text": "def _copy_setup_storage(self, tmp):\n for d in [ 'setup-storage', 'setup-storage/conf.d' ]:\n for src in glob.glob('%s/*' % d):\n if os.path.isfile(src):\n tmp_src = self._connection._shell.join_path(\n tmp, src)\n display('Copy %s -> %s' % (src, tmp_src))\n self._transfer_file(src, tmp_src)", "title": "" }, { "docid": "9c2d938b3b7a2365a85d1d77bbd0925d", "score": "0.6270966", "text": "def _copy_temp_directory(self):\n\n if os.path.exists(self.paths['output']):\n\n oldfiles = [f for f in os.listdir(self.paths['output'])]\n for oldfile in oldfiles:\n os.remove(os.path.join(self.paths['output'], oldfile))\n\n newfiles = [f for f in os.listdir(self.paths['temp'])]\n for newfile in newfiles:\n shutil.copy2(os.path.join(self.paths['temp'], newfile), self.paths['output'])\n\n else:\n try:\n shutil.copytree(self.paths['temp'], self.paths['output'])\n except shutil.Error as err:\n self.exit(err)\n\n shutil.rmtree(self.paths['temp'])", "title": "" }, { "docid": "8c4fd5263860987d21aee08436110caa", "score": "0.62253904", "text": "def copy_configuration():\n previous = abspath_join(realpath(CURRENT), 'etc')\n cache['previous_symlink_target'] = previous # we may require this later when restoring\n new = abspath_join(APP_ROOT, VERSION, 'etc')\n root_conf = abspath_join(APP_ROOT, 'etc')\n\n if os.path.exists(previous):\n LOG.info('Copying configuration from {0} to {1}. Overwrite existing'.format(previous, new))\n for name in os.listdir(previous):\n name_new = abspath_join(new, name)\n if os.path.exists(name_new):\n rm_r(name_new)\n cp_r(abspath_join(previous, name), name_new)\n\n if os.path.exists(new):\n LOG.info('Copying configuration from {0} to {1}. No overwrite.'.format(new, root_conf))\n for name in os.listdir(new):\n name_prev = abspath_join(root_conf, name)\n if not os.path.exists(name_prev):\n cp_r(abspath_join(new, name), name_prev)", "title": "" }, { "docid": "a5cec2661f8a426c169204a71359a60e", "score": "0.58508366", "text": "def overwrite_dir(source, destination='/opt/ICE/ceph-repo/'):\n if not os.path.exists(os.path.dirname(destination)):\n logger.info('creating destination path: %s' % destination)\n os.makedirs(destination, 0755)\n\n # remove destination to ensure we get a fresh copy\n try:\n shutil.rmtree(destination)\n logger.debug('ensuring destination is new')\n logger.debug('removed destination directory: %s' % destination)\n except OSError:\n pass\n\n # now copy the contents\n shutil.copytree(source, destination)\n logger.debug('copied contents from: %s to %s' % (source, destination))", "title": "" }, { "docid": "cbba667a51aa0066a9560530ce33cadf", "score": "0.5799762", "text": "def copyConfig(self, src, dst):\n if not os.path.isdir(dst):\n raise shutil.Error('Destination is not a directory')\n\n self.copies.extend(self.copyfile('gmakefile.test',dst))\n #newConfigDir=os.path.join(dst,'config') # Am not renaming at present\n #if not os.path.isdir(newConfigDir): os.mkdir(newConfigDir)\n #testConfFiles=\"\".split()\n #for tf in testConfFiles:\n # self.copies.extend(self.copyfile(os.path.join('config',tf),newConfigDir))\n #return", "title": "" }, { "docid": "fa68f34885b85625e230b171842ff0e4", "score": "0.5728927", "text": "def _copy_dir(self, src, dst):\n try:\n shutil.copytree(src, dst)\n except OSError as e:\n if e.errno == errno.EEXIST:\n print(\"Representation dir existed.\")\n else:\n raise OSError(\"An unexpected error occurred.\")", "title": "" }, { "docid": "f25b6c88ac92ef55c071dd38e55ddd5b", "score": 
"0.5706775", "text": "def copy2tmpdir(self):\n for info in self.handle2info.viewvalues():\n if info.embedded:\n source = info.filename\n bname = osp.basename(info.filename)\n dest = osp.join(self.model.tmpdir, bname) # pragma pylint: disable=no-member\n if source != dest:\n copy_file(source, dest)\n info.filename = dest", "title": "" }, { "docid": "1432ab61a7a41d8578fc4b475c7fcf46", "score": "0.5687538", "text": "def test_config_in_dest(self):\n prep.create_default_config(self.dest_root)\n with assert_raises(prep.InvalidFoldersException):\n prep.start_backup(self.source_root, self.dest_root)", "title": "" }, { "docid": "27531583987656e70a2d5d9b374569ac", "score": "0.568603", "text": "def _transfer_config(self, config_dirs):\n for c in config_dirs:\n container = c[0]\n from_dir = c[1]\n to_dir = c[2]\n self.docker.copy([container], from_dir, to_dir)\n logging.warning(\"transferred config %s -> %s\" % (from_dir, to_dir))", "title": "" }, { "docid": "50ce78cf3151be8713c00745508b361f", "score": "0.56654745", "text": "def auto_copy(config):\n # only one instance running at a time\n try:\n my_lock = Lock()\n except CouldNotAcquireLockException:\n return\n if not sanity_checks_pass(config, my_lock):\n return\n ###\n # Action\n ###\n try:\n media_type = determine_media_type(config)\n if media_type == 'VIDEO_DVD':\n rip_large_tracks(config)\n elif media_type == 'DATA':\n copy_large_files(config)\n elif media_type == 'AUDIO':\n rip_audio_cd(config)\n else:\n LOGGER.warn('Could not determine media type')\n # eject when done\n LOGGER.info('All tasks finished, ejecting')\n subprocess.call(['eject'], stdout=DEV_ZERO, stderr=DEV_ZERO)\n except Exception:\n LOGGER.warn('Something went wrong, ejecting anyway')\n subprocess.call(['eject'], stdout=DEV_ZERO, stderr=DEV_ZERO)\n finally:\n LOGGER.debug('Explicitly releasing lock in finally block')\n my_lock.release_lock(None, None)\n subprocess.call(['eject'], stdout=DEV_ZERO, stderr=DEV_ZERO)", "title": "" }, { "docid": "0cd33f7b871b3ceccfaf139d7cf7a63c", "score": "0.5656778", "text": "def _copy_workspace(self):\r\n if self.cfg.remote_workspace:\r\n self._workspace_paths['remote'] = self.cfg.remote_workspace\r\n return\r\n self._transfer_data(\r\n source=self._workspace_paths['local'],\r\n target='{}@{}:{}'.format(\r\n self._user, self.cfg.index, self._remote_testplan_path))\r\n self._workspace_paths['remote'] = '{}/{}'.format(\r\n self._remote_testplan_path,\r\n self._workspace_paths['local'].split(os.sep)[-1])", "title": "" }, { "docid": "c3ad9668d38eb4410d10b033b3185af0", "score": "0.56527114", "text": "def test_config_copy_error(tmp_path, keep_cwd, monkeypatch: MonkeyPatch):\n os.chdir(tmp_path)\n config_dir = pathlib.Path(const.TRESTLE_CONFIG_DIR)\n config_dir.mkdir()\n config_file = config_dir / const.TRESTLE_CONFIG_FILE\n config_file.touch()\n config_file.chmod(stat.S_IREAD)\n testargs = ['trestle', 'init']\n monkeypatch.setattr(sys, 'argv', testargs)\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n cli.run()\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 1\n for directory in const.MODEL_DIR_LIST:\n assert os.path.isdir(directory)\n assert os.path.isdir(os.path.join(const.TRESTLE_DIST_DIR, directory))\n assert os.path.isdir(const.TRESTLE_CONFIG_DIR)\n assert os.path.isfile(os.path.join(const.TRESTLE_CONFIG_DIR, const.TRESTLE_CONFIG_FILE))\n assert os.stat(os.path.join(const.TRESTLE_CONFIG_DIR, const.TRESTLE_CONFIG_FILE)).st_size == 0", "title": "" }, { "docid": "73b6ea84a889bd4cc55712e96bc75810", "score": 
"0.5651249", "text": "def test_invalid_destination_directory(self):\n if not os.path.isdir(MOUNT_POINT):\n return self.skipTest(\"Skipping test because %s doesn't exist!\", MOUNT_POINT)\n with prepared_image_file():\n program = RsyncSystemBackup(\n crypto_device=CRYPTO_NAME,\n destination='/some/random/directory',\n mount_point=MOUNT_POINT,\n notifications_enabled=False,\n )\n self.assertRaises(InvalidDestinationDirectory, program.transfer_changes)", "title": "" }, { "docid": "a0dfae4450489dd7a691c7817b987d4f", "score": "0.56407225", "text": "def _copy_dir_and_preserve_ownership(src: Path, dst: Path, verbose: bool) -> None:\n run_command('cp -prv {src} {dst}'.format(src=src, dst=dst), verbose)", "title": "" }, { "docid": "d1b8e3193acc962def6a80fc167fc9af", "score": "0.5634088", "text": "def create_backup():\n shutil.copyfile(LOG_CONFIGS_PATH, LOG_CONFIGS_BAK_PATH)", "title": "" }, { "docid": "cc7386f15b55f2271149fdbd958ed28e", "score": "0.56246984", "text": "def copyConfigFile():\n if (OUTPUT_DIR is None) or (not os.path.isdir(OUTPUT_DIR)):\n print('#### ERROR ####')\n print(\"Library folder doesn't exist, cannot copy configuration file\")\n print('###############')\n return\n\n # Copy the LVGL config file to its dependency folder for building\n SOURCE_FILE = os.path.join(DIR, CONFIG_FILE)\n DEST_FILE = os.path.join(OUTPUT_DIR, CONFIG_FILE)\n\n if not os.path.isfile(SOURCE_FILE):\n print('#### ERROR ####')\n print(\"LVGL Configuration file not present.\")\n print(\"Copy the conf_template file from {}/{}/lvgl to {}\".format(OUTPUT_DIR, ENV_FOLDER, DIR))\n print('###############')\n return\n\n print('Copying LVGL Config file to Libdeps folder for compile...')\n\n try:\n shutil.copy(SOURCE_FILE, DEST_FILE)\n except Error:\n print('Unable to copy LVGL config automatically')", "title": "" }, { "docid": "678487b9e2afbc3a4da91741f769db44", "score": "0.5623803", "text": "def define_config(duthost, template_src_path, template_dst_path):\n duthost.shell(\"mkdir -p {}\".format(DUT_TMP_DIR))\n duthost.copy(src=template_src_path, dest=template_dst_path)", "title": "" }, { "docid": "21d21ddaec37b1abf46037ddec125f79", "score": "0.55499065", "text": "def test_copy(self):\n self.test_ok = False\n self.tfile = make_temp_file(\"dummy\")\n # add leading '/' like clush so that WorkerTree knows it's a dir\n task_self().copy(self.tfile.name,\n join(dirname(self.tfile.name), ''),\n \"n60\")\n task_self().resume()\n self.assertTrue(self.test_ok)", "title": "" }, { "docid": "5974a3f4f0e01ff92c11a932fda656d5", "score": "0.55339426", "text": "def copy(ctx, host, directory):\n if host is None:\n host = ctx.obj['host']\n verbose = ctx.obj['verbose']\n\n print \"> copying directory %s\" % directory\n cp(host, directory, \"~/\", directory=True, verbose=verbose)", "title": "" }, { "docid": "ba2963907e53a2cec5beced3a4c46fc8", "score": "0.55250895", "text": "def _ensure_temp(self):\n\n temp = packages_path(TEMP_PATH)\n if not exists(temp):\n makedirs(temp)", "title": "" }, { "docid": "a51fdf689d50cef45a5a874ad0338e60", "score": "0.55214685", "text": "def _copy(src, dst, removesrc):\n try:\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n if removesrc:\n shutil.move(src, dst)\n else:\n shutil.copy2(src, dst)\n except:\n _callback('warning', msg=\"Failed to copy {} to {}\".format(src, dst))", "title": "" }, { "docid": "cceff743569958d9f310ef14dc3f4e1a", "score": "0.5517826", "text": "def test_copy_dir(self):\n # TODO: Test that all subfiles and folders are copied.\n test_dir = 'test'\n test_path = 
os.path.join(DIR_TO_BACKUP, test_dir)\n os.mkdir(test_path)\n copy_dir_if_valid(test_path, BACKUP_DIR)\n assert os.path.isdir(test_path)\n assert os.path.isdir(os.path.join(BACKUP_DIR, test_dir))", "title": "" }, { "docid": "16cd1a9da26eace9452f4df4874107dd", "score": "0.5512195", "text": "def copy_config(self, host, cfg_path):\n config_file = get_param('LYDIAN_HOSTPREP_CONFIG') or cfg_path\n if not config_file:\n config_file = os.path.join(self.data_dir, self.CONFIG_FILE)\n\n host.req_call('mkdir -p /etc/lydian')\n host.put_file(config_file, self.CONFIG_DEST_PATH)", "title": "" }, { "docid": "5bbe43e7232abd465678ba9cdb204b51", "score": "0.5462842", "text": "def prepare(self):\n if self.copy_from_directory:\n src = self.copy_from_directory\n if self.working_directory:\n dst = self.working_directory\n else:\n dst = None\n return self.copy(src, dst)\n else:\n return False", "title": "" }, { "docid": "c95449dc000b6df22c2f03a94005c258", "score": "0.54587066", "text": "def prepare_first_backup(source_root, dest_root):\n logger.info('Looks like this is the first using the source.',\n extra={'color':'g'})\n if listdir(dest_root):\n logger.warning('Destination is not empty. By proceeding '\n 'you risk overwriting the contents.')\n if not query_yes_no('Would you still like to proceed?'):\n raise InvalidFoldersException('Destination was not accepted.')\n\n config = create_default_config(source_root)\n\n return config", "title": "" }, { "docid": "86878f9900baf396d6730159bf963978", "score": "0.54283434", "text": "def _safe_move_tmp_to_folder(temp_hed_xml_file, dest_filename):\n _, temp_xml_file = os.path.split(temp_hed_xml_file)\n dest_folder, _ = os.path.split(dest_filename)\n\n temp_filename_in_cache = os.path.join(dest_folder, temp_xml_file)\n copyfile(temp_hed_xml_file, temp_filename_in_cache)\n try:\n os.replace(temp_filename_in_cache, dest_filename)\n except OSError:\n os.remove(temp_filename_in_cache)\n return None\n\n return dest_filename", "title": "" }, { "docid": "f73ac54eb7145cf95a8cbc2ef2c822a5", "score": "0.5427591", "text": "def test_rsync_module_path_as_destination(self):\n with TemporaryDirectory() as temporary_directory:\n try:\n os.environ['RSYNC_MODULE_PATH'] = temporary_directory\n program = RsyncSystemBackup()\n assert program.destination.directory == temporary_directory\n assert not program.destination.hostname\n assert not program.destination.username\n assert not program.destination.module\n finally:\n os.environ.pop('RSYNC_MODULE_PATH')", "title": "" }, { "docid": "cc85a7a9f4f330b52a4e18b8df0a2691", "score": "0.5420833", "text": "def _duplicate_config_file(dst_config_file, test_datadir):\n src_config_file_path = os.path.join(str(test_datadir), \"pcluster.config.yaml\")\n dst_config_file_path = os.path.join(str(test_datadir), dst_config_file)\n shutil.copy(src_config_file_path, dst_config_file_path)", "title": "" }, { "docid": "f1e38b0f0882cb164144acd2e5897617", "score": "0.54046905", "text": "def upload_dir(ctx, source: DcmDir, dest: Orthanc, fkey, salt):\n handle_upload_dir(source, dest, fkey=fkey, anon_salt=salt)", "title": "" }, { "docid": "ddf202a76e2eae44599148d5e2a45ef1", "score": "0.53875875", "text": "def delete_and_copy_to_dest_dir(src: str, dest: str):\n del_directory(dest)\n time.sleep(0.5)\n print(f\"Copying {src} dir and contents to {dest}\")\n shutil.copytree(src, dest)", "title": "" }, { "docid": "582d827f8b09b951e0b57d60f920f449", "score": "0.5382395", "text": "def atomic_copy(orig, dest):\n\tglobal copyfun\n\ttmp = dest + '.tmp'\n\tup = 
os.path.dirname(dest)\n\ttry:\n\t\tos.makedirs(up)\n\texcept OSError:\n\t\tpass\n\n\ttry:\n\t\tcopyfun(orig, tmp)\n\texcept OSError as e:\n\t\tif e.errno == errno.EXDEV:\n\t\t\tcopyfun = shutil.copy2\n\t\t\tcopyfun(orig, tmp)\n\t\telse:\n\t\t\traise\n\tos.rename(tmp, dest)", "title": "" }, { "docid": "8db43b96ecfbaf22904fa0bf6e446483", "score": "0.53647834", "text": "def backup_tempest_config(conf_file):\n if not os.path.exists(TEMPEST_RESULTS_DIR):\n os.makedirs(TEMPEST_RESULTS_DIR)\n shutil.copyfile(conf_file,\n os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf'))", "title": "" }, { "docid": "d252826f73c9452c013fd3bc36b60250", "score": "0.5363531", "text": "def copy(self, **kwargs) -> None:\n dest = self.get_destination(**kwargs)\n self.check_conflicts(self.local_path)\n source_path = f\"{self.local_path}/{kwargs.get('sub_path', '')}\".rstrip(\"/\")\n shutil.copytree(source_path, dest, dirs_exist_ok=True)", "title": "" }, { "docid": "2b8902e09ad6ef367de704a0d624c2ee", "score": "0.5362928", "text": "def test_copy_dir_invalid(self, invalid):\n copy_dir_if_valid(invalid, DIR_TO_BACKUP)\n assert not os.path.isdir(os.path.join(BACKUP_DIR, invalid))", "title": "" }, { "docid": "3d07b2d687fc05d322c4275faab7e850", "score": "0.5357373", "text": "def copy_directory(self, source: PATH_TYPE, destination: PATH_TYPE) -> None:\n src = Path(source)\n dst = Path(destination)\n\n if not src.is_dir():\n raise NotADirectoryError(f\"Source {src} is not a directory\")\n if dst.exists():\n raise FileExistsError(f\"Destination {dst} already exists\")\n\n shutil.copytree(src, dst)", "title": "" }, { "docid": "cdae0cdd5ca83e8a976c713bb5035008", "score": "0.53564405", "text": "def copy_directory_remote(self, source, dst, dst_ip = None):\n import os\n reload(os)\n\n if not dst_ip:\n dst_ip = self._dst_ip\n source = self._wrap(os.path.abspath(source))\n dst = self._wrap(dst.replace('\\\\', '\\\\\\\\'))\n\n return self._submit_request('FS', 'COPY DIRECTORY %s TODIRECTORY %s TOMACHINE %s RECURSE KEEPEMPTYDIRECTORIES'\n % (source, dst, self._wrap(dst_ip)))", "title": "" }, { "docid": "99435260173b832cd8298028037fb381", "score": "0.5355885", "text": "def temp_volume(target):\n tmp = tempdir()\n\n os.makedirs(os.path.join(tmp, 'output'))\n copy(target, os.path.join(tmp, os.path.basename(target)))\n\n return tmp", "title": "" }, { "docid": "0a9cab5a6d97c3d8e48f5ac36f5129a4", "score": "0.53547", "text": "def copy_directory(source, dest):\n if os.path.exists(dest):\n raise ValueError(dest + \" already exists!\")\n shutil.copytree(source, dest)", "title": "" }, { "docid": "690647531b4014c42cb6dc12a3760a0c", "score": "0.5349814", "text": "def copy_dir_helper(src, dst, force=False):\n if not os.path.isdir(src):\n raise CommandError(\"Source directory %s does not exists\" % src)\n if os.path.exists(dst):\n if force:\n logging.debug('Force mode was turned on. 
Removing %s' % dst)\n shutil.rmtree(dst)\n else:\n raise CommandError(\"Target %s already exists\" % dst)\n copytree(src, dst, draw_pbar=True)\n logging.info(\"Copied %s into %s\" % (src, dst))\n make_writeable(dst)", "title": "" }, { "docid": "c7f1506165e78e369850ccb6e6a2fd87", "score": "0.5349291", "text": "def test_config_in_both(self):\n prep.create_default_config(self.source_root)\n config_file = join(self.source_root, prep.CONFIG_FILENAME)\n shutil.copy(config_file, self.dest_root)\n prep.start_backup(self.source_root, self.dest_root)", "title": "" }, { "docid": "d12b9e563ceeb23e126e4a679d438bbb", "score": "0.53378505", "text": "def auto_copy(wrap_from, wrap_to, copy_subdirs=True, exclude=[]):\n\n # unwrap input arguments to machine names and directory paths\n [machine_from, path_from] = wrap_from\n [machine_to, path_to] = wrap_to \n \n try:\n with labrad.connect(machine_from) as manager_from:\n try:\n dv_from = manager_from.data_vault\n except NotFoundError: # can't connect to dv_from\n print('Unable to connect to data_vault through LabRAD manager on' + \n str(machine_from) + '.')\n print(\"Check that the data_vault is running and connected to the manager\")\n return\n \n try:\n with labrad.connect(machine_to) as manager_to:\n try:\n dv_to = manager_to.data_vault\n except NotFoundError: # can't connect to dv_to\n print('Unable to connect to data_vault through LabRAD manager on' + \n str(machine_to) + '.')\n print('Check that the data_vault is running and connected to the manager')\n return\n \n # navigate to source\n current_source_directory = navigate(dv_from, path_from)\n if not current_source_directory == path_from:\n print \"Source directory '\" + str(path_from) + \"' not found.\"\n print \"...\"\n print \"Copy operation aborted\"\n return dv_from\n \n # navigate to destination\n current_dest_directory = navigate(dv_to, path_to)\n if not current_dest_directory == path_to:\n print \"Destination directory '\" + str(path_to) + \"' not found.\"\n if query_yes_no(\"Would you like to create it?\"):\n print \"...\"\n navigate(dv_to, path_to, write=True)\n else:\n print \"...\"\n print \"Copy operation aborted.\"\n return\n \n if copy_subdirs:\n \"\"\"\n out = {\n num_datasets_present\n num_datasets_included\n num_datasets_created\n num_subdirs_present\n num_subdirs_included\n num_subdirs_created}\n \"\"\"\n out = copy_tree(dv_from, dv_to, write=False, exclude=exclude)\n print(\"Ready to copy \" + str(out[1]) + \" of \" + str(out[0]) + \" datasets\")\n print(\"including \" + str(out[4]) + \" of \" + str(out[3]) + \" subdirectories\")\n print(\"from \" + str(path_from) + \" via machine '\" + str(machine_from) + \"'\")\n print(\"to \" + str(path_to) + \" via machine '\" + str(machine_to) + \"'.\")\n print(str(out[5]) + \" directories will need to be created.\")\n print \"\"\n if query_yes_no(\"Would you like to continue?\"):\n print \"...\"\n out = copy_tree(dv_from, dv_to, exclude=exclude)\n print \"Created \" + str(out[5]) + \" new subdirectories.\"\n print \"Created \" + str(out[2]) + \" new datasets.\"\n return\n else:\n print \"...\"\n print \"Copy operation aborted.\"\n return\n \n else:\n \"\"\"\n out = {\n num_datasets_present\n num_datasets_included\n num_datasets_created}\n \"\"\"\n out = recreate_datasets(dv_from, dv_to, write=False, exclude=exclude)\n print(\"Ready to copy\" + str(out[1]) + \" of \" + str(out[0]) + \" datasets \")\n print(\"from \" + str(path_from) + \" via machine '\" + str(machine_from))\n print(\"to \" + str(path_to) + \" via machine '\" + str(machine_to) + 
\"'.\")\n print(\"\")\n if query_yes_no(\"Would you like to continue?\"):\n print \"...\"\n out = recreate_datasets(dv_from, dv_to, exclude=exclude)\n print \"Created \" + str(out[2]) + \" new datasets.\"\n return\n else:\n print \"...\"\n print \"Copy operation aborted.\"\n return\n \n except TypeError: # can't connect to machine_to\n print('Unable to connect to LabRAD manager on \"' + \n str(machine_to) + '\".\\nCheck that we are on the LabRAD whitelist for \"' + \n str(machine_to) + '\".')\n return\n \n except TypeError: # can't connect to machine_from\n print('Unable to connect to LabRAD manager on \"' + \n str(machine_from) + '\".\\nCheck that we are on the LabRAD whitelist for \"' + \n str(machine_from) + '\".')\n return", "title": "" }, { "docid": "8dc94a3b5e79f2ffd0d10afe5ea1c9f2", "score": "0.5330625", "text": "def CopyDirectory(context, srcDir, destDir):\n instDestDir = os.path.join(context.libInstDir, destDir)\n if os.path.isdir(instDestDir):\n shutil.rmtree(instDestDir)\n\n PrintCommandOutput(\"Copying {srcDir} to {destDir}\\n\"\n .format(srcDir=srcDir, destDir=instDestDir))\n shutil.copytree(srcDir, instDestDir)", "title": "" }, { "docid": "24af0146ed0816c062a329b76a897385", "score": "0.5317605", "text": "def pre_command_copy_file(config, source_testcase, source_directory, destination_testcase, destination, job_id, tmp_logs):\n\n source_testcase = os.path.join(str(os.getcwd()), source_testcase)\n\n if not os.path.isdir(source_testcase):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(source_testcase))\n\n if not os.path.isdir(destination_testcase):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(destination_testcase))\n\n source = os.path.join(source_testcase, source_directory)\n target = os.path.join(destination_testcase, destination)\n\n # The target without the potential executable.\n target_base = '/'.join(target.split('/')[:-1])\n\n # If the source is a directory, we copy the entire thing into the\n # target.\n if os.path.isdir(source):\n # We must copy from directory to directory\n copy_contents_into(config, job_id, source, target, tmp_logs)\n\n # Separate ** and * for simplicity.\n elif '**' not in source:\n # Grab all of the files that match the pattern\n files = glob.glob(source, recursive=True)\n\n # The target base must exist in order for a copy to occur\n if target_base != '' and not os.path.isdir(target_base):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(target_base))\n # Copy every file. 
This works whether target exists (is a directory) or does not (is a target file)\n for file in files:\n try:\n shutil.copy(file, target)\n except Exception as e:\n traceback.print_exc()\n config.logger.log_message(\n f\"Pre Command could not perform copy: {file} -> {target}\",\n job_id=job_id\n )\n else:\n # Everything after the first **.\n source_base = source[:source.find('**')]\n # The full target must exist (we must be moving to a directory.)\n if not os.path.isdir(target):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(target))\n\n # Grab all of the files that match the pattern.\n files = glob.glob(source, recursive=True)\n\n # For every file matched\n for file_source in files:\n file_target = os.path.join(target, file_source.replace(source_base, ''))\n # Remove the file path.\n file_target_dir = '/'.join(file_target.split('/')[:-1])\n # If the target directory doesn't exist, create it.\n if not os.path.isdir(file_target_dir):\n os.makedirs(file_target_dir)\n # Copy.\n try:\n shutil.copy(file_source, file_target)\n except Exception as e:\n traceback.print_exc()\n config.logger.log_message(\n f\"Pre Command could not perform copy: {file_source} -> {file_target}\",\n job_id=job_id\n )", "title": "" }, { "docid": "426e25640010d64c35e9b84e00b4a75e", "score": "0.531613", "text": "def copyTenants():\n\n print(\"Ready to copy tenants to new version of APIM...\")\n src_dir = '%s/wso2am-%s/repository/tenants/' % (APIM_HOME_PATH, OLD_VERSION)\n src_seq = '%s/wso2am-%s/repository/deployment/server/synapse-configs/default/sequences/' % (\n APIM_HOME_PATH, OLD_VERSION)\n dst_dir = '%s/wso2am-%s/repository/tenants/' % (APIM_HOME_PATH, NEW_VERSION)\n\n if os.path.isdir('%s/1' % src_dir):\n print(\"Please wait, moving tenants...\")\n copydir(src_dir, dst_dir)\n\n # Since there is no copy method in shutil module to copy selected files from except ignoring and there are small number of files to move; had to copy one by one using defined copydir method\n for item in os.listdir(src_dir):\n d = os.path.join(dst_dir, item)\n copydir('%s/_auth_failure_handler_.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n copydir('%s/_cors_request_handler_.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n copydir('%s/fault.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n copydir('%s/main.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n copydir('%s/_production_key_error_.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n copydir('%s/_resource_mismatch_handler_.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n copydir('%s/_sandbox_key_error_.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n copydir('%s/_throttle_out_handler_.xml' % src_seq, '%s/synapse-configs/default/sequences' % d)\n\n print(\"Tenants configuration is successful!\")\n else:\n print(\"No tenants to move!!!\")", "title": "" }, { "docid": "7a6aa9f47ffaecf6c6c99f0dc0810707", "score": "0.5305589", "text": "def copy_templates(dest):\n \n shutil.copytree(find_template_dir(), dest)\n \n sys.exit(0)", "title": "" }, { "docid": "ecc2f4f6cc2068c7543bc90960c52a9d", "score": "0.52936846", "text": "def local_copy_dir(self, destination: Union[str, Path]) -> True or Exception:\n if not destination.exists():\n try:\n copytree(src=self.directory, dst=destination)\n return True\n except PermissionError as e:\n if self.logger:\n self.logger.warning(f\"You don't have permission to {self.__str__()}, \"\n f\"please try with the right permissions\")\n return e\n 
except FileNotFoundError as e:\n if self.logger:\n self.logger.warning(f\" {self.__str__()} file no longer exists or never did, please try again\")\n return e\n except NotADirectoryError as e:\n if self.logger:\n self.logger.warning(f\" {self.__str__()} is a file and not a directory, please try again\")\n return e\n else:\n if self.logger:\n self.logger.warning(\"Directory already exists, select a new location\")\n return IsADirectoryError", "title": "" }, { "docid": "17edf3a2fe3327bbff40043a42ecf2a6", "score": "0.52921844", "text": "def test_config_in_source_non_empty_dest(self):\n prep.create_default_config(self.source_root)\n dest_file = join(self.dest_root, 'file')\n with open(dest_file, 'w') as f:\n f.write('foo')\n with patch.object(prep, 'query_yes_no', return_value=False):\n with assert_raises(prep.InvalidFoldersException):\n prep.start_backup(self.source_root, self.dest_root)", "title": "" }, { "docid": "e1da417214c3a999c61c9f9696a724ea", "score": "0.52795696", "text": "async def temp_dir(hass: HomeAssistant) -> AsyncGenerator[str, None]:\n with TemporaryDirectory() as tmpdirname:\n target_dir = Path(tmpdirname) / \"another_subdir\"\n target_dir.mkdir()\n await async_process_ha_core_config(\n hass, {\"media_dirs\": {\"test_dir\": str(target_dir)}}\n )\n assert await async_setup_component(hass, const.DOMAIN, {})\n\n yield str(target_dir)", "title": "" }, { "docid": "e76ed88d5aaccd1f7b61d3ce7838bbe5", "score": "0.52768165", "text": "def copy_and_create_dir(src: str, dest: str):\n try:\n shutil.copyfile(src, dest)\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n shutil.copyfile(src, dest)", "title": "" }, { "docid": "21c0cb46d0a902fa4f64d91c741492c1", "score": "0.527248", "text": "def _prepare_dest(source_root, dest_root):\n source_base = basename(source_root)\n dest_base = basename(dest_root)\n\n if source_base == dest_base:\n return dest_root\n\n dest_root = join(dest_root, source_base)\n\n if exists(dest_root):\n if not isdir(dest_root):\n raise InvalidFoldersException('Destination contains an object of '\n 'the same name as source, but it is '\n 'not a folder.')\n else:\n logger.info('Creating folder {}'.format(dest_root))\n mkdir(dest_root)\n\n return dest_root", "title": "" }, { "docid": "b91b17d423326ff82c4378224a917377", "score": "0.5268517", "text": "def copy(self):\n shutil.rmtree(self.root, ignore_errors=True)\n mkdir_p(self.root)\n marker = self.source.encode('utf-8')\n for src in self.package_config.files():\n assert src.startswith(self.source)\n relative = src[len(self.source)+1:]\n assert not os.path.isabs(relative)\n dst = os.path.join(self.staging, self.config.name, relative)\n if os.path.islink(src):\n log.debug('Symlink {0}'.format(relative))\n mkdir_p(os.path.dirname(dst))\n linkto = os.readlink(src)\n if not os.path.isabs(linkto): # make linkto absolute path\n linkto = os.path.join(os.path.dirname(src), linkto)\n relative_dir = os.path.relpath(\n os.path.dirname(linkto),\n os.path.dirname(src)\n )\n relative_file = os.path.join(relative_dir, os.path.basename(linkto))\n os.symlink(relative_file, dst)\n elif os.path.isdir(src):\n log.debug('Directory {0}'.format(relative))\n mkdir_p(dst)\n self.copy_mtime(src, dst)\n elif os.path.isfile(src):\n f = InstallFile(src, dst)\n f.copy()\n if self.package_config.rewrite_path_filter.match(relative) == True:\n log.debug('Copying {0}, checking path'.format(relative))\n patch = f.find_patch(marker)\n if patch:\n self._patch[relative] = patch\n else:\n 
log.debug('Copying {0}, ignoring path'.format(relative))\n self.copy_mtime(src, dst)\n else:\n raise ValueError('{0} is not a file, symlink, or directory'.format(relative))\n self.print_patch_summary()\n return self", "title": "" }, { "docid": "9b28a9f7405a8700cd61e2a66fb76a7e", "score": "0.52573967", "text": "def GsutilCopySilent(src, dst):\n env = os.environ.copy()\n env['PATH'] = '/b/build/scripts/slave' + os.pathsep + env['PATH']\n # Retry to compensate for storage flake.\n for attempt in range(3):\n process = subprocess.Popen(\n ['gsutil', 'cp', src, dst],\n env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n process_stdout, process_stderr = process.communicate()\n if process.returncode == 0:\n return\n time.sleep(math.pow(2, attempt + 1) * 5)\n raise DownloadError(\n 'Unexpected return code: %s\\n'\n '>>> STDOUT\\n%s\\n'\n '>>> STDERR\\n%s\\n' % (\n process.returncode, process_stdout, process_stderr))", "title": "" }, { "docid": "fadc080ae256d00b795ad2803395ee11", "score": "0.52542746", "text": "def copy_config(conf):\n output_dir = conf.output_dir\n config_name = os.path.basename(conf['config_path'])\n output_path = os.path.join(output_dir, config_name)\n with open(output_path, 'w') as fp:\n out_cfg = conf.copy()\n if 'manifest' in out_cfg:\n del out_cfg['manifest']\n json.dump(out_cfg, fp, indent=2)", "title": "" }, { "docid": "a12d1778841f6e9b24733630b885a59e", "score": "0.5251045", "text": "def duplicate_folder(\n src_path: str,\n target_path: str,\n preserve_target: bool = False,\n exit_on_error: bool = True,\n) -> bool:\n return copy(src_path, target_path, preserve_target, exit_on_error)", "title": "" }, { "docid": "9829050c6e7f2e39bd92edb03fa814c3", "score": "0.52461445", "text": "def _copy(self):\n\n self._log.info('-' * 50)\n self._log.info(\"COPYING\")\n\n build_state = {'status': \"PASS\"}\n if self._build_state_file.exists():\n with self._build_state_file.open() as state:\n build_state = json.load(state)\n\n if build_state['status'] == \"FAIL\":\n build_dir = get_build_dir(self._manifest, self._component.name, is_failed=True)\n build_url = get_build_url(self._manifest, self._component.name, is_failed=True)\n else:\n build_dir = get_build_dir(self._manifest, self._component.name)\n build_url = get_build_url(self._manifest, self._component.name)\n\n build_root_dir = get_build_dir(self._manifest, self._component.name, link_type='root')\n rotate_dir(build_dir)\n\n self._log.info('Copy to %s', build_dir)\n self._log.info('Artifacts are available by: %s', build_url)\n\n last_build_path = build_dir.relative_to(build_root_dir)\n last_build_file = build_dir.parent.parent / f'last_build_{self._component.build_info.product_type}'\n is_latest_build = self._is_latest_revision(last_build_file)\n\n # Workaround for copying to samba share on Linux\n # to avoid exceptions while setting Linux permissions.\n _orig_copystat = shutil.copystat\n shutil.copystat = lambda x, y, follow_symlinks=True: x\n shutil.copytree(self._options['PACK_DIR'], build_dir)\n shutil.copystat = _orig_copystat\n\n if not self._run_build_config_actions(Stage.COPY.value):\n return False\n\n if build_state['status'] == \"PASS\" and is_latest_build:\n last_build_file.write_text(str(last_build_path))\n\n return True", "title": "" }, { "docid": "235f23817ac229093d9a5e14cb4a115f", "score": "0.5245018", "text": "def _reset_file(self):\n if self.client == \"test\":\n shutil.copy(self.backup_path, self.remote_path)\n else:\n copy_files_to(self.address, self.client, self.username,\n self.password, self.port, 
self.backup_path,\n self.remote_path, self.limit, self.log_filename,\n self.verbose, self.timeout)", "title": "" }, { "docid": "1ef059d3301b1ec6da4717ba868ab522", "score": "0.524043", "text": "def _copy_config(config=CONFIG_FILE,\n install_dir=INSTALL_DIR, \n hypertable_version=HYPERTABLE_VERSION):\n local('rsync -e \"ssh -o StrictHostKeyChecking=no\" %s %s/%s/conf/' \n %(config, install_dir, hypertable_version)\n )", "title": "" }, { "docid": "5d196f6a8ee3aaa16b58ad02d8830363", "score": "0.5233731", "text": "def copy_crash_if_needed(self, testcase_path):\n logs.log('Copying crash directory from device to local.')\n\n # Copy the crashes dir from device to local.\n local_directory = os.path.join(self.instance_directory, 'crashes')\n device_directory = android.util.get_device_path(local_directory)\n\n shell.remove_directory(local_directory, recreate=True)\n android.adb.copy_remote_directory_to_local(device_directory,\n local_directory)\n\n super().copy_crash_if_needed(testcase_path)", "title": "" }, { "docid": "009fcaec3c20740129c9874d263134dc", "score": "0.5231848", "text": "def copy(ctx, src, dest):\n auth_provider = ctx.obj[\"auth_factory\"].get()\n wss = Gen3WsStorage(auth_provider)\n wss.copy(src, dest)", "title": "" }, { "docid": "2143165cbd310d24344047d4587dc95c", "score": "0.52311647", "text": "def __enter__(self):\n self.dut.command(\n 'mv -f {} {}'.format(self.origin_config, self.backup_config))\n self.dut.copy(src=os.path.join(FILES_DIR, self.src),\n dest=self.origin_config)", "title": "" }, { "docid": "be6b1d071e35cb94fd090ea401adcd6b", "score": "0.52279425", "text": "def prefill_serverdir(server_options):\n serverdir_new = server_options['serverdir']\n\n if 'master-url' in server_options:\n return # always has to be a fresh sync\n elif os.path.exists(serverdir_cache):\n shutil.rmtree(serverdir_new)\n shutil.copytree(serverdir_cache, serverdir_new)\n else:\n with DevpiServer(server_options):\n shutil.copytree(serverdir_new, serverdir_cache)", "title": "" }, { "docid": "33785f08b43960c7c5284353621d043b", "score": "0.5219471", "text": "def copy_one(src, dest):\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n dest.parent.mkdir(parents=True)\n\n if src.is_dir():\n shutil.copytree(src, dest)\n else:\n shutil.copy2(src, dest)", "title": "" }, { "docid": "920ce595708b9375e4aef5661f86946e", "score": "0.520879", "text": "def copyDir(srcDir, dstDir):\n try:\n if not os.path.isdir(dstDir):\n os.makedirs(dstDir)\n names = os.listdir(srcDir)\n for name in names:\n srcPath = os.path.join(srcDir, name)\n if os.path.isfile(srcPath):\n shutil.copy2(srcPath, os.path.join(dstDir, name))\n except (IOError, OSError) as e:\n if str(e).find('Permission denied') >= 0:\n print('Error - must be root to install files')\n cleanSource()\n sys.exit(4)\n raise", "title": "" }, { "docid": "72dbe0ce13602f383727741f989a764b", "score": "0.5199466", "text": "def prepare(self):\n if self.copy_from_directory:\n if self.working_directory != self.bind_path and len(self.artifacts) > 0:\n self.copy(self.copy_from_directory, Path(self.host_wd).resolve())\n if not self.container:\n self.provision()\n try:\n self.container.exec_run(cmd=f'mkdir {self.working_directory}')\n self.container.exec_run(cmd=f'/bin/sh -c \"cp -R {self.bind_path}/* {self.working_directory}\"')\n except ContainerError:\n return False\n return True\n return False", "title": "" }, { "docid": "d9491b2851d89f4e50ddd3bd68a5c108", "score": "0.51990855", "text": "def copy(self, src, dest):\n 
helper = self.load_conf(self.config)\n\n if not os.path.exists(src):\n src = helper.volumes.get(src)['path']\n\n if not os.path.exists(dest):\n dest = helper.volumes.get(dest)['path']\n\n if not confirm(\"Copy from '%s' to '%s'\" % (src, dest)):\n return\n\n ISCSIDevice.copy_volume(src, dest)", "title": "" }, { "docid": "029ff393383fa7954a5dc7587d573756", "score": "0.5197063", "text": "def copyDir(srcDir, dstDir):\n try:\n if not os.path.isdir(dstDir):\n os.makedirs(dstDir)\n names = os.listdir(srcDir)\n for name in names:\n srcPath = os.path.join(srcDir, name)\n if os.path.isfile(srcPath):\n shutil.copy2(srcPath, os.path.join(dstDir, name))\n except (IOError, OSError), e:\n if str(e).find('Permission denied') >= 0:\n print 'Error - must be root to install files'\n cleanSource()\n sys.exit(4)\n raise", "title": "" }, { "docid": "96ee8ee49b6483e3e98b8627bcaf437d", "score": "0.5197053", "text": "def copy_conf(pwd):\n orig = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static_files')\n files = ANSIBLE_FILES + VAGRANT_FILES\n\n def copy(f, orig, dest):\n try:\n shutil.copy(os.path.join(orig, f), os.path.join(dest, f))\n except FileNotFoundError as e:\n print(\"COPY: \" + f + \", \" + e.strerror)\n\n for f in files:\n copy(f, orig, pwd)", "title": "" }, { "docid": "4ea4567d6e97244078d612062e33bcba", "score": "0.51888627", "text": "def copy_log(log_path, destination_path):\n dest_path = osp.join(destination_path, 'logs')\n if os.path.exists(dest_path):\n shutil.rmtree(dest_path)\n os.makedirs(dest_path)\n copy_tree(log_path, dest_path)", "title": "" }, { "docid": "3913b38c88e432e24e9a71752c64280a", "score": "0.5185598", "text": "def test_copy_config_files(self, tmpdir):\n build = self._create_build(tmpdir)\n\n build.workspace.make_build_dir()\n build.workspace.make_package_dir()\n\n nginx = ConfigFiles.nginx(['config/nginx.conf'])\n supervisor = ConfigFiles.supervisor(['config/my-app.conf'])\n build.deploy = build.deploy.override(config_files=[nginx, supervisor])\n\n # Make some pretend config files\n build_config_path = tmpdir.mkdir('test_id', 'build', 'config')\n open(str(build_config_path.join('nginx.conf')), 'a').close()\n open(str(build_config_path.join('my-app.conf')), 'a').close()\n\n build.copy_config_files()\n\n package_path = tmpdir.join('test_id', 'package')\n assert package_path.join('etc', 'nginx', 'sites-enabled',\n 'nginx.conf').check()\n assert package_path.join('etc', 'supervisor', 'conf.d',\n 'my-app.conf').check()", "title": "" }, { "docid": "3a6ae2c95e54a82fc253e89d34f04fa3", "score": "0.518286", "text": "def copy_config_log_files():\n\n reason = get_failure_reason()\n if not reason:\n return\n\n srcroot = os.path.join(outputdir, \"build\", '-'.join(reason))\n destroot = os.path.join(resultdir, '-'.join(reason))\n config_files = ('config.log', 'CMakeCache.txt', 'CMakeError.log',\n 'CMakeOutput.log')\n\n for root, dirs, files in os.walk(srcroot):\n dest = os.path.join(destroot, os.path.relpath(root, srcroot))\n\n for fname in files:\n if fname in config_files:\n if not os.path.exists(dest):\n os.makedirs(dest)\n shutil.copy(os.path.join(root, fname), os.path.join(dest, fname))", "title": "" }, { "docid": "3a6ae2c95e54a82fc253e89d34f04fa3", "score": "0.518286", "text": "def copy_config_log_files():\n\n reason = get_failure_reason()\n if not reason:\n return\n\n srcroot = os.path.join(outputdir, \"build\", '-'.join(reason))\n destroot = os.path.join(resultdir, '-'.join(reason))\n config_files = ('config.log', 'CMakeCache.txt', 'CMakeError.log',\n 
'CMakeOutput.log')\n\n for root, dirs, files in os.walk(srcroot):\n dest = os.path.join(destroot, os.path.relpath(root, srcroot))\n\n for fname in files:\n if fname in config_files:\n if not os.path.exists(dest):\n os.makedirs(dest)\n shutil.copy(os.path.join(root, fname), os.path.join(dest, fname))", "title": "" }, { "docid": "c8d1d57f28108daefce2d977c0fbf39f", "score": "0.51804626", "text": "def get_temp_dir(self):\n return os.path.join(self.config_dir, 'tmp')", "title": "" }, { "docid": "ba8669ab3b5d0794e85448547e558601", "score": "0.5180143", "text": "def test_backup_files_skip_directory(tmpdir, settings, config_writer, caplog):\n settings.MEDIA_ROOT = str(tmpdir.mkdir(\"media\"))\n settings.NON_EXISTING_DIR = \"NON_EXISTING_DIR\"\n\n config_writer(files={\"directories\": [\"MEDIA_ROOT\", \"NON_EXISTING_DIR\"], \"overwrite_existing_directory\": \"yes\"},)\n backup = Backup.from_config(str(tmpdir.join(\"config.yml\")))\n\n with caplog.at_level(logging.DEBUG):\n backup.full(db=False, files=True)\n\n assert \"Source directory NON_EXISTING_DIR does not exist, skipping\" in caplog.text", "title": "" }, { "docid": "1589870951fdae1939e0b635c3c4e544", "score": "0.5175624", "text": "def _copy_local_directory_to_device(self, local_directory):\n device_directory = android.util.get_device_path(local_directory)\n android.adb.remove_directory(device_directory, recreate=True)\n android.adb.copy_local_directory_to_remote(local_directory,\n device_directory)", "title": "" }, { "docid": "ab272de3d5b732fe27efb014461ad0d1", "score": "0.5175026", "text": "def dest(tmpdir):\n name = str(tmpdir.join(\"dest\"))\n utils.create_directory(name)\n yield name\n utils.restore_tree_permissions(tmpdir)", "title": "" }, { "docid": "eac4f51e0f4f371f76903d9c64be71f9", "score": "0.5160841", "text": "def __copy_configuration(self, dockerfiles, src):\n src = os.path.abspath(os.path.join(self.provision, src)) + \"/.\"\n if Output.VERBOSITY_VERBOSE <= self.output.get_verbosity():\n self.line('<comment>src :</comment> %s' % src)\n for dest in dockerfiles:\n dest = os.path.abspath(os.path.join(dest, 'conf/'))\n if not os.path.exists(dest):\n if Output.VERBOSITY_DEBUG <= self.output.get_verbosity():\n self.line('<comment>create :</comment> %s' % dest)\n os.mkdir(dest)\n if Output.VERBOSITY_VERBOSE <= self.output.get_verbosity():\n self.line('<comment>dest :</comment> %s' % dest)\n copy_tree(src, dest, 1, 1, 0, 0, 0)", "title": "" }, { "docid": "d70e1d6541246f3b5e2543134674f3bb", "score": "0.51596487", "text": "def cp(backup_context, dryrun):\n # Check for working dir and run mkdir() if it doesn't exist\n backup_context.mkdir()\n click.echo(\n \"About to copy files from: {}\".format(backup_context.dropbox_camera_uploads_dir)\n )\n click.echo(\"To local working dir: {}\".format(backup_context.local_working_dir))\n dest_images = backup_context.local_working_dir\n dest_videos = backup_context.local_working_dir / \"video\"\n\n for dropbox_file_name in backup_context.dropbox_filenames:\n file_row = backup_context.get_file_db_row(dropbox_file_name)\n if file_row[\"InWorkingDir\"]:\n click.echo(\n \"Skipping file '{}'; it already exists in workdir\".format(\n dropbox_file_name\n )\n )\n continue\n # Set destination dir\n file_ext = os.path.splitext(dropbox_file_name)[1]\n if file_ext in backup_context.video_file_extensions:\n dest_root = dest_videos\n else:\n dest_root = dest_images\n if dryrun:\n click.echo(\n \"Dry run; would have copied '{}' to workdir\".format(dropbox_file_name)\n )\n else:\n click.echo(\"Copying '{}' to 
{}\".format(dropbox_file_name, dest_root))\n copy2(\n backup_context.dropbox_camera_uploads_dir / dropbox_file_name, dest_root\n )", "title": "" }, { "docid": "4e10f9e58171f8e3fb2ef8b78c0e6170", "score": "0.5154797", "text": "def destdir(tmpdir):\n dst = join(tmpdir, \"simple\")\n makedirs(dst)\n yield dst\n if exists(dst):\n shutil.rmtree(dst)", "title": "" }, { "docid": "fefdd06fe4600c5bfbc55d6038b915b9", "score": "0.5148598", "text": "def destdir(tmpdir):\n dst = join(tmpdir, 'simple')\n makedirs(dst)\n yield dst\n if exists(dst):\n shutil.rmtree(dst)", "title": "" }, { "docid": "c3e06aeb6d9c811c5d8cac38aa3f78e7", "score": "0.51435465", "text": "def copyDirectory(src, dest):\n try:\n shutil.copytree(src, dest)\n # Directories are the same\n except shutil.Error as e:\n print('Directory not copied. Error: %s' % e)\n # Any error saying that the directory doesn't exist\n except OSError as e:\n print('Directory not copied. Error: %s' % e)", "title": "" }, { "docid": "51357bbb229587577957172a31d8c6a9", "score": "0.5141798", "text": "def copy_folder(self, key: str, dst: str):\n raise NotImplementedError() # pragma: no cover", "title": "" }, { "docid": "5c2277690973584703f716cbd9adb65a", "score": "0.5139141", "text": "def prepare(self):\n new_dir = os.path.dirname(self.scenario_fpath).split('/')[-1]\n infraform_dir = os.path.expanduser('~') + '/.infraform/'\n self.execution_dir = infraform_dir + new_dir\n if os.path.isdir(self.execution_dir):\n shutil.rmtree(self.execution_dir)\n subprocess.call(['cp', '-r', os.path.dirname(self.scenario_fpath),\n infraform_dir])", "title": "" }, { "docid": "c6a0a1ad6800af96f0609eef7336e379", "score": "0.51384705", "text": "def make_copy(self, temp, field, src, dst, redo=False):\n dst_path = self.get(temp, mkdir=True, **{field: dst})\n if not redo and exists(dst_path):\n return\n\n src_path = self.get(temp, **{field: src})\n if isdir(src_path):\n raise ValueError(\"Can only copy files, not directories.\")\n shutil.copyfile(src_path, dst_path)", "title": "" }, { "docid": "c73151bf2ae6fe177a4a5adc5c2fd33d", "score": "0.51371336", "text": "def cp_r(src, dst):\n try:\n shutil.copytree(src, dst)\n except OSError as exc: # python >2.5\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise", "title": "" }, { "docid": "31eaba61e7cc92e50ab9b4ff23dddf38", "score": "0.51365507", "text": "def backup_config(self):\n\n for index, files_metadata in enumerate(self.backup_files):\n for file_name, source_path in files_metadata.iteritems():\n\n dest_file = self.backup_working_dir + '/' + file_name\n self.log.info(\"Backing up config file %s of %s..\" %\n (index+1, len(self.backup_files)))\n\n try:\n admin_tasks.copy_file(source_path, dest_file)\n admin_tasks.set_permissions(dest_file, 0400)\n except admin_tasks.AdminTasksError, e:\n self.log.error(str(e))\n sys.exit(1)\n else:\n # Create a list for output summary\n self.files_backed_up.append(dest_file)\n self.log.info(\"File Details: %s \" %\n admin_tasks.get_file_details(dest_file))", "title": "" }, { "docid": "9067a202ca3f055f61fa083a3276c0c5", "score": "0.5133873", "text": "def copy(self, source, dest):\n dest = self.buildLocation(dest)\n print(\"copy {0} to {1}\".format(source, dest), file=sys.stderr)\n directory = dest.rsplit(\"/\",1)[0]\n directory = fileurl2path(directory)\n print(\" Making shure directory {0} exists\".format(directory), file=sys.stderr)\n os.makedirs(directory, mode=0o777, exist_ok=True)\n shutil.copy(fileurl2path(source), fileurl2path(dest))\n print(\" done\", file=sys.stderr)", 
"title": "" }, { "docid": "d374f99aa12a55418ebf30be0c2b2272", "score": "0.5133178", "text": "def test_conflicting_path(tmpdir, aws_cf_config_path):\n with tmpdir.as_cwd():\n shutil.copyfile(aws_cf_config_path, str(tmpdir.join('config.yaml')))\n assert main(['create']) == 0\n assert main(['create']) == 1", "title": "" }, { "docid": "550f9430c698c762581c313f4816f928", "score": "0.51327324", "text": "def freeze(self, tmp_dir):\n for sfile in self.secrets():\n src_file = hard_path(sfile, self.opt.secrets)\n if not os.path.exists(src_file):\n raise aomi_excep.IceFile(\"%s secret not found at %s\" %\n (self, src_file))\n\n dest_file = \"%s/%s\" % (tmp_dir, sfile)\n dest_dir = os.path.dirname(dest_file)\n if not os.path.isdir(dest_dir):\n os.mkdir(dest_dir, 0o700)\n\n shutil.copy(src_file, dest_file)\n LOG.debug(\"Froze %s %s\", self, sfile)", "title": "" }, { "docid": "19f25a52a9e0eee035f052ca342031c7", "score": "0.51295984", "text": "def make_destinations(self):\n for k in config.destinations:\n path = join(config.DATA_DIR, config.destinations[k])\n if not exists(path):\n makedirs(path)", "title": "" }, { "docid": "639e65e22f5e4030acbb87f689e0dbee", "score": "0.5127833", "text": "def make_copy(self, temp, field, src, dst, redo=False):\n dst_path = self.get(temp, mkdir=True, **{field: dst})\n if not redo and os.path.exists(dst_path):\n return\n\n src_path = self.get(temp, **{field: src})\n if os.path.isdir(src_path):\n raise ValueError(\"Can only copy files, not directories.\")\n shutil.copyfile(src_path, dst_path)", "title": "" }, { "docid": "580fae4e5f7a148f33f895275c31fb79", "score": "0.5126314", "text": "def tempDirectory(self,key='__SlicerTestTemp__',tempDir=None):\n if not tempDir:\n tempDir = qt.QDir(slicer.app.temporaryPath)\n tempDirName = key\n if self.uniqueDirectory:\n key += qt.QDateTime().currentDateTime().toString(\"yyyy-MM-dd_hh+mm+ss.zzz\")\n fileInfo = qt.QFileInfo(qt.QDir(tempDir), tempDirName)\n dirPath = fileInfo.absoluteFilePath()\n qt.QDir().mkpath(dirPath)\n return dirPath", "title": "" }, { "docid": "c474c3a85e19dcb3777ba2aff3836642", "score": "0.5118077", "text": "def get_keycloak_config_path(temp_dir):\n return temp_dir / \"configuration\"", "title": "" }, { "docid": "4e1aee3968b2eee5262819317ce4ac20", "score": "0.5118047", "text": "def _copy(self) -> None:\n self.copy = self.config.copy()", "title": "" }, { "docid": "bb931bde91ea6e74147366d4dc50d171", "score": "0.5117482", "text": "def copy(self, destination: str) -> \"FileHelper\":\n\n if self.exists():\n shutil.copy(self.path, destination)\n\n return self", "title": "" }, { "docid": "3d7f23b1cc724577483ca91d8bba1baa", "score": "0.50971705", "text": "def copyLogFile(destination):\n shutil.copy2(getLogFile(), _expandpath(destination))", "title": "" }, { "docid": "d021413b1a5727ffdf051c5168e776bc", "score": "0.50921", "text": "def _copy(self, src, dest):\n shutil.copyfile(src, dest)\n try:\n shutil.copystat(src, dest)\n except OSError as e:\n self.log.debug(\"copystat on %s failed\", dest, exc_info=True)", "title": "" }, { "docid": "f3c70a962cc8c886f45eda418cd8bbcf", "score": "0.50910485", "text": "def auto_makedirs(destination):\n if not os.path.exists(os.path.dirname(destination)):\n os.makedirs(os.path.dirname(destination))", "title": "" }, { "docid": "f62327b2a9d188f80c2f9e3c44396049", "score": "0.50853634", "text": "def _copy_vsphere_configuration_to_manager(vcloud_config):\n tmp = tempfile.mktemp()\n with open(tmp, 'w') as f:\n json.dump(vcloud_config, f)\n fabric.api.put(tmp,\n 
vcloud_plugin_common.Config.VCLOUD_CONFIG_PATH_DEFAULT)", "title": "" }, { "docid": "9a80347db821975a8ad3cd29f971713c", "score": "0.50808704", "text": "def build(self, backend, destdir):\n orig, dest = self.get_value()\n dest = self.ensure_dir(destdir, dest)\n orig = os.path.abspath(orig)\n if os.path.isfile(orig):\n copy_file(orig, dest)\n elif os.path.isdir(orig):\n copy_tree(orig, dest)", "title": "" }, { "docid": "80e8e3091fa4ab42dce3f23f08c15502", "score": "0.5079925", "text": "def test_src_block_copy_from_dir(self):\n src = os.path.join(self.worktree, \"dir\")\n os.makedirs(src)\n cf = self.CopyFile(\"dir\", \"foo\")\n self.assertRaises(error.ManifestInvalidPathError, cf._Copy)", "title": "" } ]
5eb4f9e0ed93aa4bbff6383b5a544e3a
Return whether proper Docker version is installed.
[ { "docid": "ae878a289e1b547295f2bc227ba43dcb", "score": "0.8707321", "text": "def supported_docker_installed():\n\n try:\n clean_version = remove_leading_zeros_from_version(commands.version())\n return Version(clean_version) >= Version(SUPPORTED_DOCKER_V)\n # OSError = Not installed\n # CommandError = docker versions less than 1.5 give exit code 1\n # with 'docker --version'.\n except (OSError, CommandError):\n return False", "title": "" } ]
[ { "docid": "70b9e6dcc648fff5b07e32daadad2677", "score": "0.77932274", "text": "def docker_is_present():\n return which('docker') is not None", "title": "" }, { "docid": "78f6a116b9517d3022997fb477775efb", "score": "0.7396688", "text": "def _check_docker_version():\n if os.environ.get(\"CIRCLECI\"):\n # Skip\n return\n\n MIN_DOCKER_VERSION = distutils.version.StrictVersion('19.03.8')\n\n try:\n p = subprocess.run(['docker', '--version'],\n capture_output=True,\n check=True,\n encoding='utf8')\n except Exception as e:\n raise Exception(\"cannot run 'docker --version'\") from e\n match = re.search(r\"Docker version ((\\d+)\\.(\\d+)\\.(\\d+))\", p.stdout)\n if match:\n version = distutils.version.StrictVersion(match.group(1))\n else:\n raise Exception(\n f\"cannot determine version from 'docker --version' output: <<< {p.stdout} >>>\"\n )\n\n if version < MIN_DOCKER_VERSION:\n raise Exception(\n f\"minimum Docker version: {MIN_DOCKER_VERSION}; \" +\n f\"you've got {version}: please upgrade your local installation of Docker\"\n )", "title": "" }, { "docid": "557a5ad3d84a40e2adf89db0bcb03f1a", "score": "0.7374182", "text": "def is_docker():\n return _is_docker(_o=__opts__)", "title": "" }, { "docid": "cfc58549b36d3a326266a4c360d9fe3d", "score": "0.7215797", "text": "def check_dockermachine() -> bool:\n logger.debug(\"checking docker-machine presence\")\n try:\n out = subprocess.check_output(['docker-machine', 'version'])\n out = out.decode(\"utf-8\")\n out = out.replace('docker-machine.exe', '').replace('docker-machine', '')\n out = out.strip()\n logger.debug(f\"Using docker machine version {out}\")\n return True\n except Exception as e:\n logger.debug(f\"Docker machine not present\")\n return False", "title": "" }, { "docid": "2f05b8ad1842586d3df773328511c41f", "score": "0.7164112", "text": "def if_docker():\n import os\n return os.path.exists('/.dockerenv') or os.path.exists('/.dockerinit')", "title": "" }, { "docid": "2d6ae7a5f2c9284121da147d5de9ff78", "score": "0.70061547", "text": "def checkDocker(self):\n pass", "title": "" }, { "docid": "2816bd1920116fe7c404caddd15b61d8", "score": "0.69938123", "text": "def check_docker() -> CheckLevels:\n try:\n docker.from_env(version='auto')\n except docker.errors.DockerException:\n message = (\n 'Docker is not running. '\n 'Docker is required for the \"create\" command to determine the '\n 'DC/OS variant of the given DC/OS installer. 
'\n 'Use the \"--variant\" option when using the \"create\" command or '\n 'install and run Docker.'\n )\n warn(message=message)\n return CheckLevels.WARNING\n return CheckLevels.NONE", "title": "" }, { "docid": "f4d434270cb5363f7e14bfea82abb168", "score": "0.6925132", "text": "def is_running_in_docker():\n root = conf\n docker = root.find(\"docker\")\n return docker is not None", "title": "" }, { "docid": "b699941dde8c48332cb388b4aba255c8", "score": "0.6902662", "text": "def is_component_dockerised(component):\n if os.path.exists(get_srcdir(component) + os.sep + \"Dockerfile\"):\n return True\n return False", "title": "" }, { "docid": "403706e86f4c855e7fff756c4f073099", "score": "0.6753392", "text": "def _check_docker(self, install: bool = False) -> None:\n self.LOGS.debug(\"CLIENT: Client._check_docker() method was called\")\n\n # Check Docker version as proxy for installation\n version = None\n while not version:\n try:\n if self.os == \"Windows\":\n version = subprocess.check_output(\"docker version\")\n install = False\n elif self.os == \"Darwin\" or self.os == \"Linux\":\n docker_executable = subprocess.check_output([\"which\", \"docker\"]).decode('ascii').rstrip()\n version = subprocess.check_output([docker_executable, \"--version\"])\n install = False\n else:\n warnings.warn(\"No common OS detected!\")\n\n except (subprocess.CalledProcessError,docker.errors.DockerException):\n\n if install: # If installation is to be attempted\n msg = \"Trying to install Docker automatically\"\n print(msg)\n self.LOGS.info(\"CLIENT: \" + msg)\n Installers(self.os, self.LOGS)\n else:\n msg = \"No Docker application detected\"\n self.LOGS.error(\"CLIENT: \" + msg)\n warnings.warn(msg)\n raise EnvironmentError(\n \"Please start docker. \" +\n \"And make sure your user has the rights to access to docker. 
\" +\n \"You may need to install Docker from https://docs.docker.com/get-docker/\"\n )", "title": "" }, { "docid": "63ac54a0257f3529ba723740f5dda645", "score": "0.67382157", "text": "def __virtual__():\n if \"docker.version\" in __salt__:\n return __virtualname__\n return (False, __salt__.missing_fun_string(\"docker.version\"))", "title": "" }, { "docid": "bb6becd37256b76ee4902ace870ea294", "score": "0.6676748", "text": "def check_dependencies(self):\n return docker.image_exists('nodejs')", "title": "" }, { "docid": "f524131862e3a54efa9cf3afa40f18a6", "score": "0.66063714", "text": "def check_docker_service():\n # Check if it's already started:\n p = subprocess.Popen(['docker',\n 'inspect',\n 'cstar_perf_selenium'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n return (False, False, 'fail')\n data = json.loads(stdout)\n return (True, data[0]['State']['Running'], data[0]['NetworkSettings']['IPAddress'])", "title": "" }, { "docid": "878dbc6173dc147326e0cf2a61519949", "score": "0.65206635", "text": "def checkDocker(self):\n try:\n # Check docker client state\n docker.from_env().ping()\n except:\n raise ConnectionError(\"Please check that the Docker Daemon is installed and running.\")", "title": "" }, { "docid": "6c4b582bbc2cb198e191ffea1fa9d926", "score": "0.64801687", "text": "def version_match(self, container: Container) -> bool:\n if f'nvidia/bobber:{version}' not in container.image.tags:\n return False\n return True", "title": "" }, { "docid": "ad2ddd6c1b292507ad6c7cfaccbd8c25", "score": "0.6324579", "text": "def have_environment_marker_support():\n try:\n from pkg_resources import parse_version\n from setuptools import __version__\n return parse_version(__version__) >= parse_version('0.7.2')\n except Exception:\n return False", "title": "" }, { "docid": "d0b6806264cad3e8b96aec386fc9aa2e", "score": "0.625412", "text": "def version(self):\n try:\n res = self.client.version()\n except docker.errors.APIError as e:\n raise DockerError(\"Failed to query docker version: %s\" % e)\n return res", "title": "" }, { "docid": "a7a3e9bc06d4a30eefd833c1d4564ee6", "score": "0.6242226", "text": "def is_installed(name, root):", "title": "" }, { "docid": "3f4cb936ff83a2022dc7e04d2c6d2e67", "score": "0.6198438", "text": "def is_app_version_ok(version):\n if version==\"1.0\":\n return True\n return False", "title": "" }, { "docid": "4c112fdf59cdac106628066da4dfcb25", "score": "0.61940104", "text": "def check(self):\r\n return os.path.isfile('/etc/debian_version')", "title": "" }, { "docid": "017c026ab135ab98067b473cfca5b0a8", "score": "0.618453", "text": "def is_docker_image_changed(self):\n # type: () -> bool\n # Unnecessary to check docker image only on 5.0 and up\n if server_version_compare(self.old_file.get('fromversion', '0'), '5.0.0') < 0:\n old_docker = get_dockerimage45(self.old_file)\n new_docker = get_dockerimage45(self.current_file)\n if old_docker != new_docker:\n print_error(Errors.breaking_backwards_docker(self.file_path, old_docker, new_docker))\n return True\n return False", "title": "" }, { "docid": "54ab0ae4562f4f4da5c5341c7859bd28", "score": "0.6150462", "text": "def is_update_available():\n url = \"https://pypi.org/pypi/micropy-cli/json\"\n data = get_cached_data(url)\n versions = [k for k in data[\"releases\"].keys() if \"rc\" not in k]\n if versions:\n latest = version.parse(versions[-1])\n cur_version = version.parse(metadata.version(\"micropy-cli\"))\n if cur_version < latest:\n return str(latest)\n return False", 
"title": "" }, { "docid": "46e8953636a03c033f2ce4704d9d808f", "score": "0.61462486", "text": "def _gitPresent():\r\n try:\r\n gitvers = subprocess.check_output(['git','--version'],stderr=subprocess.PIPE)\r\n if gitvers.startswith('git version'):\r\n return True\r\n except OSError:\r\n return False", "title": "" }, { "docid": "7b16102b0091bb2f9294bfb78beb5cb4", "score": "0.60970014", "text": "def test_old_yum_packages(host):\n with host.sudo():\n for pkg in ['docker', 'docker-client', 'docker-client-latest', 'docker-common', 'docker-latest',\n 'docker-latest-logrotate', 'docker-logrotate', 'docker-engine']:\n assert not host.package(pkg).is_installed", "title": "" }, { "docid": "444376f3996852ef9e5625d48eed7963", "score": "0.60736424", "text": "def is_centos(self):\n ret, out = self.run_cmd(\"lsb_release -a\")\n if ret == 0:\n if \"CentOS\" in out:\n return True\n return False", "title": "" }, { "docid": "7e829ecdd3e0503913d134e6f14411b9", "score": "0.6054076", "text": "def _is_inside_container() -> bool:\n return os.path.exists('/.dockerenv')", "title": "" }, { "docid": "bd159d77a1079ba3d91e9838ee0bc5ac", "score": "0.60266334", "text": "def is_package_exist(package_name, image_name):\n # run the corresponding image with the package to test and get the output\n res = subprocess.run(\n [\"cmd\", \"/c\", \"docker\", \"run\", \"-t\", \"--name\", \"c_sai_\" + image_name, \"c_sai_\" + image_name, \"dpkg\", \"-s\", package_name],\n capture_output=True)\n return_code = res.returncode\n output = res.stdout.decode(\"utf-8\")\n # remove the container\n res = subprocess.run(\n [\"cmd\", \"/c\", \"docker\", \"rm\", \"c_sai_\" + image_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL\n )\n if return_code != 0:\n return False\n # find on the output the correct message\n str_ok = \"Status: install ok installed\"\n # return the corresponding boolean\n if str_ok not in output:\n return False\n return True", "title": "" }, { "docid": "d117002cc2e36194af112031cef0a9b9", "score": "0.60171074", "text": "def has_environment_marker_platform_impl_support():\n return parse_version(setuptools_version) >= parse_version('18.5')", "title": "" }, { "docid": "5feded83178ac0d89e837e938ff5d750", "score": "0.6016842", "text": "def _buildable(package, version=\"\"):\n available = False\n if os.path.isdir(package):\n metadata, _, _ = api.render(package)\n match_dict = {'name': metadata.name(),\n 'version': metadata.version(),\n 'build': metadata.build_number(), }\n ms = conda_interface.MatchSpec(\" \".join([package, version]))\n available = ms.match(match_dict)\n return available", "title": "" }, { "docid": "78ff3fd0fdafc8bb5e952c6133f39f05", "score": "0.6008825", "text": "def _checkDockerImage(self):\n\n try:\n self.checkRequirements()\n except DockerImageMissingException as e:\n log.error(e.message)\n sys.exit(E_DOCKER_IMAGE_MISSING)", "title": "" }, { "docid": "e790125327d4dd746c01c6a7710c464f", "score": "0.5992728", "text": "def image_needs_building(image):\n d = docker_client()\n\n # first, check for locally built image\n try:\n d.images.get(image)\n except docker.errors.ImageNotFound:\n # image not found, check registry\n pass\n else:\n # it exists locally, no need to check remote\n return False\n\n # image may need building if it's not on the registry\n return image_needs_pushing(image)", "title": "" }, { "docid": "7eaa04ec2d95aca18497123331b9c920", "score": "0.595574", "text": "def has_skimage(version='0.11'):\n try:\n import skimage\n except ImportError:\n return False\n sk_version = 
LooseVersion(skimage.__version__)\n return sk_version >= LooseVersion(version)", "title": "" }, { "docid": "62359105dd689262dd86782042cf3377", "score": "0.59524995", "text": "def is_installed(self):\n return not run('go list ' + self.package,\n stdout=Capture(),\n stderr=Capture()).returncode", "title": "" }, { "docid": "4fa5408b6123698b6c4439736b94602c", "score": "0.5937628", "text": "def is_available():\n return core.is_compiled_with_dist()", "title": "" }, { "docid": "60ce112356b420d92e72fe258696e6fc", "score": "0.5895965", "text": "def is_enterprise_cli_package_installed():\n stdout, stderr, return_code = shakedown.run_dcos_command('package list --json')\n print('package list command returned code:{}, stderr:{}, stdout: {}'.format(return_code, stderr, stdout))\n try:\n result_json = json.loads(stdout)\n except JSONDecodeError as error:\n raise DCOSException('Could not parse: \"{}\"'.format(stdout))(error)\n return any(cmd['name'] == 'dcos-enterprise-cli' for cmd in result_json)", "title": "" }, { "docid": "39db35c9178350ad4548ecc83121ed95", "score": "0.58936954", "text": "def is_python3():\n\n return version_info >= (3, 0)", "title": "" }, { "docid": "b10c4cfdcad1028666e4826a7b03e9e0", "score": "0.5878436", "text": "def __is_installed(cls, version: str)-> bool:\n\n if not version or version == \"'b'\":\n return False\n\n is_non_in_version = cls.found_char(version, \"none\")\n\n if is_non_in_version:\n return False\n\n return True", "title": "" }, { "docid": "1147e1f3cc04e789d725c2d02076187d", "score": "0.58690476", "text": "def is_installed(root, extension):", "title": "" }, { "docid": "953b53e342ef35071e013f3ef721e719", "score": "0.58511317", "text": "def isInstalled(self):\n return False", "title": "" }, { "docid": "e448ce8ee75bc29d6725c5859f69a0a6", "score": "0.5843688", "text": "def is_installed(self):\n return False", "title": "" }, { "docid": "41e3bb601b940848f2321f5dfbda8ee5", "score": "0.5836495", "text": "def _EnsureDockerRunning():\n docker = file_utils.FindExecutableOnPath('docker')\n if not docker:\n raise RuntimeMissingDependencyError(\n 'Cannot locate docker on $PATH. Install docker from '\n 'https://docs.docker.com/get-docker/.')\n try:\n # docker info returns 0 if it can connect to the docker daemon and\n # returns a non-zero error code if it cannot. run_subprocess\n # checks raises an error if the process does not return 0.\n run_subprocess.Run([docker, 'info'], timeout_sec=20, show_output=_IsDebug())\n except subprocess.CalledProcessError:\n raise RuntimeMissingDependencyError(\n 'Unable to reach docker daemon. 
Make sure docker is running '\n 'and reachable.')", "title": "" }, { "docid": "1d285e9f918509fc12a44d6ea432934c", "score": "0.5836297", "text": "def it_should_run_docker_build():", "title": "" }, { "docid": "55b37b9970768bcee200ab8ac6b2e169", "score": "0.5829774", "text": "def has_ansible():\n try:\n subprocess.call([\"which\", \"ansible-playbook\"])\n return True\n except:\n return False", "title": "" }, { "docid": "62f18fb5ee1e8881cb7c6cab57c79e47", "score": "0.5823921", "text": "def julia_is_installed():\n\n if sys.version_info[:2] >= (3, 7):\n kwargs = {'capture_output': True, 'text': True}\n if (3, 5) <= sys.version_info[:2] <= (3, 7):\n kwargs = {'universal_newlines': True,\n 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE,\n }\n\n result = subprocess.run([\"julia\", \"--version\"], **kwargs)\n\n if result.stdout.startswith(\"julia version\"):\n bits = result.stdout.split(sep=\".\")\n # if the version is 1.X.X or 0.7.X is installed\n if int(bits[0][-1]) >= 1 or int(bits[1]) >= 7:\n print(\"It appears julia 1.X.X or 0.7.X is installed, install should proceed successfully\")\n return True\n else:\n print(\"It appears julia is installed but the version is too old please update to at least 1.0.0 or 0.7.0\")\n else:\n print(\"Julia is not installed and so VibronicToolkit cannot be installed\",\n \"some features such as: analytic/sos/trotter will not be available\")\n return False", "title": "" }, { "docid": "36fe6275dd9bd7804417bd262c3320ff", "score": "0.5821413", "text": "def check_python3():\n\tpythonVersion = sys.version_info[:3]\n\treturn (pythonVersion[0] == 3)", "title": "" }, { "docid": "211a536daf7b02c70551665a57bc537d", "score": "0.58112127", "text": "def get_version(self):\n return self.docker_image.get_version()", "title": "" }, { "docid": "4d6df0061e02ab6e9c593e3169d06118", "score": "0.57665354", "text": "def check(self):\r\n return (os.path.isfile('/etc/redhat-release')\r\n and not os.path.isfile('/etc/fedora-release'))", "title": "" }, { "docid": "c71ab0d8a465a121b4be15509e4f7783", "score": "0.57634556", "text": "def check(self):\r\n return os.path.isfile('/etc/fedora-release')", "title": "" }, { "docid": "9fa1e7a4c0c93bb49ee0eebe5734bed1", "score": "0.57561666", "text": "def check_version(self, version):\n # TODO\n return False", "title": "" }, { "docid": "7feaa0e67d40f2a4d65eda02a0bf1a94", "score": "0.57505214", "text": "def is_sc_image_installed(raise_error=False):\n try:\n import skimage # noqa\n is_installed = True\n except:\n is_installed = False\n # Raise error (if needed) :\n if raise_error and not is_installed:\n raise IOError(\"scikit-image not installed. 
In a terminal, run : pip\"\n \" install scikit-image\")\n return is_installed", "title": "" }, { "docid": "c00134121156dc19d57e296262146c5b", "score": "0.57368726", "text": "def docker_client_api_version(self) -> Optional[str]:\n return self.__docker_client_api_version", "title": "" }, { "docid": "f90d784ea0283a833487b88baca433e1", "score": "0.57286465", "text": "def _is_it_a_version(version: str) -> bool:\n return re.match(r\"^v?\\d+\\.\\d+(\\.\\d+)?$\", version) is not None", "title": "" }, { "docid": "261d8b9bf241b0224ce07ae77821b315", "score": "0.5725917", "text": "def _is_ers_installed(processes):\n enrepserver = shell.find_pattern(r'enrepserver, EnqueueReplicator,.*', processes.output)\n return bool(enrepserver)", "title": "" }, { "docid": "c9edc4f0d246aad18caa0fe5d9d9e9d2", "score": "0.5722736", "text": "def docker(self):\n try:\n result = PackageBuild.thread_local.docker\n except AttributeError:\n result = PackageBuild.thread_local.docker = \\\n docker.from_env(timeout=300)\n\n return result", "title": "" }, { "docid": "b84ba0ef1df5a0f2353723fb1ac172ad", "score": "0.5715018", "text": "def is_keepalived_installed():\n if not os.path.exists(\"/etc/init.d/keepalived\"):\n return False\n else:\n return True", "title": "" }, { "docid": "982d2ebcbe3f132ab2d553c8c67419f4", "score": "0.5714488", "text": "def install_ce_ubuntu(version=None) -> bool:\n # https://docs.docker.com/v17.09/engine/installation/linux/docker-ce/ubuntu/\n from carnival.cmd import apt\n pkgname = \"docker-ce\"\n if apt.is_pkg_installed(pkgname, version):\n log(f\"{pkgname} already installed\")\n return False\n\n log(f\"Installing {pkgname}...\")\n\n cmd.cli.run(\"sudo apt-get remove docker docker-engine docker.io\")\n cmd.cli.run(\"sudo apt-get update\")\n cmd.cli.run(\"sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common\")\n cmd.cli.run(\"curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -\", pty=True)\n cmd.cli.run('sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"')\n\n apt.force_install(pkgname=pkgname, version=version, update=True, hide=True)\n return True", "title": "" }, { "docid": "e3c773832ce3933b588bbc04da9526c5", "score": "0.57096875", "text": "def check_python():\n if (sys.version_info[:2] > (2, 6) or sys.version_info[:2] > (3, 1)):\n return True\n\n return False", "title": "" }, { "docid": "9097d92053949dbec46e81022b10b54e", "score": "0.5705405", "text": "def dependency_is_installed(a_dependency):\n status, stdout, stderr = system(('rpm', '--query', '--whatprovides', a_dependency),\n log_output=False)\n return (status == 0) and not stdout.startswith('no package provides')", "title": "" }, { "docid": "14f73ba5c550a807e37a21f82acfd3f6", "score": "0.5699685", "text": "def _installable(package, version, conda_resolve):\n return conda_resolve.valid(conda_interface.MatchSpec(\" \".join([package, version])),\n filter=conda_resolve.default_filter())", "title": "" }, { "docid": "6bd26001c642ed805d37b5650cc5a7ff", "score": "0.56939304", "text": "def _supports_git_config_flag(self):\n if not hasattr(self, '_git_version_at_least_180'):\n self._git_version_least_180 = False\n\n version_str = (\n self._run_git(['version'],\n ignore_errors=True)\n .stdout\n .read()\n .strip()\n )\n\n if version_str:\n m = re.search(r'(\\d+)\\.(\\d+)\\.(\\d+)', version_str)\n\n if m:\n git_version = (int(m.group(1)),\n int(m.group(2)),\n int(m.group(3)))\n\n self._git_version_at_least_180 = (git_version >= (1, 8, 
0))\n\n return self._git_version_at_least_180", "title": "" }, { "docid": "904147b04fcc59a093c805619c617b1e", "score": "0.5693173", "text": "def docker(self) -> Optional[pulumi.Input[Union['DockerBuildArgs', 'DockerImageArgs']]]:\n return pulumi.get(self, \"docker\")", "title": "" }, { "docid": "5596369e6711ac4cb2adbf445c9efa70", "score": "0.5676979", "text": "def _get_client(self):\n\n try:\n client = docker.from_env()\n self.client = client\n return True\n except docker.errors.DockerException: # If connection to Docker app is not open\n return False", "title": "" }, { "docid": "e4df40fbb091426ee218354b414a67c3", "score": "0.5662514", "text": "def is_docker_image_already_in_registry(service: str, soa_dir: str, sha: str, image_version: Optional[str] = None) -> bool: # type: ignore\n registry_uri = get_service_docker_registry(service, soa_dir)\n repository, tag = build_docker_image_name(service, sha, image_version).split(\":\", 1)\n\n creds = read_docker_registry_creds(registry_uri)\n uri = f\"{registry_uri}/v2/{repository}/manifests/{tag}\"\n\n with requests.Session() as s:\n try:\n url = \"https://\" + uri\n r = (\n s.head(url, timeout=30)\n if creds[0] is None\n else s.head(url, auth=creds, timeout=30)\n )\n except SSLError:\n # If no auth creds, fallback to trying http\n if creds[0] is not None:\n raise\n url = \"http://\" + uri\n r = s.head(url, timeout=30)\n\n if r.status_code == 200:\n return True\n elif r.status_code == 404:\n return False # No Such Repository Error\n r.raise_for_status()", "title": "" }, { "docid": "88a4f992d43f4f44fff3650abfcb40ce", "score": "0.56535256", "text": "def is_installed(name: str) -> bool:\n try:\n return find_spec(name) is not None\n except ModuleNotFoundError:\n return False\n except ValueError as inst:\n if str(inst).endswith('.__spec__ is None'):\n logging.info('Missing __spec__ for %s. 
Marking package as installed.',\n str(inst).split('.', 1)[0])\n return True\n raise inst\n except: # pylint: disable=bare-except\n logging.exception('Unhandled exception for is_installed, returning False')\n return False", "title": "" }, { "docid": "8edc42d35c03cadc273ebad2e9b41708", "score": "0.5648256", "text": "def needs_installation() -> bool:\n if not BUILD_BREEZE_VENV_DIR.exists() or not BUILD_BREEZE_CFG_SAVED.exists():\n return True\n return BREEZE_SETUP_CFG_PATH.read_text() != BUILD_BREEZE_CFG_SAVED.read_text()", "title": "" }, { "docid": "7f6064a91297984c438452be7c28b0f9", "score": "0.56435925", "text": "def CheckVersion(self):\n booted_version = self._GetReleaseVersion()\n return (self.update_version and\n self.update_version.endswith(booted_version))", "title": "" }, { "docid": "d7b2b9ac91bf83cdf739afe78b950fa8", "score": "0.56370974", "text": "def check_versions_module():\n \n if [key['module_name'] for key in modules.run_ptr if key['module_name'] == 'versions']:\n return True\n return False", "title": "" }, { "docid": "9b6adee15341e3980a7b515263f2896c", "score": "0.5629164", "text": "def is_pip6_or_higher(pip_version=None):\n\n major, minor, micro = parse_pip_version(pip_version)\n if int(major) >= 6:\n return True\n else:\n return False", "title": "" }, { "docid": "c544feb551f0a0e8ead0c6bedbe5e28d", "score": "0.5623851", "text": "def is_installed(self):\n return cups is not None and self.name is not None", "title": "" }, { "docid": "01d76e25dc16a958213a506d11966283", "score": "0.5598595", "text": "def rpm_is_installed(a_package):\n status, stdout, stderr = system(('rpm', '--query', a_package),\n log_output=False)\n return (status == 0) and stdout.startswith(a_package)", "title": "" }, { "docid": "01d76e25dc16a958213a506d11966283", "score": "0.5598595", "text": "def rpm_is_installed(a_package):\n status, stdout, stderr = system(('rpm', '--query', a_package),\n log_output=False)\n return (status == 0) and stdout.startswith(a_package)", "title": "" }, { "docid": "855a3b2f9dd3fea7cc687ffe1bb9b492", "score": "0.55937356", "text": "def is_blueutil() -> bool:\n blueutil: str = os.popen(\"blueutil -v\").readline()\n if blueutil == \"\":\n return False\n else:\n return True", "title": "" }, { "docid": "586bceaef7d8cda571670665bbe4b26a", "score": "0.5585673", "text": "def has_package_installed(self, pack_name):\n return pack_name in self.installed_packages", "title": "" }, { "docid": "fdb1612f0fe95dd79b114dbc138c2a4d", "score": "0.55840427", "text": "def check_installation(self):\n try:\n return self.sshconnection.execute('dpkg-query -l %s' % (self.package))[2]\n except Exception as e:\n print('Caught exception: %s: %s' % (e.__class__, e))\n traceback.print_exc()\n sys.exit(1)", "title": "" }, { "docid": "40c1fe304051c72c7c5f94663d3fe729", "score": "0.5582656", "text": "def _exists(self):\n try:\n self.dock.images.get(self.image)\n except docker.errors.DockerException:\n return False\n\n return True", "title": "" }, { "docid": "03f3e5b611f805af283c3bd0037caf41", "score": "0.5578805", "text": "def has_package(self, package_metadata):\n candidates = self._name_to_packages.get(package_metadata.name, [])\n for candidate in candidates:\n if candidate.full_version == package_metadata.full_version:\n return True\n return False", "title": "" }, { "docid": "9263cc6ec4194f12397a6d1e165c29a8", "score": "0.55726033", "text": "def _is_enterprise(build_artifact: Path, workspace_dir: Path) -> bool:\n if ' ' in str(build_artifact):\n raise ValueError('No spaces allowed in path to the build 
artifact.')\n\n result = subprocess.check_output(\n args=['bash', str(build_artifact), '--version'],\n cwd=str(workspace_dir),\n stderr=subprocess.PIPE,\n )\n\n result = result.decode()\n result = ' '.join(\n [\n line for line in result.splitlines()\n if not line.startswith('Extracting image')\n and not line.startswith('Loaded image') and '.tar' not in line\n ],\n )\n\n version_info = json.loads(result)\n variant = version_info['variant']\n return bool(variant == 'ee')", "title": "" }, { "docid": "7a838c91abad944cf3f6083d168bf006", "score": "0.55701643", "text": "def is_installed():\n return os.path.isfile(get_target_filename())", "title": "" }, { "docid": "583aef68aefcca8c93bc02679457c0b6", "score": "0.5566736", "text": "def check_version():\n installed_version = parse_version(__version__)\n\n # fetch package metadata from PyPI\n pypi_url = \"https://pypi.org/pypi/msticpy/json\"\n pkg_data = httpx.get(\n pypi_url, timeout=httpx.Timeout(10.0, connect=30.0), headers=mp_ua_header()\n ).json()\n latest_version = pkg_data.get(\"info\", {}).get(\"version\", None)\n if latest_version:\n latest_version = parse_version(latest_version)\n else:\n latest_version = max(parse_version(s) for s in pkg_data[\"releases\"].keys())\n\n print(\n \"msticpy version\",\n \"installed:\",\n installed_version,\n \"latest published:\",\n latest_version,\n )\n if installed_version < latest_version:\n print(f\"A newer version of msticpy - {latest_version} is available.\")\n print(\"Upgrade with 'pip install --upgrade msticpy'\")\n else:\n print(\"Latest version is installed.\")", "title": "" }, { "docid": "8769437b899d8159264ce232419425cf", "score": "0.5563942", "text": "def is_datera_os(self):\n if self.is_rhel():\n release_file_path = \"/etc/redhat-release\"\n if self.path_isfile(release_file_path):\n release_info = self.file_open(release_file_path,\n 'r').readlines()\n if re.search(\"DaterOS\", release_info):\n return True\n return False", "title": "" }, { "docid": "bedee88501e0763192b70e5ec529a51a", "score": "0.55594987", "text": "def _is_installed(self):\n return self._system.exists(self.get_install_path())", "title": "" }, { "docid": "5380fedc78a49c791d2b87f149d9d90b", "score": "0.5559098", "text": "def isVersion(version):\r\n\treturn getVersion() >= version", "title": "" }, { "docid": "842ab79ee1336860fca5f19f7e018c66", "score": "0.55586445", "text": "def is_installed(name: str, version: str, dir: str = \".\") -> bool:\n return os.path.isdir(get_path(name, version, dir) + \"/\" + name)", "title": "" }, { "docid": "6d09c0ade503425df1c8e2212d73e2e0", "score": "0.5556765", "text": "def _check_current_device_version_available(self):\n try:\n if self.detected_firmware_version in self.firmware_file_list:\n return True\n else:\n return False\n except TypeError:\n return False", "title": "" }, { "docid": "ec8b1e8d7cf5ce2f8cbf0df2bc657bfb", "score": "0.5554758", "text": "def check(self):\r\n try:\r\n with open('/etc/lsb-release', 'r') as fp:\r\n return \"Ubuntu\" in fp.read()\r\n except:\r\n return False", "title": "" }, { "docid": "0e03e01a15f151bba4dae2795a3e630a", "score": "0.55513936", "text": "async def test_docker_image_default_platform(coresys: CoreSys):\n with patch.object(\n type(coresys.supervisor), \"arch\", PropertyMock(return_value=\"i386\")\n ), patch.object(\n coresys.docker.images, \"pull\", return_value=Mock(id=\"test:1.2.3\")\n ) as pull:\n instance = DockerInterface(coresys)\n await instance.install(AwesomeVersion(\"1.2.3\"), \"test\")\n assert pull.call_count == 1\n assert pull.call_args == 
call(\"test:1.2.3\", platform=\"linux/386\")", "title": "" }, { "docid": "d0b8e1109808baba8cbc878ee57eb1bd", "score": "0.5540276", "text": "def image_needs_pushing(image):\n d = docker_client()\n try:\n d.images.get_registry_data(image)\n except docker.errors.APIError:\n # image not found on registry, needs pushing\n return True\n else:\n return False", "title": "" }, { "docid": "24d95dca209b4c48e2d922b150f67693", "score": "0.55370986", "text": "def _is_version_supported(self, requires_version):\n if requires_version is not None and self.version is not None:\n if version_compare(self.version, requires_version) < 0:\n return False\n return True", "title": "" }, { "docid": "b654f5e488a64aa27fdcce7fc8c3bc82", "score": "0.55366915", "text": "def is_rhel(self):\n if self.path_isfile(\"/etc/redhat-release\"):\n return True\n else:\n return False", "title": "" }, { "docid": "e2a37722c050b1fb08078e6a1f81405e", "score": "0.5527887", "text": "def verify_base_docker_images(arch, os, rosdistro, image_name):\n sysroot_base_image = None\n docker_network_mode = 'host'\n sysroot_nocache = 'False'\n assert DockerConfig(\n arch, os, rosdistro, sysroot_base_image,\n docker_network_mode, sysroot_nocache).base_image == image_name", "title": "" }, { "docid": "652b4f297923ac013aeed9ac838c3b0b", "score": "0.55275404", "text": "def is_distribution():\n svn_variable = 'SVN_REVISION'\n if svn_variable in os.environ:\n # Usage in Hudson build\n return False\n\n try:\n import tvb_bin\n except ImportError:\n # No tvb_bin, it means usage from pip or conda\n return False\n\n try:\n _proc = Popen([\"svnversion\", \".\"], stdout=PIPE)\n version = VersionSettings.parse_svn_version(_proc.communicate()[0])\n if version:\n # usage from SVN\n return False\n except Exception:\n # Usage from tvb_distribution\n return True", "title": "" }, { "docid": "5ab6b0329dabceea8e26582f69565c17", "score": "0.5515581", "text": "def is_python3():\n\n return sys.version_info.major == 3", "title": "" }, { "docid": "d2cfca30b96e14f19a9c77e263f1f5d4", "score": "0.55124027", "text": "def _is_available():\n p = subprocess.Popen(\"which sbatch\".split(), stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n (stdout, stderr) = p.communicate(\"\")\n\n success = not p.returncode #returncode is 0 if everything went fine!\n return success and stdout.strip(\"\\n\") == \"/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase/wrappers/containers/slurm/sbatch\"", "title": "" }, { "docid": "3f5f63a5d14aefcc6551840cc3b99686", "score": "0.5508605", "text": "def is_python3():\n return sys.version_info.major == 3", "title": "" }, { "docid": "e8b67861fbe00c065a0df286a6f61be6", "score": "0.5505995", "text": "def image_exists_on_dockerhub(image_name: str, image_tag: str) -> bool:\n url = (\n \"https://auth.docker.io/token?scope=repository:\"\n f\"{image_name}:pull&service=registry.docker.io\"\n )\n res = requests.get(url=url)\n res.raise_for_status()\n token = res.json()[\"token\"]\n res = requests.get(\n url=f\"https://registry-1.docker.io/v2/{image_name}/manifests/{image_tag}\",\n headers={\n \"Accept\": \"application/vnd.docker.distribution.manifest.v2+json\",\n \"Authorization\": f\"Bearer {token}\",\n },\n )\n return res.status_code == 200", "title": "" }, { "docid": "29d69ea20aa23a8885528df33588b426", "score": "0.55022204", "text": "def package_ensure(package):\n status = run(\"dpkg-query -W -f='${Status}' %s ; true\" % package)\n if status.find(\"not-installed\") != -1 or status.find(\"installed\") == -1:\n 
package_install(package)\n return False\n else:\n return True", "title": "" }, { "docid": "8cfadb9338dc23c063944dd014f18e22", "score": "0.5501923", "text": "def is_image_exist(name):\n client = docker.from_env()\n images_raws = client.images.list()\n images = [image.tags[0] for image in images_raws if image.tags != []]\n client.close()\n if name not in [image.split(\":\")[0] for image in images if image != \"\"]:\n return False\n return True", "title": "" }, { "docid": "7fde8ce29ae256104f415ec693896dc6", "score": "0.5496051", "text": "def platform_is_6_5(self, **kwargs):\n ret = True\n if self.force_server_version:\n ret = False if not self.force_server_version >= '6.5' else ret\n else:\n if self._invalid_server_version():\n # server version is not valid, force a refresh right now\n self.get_server_version(**kwargs)\n if not self._invalid_server_version():\n ret = False if not self.server_version >= '6.5' else ret\n return ret", "title": "" }, { "docid": "db07e7f4b3024c6be5b9fabbf584f077", "score": "0.5486552", "text": "def supported_juju_version():\n if (not min_version('2.1') and\n min_version('2.0')):\n raise UnsupportedJujuVersion(\"Kiki does not support Juju 2.0.x \"\n \"command structure\")\n return True", "title": "" }, { "docid": "49dbd3add47f617d606a5c2f5be615a4", "score": "0.54696083", "text": "def local_on_pypi(package_name=package_name, local_version=local_version) -> bool:\n pypi_versions = get_pypi_versions(package_name)\n return local_version in pypi_versions", "title": "" } ]
e76147aff1fffc6c19c1dc908a51f3de
Return the queryset backing annotations. Executing this queryset is costly because there is no way to optimize the query execution. Since this is a related_set queryset, that was further filtered, each item in the queryset causes a db hit.
[ { "docid": "52440f6bcf1797455bab587bc901e505", "score": "0.6528129", "text": "def GetAnnotationsQS(self):\n return self._costly_annotations_qs", "title": "" } ]
[ { "docid": "187637764ad2b8c1f55ef062d620afcb", "score": "0.6831465", "text": "def get_queryset(self):\r\n queryset: QuerySet = super().get_queryset().prefetch_related('film_work_genre', 'genres', 'film_work_person',\r\n 'persons', ) \\\r\n .annotate(\r\n actors=ArrayAgg('persons__person__full_name', filter=Q(persons__role__exact='actor'), distinct=True),\r\n directors=ArrayAgg('persons__person__full_name', filter=Q(persons__role__exact='director'), distinct=True),\r\n writers=ArrayAgg('persons__person__full_name', filter=Q(persons__role__exact='writer'), distinct=True),\r\n genres=ArrayAgg('film_genres__genre__name', distinct=True)\r\n )\r\n\r\n return queryset.values()", "title": "" }, { "docid": "22eda7c7e2619cb392b4b4c8e59df1ba", "score": "0.6727052", "text": "def setup_eager_loading(cls, queryset):\n queryset = queryset.prefetch_related('keywords_str')\n queryset = queryset.prefetch_related('tags_str')\n # queryset = queryset.prefetch_related('keywords')\n # queryset = queryset.prefetch_related('tags')\n return queryset", "title": "" }, { "docid": "a2d89c6903d29425228c52637f33d775", "score": "0.65054554", "text": "def get_prefetched_queryset(self, *args, **kwargs):\n\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .prefetch_related(\n \"assignment_related_users\",\n \"agenda_items\",\n \"lists_of_speakers\",\n \"tags\",\n \"attachments\",\n \"polls\",\n \"polls__options\",\n )\n )", "title": "" }, { "docid": "0d44df133d4ef09ff4c24d6dca8dd0cf", "score": "0.6459458", "text": "def overview(cls, queryset, *annotations):\n if select_related := cls.select_related:\n queryset = queryset.select_related(*select_related)\n if prefetch_related := cls.prefetch_related:\n queryset = queryset.prefetch_related(*prefetch_related)\n if all_annotations := cls.get_overview_annotations():\n if annotations:\n _annotations = {k: v for k, v in all_annotations.items() if k in annotations}\n else:\n _annotations = all_annotations\n queryset = queryset.annotate(**_annotations)\n return queryset", "title": "" }, { "docid": "15cc13a2cbe615096aadbf2a843714fb", "score": "0.64486736", "text": "def queryset(self, request):\n qs = super(AdRepAdmin, self).queryset(request)\n qs = AdRep.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs", "title": "" }, { "docid": "5f7ea98ab003c78dfa75d316e62d447f", "score": "0.6312835", "text": "def optimice_query(self, foreignkey_fields, many_to_many_fields):\n query = self.model.objects.select_related(\n *foreignkey_fields) if len(foreignkey_fields) else self.model.objects\n query = query.prefetch_related(\n *many_to_many_fields) if len(many_to_many_fields) else query\n query = query.annotate(**self.include)\n return query", "title": "" }, { "docid": "e94e5cbb8a14256de58ccf08b7cd2001", "score": "0.62815255", "text": "def setup_eager_loading(queryset):\n queryset = queryset.select_related('user')\n return queryset", "title": "" }, { "docid": "a89f3a4e2554c2da02ba5196d449117d", "score": "0.6255363", "text": "def get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"assignment\")\n .prefetch_related(\n \"options\", \"options__user\", \"options__votes\", \"voted\", \"groups\"\n )\n )", "title": "" }, { "docid": "2331031801c517a18159eb9c2987e4b5", "score": "0.62418616", "text": "def queryset(self, request):\n qs = super(AdRepAdvertiserAdmin, self).queryset(request)\n qs = 
AdRepAdvertiser.objects.select_related().filter(id__in=qs\n ).defer('advertiser__site__envelope',\n 'advertiser__site__geom',\n 'advertiser__site__point')\n return qs", "title": "" }, { "docid": "f44f72d2925f19d988f4d27678fdc647", "score": "0.6220894", "text": "def prefetch(self, queryset: models.QuerySet) -> models.QuerySet:\n subquery = self.model.objects.all()\n \n if self.filters:\n q = reduce(operator.and_, [f.get() for f in self.filters])\n subquery = subquery.filter(q)\n \n if self.sort:\n subquery = subquery.order_by(*self.sort)\n \n subquery = subquery.select_related(\n *[f for f in self._one_fields if f not in self.joins.keys()]\n )\n subquery = subquery.prefetch_related(\n *[f for f in self._many_fields if f not in self.joins.keys()]\n )\n \n new = queryset.prefetch_related(models.Prefetch(self.field, queryset=subquery))\n \n # Recursively prefetch inner joins\n for j in self.joins.values():\n new = j.prefetch(new)\n \n return new", "title": "" }, { "docid": "af658717b3fcb034a1be8d964a7b611a", "score": "0.61762536", "text": "def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.prefetch_related('work__writers')\n qs = qs.prefetch_related('artist')\n qs = qs.prefetch_related('record_label')\n return qs", "title": "" }, { "docid": "6645320e53538ed49a3a1009893bc99e", "score": "0.61750937", "text": "def queryset(self, request):\n qs = super(AdRepConsumerAdmin, self).queryset(request)\n qs = AdRepConsumer.objects.select_related().filter(id__in=qs\n ).defer('consumer__site__envelope',\n 'consumer__site__geom',\n 'consumer__site__point')\n return qs", "title": "" }, { "docid": "f78845ca786992e3b1139e30be75acb7", "score": "0.6167035", "text": "def queryset(self, request):\n qs = super(TwitterAccountAdmin, self).queryset(request)\n qs = TwitterAccount.objects.select_related().filter(id__in=qs\n ).defer('site__envelope', 'site__geom', 'site__point')\n return qs", "title": "" }, { "docid": "d07e71199cfd9b3481850263db393e99", "score": "0.6163675", "text": "def get_queryset(self):\n acc = self.kwargs['accession'].lstrip('MGYA')\n job_query = Q(pk=acc)\n\n if self.analysis_job_filters:\n job_query &= self.analysis_job_filters\n\n job = get_object_or_404(emg_models.AnalysisJob, job_query)\n\n analysis = None\n try:\n analysis = self.annotation_model.objects \\\n .get(analysis_id=str(job.job_id))\n except self.annotation_model.DoesNotExist:\n # Return an empty EmbeddedDocumentList, the entity exists\n # but it doesn't have annotations\n return EmbeddedDocumentList([], self.annotation_model, self.annotation_model_property)\n\n if hasattr(self, \"annotation_model_property_resolver\"):\n return self.annotation_model_property_resolver(analysis)\n\n return getattr(analysis, self.annotation_model_property)", "title": "" }, { "docid": "e6963233e3b5e89ba851b9846fc73a71", "score": "0.6159714", "text": "def get_queryset(self, *args, **kwargs):\n queryset = super().get_queryset(*args, **kwargs)\n\n queryset = StockSerializers.StockItemSerializer.annotate_queryset(queryset)\n\n return queryset", "title": "" }, { "docid": "f78271e65d30227099e5d58592d2176f", "score": "0.61537594", "text": "def get_queryset(self, *args, **kwargs):\n\n queryset = super().get_queryset(*args, **kwargs)\n queryset = StockSerializers.LocationSerializer.annotate_queryset(queryset)\n return queryset", "title": "" }, { "docid": "f78271e65d30227099e5d58592d2176f", "score": "0.61537594", "text": "def get_queryset(self, *args, **kwargs):\n\n queryset = super().get_queryset(*args, **kwargs)\n queryset = 
StockSerializers.LocationSerializer.annotate_queryset(queryset)\n return queryset", "title": "" }, { "docid": "7ded678c8ab9324fa0636e9052b793b4", "score": "0.61159205", "text": "def get_queryset(self):\n return self.queryset().select_related(\n 'image'\n ).prefetch_related(\n 'authors',\n 'categories',\n )", "title": "" }, { "docid": "7c068d00c37b126c0ee57a4cc3cba02c", "score": "0.6111904", "text": "def queryset(self, request):\n qs = super(AdRepLeadAdmin, self).queryset(request)\n qs = AdRepLead.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs", "title": "" }, { "docid": "c2aea0d8fbb2e0aca9003dcf6bf0930c", "score": "0.6069471", "text": "def get_queryset(self):\n return self._queryset_class(\n model=self.model,\n using=self._db,\n hints=self._hints,\n query=CustomQuery(self.model)\n )", "title": "" }, { "docid": "f3ec9d5ff760df11d0dba00dc5f2a5e8", "score": "0.60628784", "text": "def get_queryset(self):\n if getattr(self, 'use_this_queryset', None):\n return self.use_this_queryset\n return self.model().objects.all()", "title": "" }, { "docid": "cfc89ef7be3957074191af5fa664480a", "score": "0.60476565", "text": "def queryset(cls):\n return cls.model._default_manager.all()", "title": "" }, { "docid": "fef6f9274e7fc8a2f7a4f454edb21391", "score": "0.60201", "text": "def queryset(self, request):\n qs = super(AdRepOrderAdmin, self).queryset(request)\n qs = AdRepOrder.objects.select_related().filter(id__in=qs\n ).defer('ad_rep__site__envelope',\n 'ad_rep__site__geom',\n 'ad_rep__site__point')\n return qs", "title": "" }, { "docid": "6fea061d8da3f5097fe56fd06836a545", "score": "0.59917945", "text": "def get_source_query(self) -> QuerySet:\n raise NotImplementedError", "title": "" }, { "docid": "7c983512d594a21cbb9d5ec1bd7609e5", "score": "0.59785336", "text": "def annotate(self, **annotations):\n return AnnotatedQuery(self, annotations)", "title": "" }, { "docid": "a95e13908877b1e930b998ccbd7a9501", "score": "0.5971418", "text": "def queryset(self, request):\n qs = super(AdRepSiteAdmin, self).queryset(request)\n qs = AdRepSite.objects.select_related().filter(\n id__in=qs).defer('site__envelope', 'site__geom', 'site__point')\n return qs", "title": "" }, { "docid": "3d8a13d25ff31102c055c285742e8fce", "score": "0.59698504", "text": "def get_queryset(self):\n queryset = self.queryset.all()\n \n #Filter based on query\n query = self.request.query_params.get('q', None)\n if query:\n queryset = queryset.filter(Q(pk__icontains=query) | \n Q(customer__name__icontains=query))\n \n offset = int(self.request.query_params.get('offset', 0))\n limit = int(self.request.query_params.get('limit', settings.REST_FRAMEWORK['PAGINATE_BY']))\n if offset and limit:\n queryset = queryset[offset - 1:limit + (offset - 1)]\n else:\n queryset = queryset[0:50]\n\n queryset = queryset.select_related('acknowledgement',\n 'pdf',\n 'customer',\n 'employee',\n 'project')\n queryset = queryset.prefetch_related('items',\n 'customer__addresses',\n 'items__item')\n \n return queryset", "title": "" }, { "docid": "ee6631ebd0773eab5dae0f2ed979021c", "score": "0.5963528", "text": "def get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"user\", \"poll\")\n .prefetch_related(\"votes\")\n )", "title": "" }, { "docid": "9eb1f2ac70ab08d6f75cae861904749e", "score": "0.5953542", "text": "def get_queryset(self):\n queryset = Favorites.objects.get(owner=self.request.user)\n return 
queryset.anuncios.published(). select_related('owner').\\\n prefetch_related('image_anuncio').select_subclasses()", "title": "" }, { "docid": "760ee8cd4ec0ed8648064ad3c87b27bf", "score": "0.58643544", "text": "def get_queryset(self):\n return ArticleRating.objects.filter(article=self.get_object())", "title": "" }, { "docid": "b7ff9552dd0b38b4bd2c4bb15eb7a630", "score": "0.58540493", "text": "def qs(self) -> MIZQuerySet:\n if isinstance(self, type):\n raise TypeError(\n f\"Calling qs() from class level is prohibited. Use {self.__name__}.objects instead.\"\n )\n # noinspection PyUnresolvedReferences\n return self._meta.model.objects.filter(pk=self.pk)", "title": "" }, { "docid": "5552ec277e5f05b9df459c862ffb52e8", "score": "0.5834514", "text": "def queryset(self, request, queryset):\n\n return (\n queryset if self.value() is None\n else queryset.filter(instrument__id=self.value())\n )", "title": "" }, { "docid": "4646d179f2d3cd0f7d15e3b4a2a041c4", "score": "0.58183724", "text": "def get_queryset(self):\n # Get tags from the request if it was specified\n tags = self.request.query_params.get('tags')\n # Get authors from the request if it was specified\n authors = self.request.query_params.get('authors')\n # Make copy of queryset as to not modify the original queryset\n queryset = self.queryset\n if tags:\n # Get list of ids specified\n tag_ids = self._params_to_ints(tags)\n # Filter on the foreign key object with tags__id__in\n queryset = queryset.filter(tags__id__in=tag_ids)\n if authors:\n # Get list of ids specified\n author_ids = self._params_to_ints(authors)\n # Filter by the author\n queryset = queryset.filter(authors__id__in=author_ids)\n\n return queryset.filter(user=self.request.user)", "title": "" }, { "docid": "110d84525137f9939e5c1903fde86b23", "score": "0.5815705", "text": "def filter(self, **kwargs):\n related_names = []\n for argname, _ in kwargs.iteritems():\n related_name = argname.split('__')\n if len(related_name) > 1:\n related_names.append(\"__\".join(related_name[:-1]))\n if len(related_names) > 0:\n return super(\n JeevesQuerySet, self).filter(\n **kwargs).select_related(*related_names)\n else:\n return super(JeevesQuerySet, self).filter(**kwargs)", "title": "" }, { "docid": "6266cbb8710bc9a8f27a7a09cc65dedf", "score": "0.5776908", "text": "def _queryset(self):\n return self.type.objects.filter(id__in=self.ids)", "title": "" }, { "docid": "14eca152568cfeb8ce2c2ed9c7666e43", "score": "0.57721496", "text": "def queryset(self, request, queryset):\n if self.value() == 'syndicated':\n return queryset.filter(syndicated_at__isnull=False)\n if self.value() == 'ready_to_syndicate':\n return queryset.filter(ignored_at=None,\n syndicated_at=None,\n longitude_position__isnull=False,\n merchantwebsite__deleted_at=None,\n merchantdoc__deleted_at=None,\n ).distinct()\n if self.value() == 'ignore':\n return queryset.filter(ignored_at__isnull=False)", "title": "" }, { "docid": "c6d82aac5157610547f9e1524489ed7b", "score": "0.576574", "text": "def get_queryset(self):\n now = timezone.localtime(timezone.now())\n\n # get exams that are currently in progress\n exams = Exam.objects.filter(begin_timestamp__lte=now, end_timestamp__gt=now)\n\n # get ExamProgress objects for this user for each exam\n progress_objects = ExamProgress.objects.filter(\n exam__in=exams, user=self.request.user, current_question__isnull=False\n )\n\n # get default queryset\n queryset = super(QuestionViewSet, self).get_queryset()\n\n # get questions that appear as `current_question` in one of the ExamProgress object\n 
queryset = queryset.filter(\n pk__in=list(map(lambda p: p.current_question.pk, progress_objects))\n )\n return queryset.prefetch_related(\"answers\")", "title": "" }, { "docid": "230dad0ab6ed2d8f37b80be0241d3f5a", "score": "0.5762568", "text": "def get_queryset(self):\n\t\treturn super(CourseDocument, self).get_queryset().select_related(\n\t\t 'belongs_to'\n\t\t)", "title": "" }, { "docid": "2dccf5796efb10704b537c1f115c6ddb", "score": "0.5757942", "text": "def get_queryset(self):\n\n qs = Aid.objects \\\n .published() \\\n .open() \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors')\n\n filter_form = self.form\n results = filter_form.filter_queryset(qs)\n ordered_results = filter_form.order_queryset(results).distinct()\n return ordered_results", "title": "" }, { "docid": "a77fe5e9f1ae30a45cb9cfa861f0bf36", "score": "0.5748905", "text": "def get_queryset(self):\n return self.model.objects.all()", "title": "" }, { "docid": "4a19a373e60198ea5029ca80ee7770c8", "score": "0.573783", "text": "def get_queryset(self) -> QuerySet:\n raise NotImplementedError()", "title": "" }, { "docid": "e5a00eef6846ab2d5412e94556e8e821", "score": "0.5733546", "text": "def get_qs(self):\n if self.qs == None:\n # Get the Lemma PKs\n qs = self.get_queryset()\n else:\n qs = self.qs\n return qs", "title": "" }, { "docid": "e5a00eef6846ab2d5412e94556e8e821", "score": "0.5733546", "text": "def get_qs(self):\n if self.qs == None:\n # Get the Lemma PKs\n qs = self.get_queryset()\n else:\n qs = self.qs\n return qs", "title": "" }, { "docid": "b0847c7787fadc0230db0e49c6a2cd9d", "score": "0.5731321", "text": "def order_queryset(self, queryset):\n if ordering := self.request.query_params.get(\"ordering\"):\n order_by = []\n regex = re.compile(r\"-?annotations__(?P<field_id>\\d+)\")\n fields = [field.strip() for field in ordering.split(\",\")]\n for match in filter(None, map(regex.match, fields)):\n field_id = match.group(\"field_id\")\n annotation_value = AnnotationValue.objects.filter(\n entity_id=OuterRef(\"pk\"), field_id=field_id\n ).values(\"_value__value\")\n annotate = {f\"_order_{field_id}\": Subquery(annotation_value)}\n queryset = queryset.annotate(**annotate)\n sign = \"-\" if match.string.startswith(\"-\") else \"\"\n order_by.append(f\"{sign}_order_{field_id}\")\n if order_by:\n queryset = queryset.order_by(*order_by)\n return queryset", "title": "" }, { "docid": "07c7320e1e7a804d32aa784ab03b5445", "score": "0.567102", "text": "def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n\n queryset = self.queryset\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n return queryset", "title": "" }, { "docid": "9bce5dbcb3b770441b063429b7cb4d4c", "score": "0.56580806", "text": "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "title": "" }, { "docid": "430709aa8ab1602bd02c2d890d4f04a6", "score": "0.5655805", "text": "def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys.prefetch_related('tags')", "title": "" }, { "docid": "17fb50a6dd97936a2b903d225dae1356", 
"score": "0.56551605", "text": "def get_queryset(self):\n if self.queryset is None:\n return self.model.objects.all()\n return self.queryset", "title": "" }, { "docid": "b9b31609a55a35a18f2c51da5d9e64a3", "score": "0.56520486", "text": "def _get_invoice_report_queryset(self, queryset):\n return queryset.select_related(*self._invoice_report_select_related)", "title": "" }, { "docid": "8d3c0ce6f6160b5d4a8395cfe636e5c3", "score": "0.56508267", "text": "def get_all_associations(self):\n return", "title": "" }, { "docid": "e6165501bb0c89122b9c86b6346fc229", "score": "0.5641876", "text": "def associated_objects(self):\n return self._associated_objects", "title": "" }, { "docid": "2e01e945bcfa52666d23904a2c0c0c39", "score": "0.5610131", "text": "def get_queryset(self):\n return NoneToEmptyQuerySet(self.model, using=self._db)", "title": "" }, { "docid": "75ec221f7cb649483e32390f559acea1", "score": "0.5603495", "text": "def annotations(self) -> Iterator['Annotation']:\n return itertools.chain(self.footnotecitations.all(),\n self.cites.all(),\n self.externallinks.all(),\n self.inlinerequirements.all())", "title": "" }, { "docid": "4a493eeb9bf99fa348821517a962474a", "score": "0.56022906", "text": "def get_queryset(self, request, queryset):\n\n return queryset", "title": "" }, { "docid": "84624022c7678d5d1528fae82a192bb6", "score": "0.55929613", "text": "def get_queryset(self):\n # Check if the parameter assigned_only is on the request\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n # Make copy of queryset so we do not modify the original\n queryset = self.queryset\n # If the parameter was passed filter on the book not\n # being specified\n if assigned_only:\n queryset = queryset.filter(book__isnull=False)\n\n # Remove duplicates\n return queryset.filter(\n user=self.request.user\n ).order_by('-name').distinct()", "title": "" }, { "docid": "b4b152496efd32eafe474de3bdb337a3", "score": "0.5586108", "text": "def get_queryset(self):\r\n return self.model.objects.all()", "title": "" }, { "docid": "b278807372647d51567ac30fc93d14f8", "score": "0.55682343", "text": "def related_entities(self):\n related_entities = []\n for point in self.accesspoint_set.all():\n related_entities.append({\n 'name': str(point),\n 'archive_url': point.archive_url,\n 'page_number': point.trigger,\n 'accessed_on': point.accessed_on,\n 'url': reverse_lazy(\n 'update-access-point',\n kwargs={'source_id': self.uuid, 'pk': point.uuid}\n )\n })\n return related_entities", "title": "" }, { "docid": "ba4cf3e74281c4107d9971f12f931a47", "score": "0.5556633", "text": "def get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"user\", \"option\", \"option__poll\")\n )", "title": "" }, { "docid": "9cbc505100471770906ef505cc644882", "score": "0.55545425", "text": "def relevant():\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )", "title": "" }, { "docid": "62d5fcfbc0e76cc785d1244cc1840d49", "score": "0.55534333", "text": "def queryset(self, request):\n qs = super(PrescriptionAdmin, self).queryset(request)\n qs.prefetch_related('approval_set')\n\n return qs", "title": "" }, { "docid": "f6454fd06f3ae70617fc1deaa14002be", "score": "0.55500513", "text": "def get_queryset(self):\n return 
Item.objects.filter(owner=self.request.user).order_by('-created').prefetch_related('tags')", "title": "" }, { "docid": "188966a31d4cd83c80a5f11551d93a1b", "score": "0.5544964", "text": "def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.prefetch_related('writerinwork_set')\n qs = qs.prefetch_related('writers')\n qs = qs.prefetch_related('library_release__library')\n qs = qs.annotate(models.Count('cwr_exports', distinct=True))\n qs = qs.annotate(models.Count('recordings', distinct=True))\n return qs", "title": "" }, { "docid": "0260756653a4a7232739a07c5f09f644", "score": "0.5543793", "text": "def get_related_indicators(self):\n # imported here to prevent circular deps\n from fn_threatq.threatqsdk.indicator import Indicator\n return self.get_related_objects(Indicator)", "title": "" }, { "docid": "ba731982ef94186a1428d3ebd3a576f0", "score": "0.5536279", "text": "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "title": "" }, { "docid": "42d411a253f750239cb3300396cb0d86", "score": "0.5530928", "text": "def get_queryset(self):\n if self.queryset is None:\n raise ImproperlyConfigured(\"%(cls)s is missing a QuerySet.\" % {\n 'cls': self.__class__.__name__\n })\n\n return self.queryset.all()", "title": "" }, { "docid": "b0db3bf9154d72db2fec3804da2e7d45", "score": "0.5526397", "text": "def get_queryset(self): # pylint: disable=arguments-differ\n qs = super(AbstractDocumentSummaryViewset, self).get_queryset()\n return qs.only(*self.query_fields)", "title": "" }, { "docid": "9aa4500d25f9844fa514c60dcf4b2611", "score": "0.5524275", "text": "def get_queryset(self):\n return Strategy.objects.select_related('author').order_by('-pub_date')[:5]", "title": "" }, { "docid": "9aa4500d25f9844fa514c60dcf4b2611", "score": "0.5524275", "text": "def get_queryset(self):\n return Strategy.objects.select_related('author').order_by('-pub_date')[:5]", "title": "" }, { "docid": "9cf536f0492a98f1ced55c9a5a459f22", "score": "0.55152816", "text": "def get_queryset(self):\n queryset = IntakeDistributionCoordinates.objects.all().order_by('nutrient')\n\n # Dramatically improves performance\n queryset = queryset.prefetch_related('age_group')\n\n sex = self.request.query_params.get('sex', None)\n nutrient = self.request.query_params.get('nutrient', None)\n age_group_value = self.request.query_params.get('age_group_value', None)\n\n if sex is not None:\n queryset = queryset.filter(sex=sex)\n if nutrient is not None:\n queryset = queryset.filter(nutrient__icontains=nutrient)\n if age_group_value is not None:\n queryset = queryset.filter(age_group__age_group__icontains=age_group_value)\n return queryset", "title": "" }, { "docid": "74fef4d9daa6ed23f977843cd13a54c9", "score": "0.5508545", "text": "def get_queryset(\n cls,\n qs: Union[models.QuerySet[_T], models.Manager[_T]],\n info: ResolverInfo,\n ) -> models.QuerySet[_T]:\n if isinstance(qs, models.Manager):\n qs = qs.get_queryset()\n\n if not cls.check_permissions(info.context.user):\n return qs.none()\n\n if cls._meta.object_permissions and isinstance(\n cls._meta.model.objects, GuardedModelManager\n ):\n qs &= cls._meta.model.objects.for_user(\n info.context.user,\n cls._meta.object_permissions,\n any_perm=cls._meta.object_permissions_any,\n with_superuser=cls._meta.object_permissions_with_superuser,\n )\n\n ret = qs\n if gql_optimizer is not None:\n ret = gql_optimizer.query(ret, info)\n prl = {\n i.to_attr if isinstance(i, Prefetch) else i: i # 
type:ignore\n for i in ret._prefetch_related_lookups\n }\n ret._prefetch_related_lookups = tuple(prl.values())\n\n return ret", "title": "" }, { "docid": "166c4b4f8687dc32c826c1a2ab0715b3", "score": "0.5482961", "text": "def get_queryset(self):\n category_qs = Category.objects \\\n .select_related('theme') \\\n .order_by('theme__name', 'name')\n\n base_qs = Aid.objects \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors') \\\n .prefetch_related(Prefetch('categories', queryset=category_qs))\n\n user = self.request.user\n if user.is_authenticated and user.is_superuser:\n qs = base_qs\n elif user.is_authenticated:\n q_published = Q(status='published')\n q_is_author = Q(author=user)\n qs = base_qs.filter(q_published | q_is_author)\n else:\n qs = base_qs.published()\n\n return qs", "title": "" }, { "docid": "38b038851b3949734ecd99da5154ae04", "score": "0.54737544", "text": "def annotate(self, *args, **kwargs):\n self._not_support_combined_queries(\"annotate\")\n return self._annotate(args, kwargs, select=True)", "title": "" }, { "docid": "78dbd224b633f8969942006c519b3f8e", "score": "0.54642886", "text": "def get_all_queryset(self):\n # return Post.objects.all()\n return Savingrequest.objects.get_queryset().order_by('pk')", "title": "" }, { "docid": "a1a5bc93f2bcfb0347f3173c961371d5", "score": "0.546129", "text": "def annotations(self):\n\n return self._annotations", "title": "" }, { "docid": "102cf5d91fdae51a8d957c0fc58bd431", "score": "0.5454763", "text": "def filter_related_filtersets(self, queryset):\n for related_name, related_filterset in self.related_filtersets.items():\n # Related filtersets should only be applied if they had data.\n prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)\n if not any(value.startswith(prefix) for value in self.data):\n continue\n\n field_name = self.filters[related_name].field_name\n lookup_expr = LOOKUP_SEP.join([field_name, 'in'])\n subquery = Subquery(related_filterset.qs.values('pk'))\n queryset = queryset.filter(**{lookup_expr: subquery})\n\n return queryset", "title": "" }, { "docid": "5bba53a5aec89930f3f20fae5aca8896", "score": "0.54506904", "text": "def associatedObjects (self):\n return self.__associatedObjects", "title": "" }, { "docid": "76242b57f3f28da7eb27bfb6bae107e9", "score": "0.544466", "text": "def queryset(self, request: 'HttpRequest', queryset: 'QuerySet') -> 'QuerySet':\n queryset = queryset.annotate(citation_count=Count('images'))\n if self.value() == 'Yes':\n return queryset.exclude(citation_count__lt=2)\n if self.value() == 'No':\n return queryset.filter(citation_count__gte=2)", "title": "" }, { "docid": "38f7cc9be08a4611daf64794573892f8", "score": "0.5435545", "text": "def get_rows(self) -> QuerySet:\n return self.get_source_query().values(*self.COLUMNS)", "title": "" }, { "docid": "d06120c719a53b4764049d452e91353a", "score": "0.54286456", "text": "def queryset(self, request):\n qs = self.model.all_objects.get_query_set()\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "title": "" }, { "docid": "466a360bf475509e44730e4919a4ab39", "score": "0.5425586", "text": "def all(self):\n\n return self.__model__.query.all()", "title": "" }, { "docid": "6aa31100e354f1cf36e4258fbc8fc400", "score": "0.5420084", "text": "def get_related_field_queryset(self, request, list_queryset, field, queryset):\n if hasattr(queryset, 'list_permissions'):\n return queryset.list_permissions(request.user)\n else:\n return queryset", "title": "" }, { "docid": 
"b13785efb0da7e41f45fedc31b0d7d36", "score": "0.5411604", "text": "def queryset(self, request, queryset):\n if self.value() == 'examine':\n return queryset.filter(ignored_at=None, address_1__isnull=False, longitude_position=None).exclude(address_1='')\n if self.value() == 'done':\n return queryset.filter(longitude_position__isnull=False)\n if self.value() == 'ignore':\n return queryset.filter(ignored_at__isnull=False)", "title": "" }, { "docid": "e494a624bb86e66b166150f301071745", "score": "0.5407376", "text": "def annotations(self):\n return self._annotations", "title": "" }, { "docid": "4e090f7d13109fde38e0a9dd717b45a2", "score": "0.54073715", "text": "def get_queryset(self):\n print(self.kwargs['collection'])\n collection_tags = Collection.objects.values_list('tags__name', flat=True)\n return Post.objects.filter(tags__name__in=collection_tags).distinct()", "title": "" }, { "docid": "f1789b4e2d7621a9f327f972db6f000d", "score": "0.53971773", "text": "def get_related_trackers(self):\n\n return Tracker.objects.filter(product=self.pk)", "title": "" }, { "docid": "028f476ea217281608042a97c424de97", "score": "0.53846765", "text": "def get_queryset(self):\n now = timezone.localtime(timezone.now())\n\n # get exams that are currently in progress\n exams = Exam.objects.filter(begin_timestamp__lte=now, end_timestamp__gt=now)\n\n # get ExamProgress objects for this user for each exam\n progress_objects = ExamProgress.objects.filter(\n exam__in=exams, user=self.request.user, current_exercise__isnull=False\n )\n\n # get default queryset\n queryset = super(ExerciseViewSet, self).get_queryset()\n\n # get questions that appear as `current_question` in one of the ExamProgress object\n queryset = queryset.filter(\n pk__in=list(map(lambda p: p.current_exercise.pk, progress_objects))\n )\n return queryset.prefetch_related(\"testcases\")", "title": "" }, { "docid": "f0e4e0327b217d49cae80428d4886268", "score": "0.5381825", "text": "def load_all_queryset(self):\n return self.get_model()._default_manager.all()", "title": "" }, { "docid": "5614245aafc9ed2c2fa46bd472e12923", "score": "0.53758967", "text": "def get_queryset(self):\n return self._get_base_queryset().filter(deleted__isnull=True)", "title": "" }, { "docid": "5df2a927073937861e157dfdb52b2b5f", "score": "0.53755254", "text": "def get_queryset(self):\n queryset = Comment.objects.filter(issue_id=self.issue.pk)\n return queryset", "title": "" }, { "docid": "c862a89aa467fbb78964a28b26a7db6d", "score": "0.5368971", "text": "def pre_filter(self, qs):\n return qs", "title": "" }, { "docid": "c10e634f450ce0f4faaf1f7aed16075a", "score": "0.5358097", "text": "def queryset(self, request, queryset):\n if self.value() == 'ignore':\n return queryset.filter(ignored_at__isnull=False)\n if self.value() == 'active':\n return queryset.filter(ignored_at=None)", "title": "" }, { "docid": "509c85341076d0a50a73bded805d548e", "score": "0.5357638", "text": "def get_queryset(self):\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(recipe__isnull=False)\n\n return queryset.filter(user=self.request.user).order_by('-name').distinct()", "title": "" }, { "docid": "1dfb0bfc6404bd65606b571d663920f8", "score": "0.5353283", "text": "def get_queryset(self):\n if self.queryset is not None:\n queryset = self.queryset\n if hasattr(queryset, '_clone'):\n queryset = queryset._clone()\n elif self.model is not None:\n queryset = self.model._default_manager.all()\n else:\n raise 
ImproperlyConfigured(\"'%s' must define 'queryset' or 'model'\"\n % self.__class__.__name__)\n return queryset", "title": "" }, { "docid": "df1d27d074ca97b92c4661875fac27df", "score": "0.5350659", "text": "def all_with_avg_mark(self):\n return self.annotate(\n avg_mark=Coalesce(models.Avg(\n \"evaluated_solution__mark\",\n output_field=models.FloatField(),\n ), 0.0)\n ).prefetch_related(\"evaluated_solution\")", "title": "" }, { "docid": "7f08a6d9ef9ece8a27ce1ba258752fd0", "score": "0.5327867", "text": "def get_queryset(self):\n queryset = super().get_queryset()\n today = datetime.datetime.today()\n return queryset.annotate(\n relevance=models.Case(\n models.When(date__gte=today, then=1),\n models.When(date__lt=today, then=2),\n output_field=models.IntegerField(),\n )).order_by('relevance', 'date')", "title": "" }, { "docid": "ad02202d1efebffeedd42b46701c0190", "score": "0.5318062", "text": "def base_queryset(self):\n return self.get_query_set().select_related(\n 'product_class',\n ).prefetch_related(\n 'variants',\n 'product_options',\n 'product_class__options',\n 'stockrecords',\n 'images',\n ).all()", "title": "" }, { "docid": "902b6263aa426c47dc754b7c136fc578", "score": "0.5317417", "text": "def apply_queryset_rules(self, qs):\n clauses = {\n 'filter': [],\n 'exclude': []}\n\n for rule in self.drip_model.queryset_rules.all():\n\n clause = clauses.get(rule.method_type, clauses['filter'])\n\n kwargs = rule.filter_kwargs(qs, now=self.now)\n clause.append(Q(**kwargs))\n\n qs = rule.apply_any_annotation(qs)\n\n if clauses['exclude']:\n qs = qs.exclude(functools.reduce(operator.or_, clauses['exclude']))\n qs = qs.filter(*clauses['filter'])\n\n return qs", "title": "" }, { "docid": "6e21208c88385b5145c79e82d1df3611", "score": "0.53156406", "text": "def get_queryset(self):\n\n if self.request.user.is_staff:\n queryset = Retreat.objects.all()\n else:\n queryset = Retreat.objects.filter(\n is_active=True,\n hidden=False\n )\n queryset = queryset.filter(hide_from_client_admin_panel=False)\n\n queryset = queryset.annotate(\n max_end_date=Max('retreat_dates__end_time'),\n min_start_date=Min('retreat_dates__start_time'),\n )\n\n # Filter by display_start_time lower than\n display_start_time_lte = self.request.query_params.get(\n 'display_start_time_lte',\n None,\n )\n if display_start_time_lte:\n queryset = queryset.filter(\n display_start_time__lte=display_start_time_lte\n )\n\n # Filter by display_start_time greater than\n display_start_time_gte = self.request.query_params.get(\n 'display_start_time_gte',\n None,\n )\n if display_start_time_gte:\n queryset = queryset.filter(\n display_start_time__gte=display_start_time_gte\n )\n return queryset", "title": "" }, { "docid": "4896c226490d149833a6747fe19c87d2", "score": "0.53035784", "text": "def get_ratings(self):\n return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)", "title": "" }, { "docid": "60fbb2c03e8c0b34b9933a7eabb6d17b", "score": "0.5297794", "text": "def queryset(self, request):\n qs = super(SiteAdmin, self).queryset(request)\n qs = Site.admin.select_related().filter(id__in=qs)\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "title": "" } ]
7106374d64bb075e2635bcc0187ac5f2
This function needs to be hooked into the 100ms hook
[ { "docid": "216068cebb425908243433a81679ee0c", "score": "0.5795586", "text": "def clock_timer_100ms():\n global lastTime\n cTime = get_tmr_count(TMR5) >> 8\n if cTime < lastTime:\n clock_tick()\n lastTime = cTime", "title": "" } ]
[ { "docid": "d2757f26752208b1a80b70734a7e3c96", "score": "0.6760372", "text": "def work(self):\r\n time.sleep(1)", "title": "" }, { "docid": "843d0bf6411351159fc198d19ed80c12", "score": "0.6745067", "text": "def slow_down(self):\n pass", "title": "" }, { "docid": "52ade0fa3f368ee30569fd04224c5c04", "score": "0.6647468", "text": "def timer100msEvent(msTick):\r\n global secondCounter\r\n #DS2764FetchBasic()\r\n secondCounter += 1\r\n if secondCounter >= 30:\r\n DS2764FetchBasic()\r\n doEverySecond() \r\n secondCounter = 0", "title": "" }, { "docid": "9be754c576ace203b181bb912597cc8d", "score": "0.66327375", "text": "def on_timeout(self):\r\n return", "title": "" }, { "docid": "4341fd942fbc2a324d2c8fec9d0dad2b", "score": "0.6538584", "text": "def beat():", "title": "" }, { "docid": "26d57eb6d0c88bdba755496dd08020f9", "score": "0.6466992", "text": "def shortPoll(self):\n pass", "title": "" }, { "docid": "ec15798646dcb80c5da0cdd8625a9622", "score": "0.6437889", "text": "def _loop(self):", "title": "" }, { "docid": "b7e8c4ab30fda01171db765747234193", "score": "0.6422247", "text": "def on_overrun(self, time_taken: float) -> None:", "title": "" }, { "docid": "f7c58cc5333fa0fced9851dbd0fc4ae2", "score": "0.62924176", "text": "def on_timeout(self):\n return", "title": "" }, { "docid": "4dc66f567fcd3d3f692db47b93a77d0d", "score": "0.62349486", "text": "def on_loop(self):\n pass", "title": "" }, { "docid": "ea2a622657b312e82a127c8d5e29325b", "score": "0.6198146", "text": "def main_loop(self, elapsed):\n pass", "title": "" }, { "docid": "7fcdba49c5cbb891c09f130b35b0639d", "score": "0.61932737", "text": "def run(self):\n time.sleep(self.how_long)", "title": "" }, { "docid": "e37dc3a3de45ed17aad959b1788e1599", "score": "0.6176745", "text": "def poll( ):", "title": "" }, { "docid": "5b2a22a8df571442aadeeacd91852ae8", "score": "0.6174352", "text": "def teleopPeriodic(self) -> None:", "title": "" }, { "docid": "5b2a22a8df571442aadeeacd91852ae8", "score": "0.6174352", "text": "def teleopPeriodic(self) -> None:", "title": "" }, { "docid": "75d009cb21807065fb45a5a6601e9683", "score": "0.6164433", "text": "def switch_timer(self):", "title": "" }, { "docid": "014ae355bb343f92b9979f73d64b909d", "score": "0.6152506", "text": "def poll(self):", "title": "" }, { "docid": "7117ef3283bbb3b5d320e20aed978a37", "score": "0.6150074", "text": "def timer100msEvent(currentMs):\r\n global secondCounter\r\n global runFlag, canLead, isLeader\r\n global wakeTime, graceTime\r\n global wakeTimer, electTimer\r\n global sleepTime\r\n global pollStopped\r\n\r\n every100ms() # invoke user customizations\r\n\r\n secondCounter += 1\r\n if secondCounter >= 10:\r\n everySecond() # invoke user customizations\r\n secondCounter = 0\r\n\r\n if electTimer > 0 : # election in progress, only do this if in run mode\r\n electTimer -= 1\r\n if electTimer <= 0: # time's up!\r\n showLeadership(isLeader)\r\n if isLeader:\r\n if runFlag:\r\n sendSleep()\r\n elif not runFlag:\r\n return\r\n \r\n elif runFlag:\r\n wakeTimer += 1\r\n if pollStopped and isLeader and wakeTimer <= wakeTime:\r\n sendSleep()\r\n elif wakeTimer > wakeTime:\r\n if isLeader:\r\n sendSleep()\r\n elif canLead:\r\n # The following timeout may seem long, but is necessary\r\n # to prevent \"independent sleepy meshes\" from forming\r\n if wakeTimer > (sleepTime+wakeTime+graceTime):\r\n election()", "title": "" }, { "docid": "352688bd0b0d03b2302af2b1bb178070", "score": "0.61343473", "text": "def poll():", "title": "" }, { "docid": "d8197d4c27a9f5a0be95112ad24fbd72", "score": 
"0.61113775", "text": "def postloop(self):\n pass", "title": "" }, { "docid": "48a6f87c661ffc21f013154377785dea", "score": "0.6111295", "text": "def wait(self):", "title": "" }, { "docid": "cf1b9191f810cff3b704a3d067ca5e4e", "score": "0.61058265", "text": "def before_loop(self):\n pass", "title": "" }, { "docid": "4b896883eb6a9b880b4c64f1bea0e1ff", "score": "0.60926837", "text": "def on_loop_start(self):\n pass", "title": "" }, { "docid": "8e32a9605921200dfbd413fa240c27e5", "score": "0.6088966", "text": "def after_charged():", "title": "" }, { "docid": "289128f51442577cf1c8189c9b749aae", "score": "0.60884887", "text": "def ProcessIdle(self):", "title": "" }, { "docid": "649af8f0d5f5640acbfa8dee10236eef", "score": "0.6056695", "text": "def ProcessPendingEvents(self):", "title": "" }, { "docid": "649af8f0d5f5640acbfa8dee10236eef", "score": "0.6056695", "text": "def ProcessPendingEvents(self):", "title": "" }, { "docid": "5fc2e5ebbbf71bb8c5eb7d0cd1c58ad5", "score": "0.6040787", "text": "def testPeriodic(self):\n pass", "title": "" }, { "docid": "5efd385ba6653d2e967117592c4d277a", "score": "0.60275465", "text": "def GetTimer(self):", "title": "" }, { "docid": "13010994b3fdb1e1d4967723d557c6f8", "score": "0.60239077", "text": "def hanger(ctx):\n time.sleep(1.00)", "title": "" }, { "docid": "d553e8aa13c3d4d5d22e0abbb8ec12b2", "score": "0.6012918", "text": "def sleep(ms: int):", "title": "" }, { "docid": "f48876e6038ebd1a6adadb3ac210bce4", "score": "0.5996687", "text": "def autonomousPeriodic(self) -> None:", "title": "" }, { "docid": "f48876e6038ebd1a6adadb3ac210bce4", "score": "0.5996687", "text": "def autonomousPeriodic(self) -> None:", "title": "" }, { "docid": "2bf29ceab3a07990cd596fc19dec90e4", "score": "0.59887487", "text": "def preloop(self):\n pass", "title": "" }, { "docid": "e1743b1d94ceeaaa0454917f91610b2e", "score": "0.5974806", "text": "def Sleep(secs):", "title": "" }, { "docid": "1645cf0d21bea447691dd43085e0e14b", "score": "0.59727633", "text": "def test_recognize_long_running(self):\n pass", "title": "" }, { "docid": "6a21a3f1984074fecdf2033923ea3246", "score": "0.59507567", "text": "def DispatchTimeout(self, timeout):", "title": "" }, { "docid": "81accd7f70908bc78ccefaf290f10102", "score": "0.5943967", "text": "def _delay(self, n=None):", "title": "" }, { "docid": "ac15b924234e47a349051decb6f0891c", "score": "0.5942724", "text": "def tick(self):\n\t\tpass", "title": "" }, { "docid": "197c259f6c82782ad02c34f2e3dd6b87", "score": "0.59300095", "text": "def loop_hook(self,*args,**kwargs):\n return [],{}\n if self.MaxEvents and self.sent_total<self.MaxEvents:\n self.lost_time += time.time()-tnext #Thread switching also included ", "title": "" }, { "docid": "729c043225c5b6bd61f043eb0db502d6", "score": "0.5921579", "text": "def longPoll(self):\n self.heartbeat()", "title": "" }, { "docid": "314ae6c79adc03a70ee4a33eb3068d78", "score": "0.59046626", "text": "def ResetUpdateTime():", "title": "" }, { "docid": "2fc743667e8326fbf55f176e04d23c98", "score": "0.59014064", "text": "def _on_timer() -> None:\n monitor.poll()", "title": "" }, { "docid": "fd6ed7bc127f2a4978d9ff5444e5486e", "score": "0.58999676", "text": "def __run__(self):\n while True:\n time.sleep(1)", "title": "" }, { "docid": "c23310ab9e457403accf750cefd52142", "score": "0.5898424", "text": "def _on_timer(self):\r\n self._reset_timer()\r\n self._flush_and_switch_aggregator()", "title": "" }, { "docid": "0f67eb07ac9961aa3fef4fa041d53727", "score": "0.58947545", "text": "def _pre_work_loop(self):\n pass", "title": "" }, { 
"docid": "8b8b292ebe4aff6b4977a4877d39e284", "score": "0.5892582", "text": "def handle_query(self, time):", "title": "" }, { "docid": "3205cf7012ac83f9b0b75c8a61ed4315", "score": "0.5889348", "text": "def onTick(self):\n\n #do nothing\n pass", "title": "" }, { "docid": "1e95c6131a94c0725a76d2d05a54db29", "score": "0.58846146", "text": "def Start(self, milliseconds=0):", "title": "" }, { "docid": "f7379fdd1c728095028a355ff5f6c61b", "score": "0.586841", "text": "def update_timer(self, timer, action, usecs):", "title": "" }, { "docid": "c34e77aefab45df64075e2eb630ce3a1", "score": "0.58653545", "text": "def pre_update(self):", "title": "" }, { "docid": "89edb0641b91ac001dd07186f9b2bda0", "score": "0.585217", "text": "def testTiming():\n pass", "title": "" }, { "docid": "d5b8f417bf0a234c8878695babaae741", "score": "0.58424836", "text": "def Pulse(self):", "title": "" }, { "docid": "8694d72c179127359d59c59d2e285045", "score": "0.5824241", "text": "def delay():\n time.sleep(300)", "title": "" }, { "docid": "5c170c6b07c3e741a2ee9d0de40dcbc0", "score": "0.5815359", "text": "def on_start(self):", "title": "" }, { "docid": "2f6f5ee005441ad7a3d82b426860a794", "score": "0.5801752", "text": "def teleopPeriodic(self):\n pass", "title": "" }, { "docid": "b1e16322557fc14e7f8e28843f3d941e", "score": "0.5801231", "text": "def work(self):\n pass", "title": "" }, { "docid": "7ae5ad7bc2d33e48e90728fc6be4c0bf", "score": "0.58001393", "text": "def time():", "title": "" }, { "docid": "d0d849e9cbbe925a5280f2e7bd54e2de", "score": "0.5799343", "text": "def delay(self):\n sleep(1)", "title": "" }, { "docid": "53a8819fae6ef2a3d31fb396024e9f5d", "score": "0.57989067", "text": "def on_warmup_completed(self, myo, timestamp, warmup_result):", "title": "" }, { "docid": "51a7ad395b0c9451c1bbba7bb5b55f65", "score": "0.5797967", "text": "def before_request():\n g.start = time.time()", "title": "" }, { "docid": "f29e434c732209900cf59e37e9d6f35e", "score": "0.5797038", "text": "def on_loop(self) -> None:\n pass", "title": "" }, { "docid": "cffb850948162a9eff33d0e0d65f092b", "score": "0.5794502", "text": "def StartOnce(self, milliseconds=-1):", "title": "" }, { "docid": "22f52c70c6c8f3f17f3f80c03ef583cf", "score": "0.5787381", "text": "async def poll(self, ctx):", "title": "" }, { "docid": "9431f55afcd37a2a6831ab6c50584bd9", "score": "0.5787074", "text": "def work(self):", "title": "" }, { "docid": "c506d78da564e17ee7986aebc6a92bef", "score": "0.5773695", "text": "def testPeriodic(self):\n # wpilib.LiveWindow.run()\n pass", "title": "" }, { "docid": "3352690bb2e756de3b02a58095b011e5", "score": "0.5768796", "text": "def OnInternalIdle(self):", "title": "" }, { "docid": "1b6b1bd584c22242e4298f4327cf5ef6", "score": "0.5767867", "text": "def test_read_throttle_events_alarm():", "title": "" }, { "docid": "585d2f156b21d0c5f97a7be868383e96", "score": "0.57618207", "text": "def beginUpdate(self):", "title": "" }, { "docid": "9a8b31d343acba7942cb79884054b0b7", "score": "0.57566124", "text": "def loop(self) -> None:", "title": "" }, { "docid": "114678942ca76b8ad6ff0e490c05853c", "score": "0.5755469", "text": "def MilliSleep(milliseconds):", "title": "" }, { "docid": "8316804bb717225b84d154fbcbb7accc", "score": "0.5750129", "text": "def speed_up(self):\n pass", "title": "" }, { "docid": "8603d3e25d573d2482126479eac3477a", "score": "0.5743084", "text": "def backoff(self):\n ...", "title": "" }, { "docid": "ea08ffc4b79dc913dd4db327d42bceb1", "score": "0.5741802", "text": "def hook(self):\n pass", "title": "" }, { "docid": 
"7756d6eb29b1063eaff8dc3ce08a6c7b", "score": "0.57415444", "text": "def _UpdateHook(self):\n pass", "title": "" }, { "docid": "cc829713c7ca8f5c40d9585254998886", "score": "0.5740997", "text": "def OnRun(self):", "title": "" }, { "docid": "73c2d85603b1836dd9c962a8372c414c", "score": "0.57327443", "text": "def wait():", "title": "" }, { "docid": "fa5a83b9fdff499d8536f6e3e5f373f6", "score": "0.57314974", "text": "def dgTimer(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "adaf1582f64ed357db99250adfaf9ae6", "score": "0.57232213", "text": "def on_heartbeat(self):\n pass", "title": "" }, { "docid": "456bdcd3911661825b91660a729d9b7c", "score": "0.57094175", "text": "def real_life():", "title": "" }, { "docid": "bd7e5c41a1ecb6bc7d5ff9df56d7bd64", "score": "0.5707459", "text": "def fire_async(self, event):", "title": "" }, { "docid": "b3c96c503b00f479e33a525c06914fe9", "score": "0.5705356", "text": "def testPeriodic(self):\r\n wpilib.LiveWindow.run()", "title": "" }, { "docid": "b18a994abeb5fdebf51118777ec7f59e", "score": "0.57022464", "text": "def refreshlock(timeout):", "title": "" }, { "docid": "bbc0e28ba90faf3625f351ce1fd29c76", "score": "0.5696913", "text": "def test_polling_plugin_timeout(self):\n pass", "title": "" }, { "docid": "3e49f0450be0a9d6a9ff95f5c2e32f13", "score": "0.5683846", "text": "def fake_sleep(seconds):\n pass", "title": "" }, { "docid": "7489ad660ef2d858ce2c203202c16705", "score": "0.5680719", "text": "def monitored(self):", "title": "" }, { "docid": "65cef1b691eb762e8133164a0dc67b18", "score": "0.5678441", "text": "def before():\n time.sleep(0.5)", "title": "" }, { "docid": "7aa5bb13cd61959872cedf5ea0de7825", "score": "0.5678021", "text": "async def setraffle(self, ctx):\r\n pass", "title": "" }, { "docid": "22997c476417dc23e13693cae1c1dff0", "score": "0.5671687", "text": "def after_start(self):\r\n pass", "title": "" }, { "docid": "8c923db4db394ab0a78ef8513af191e6", "score": "0.5668611", "text": "def MicroSleep(microseconds):", "title": "" }, { "docid": "20fa70716a447765189e65e68d8ee367", "score": "0.5658965", "text": "def schedule(self):", "title": "" }, { "docid": "204a7e4e2278887c8b609d247b2cce81", "score": "0.5658126", "text": "def run_time_step(self) -> None:\n pass", "title": "" }, { "docid": "495a16824f021776c3453dc41c8f1abe", "score": "0.5655366", "text": "def TryAfter(self, event):", "title": "" }, { "docid": "eb8a4266061af28326aeb1c759e2e5ca", "score": "0.5629126", "text": "def getDelayForNext(self):", "title": "" }, { "docid": "bb62957c4c6de4562c2b8103c7d779c6", "score": "0.56284857", "text": "def idle(self):\r\n pass", "title": "" }, { "docid": "78f76e5c780baac8e3a622748ffaf210", "score": "0.5624609", "text": "def on_loop_end(self):\n pass", "title": "" }, { "docid": "07048cf31b5837f9308180b07567477f", "score": "0.5619291", "text": "def setTimeout(newtimeout):", "title": "" }, { "docid": "a14152b9bfddff78e26ab4132498d743", "score": "0.56184775", "text": "def _post_run(self):\n pass", "title": "" }, { "docid": "3198c024b20cd8f9a04c439907d120a4", "score": "0.5610029", "text": "def timer_callback(*args):\n logging.debug(\"timer callback at %s\" % datetime.now())", "title": "" }, { "docid": "7773cd7b7d72b6e66ab11fe56b1eb05d", "score": "0.5609733", "text": "def handler(signum, frame):\n raise Exception(\"Too long execution time\")", "title": "" } ]
15443b0a0d8c8f241d58c896a0964e66
Edit the inherit replication flag for volume.
[ { "docid": "c4835037b882671342707bd1e738352f", "score": "0.80905753", "text": "def edit_inherit_replication_flag(self, pool, project, volume, set=True):\n svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s'\n '/filesystems/%(volume)s/replication'\n % {'pool': pool,\n 'project': project,\n 'volume': volume})\n arg = {'inherited': set}\n ret = self.rclient.put(svc, arg)\n\n if ret.status != restclient.Status.ACCEPTED:\n exception_msg = (_('Error setting replication inheritance '\n 'to %(set)s '\n 'for volume: %(vol)s '\n 'project %(project)s '\n 'Return code: %(ret.status)d '\n 'Message: %(ret.data)s .')\n % {'set': set,\n 'project': project,\n 'vol': volume,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)\n raise exception.VolumeBackendAPIException(data=exception_msg)", "title": "" } ]
[ { "docid": "46fd0cf5360d7f2c5330c34ba629af79", "score": "0.55705565", "text": "def extend_volume(self, volume, size):\n return self.set_volume(volume, size=size, truncate=False)", "title": "" }, { "docid": "ecad12e7ca1d65657ff9c51410beccdc", "score": "0.5562828", "text": "def set_volume(self, num):\n self._run_command('volume {0} 1'.format(num))", "title": "" }, { "docid": "a3d5a9baa134a85255c49a5d6423f20c", "score": "0.55062836", "text": "def volume(self,vol=100):\n self._set_volume(vol,False)", "title": "" }, { "docid": "90d6027a74d82ebb3a999dcb29dddd2f", "score": "0.53808284", "text": "def extend_volume(self, volume, new_size):\n fs = self._get_service(volume)\n self.backend.extend_lu(fs, new_size, volume.name)", "title": "" }, { "docid": "5ed131b2f24c1e40151e231f243c9af8", "score": "0.53505886", "text": "def set_volume_level(self, volume):\n pass", "title": "" }, { "docid": "50b0e96d15b9203123ba1907e0741fce", "score": "0.5297575", "text": "def set_volume_up(self):\n self.set_volume(\"+\")", "title": "" }, { "docid": "5919c036271b2fe2fce31debb1005d2d", "score": "0.5273238", "text": "def extend_volume(self, volume, new_size):\n sfaccount = self._get_sfaccount(volume['project_id'])\n params = {'accountID': sfaccount['accountID']}\n\n sf_vol = self._get_sf_volume(volume['id'], params)\n\n if sf_vol is None:\n LOG.error(\"Volume ID %s was not found on \"\n \"the SolidFire Cluster while attempting \"\n \"extend_volume operation!\", volume['id'])\n raise exception.VolumeNotFound(volume_id=volume['id'])\n qos = self._retrieve_qos_setting(volume, new_size)\n params = {\n 'volumeID': sf_vol['volumeID'],\n 'totalSize': int(new_size * units.Gi),\n 'qos': qos\n }\n self._issue_api_request('ModifyVolume',\n params, version='5.0')\n\n rep_settings = self._retrieve_replication_settings(volume)\n if self.replication_enabled and rep_settings:\n if len(sf_vol['volumePairs']) != 1:\n LOG.error(\"Can't find remote pair while extending the \"\n \"volume or multiple replication pairs found!\")\n raise exception.VolumeNotFound(volume_id=volume['id'])\n\n tgt_endpoint = self.cluster_pairs[0]['endpoint']\n target_vol_id = sf_vol['volumePairs'][0]['remoteVolumeID']\n params2 = params.copy()\n params2['volumeID'] = target_vol_id\n self._issue_api_request('ModifyVolume',\n params2, version='5.0',\n endpoint=tgt_endpoint)", "title": "" }, { "docid": "cbc203e492abd8e924179447fd88c912", "score": "0.51819", "text": "def manage_existing(self, volume, existing_ref):\n pass", "title": "" }, { "docid": "96203a3a1db5a92f4fa78aa1407678f1", "score": "0.5166891", "text": "def setup_lun_replication(self, volume, primary_lun_id):\n specs = common.ExtraSpecs.from_volume(volume)\n provision = specs.provision\n tier = specs.tier\n rep_update = {'replication_driver_data': None,\n 'replication_status': fields.ReplicationStatus.DISABLED}\n mirror_name = utils.construct_mirror_name(volume)\n\n if specs.is_replication_enabled:\n LOG.debug('Starting setup replication '\n 'for volume: %s.', volume.id)\n lun_size = volume.size\n pool_name = utils.get_remote_pool(self.config, volume)\n emc_taskflow.create_mirror_view(\n self.mirror_view, mirror_name,\n primary_lun_id, pool_name,\n volume.name, lun_size,\n provision, tier)\n LOG.info('Successfully setup replication for %s.', volume.id)\n rep_update.update({'replication_status':\n fields.ReplicationStatus.ENABLED})\n group_specs = common.ExtraSpecs.from_group(volume.group)\n if volume.group and group_specs.is_group_replication_enabled:\n # If in a group, add it to group then.\n 
LOG.debug('Starting add volume %(volume)s to group %(group)s',\n {'volume': volume.id, 'group': volume.group.id})\n group_name = utils.construct_group_name(volume.group)\n self.client.add_mirror(group_name, mirror_name)\n\n return rep_update", "title": "" }, { "docid": "01bae699b2b5af32a698f77eadcc5c9a", "score": "0.51418984", "text": "def adjust_volume(adjustment=None):\n\n if adjustment == \"up\":\n subprocess.call('./scripts/volume_up.sh')\n elif adjustment == \"down\":\n subprocess.call('./scripts/volume_down.sh')", "title": "" }, { "docid": "be4cf71a831e848163d4b8ee44035aff", "score": "0.51323444", "text": "def set_volume(self, volume):\n self.exec_command(CMD_VOLUME, '=', float(volume))", "title": "" }, { "docid": "ab34661fed8822800cf86b515c52e2ca", "score": "0.51079774", "text": "def set_volume(self, v):\n self.volume = self._constrain(v)\n debug(\"set volume: {}\".format(self.volume))\n\n output = self.amixer(\"set '{}' unmute {}%\".format(DEVICE_NAME, v))\n self._sync(output)\n return self.volume", "title": "" }, { "docid": "a236c11533d4fd285724658228da44d8", "score": "0.51029587", "text": "def extend_volume(self, volume, new_size):\n self.common._extend_lun(volume, new_size)", "title": "" }, { "docid": "e87680355bde04cf1a891989d4bf9577", "score": "0.5084736", "text": "def set_volume_level(self, volume: float) -> None:\n self._remote.cmus.set_volume(int(volume * 100))", "title": "" }, { "docid": "2deac2cc08c242eb586e70c0d6f63990", "score": "0.50824046", "text": "def control_ren(self, mode):\n return self.visalib.gpib_control_ren(self.session, mode)", "title": "" }, { "docid": "2c783ba408bdab52f55fe4052cb6ab21", "score": "0.50019044", "text": "def extend_volume(self, volume, new_size):\n\n LOG.info(\"Extend volume %(vol_id)s to size %(size)s.\",\n {\"vol_id\": volume.id, \"size\": new_size})\n volume_new_size = flex_utils.round_to_num_gran(new_size)\n volume_real_old_size = flex_utils.round_to_num_gran(volume.size)\n if volume_real_old_size == volume_new_size:\n return\n if volume.is_replicated():\n pair_id, remote_pair_id, vol_id, remote_vol_id = (\n self._get_client().get_volumes_pair_attrs(\"localVolumeId\",\n volume.provider_id)\n )\n self._get_client(secondary=True).extend_volume(remote_vol_id,\n volume_new_size)\n self._get_client().extend_volume(volume.provider_id, volume_new_size)", "title": "" }, { "docid": "389a940feff167aed112e79b4bf82298", "score": "0.4993729", "text": "def extend_volume(self, volume, new_size):\n LOG.debug('Entering extend_volume volume=%(vol)s '\n 'new_size=%(size)s',\n {'vol': volume['display_name'], 'size': new_size})\n\n volume['size'] = new_size\n self._extend_lun(volume, '')", "title": "" }, { "docid": "c6b7d29d3cbbd1e4f030c76e3991bbef", "score": "0.49795955", "text": "def _setup_volume_replication(self, vol_or_snap, source_provider_id):\n try:\n # If vol_or_snap has 'volume' attribute we are dealing\n # with snapshot. Necessary parameters is stored in volume object.\n entity = vol_or_snap.volume\n entity_type = \"snapshot\"\n except AttributeError:\n entity = vol_or_snap\n entity_type = \"volume\"\n LOG.info(\"Configure replication for %(entity_type)s %(id)s. 
\",\n {\"entity_type\": entity_type, \"id\": vol_or_snap.id})\n try:\n protection_domain_name, storage_pool_name = (\n self._extract_domain_and_pool_from_host(entity.host)\n )\n self._check_volume_creation_safe(protection_domain_name,\n storage_pool_name,\n secondary=True)\n storage_type = self._get_volumetype_extraspecs(entity)\n rcg_name = storage_type.get(REPLICATION_CG_KEY)\n LOG.info(\"Replication Consistency Group name: %s.\", rcg_name)\n provisioning, compression = self._get_provisioning_and_compression(\n storage_type,\n protection_domain_name,\n storage_pool_name,\n secondary=True\n )\n dest_provider_id = self._get_client(secondary=True).create_volume(\n protection_domain_name,\n storage_pool_name,\n vol_or_snap.id,\n entity.size,\n provisioning,\n compression)\n self._get_client().create_volumes_pair(rcg_name,\n source_provider_id,\n dest_provider_id)\n LOG.info(\"Successfully configured replication for %(entity_type)s \"\n \"%(id)s.\",\n {\"entity_type\": entity_type, \"id\": vol_or_snap.id})\n except exception.VolumeBackendAPIException:\n with excutils.save_and_reraise_exception():\n LOG.error(\"Failed to configure replication for \"\n \"%(entity_type)s %(id)s.\",\n {\"entity_type\": entity_type, \"id\": vol_or_snap.id})", "title": "" }, { "docid": "3137ea5f460264a8bf2091c081e78974", "score": "0.49663228", "text": "def applyVisualizationVolume(self):\n\n\t\t# make sure there's a mixer available\t\t\n\t\tif not Gizmod.DefaultMixerVolume or not Gizmod.DefaultMixerSwitch:\n\t\t\treturn\n\t\t\t\n\t\t# update the Powermates' LEDs\n\t\tif Gizmod.DefaultMixerSwitch.SwitchPlayback:\n\t\t\t# if not muted set LED to volume level\n\t\t\tfor Powermate in Gizmod.Powermates:\n\t\t\t\tPowermate.LEDPercent = Gizmod.DefaultMixerVolume.VolumePlaybackPercent\t\n\t\telse:\n\t\t\t# if muted pulse the led\n\t\t\tfor Powermate in Gizmod.Powermates:\n\t\t\t\tPowermate.pulseLED(255, 257, 2)", "title": "" }, { "docid": "c7b8989dc9b3af4ae1b184730556b5b6", "score": "0.49599102", "text": "def update(self, volume, **kwargs):\n if not kwargs:\n return\n if kwargs.get(\"initiator_iqns\"):\n iqns = kwargs.get(\"initiator_iqns\")\n kwargs[\"initiator_iqns\"] = [iqn for iqn in iqns if iqn != '']\n\n body = {\"volume\": kwargs}\n\n return self._update(\"/volumes/%s\" % base.getid(volume), body, \"volume\")", "title": "" }, { "docid": "6e6ed95fcbc3a555797b16fe94ed4563", "score": "0.4958905", "text": "def adjust_volume(self, parameter=1, option='double'):\r\n self.volume = adjust(self.volume, parameter, option)\r\n if self.volume> 127:\r\n self.volume= 127\r\n elif self.volume< 0:\r\n self.volume= 0", "title": "" }, { "docid": "53350a245a1fd8f7aeee449e0b7a9ae3", "score": "0.49540752", "text": "def set_volume_down(self):\n self.set_volume(\"-\")", "title": "" }, { "docid": "79dd0afc51cd4f53aa9fb4fa019fac54", "score": "0.4906476", "text": "def enable_replica(self):\n\t\ttry:\n\t\t\tos.remove(self.exit_file)\n\t\t\tself.logger.info(\"Replica enabled\")\n\t\texcept:\n\t\t\tself.logger.info(\"Replica already enabled\")", "title": "" }, { "docid": "78b8459dce9a82297d91d5b5d848d0ae", "score": "0.49042273", "text": "def volume(self, level=None):\n MAX_LEVEL = 200\n if not level or not isinstance(level, int):\n return\n\n if level > MAX_LEVEL:\n level = MAX_LEVEL\n\n elif level < 0:\n level = 0\n\n self._volume = level\n self._write_cmd(\"set volume %s\" % level)", "title": "" }, { "docid": "4b81dda3eb184d474e844f4f2ed2c6ce", "score": "0.48953623", "text": "def inherit(self, name, command, strict=True):\n if name not in 
self:\n if strict:\n raise KeyError(name)\n parent = None\n else:\n parent = self[name]\n command.super = parent\n self[name] = command", "title": "" }, { "docid": "3ca5db1b65bcfa87f5694b84682841e6", "score": "0.48904723", "text": "def Adjust(self, resource):\n volumes_of_type = getattr(resource.template.volumes, self._type)\n if self._clear_others:\n # Delete all mounts that are mounting the volumes we're deleting\n for path, name in list(resource.template.volume_mounts.items()):\n if name in volumes_of_type:\n del resource.template.volume_mounts[path]\n elif self._to_remove:\n for path in self._to_remove:\n if path in resource.template.volume_mounts:\n volume_name = resource.template.volume_mounts[path]\n # Ensure the mount is mounting a volume of the right type\n if self._VolumeNotOfType(volume_name, resource):\n raise exceptions.ConfigurationError(\n 'Cannot remove mount [{}] with volume [{}] because the volume '\n 'is in use with a different type.'.format(path, volume_name))\n del resource.template.volume_mounts[path]\n\n if self._to_update:\n for path, split_name in self._to_update.items():\n volume_name = split_name[-1]\n reference_name = split_name[0]\n existing_volume_name = None\n # Ensure if exists, volume currently mounted is of the right type\n if path in resource.template.volume_mounts:\n existing_volume_name = resource.template.volume_mounts[path]\n if self._VolumeNotOfType(existing_volume_name, resource):\n raise exceptions.ConfigurationError(\n 'Cannot update mount [{}] with volume [{}] because the volume '\n 'is in use with a different type.'.format(\n path, existing_volume_name))\n # Ensure if exists, volume we want to mount is of the right type\n if self._VolumeNotOfType(volume_name, resource):\n raise exceptions.ConfigurationError(\n 'Cannot update mount [{}] to volume [{}] because the volume '\n 'is already is in use with a different type.'.format(\n path, volume_name))\n resource.template.volume_mounts[path] = volume_name\n volumes_of_type[volume_name] = reference_name\n\n # Delete all volumes no longer being mounted\n for volume in list(volumes_of_type):\n if not any(n == volume for n in resource.template.volume_mounts.values()):\n del volumes_of_type[volume]", "title": "" }, { "docid": "51c0ed91e2f94b2b890ab1e36678d423", "score": "0.485545", "text": "def set_volume(self, level):\n self.connect(\"receiver-0\")\n\n if level in (\"+\", \"-\"):\n self.get_receiver_status()\n\n if self.volume_status is not None:\n curr_level = self.volume_status['level']\n if level == \"+\":\n level = 0.1 + curr_level\n elif level == \"-\":\n level = curr_level - 0.1\n\n data = {\"type\":\"SET_VOLUME\", \"volume\":{\"muted\":False, \"level\":level} }\n namespace = \"urn:x-cast:com.google.cast.receiver\"\n self.send_msg_with_response(namespace, data)\n self.close_socket()", "title": "" }, { "docid": "140b35c44298dad77b997a5c70e12ea1", "score": "0.48168433", "text": "def set_volume_level(self, volume):\n for s in self._inputs:\n volume = self._xapx00.setPropGain(s['CHAN'], volume,\n isAbsolute=1, group=\"I\", unitCode = s['UNIT'])\n self._volume = volume", "title": "" }, { "docid": "c25887ca80b199244f9719f8f495735e", "score": "0.48121434", "text": "def set_volume(self, volume):\n if volume < 0.0:\n volume = 0.0\n elif volume > 1.0:\n volume = 1.0\n self._volume = volume\n return None", "title": "" }, { "docid": "bc13cc020f7684d690b74205bb732fc0", "score": "0.48098457", "text": "def on_volume_set(self, message):\n vol = message.data.get(\"percent\", 0.5)\n vol = clip(vol, 0.0, 1.0)\n\n 
self.volume = vol\n self.muted = False\n self.set_hardware_volume(vol)\n self.show_volume = True", "title": "" }, { "docid": "80c33d3892d68a002bce589707711143", "score": "0.4808769", "text": "def test_inherit_failure(utils_patch):\n res = OrderedDict(\n [\n (\"inherited\", False),\n (\"error\", \"'canmount' property cannot be inherited\"),\n ]\n )\n ret = {\n \"pid\": 43898,\n \"retcode\": 1,\n \"stderr\": \"'canmount' property cannot be inherited\",\n \"stdout\": \"\",\n }\n mock_cmd = MagicMock(return_value=ret)\n with patch.dict(zfs.__salt__, {\"cmd.run_all\": mock_cmd}), patch.dict(\n zfs.__utils__, utils_patch\n ):\n assert res == zfs.inherit(\"canmount\", \"myzpool/mydataset\")", "title": "" }, { "docid": "2f28d89802b3f342bc42a7abeaaeaa0a", "score": "0.47938848", "text": "def manage_existing(self, volume, external_ref):\n sfid = external_ref.get('source-id', None)\n sfname = external_ref.get('name', None)\n\n LOG.debug(\"Managing volume %(id)s to ref %(ref)s\",\n {'id': volume.id, 'ref': external_ref})\n if sfid is None:\n raise SolidFireAPIException(_(\"Manage existing volume \"\n \"requires 'source-id'.\"))\n\n # First get the volume on the SF cluster (MUST be active)\n params = {'startVolumeID': sfid,\n 'limit': 1}\n vols = self._issue_api_request(\n 'ListActiveVolumes', params)['result']['volumes']\n\n sf_ref = vols[0]\n sfaccount = self._get_create_account(volume['project_id'])\n\n import_time = volume['created_at'].isoformat()\n attributes = {'uuid': volume['id'],\n 'is_clone': 'False',\n 'os_imported_at': import_time,\n 'old_name': sfname}\n\n params = self._get_default_volume_params(volume)\n params['volumeID'] = sf_ref['volumeID']\n params['attributes'] = attributes\n params.pop('totalSize')\n self._issue_api_request('ModifyVolume',\n params, version='5.0')\n\n try:\n rep_updates = {}\n rep_settings = self._retrieve_replication_settings(volume)\n if self.replication_enabled and rep_settings:\n if len(sf_ref['volumePairs']) != 0:\n msg = _(\"Not possible to manage a volume with \"\n \"replicated pair! 
Please split the volume pairs.\")\n LOG.error(msg)\n raise SolidFireDriverException(msg)\n else:\n params = self._get_default_volume_params(volume)\n params['volumeID'] = sf_ref['volumeID']\n volume['volumeID'] = sf_ref['volumeID']\n params['totalSize'] = sf_ref['totalSize']\n rep_updates = self._replicate_volume(\n volume, params, sfaccount, rep_settings)\n except Exception:\n with excutils.save_and_reraise_exception():\n # When the replication fails in mid process, we need to\n # set the volume properties the way it was before.\n LOG.error(\"Error trying to replicate volume %s\",\n volume.id)\n params = {'volumeID': sf_ref['volumeID']}\n params['attributes'] = sf_ref['attributes']\n self._issue_api_request('ModifyVolume',\n params, version='5.0')\n\n model_update = self._get_model_info(sfaccount, sf_ref['volumeID'])\n\n model_update.update(rep_updates)\n\n return model_update", "title": "" }, { "docid": "8e2fa50d8a8f4d31d04ee1e5a8e335a8", "score": "0.4772813", "text": "def change_hybrid(self):\r\n self.var_hybrid = not self.var_hybrid", "title": "" }, { "docid": "85020a794ef428b5ab62258eacc87d11", "score": "0.47678086", "text": "def volume(self, volume):\n self._volume = volume", "title": "" }, { "docid": "479fd731ee0bc902d42233cafe2ead71", "score": "0.47634846", "text": "def set_volume(self, volume):\n if volume >= 0 and volume <= 100:\n self._send(\"set volume %d\" % volume)\n else:\n raise ValueError(\"Volume must be between 0 and 100\")", "title": "" }, { "docid": "e0c738658f549230b8e0e0aa283227e2", "score": "0.4753078", "text": "def manage_existing(self, volume, external_ref):\n extraSpecs = self._initial_setup(volume)\n self.conn = self._get_ecom_connection()\n arrayName, deviceId = self.utils.get_array_and_device_id(volume,\n external_ref)\n\n # Manage existing volume is not supported if fast enabled.\n if extraSpecs[FASTPOLICY]:\n LOG.warning(_LW(\n \"FAST is enabled. Policy: %(fastPolicyName)s.\"),\n {'fastPolicyName': extraSpecs[FASTPOLICY]})\n exceptionMessage = (_(\n \"Manage volume is not supported if FAST is enable. \"\n \"FAST policy: %(fastPolicyName)s.\")\n % {'fastPolicyName': extraSpecs[FASTPOLICY]})\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(\n data=exceptionMessage)\n # Check if the volume is attached by checking if in any masking view.\n volumeInstanceName = (\n self.utils.find_volume_by_device_id_on_array(self.conn,\n arrayName, deviceId))\n sgInstanceNames = (\n self.utils.get_storage_groups_from_volume(\n self.conn, volumeInstanceName))\n\n for sgInstanceName in sgInstanceNames:\n mvInstanceName = self.masking.get_masking_view_from_storage_group(\n self.conn, sgInstanceName)\n if mvInstanceName:\n exceptionMessage = (_(\n \"Unable to import volume %(deviceId)s to cinder. \"\n \"Volume is in masking view %(mv)s.\")\n % {'deviceId': deviceId,\n 'mv': mvInstanceName})\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(\n data=exceptionMessage)\n\n # Check if there is any associated snapshots with the volume.\n cinderPoolInstanceName, storageSystemName = (\n self._get_pool_and_storage_system(extraSpecs))\n repSessionInstanceName = (\n self.utils.get_associated_replication_from_source_volume(\n self.conn, storageSystemName, deviceId))\n if repSessionInstanceName:\n exceptionMessage = (_(\n \"Unable to import volume %(deviceId)s to cinder. 
\"\n \"It is the source volume of replication session %(sync)s.\")\n % {'deviceId': deviceId,\n 'sync': repSessionInstanceName})\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(\n data=exceptionMessage)\n\n # Make sure the existing external volume is in the same storage pool.\n volumePoolInstanceName = (\n self.utils.get_assoc_pool_from_volume(self.conn,\n volumeInstanceName))\n volumePoolName = volumePoolInstanceName['InstanceID']\n cinderPoolName = cinderPoolInstanceName['InstanceID']\n LOG.debug(\"Storage pool of existing volume: %(volPool)s, \"\n \"Storage pool currently managed by cinder: %(cinderPool)s.\",\n {'volPool': volumePoolName,\n 'cinderPool': cinderPoolName})\n if volumePoolName != cinderPoolName:\n exceptionMessage = (_(\n \"Unable to import volume %(deviceId)s to cinder. The external \"\n \"volume is not in the pool managed by current cinder host.\")\n % {'deviceId': deviceId})\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(\n data=exceptionMessage)\n\n # Rename the volume\n volumeId = volume['name']\n volumeElementName = self.utils.get_volume_element_name(volumeId)\n LOG.debug(\"Rename volume %(vol)s to %(elementName)s.\",\n {'vol': volumeInstanceName,\n 'elementName': volumeElementName})\n volumeInstance = self.utils.rename_volume(self.conn,\n volumeInstanceName,\n volumeElementName)\n keys = {}\n volpath = volumeInstance.path\n keys['CreationClassName'] = volpath['CreationClassName']\n keys['SystemName'] = volpath['SystemName']\n keys['DeviceID'] = volpath['DeviceID']\n keys['SystemCreationClassName'] = volpath['SystemCreationClassName']\n\n model_update = {}\n provider_location = {}\n provider_location['classname'] = volpath['CreationClassName']\n provider_location['keybindings'] = keys\n\n model_update.update({'display_name': volumeElementName})\n volume['provider_location'] = six.text_type(provider_location)\n model_update.update({'provider_location': volume['provider_location']})\n return model_update", "title": "" }, { "docid": "af234b17f5b59d9bc7a4869242748ac0", "score": "0.4753054", "text": "def _set_mode(self):\n self.conf.mode = ...", "title": "" }, { "docid": "cbfd2cc7b471adc53b783207a883f58e", "score": "0.47469744", "text": "def extend_volume(self, volume, new_size):\n try:\n self._eql_execute('volume', 'select', volume['name'],\n 'size', \"%sG\" % new_size)\n LOG.info(_LI('Volume %(name)s resized from '\n '%(current_size)sGB to %(new_size)sGB.'),\n {'name': volume['name'],\n 'current_size': volume['size'],\n 'new_size': new_size})\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE('Failed to extend_volume %(name)s from '\n '%(current_size)sGB to %(new_size)sGB.'),\n {'name': volume['name'],\n 'current_size': volume['size'],\n 'new_size': new_size})", "title": "" }, { "docid": "52b6c765c991a9c2105f1f9e7b003eab", "score": "0.4742725", "text": "def set_volume(self, volume: int, group_command: bool = False, max_volume: int = -1) -> bool:\n try:\n # check volume range\n if volume < 0 or volume > 100:\n return\n # don ot raise error here polluting the log file\n # dpt3 handling can trigger negative values\n # raise Exception('Volume has to be an integer between 0 and 100.')\n\n if self._check_max_volume_exceeded(volume, max_volume):\n self.logger.debug(f\"Volume to set [{volume}] exceeds max volume [{max_volume}].\")\n volume = max_volume\n\n if group_command:\n for member in self.zone_group_members:\n if member != '':\n self.logger.debug(f\"set_volume: Setting {member} to volume 
{volume}\")\n sonos_speaker[member].soco.volume = volume\n sonos_speaker[member].volume = volume\n else:\n self.soco.volume = volume\n self.volume = volume\n return True\n except Exception as ex:\n self.logger.error(f\"set_volume: Error {ex} occurred.\")\n return False", "title": "" }, { "docid": "56188ccd1c607e01698b2141220e5e65", "score": "0.47375986", "text": "def setup_read_only_mode():\n\tfrappe.flags.read_only = True\n\n\t# If replica is available then just connect replica, else setup read only transaction.\n\tif frappe.conf.read_from_replica:\n\t\tfrappe.connect_replica()\n\telse:\n\t\tfrappe.db.begin(read_only=True)", "title": "" }, { "docid": "1db32cfa1d97bd5a67d1193d5892c9a5", "score": "0.4716755", "text": "def set_volume(self, val, op):\n print 'Operation:', op\n current_volume = self.get_volume()\n if op == self.ABRUPT:\n incr = val / self.UNIT_INCR\n new_volume = current_volume - incr\n print 'Current:', current_volume, 'New:', new_volume\n print val, incr\n\n elif op == self.HIGH:\n new_volume = current_volume - val / self.HIGH_CONTROLLER\n new_volume = new_volume if new_volume > 25 else 25\n\n elif op == self.LOW:\n new_volume = current_volume + val / self.LOW_CONTROLLER\n new_volume = new_volume if new_volume > 80 else 80\n\n else:\n new_volume = current_volume\n\n cmd = self.set_vol_cmd_str + str(new_volume)\n applescript.AppleScript(cmd).run()", "title": "" }, { "docid": "1850eaf76077e205a58c42972220417d", "score": "0.4707535", "text": "def set_volume(self, zone: int, volume: float):\n raise NotImplemented()", "title": "" }, { "docid": "409275e3ff3e67d7accd737b52b2f265", "score": "0.46948323", "text": "def test_inherit_success(utils_patch):\n res = OrderedDict([(\"inherited\", True)])\n ret = {\"pid\": 45193, \"retcode\": 0, \"stderr\": \"\", \"stdout\": \"\"}\n mock_cmd = MagicMock(return_value=ret)\n with patch.dict(zfs.__salt__, {\"cmd.run_all\": mock_cmd}), patch.dict(\n zfs.__utils__, utils_patch\n ):\n assert res == zfs.inherit(\"compression\", \"myzpool/mydataset\")", "title": "" }, { "docid": "5c670652b6709cc293f1db753b913030", "score": "0.4669197", "text": "def resize(self, volume, new_size):\n pass", "title": "" }, { "docid": "4ca0653e4646b98e4f0f4aed6f734d0e", "score": "0.4661754", "text": "def set_audio_file_volume(volume):\n if volume < 0 or volume > 100:\n raise ValueError('Out of range value for volume')\n send_command('AT+CMEDIAVOL=%d' % volume)\n get_output()", "title": "" }, { "docid": "e8aa5db793b683cfe254b765fa6dfeab", "score": "0.4659075", "text": "def _chmod_volumes(self):\n try:\n self._client.containers.run(\n image=self._io_image,\n volumes={\n self._input_volume: {\"bind\": \"/input/\", \"mode\": \"rw\"},\n self._output_volume: {\"bind\": \"/output/\", \"mode\": \"rw\"},\n },\n name=f\"{self._job_id}-chmod-volumes\",\n command=f\"chmod -R 0777 /input/ /output/\",\n remove=True,\n **self._run_kwargs,\n )\n except Exception as exc:\n raise RuntimeError(str(exc))", "title": "" }, { "docid": "a0eb7f66dfece99015809ceafe2b61d8", "score": "0.46584564", "text": "async def volume(self):\n # slice string of command including space\n volume = self.content[len(\"{}volume\".format(COMMAND_PREFIX))+1:]\n if not volume.isdigit():\n logger.debug(\"Author:{0}\\tText Channel:{1}\\tError: Volume cant be parsed.\".format(self.username, self.text_channel))\n raise DiscordException(\"Volume cant be parsed.\")\n\n logger.debug(\"Author:{0}\\tText Channel:{1}\\tChanging volume to {2}%.\".format(self.username, self.text_channel, volume))\n\n if self.guild.voice_client is 
None:\n logger.debug(\"Author:{0}\\tError:Not connected to a voice channel.\".format(self.username, self.text_channel))\n raise ConnectionError(\"Not connected to a voice channel. Please use `{}join <voice_channel>` command.\".format(COMMAND_PREFIX))\n \n self.guild.voice_client.source.volume = int(volume) / 100\n await self.channel.send(\"Changed volume to {}%\".format(volume))\n return", "title": "" }, { "docid": "d508033fe37afd19c756afc5e7ca24c3", "score": "0.46573094", "text": "def _set_read_only(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v, base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"read-only\",\n parent=self, path_helper=self._path_helper, extmethods=self._extmethods,\n register_paths=True, namespace='urn:etsi:osm:yang:augments:ns-configuration',\n defining_module='ns-configuration', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"read_only must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"read-only\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:augments:ns-configuration', defining_module='ns-configuration', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__read_only = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "7902d62a9869026cdaf3164efaab4343", "score": "0.46559998", "text": "def _extend_cinder_volume(self, vol_id, new_size=2):\n # Extend existing volume size\n try:\n self.cinder.volumes.extend(vol_id, new_size)\n vol_size_org = self.cinder.volumes.get(vol_id).size\n except Exception as e:\n msg = 'Failed to extend volume: {}'.format(e)\n amulet.raise_status(amulet.FAIL, msg=msg)\n\n # Confirm that the volume reaches available status.\n ret = u.resource_reaches_status(self.cinder.volumes, vol_id,\n expected_stat=\"available\",\n msg=\"Volume status wait\")\n if not ret:\n msg = ('Cinder volume failed to reach expected state '\n 'while extending.')\n return ret\n\n # Validate volume size and status\n u.log.debug('Validating volume attributes...')\n vol_size_ext = self.cinder.volumes.get(vol_id).size\n vol_stat = self.cinder.volumes.get(vol_id).status\n msg_attr = ('Volume attributes - orig size:{} extended size:{} '\n 'stat:{}'.format(vol_size_org, vol_size_ext, vol_stat))\n\n if vol_size_ext > vol_size_org and vol_stat == 'available':\n u.log.debug(msg_attr)\n else:\n msg = ('Volume validation failed, {}'.format(msg_attr))\n return ret\n\n return None", "title": "" }, { "docid": "206c70d394a0686df8ef09fa66a26b0d", "score": "0.46513647", "text": "async def volume(self, ctx, volume: int):\r\n self.logger.info(f\"exec: volume {volume}\")\r\n\r\n if volume < 0 or 100 < volume:\r\n return await ctx.send(\"Please set 0..100\")\r\n self.volume = volume\r\n\r\n if ctx.voice_client is not None and ctx.voice_client.is_playing():\r\n ctx.voice_client.source.volume = self.volume / 100\r\n\r\n await ctx.send(\"Changed volume to {}%\".format(volume))", "title": "" }, { "docid": "2b02624642a341c16d6bb83902b6e4e2", "score": "0.4640263", "text": "def gpib_control_ren(self, session, mode):\n self.lib.viGpibControlREN(session, mode)", "title": "" }, { "docid": "8f539f5b0817a01dfbddd1c4540b76fd", "score": "0.46368298", "text": "def set_volume_level(self, volume):\n str_volume = str(int(volume * 100))\n 
print(\"volume=\" + str_volume)\n _LOGGER.info(\"volume=\" + str_volume)\n self._playback('setParameters?volume='+str_volume)", "title": "" }, { "docid": "d4a8b46c3a784161272b70dda70326eb", "score": "0.46363068", "text": "def _sync_volume_level(self):\n if self._active_source != SRC_OFF:\n XUNIT, XOUT = self.parse_output(self._outputs[0])\n volume = self._xapx00.getPropGain(XOUT, group=\"O\",\n unitCode = XUNIT)\n self.set_volume_level(volume)", "title": "" }, { "docid": "f2a877a0803725759781441acb126184", "score": "0.46295434", "text": "def enable_replication(self, context, group, volumes):\n self.create_group_replication(group)\n self.add_volumes_to_group_replication(group, volumes)\n return {}, []", "title": "" }, { "docid": "6931b017b6b21e6a87210cff3ef96fc7", "score": "0.46251565", "text": "def pour_into_custom(self, volume, row, col):\n self.update_glass(volume, row, col)", "title": "" }, { "docid": "fd02d1c39821481f12255e18e0fa068f", "score": "0.46212015", "text": "def alter(\n self,\n base: Union[str, Flag] = KEEP,\n extension: Optional[Union[str, Flag]] = KEEP,\n **kwargs: Union[Any, Flag],\n ) -> \"NameABC\":\n raise NotImplementedError", "title": "" }, { "docid": "f88574b583585d97f9483340bc3b6378", "score": "0.46176887", "text": "def set_volume(self, volume, **kwargs):\n return self._request(\"PUT\", \"volume/{0}\".format(volume), kwargs)", "title": "" }, { "docid": "0fdc52080006818582a07322246fc994", "score": "0.46108344", "text": "async def volume(self, ctx, volume: int):\n\n\t\tif ctx.voice_client is None:\n\t\t\treturn await ctx.send(\"Not connected to a voice channel.\")\n\n\t\tctx.voice_client.source.volume = volume / 100\n\t\tawait ctx.channel.send(embed=discord.Embed(title='Volume', description=\"Changed volume to {}%\".format(volume), color=0x6700F3))", "title": "" }, { "docid": "0b33290227ea6848844a03f855002008", "score": "0.46080324", "text": "def flags(self, **kw):\n for k, v in kw.items():\n self.override_config(k, v, group='quota')", "title": "" }, { "docid": "f951a9d6cfbbe31129a38929c717c219", "score": "0.46031377", "text": "def set_volume_up(device_name=None):\n CCMediaController(device_name=device_name).set_volume_up()", "title": "" }, { "docid": "bf418ab53a1a0b0e207e4d64e1a3f5fe", "score": "0.4599928", "text": "def extend_volume(self, volume, newSize):\n originalVolumeSize = volume['size']\n volumeName = volume['name']\n extraSpecs = self._initial_setup(volume)\n self.conn = self._get_ecom_connection()\n volumeInstance = self._find_lun(volume)\n if volumeInstance is None:\n exceptionMessage = (_(\"Cannot find Volume: %(volumename)s. \"\n \"Extend operation. 
Exiting....\")\n % {'volumename': volumeName})\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(data=exceptionMessage)\n return self._extend_volume(\n volumeInstance, volumeName, newSize, originalVolumeSize,\n extraSpecs)", "title": "" }, { "docid": "8918ac462ffddac24681417893ed646b", "score": "0.45992607", "text": "def setPersistent(self):\n ...", "title": "" }, { "docid": "7a6d504a3ec68d66a8ff387778c1842f", "score": "0.45906258", "text": "def lower_volume(self):\n raise NotImplementedError", "title": "" }, { "docid": "303752441d4ae9bd915de3c3c2e0973c", "score": "0.4585458", "text": "def setReadOnly(self, value):\n super().setReadOnly(value)", "title": "" }, { "docid": "67417b6be31a4aaafd6b78a633086aee", "score": "0.45851898", "text": "def modeChanged(writeable):", "title": "" }, { "docid": "fe69757f18f0f0f0761f5aa743318a66", "score": "0.45782435", "text": "def change_flag(self, new_flag):\n self.flag.set_value(float(new_flag)) # 1: train / -1: inference", "title": "" }, { "docid": "dca49bff8dcf4b7e2c29137108004a6c", "score": "0.45483086", "text": "def normalMode(interface):\n # os.system('sudo airmon-ng stop ' + interface + 'mon')\n # os.system()\n print(bcolors.OKBLUE + \"Setting managed mode on \" + interface + bcolors.ENDC)\n if 'mon' in interface:\n output = subprocess.check_output(\"sudo airmon-ng stop \" + interface, shell=True)\n clear()\n print(bcolors.OKBLUE + \"Mode changed!\" + bcolors.ENDC)\n return False", "title": "" }, { "docid": "c840f63e5422d46971d1a355d5d0ee6d", "score": "0.45461613", "text": "async def set_master(self, enabled: bool) -> None:", "title": "" }, { "docid": "4ba245ee72f097ce91fca228903e9ad9", "score": "0.45435852", "text": "def setup_volume(mnode, all_servers_info, volume_config, force=False):\n # Get volume name\n if 'name' in volume_config:\n volname = volume_config['name']\n else:\n g.log.error(\"Unable to get the volume name from config\")\n return False\n\n # Check if the volume already exists\n volinfo = get_volume_info(mnode=mnode)\n if volinfo is not None and volname in volinfo.keys():\n g.log.info(\"volume %s already exists. 
Returning...\", volname)\n return True\n\n # Get servers\n if 'servers' in volume_config:\n servers = volume_config['servers']\n else:\n g.log.error(\"Unable to get the volume servers from config\")\n return False\n\n # Get the volume type and values\n if not ('voltype' in volume_config and 'type' in volume_config['voltype']):\n g.log.error(\"Voltype not defined in config for the volume %s\",\n volname)\n return False\n\n volume_type = volume_config['voltype']['type']\n kwargs = {}\n number_of_bricks = 1\n if volume_type == 'distributed':\n if 'dist_count' in volume_config['voltype']:\n kwargs['dist_count'] = (volume_config['voltype']['dist_count'])\n else:\n g.log.error(\"Distribute count not specified in the volume config\")\n return False\n\n number_of_bricks = kwargs['dist_count']\n\n elif volume_type == 'replicated':\n if 'replica_count' in volume_config['voltype']:\n kwargs['replica_count'] = (volume_config['voltype']\n ['replica_count'])\n else:\n g.log.error(\"Replica count not specified in the volume config\")\n return False\n\n if 'arbiter_count' in volume_config['voltype']:\n kwargs['arbiter_count'] = (volume_config['voltype']\n ['arbiter_count'])\n\n number_of_bricks = kwargs['replica_count']\n\n elif volume_type == 'distributed-replicated':\n if 'dist_count' in volume_config['voltype']:\n kwargs['dist_count'] = (volume_config['voltype']['dist_count'])\n else:\n g.log.error(\"Distribute count not specified in the volume config\")\n return False\n\n if 'replica_count' in volume_config['voltype']:\n kwargs['replica_count'] = (volume_config['voltype']\n ['replica_count'])\n else:\n g.log.error(\"Replica count not specified in the volume config\")\n return False\n\n if 'arbiter_count' in volume_config['voltype']:\n kwargs['arbiter_count'] = (volume_config['voltype']\n ['arbiter_count'])\n\n number_of_bricks = (kwargs['dist_count'] * kwargs['replica_count'])\n\n elif volume_type == 'dispersed':\n if 'disperse_count' in volume_config['voltype']:\n kwargs['disperse_count'] = (volume_config['voltype']\n ['disperse_count'])\n else:\n g.log.error(\"Disperse Count not specified in the volume config\")\n return False\n\n if 'redundancy_count' in volume_config['voltype']:\n kwargs['redundancy_count'] = (volume_config['voltype']\n ['redundancy_count'])\n else:\n g.log.error(\"Redunduncy Count not specified in the volume config\")\n return False\n\n number_of_bricks = kwargs['disperse_count']\n\n elif volume_type == 'distributed-dispersed':\n if 'dist_count' in volume_config['voltype']:\n kwargs['dist_count'] = (volume_config['voltype']['dist_count'])\n else:\n g.log.error(\"Distribute Count not specified in the volume config\")\n return False\n\n if 'disperse_count' in volume_config['voltype']:\n kwargs['disperse_count'] = (volume_config['voltype']\n ['disperse_count'])\n else:\n g.log.error(\"Disperse Count not specified in the volume config\")\n return False\n\n if 'redundancy_count' in volume_config['voltype']:\n kwargs['redundancy_count'] = (volume_config['voltype']\n ['redundancy_count'])\n else:\n g.log.error(\"Redunduncy Count not specified in the volume config\")\n return False\n\n number_of_bricks = (kwargs['dist_count'] * kwargs['disperse_count'])\n else:\n g.log.error(\"Invalid volume type defined in config\")\n return False\n\n # get bricks_list\n bricks_list = form_bricks_list(mnode=mnode, volname=volname,\n number_of_bricks=number_of_bricks,\n servers=servers,\n servers_info=all_servers_info)\n if not bricks_list:\n g.log.error(\"Number_of_bricks is greater than the unused 
bricks on \"\n \"servers\")\n return False\n\n # Create volume\n ret, _, _ = volume_create(mnode=mnode, volname=volname,\n bricks_list=bricks_list, force=force,\n **kwargs)\n if ret != 0:\n g.log.error(\"Unable to create volume %s\", volname)\n return False\n\n # Start Volume\n time.sleep(2)\n ret = volume_start(mnode, volname)\n if not ret:\n g.log.error(\"volume start %s failed\", volname)\n return False\n\n # Create Tier volume\n if ('tier' in volume_config and 'create_tier' in volume_config['tier'] and\n volume_config['tier']['create_tier']):\n # get servers info for tier attach\n if ('extra_servers' in volume_config and\n volume_config['extra_servers']):\n extra_servers = volume_config['extra_servers']\n ret = add_extra_servers_to_cluster(mnode, extra_servers)\n if not ret:\n return False\n else:\n extra_servers = volume_config['servers']\n\n # get the tier volume type\n if 'tier_type' in volume_config['tier']:\n if 'type' in volume_config['tier']['tier_type']:\n tier_volume_type = volume_config['tier']['tier_type']['type']\n dist = rep = 1\n if tier_volume_type == 'distributed':\n if 'dist_count' in volume_config['tier']['tier_type']:\n dist = (volume_config['tier']['tier_type']\n ['dist_count'])\n\n elif tier_volume_type == 'replicated':\n if 'replica_count' in volume_config['tier']['tier_type']:\n rep = (volume_config['tier']['tier_type']\n ['replica_count'])\n\n elif tier_volume_type == 'distributed-replicated':\n if 'dist_count' in volume_config['tier']['tier_type']:\n dist = (volume_config['tier']['tier_type']\n ['dist_count'])\n if 'replica_count' in volume_config['tier']['tier_type']:\n rep = (volume_config['tier']['tier_type']\n ['replica_count'])\n else:\n tier_volume_type = 'distributed'\n dist = 1\n rep = 1\n number_of_bricks = dist * rep\n\n # Attach Tier\n ret, _, _ = tier_attach(mnode=mnode, volname=volname,\n extra_servers=extra_servers,\n extra_servers_info=all_servers_info,\n num_bricks_to_add=number_of_bricks,\n replica=rep)\n if ret != 0:\n g.log.error(\"Unable to attach tier\")\n return False\n\n time.sleep(30)\n # Check if tier is running\n _rc = True\n for server in extra_servers:\n ret = is_tier_process_running(server, volname)\n if not ret:\n g.log.error(\"Tier process not running on %s\", server)\n _rc = False\n if not _rc:\n return False\n\n # Enable Quota\n if ('quota' in volume_config and 'enable' in volume_config['quota'] and\n volume_config['quota']['enable']):\n ret, _, _ = quota_enable(mnode=mnode, volname=volname)\n if ret != 0:\n g.log.error(\"Unable to set quota on the volume %s\", volname)\n return False\n\n # Check if 'limit_usage' is defined\n if 'limit_usage' in volume_config['quota']:\n if 'path' in volume_config['quota']['limit_usage']:\n path = volume_config['quota']['limit_usage']['path']\n else:\n path = \"/\"\n\n if 'size' in volume_config['quota']['limit_usage']:\n size = volume_config['quota']['limit_usage']['size']\n else:\n size = \"100GB\"\n else:\n path = \"/\"\n size = \"100GB\"\n\n # Set quota_limit_usage\n ret, _, _ = quota_limit_usage(mnode=mnode, volname=volname,\n path=path, limit=size)\n if ret != 0:\n g.log.error(\"Unable to set quota limit on the volume %s\", volname)\n return False\n\n # Check if quota is enabled\n ret = is_quota_enabled(mnode=mnode, volname=volname)\n if not ret:\n g.log.error(\"Quota not enabled on the volume %s\", volname)\n return False\n\n # Enable USS\n if ('uss' in volume_config and 'enable' in volume_config['uss'] and\n volume_config['uss']['enable']):\n ret, _, _ = enable_uss(mnode=mnode, 
volname=volname)\n if ret != 0:\n g.log.error(\"Unable to enable uss on the volume %s\", volname)\n return False\n\n ret = is_uss_enabled(mnode=mnode, volname=volname)\n if not ret:\n g.log.error(\"USS is not enabled on the volume %s\", volname)\n return False\n\n # Set all the volume options:\n if 'options' in volume_config:\n volume_options = volume_config['options']\n ret = set_volume_options(mnode=mnode, volname=volname,\n options=volume_options)\n if not ret:\n g.log.error(\"Unable to set few volume options\")\n return False\n return True", "title": "" }, { "docid": "d49036b3b227af77a2eeb9150eb31366", "score": "0.45361435", "text": "def at_object_creation(self):\n self.cmdset.add_default(CmdSetClimbable, persistent=True)", "title": "" }, { "docid": "6e8bdbdac28e5e74f884bb675e3f0e39", "score": "0.45331904", "text": "def persistentVolume(containerName,size):\n\n inFh=open(\"general-templates/persistentvolumeclaim.yaml\",\"r\")\n outFh=open(\"config/\"+containerName+\".persistentvolumeclaim.yaml\",\"w\")\n\n props={}\n props[\"CONTAINERNAME\"]=containerName\n props[\"CLAIMSIZE\"]=size\n\n for line in inFh:\n result=__replaceLine(line)\n outFh.write(result[0])\n\n inFh.close()\n outFh.close()", "title": "" }, { "docid": "852f4391fc6ae8e9b3b1c6c53c58ddf2", "score": "0.45265913", "text": "def _set_volume(self, volume):\n self._player.set_volume(volume)", "title": "" }, { "docid": "e3e22004b19cd0d4f7438497a2ecb4e9", "score": "0.45225257", "text": "def volume(self, volume):\n\n self._volume = volume", "title": "" }, { "docid": "e3e22004b19cd0d4f7438497a2ecb4e9", "score": "0.45225257", "text": "def volume(self, volume):\n\n self._volume = volume", "title": "" }, { "docid": "731630e47adc9478ef312e52942cd250", "score": "0.45204338", "text": "def set_volume(self, volume):\n if volume < 0.0:\n volume = 0.0\n elif volume > 1.0:\n volume = 1.0\n self._volume = volume\n if self._channel.get_busy():\n self._channel.set_volume(volume)\n return None", "title": "" }, { "docid": "53c7cb60e2072218442b2a9897e6de0b", "score": "0.45203474", "text": "def retype(self, ctxt, volume, new_type, diff, host):\n model_update = {}\n\n LOG.debug(\"Retyping volume %(vol)s to new type %(type)s\",\n {'vol': volume.id, 'type': new_type})\n\n sfaccount = self._get_sfaccount(volume['project_id'])\n params = {'accountID': sfaccount['accountID']}\n sf_vol = self._get_sf_volume(volume['id'], params)\n\n if sf_vol is None:\n raise exception.VolumeNotFound(volume_id=volume['id'])\n\n if self.replication_enabled:\n ctxt = context.get_admin_context()\n src_rep_type = self._set_rep_by_volume_type(\n ctxt, volume.volume_type_id)\n dst_rep_type = self._set_rep_by_volume_type(ctxt, new_type['id'])\n\n if src_rep_type != dst_rep_type:\n if dst_rep_type:\n rep_settings = self._retrieve_replication_settings(volume)\n rep_params = self._get_default_volume_params(volume)\n volume['volumeID'] = (\n int(volume.provider_id.split()[0]))\n rep_updates = self._replicate_volume(volume, rep_params,\n sfaccount,\n rep_settings)\n else:\n rep_updates = self._disable_replication(volume)\n\n if rep_updates:\n model_update.update(rep_updates)\n\n attributes = sf_vol['attributes']\n attributes['retyped_at'] = timeutils.utcnow().isoformat()\n params = {'volumeID': sf_vol['volumeID'], 'attributes': attributes}\n qos = self._set_qos_by_volume_type(ctxt, new_type['id'],\n volume.get('size'))\n\n if qos:\n params['qos'] = qos\n\n self._issue_api_request('ModifyVolume', params)\n return True, model_update", "title": "" }, { "docid": 
"ad0280838814eac530a0d88ad34e8c9a", "score": "0.45124257", "text": "def update(self, **kwargs) -> None:\n # language=rst\n if kwargs[\"mask\"] is None:\n kwargs[\"mask\"] = self.mask\n\n super().update(**kwargs)", "title": "" }, { "docid": "a0819431df8a60fef963ea11e9ba4792", "score": "0.44997704", "text": "async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume))", "title": "" }, { "docid": "b7fcbd7808dbdac372376ca516aaf0d2", "score": "0.44957563", "text": "def func_setPuppetControlSetting(self,mPuppet,attr,arg):\t\n l_slBuffer = mc.ls(sl=True) or []\n try:\n mPuppet.controlSettings_setModuleAttrs(attr,arg)\n except Exception,error:\n log.error(\"[func_setPuppetControlSetting fail!]{%s}\"%error)\n if l_slBuffer:mc.select(l_slBuffer)\t\t \n killUI()", "title": "" }, { "docid": "bf4733e6296e24308147c10974de9e22", "score": "0.44916835", "text": "def set_volume(v, device_name=None):\n CCMediaController(device_name=device_name).set_volume(v)", "title": "" }, { "docid": "d5621ab3412f743746ed193f96de2773", "score": "0.44810963", "text": "def unmanage(self, volume):\n pass", "title": "" }, { "docid": "cd7c7d81b1747c044c31a2c6f44f6c9e", "score": "0.44753975", "text": "def enable_replication(self, context, group, volumes):\n volumes_model_update = []\n model_update = (\n {'replication_status': fields.ReplicationStatus.ENABLED})\n if volumes:\n luns = [Lun(volume) for volume in volumes]\n try:\n self._replication.enable_replication(luns)\n except restclient.APIException as e:\n msg = (_('Failed to enable replication for group %(id)s, '\n 'Exception: %(ex)s.')\n % {'id': group.id, 'ex': str(e)})\n LOG.exception(msg)\n raise exception.VolumeDriverException(message=msg)\n for lun in luns:\n volumes_model_update.append(\n {'id': lun.os_id,\n 'replication_status': fields.ReplicationStatus.ENABLED})\n return model_update, volumes_model_update", "title": "" }, { "docid": "4ac2845bb241a1515be3d806f2bb8f6b", "score": "0.44658622", "text": "def set_volume(self, volume):\n if volume < 0.0:\n volume = 0.0\n elif volume > 1.0:\n volume = 1.0\n self._volume = volume\n self._sound_object.setVolume(self._volume)\n return None", "title": "" }, { "docid": "e6e97098441f8ba6a5adb87c0b40c194", "score": "0.44637328", "text": "def rf_overWriteMode(self):\n if self.pbReplace.isChecked():\n self.pbReplace.setText(\"REPLACE\")\n self.pbReplace.setStyleSheet(\"background-color: rgb(150, 0, 0)\")\n else:\n self.pbReplace.setText(\"NEW\")\n self.pbReplace.setStyleSheet(\"background-color: rgb(0, 150, 0)\")", "title": "" }, { "docid": "68554c15856b7933fc51174efe4321a8", "score": "0.4458716", "text": "def set_volume(self, volume):\n if volume < 0.0:\n volume = 0.0\n elif volume > 1.0:\n volume = 1.0\n self._volume = volume\n if self._sound:\n self._sound._sound_object.setVolume(self._volume * self._sound._sound_object._volume)\n else:\n self._volume = 1.0\n return None", "title": "" }, { "docid": "aba20b6cbb8e73ff96e805a78ba86ac6", "score": "0.44568658", "text": "async def async_set_volume_level(self, volume):\n if CONF_VOLUME_SET not in self._config:\n raise NotImplementedError()\n\n config = self._config[CONF_VOLUME_SET]\n\n self._volume_calls += 1\n volume_calls = self._volume_calls\n\n async with self._lock:\n if self._volume_calls != volume_calls:\n _LOGGER.debug('Aborted volume change early')\n\n def items():\n if self._volume_level:\n 
yield self._volume_level, None\n yield from self._volume_levels.items()\n\n base_level, base_code = min(\n items(),\n key=lambda kv: abs(volume - kv[0]))\n\n steps = int(round((volume - base_level) / self._volume_step))\n if steps > 0:\n code = self._config.get(CONF_VOLUME_UP)\n else:\n code = self._config.get(CONF_VOLUME_DOWN)\n\n target = base_level + self._volume_step * steps\n\n _LOGGER.debug('Volume base %f(%f) target %f(%f) steps %f',\n base_level,\n convert_volume_to_device(config, base_level),\n target,\n convert_volume_to_device(config, target),\n steps)\n\n # lie and say we are at volume, while\n # changing to keep gui happy\n self._volume_level = target\n\n if base_code:\n await self.send(base_code)\n self._volume_timestamp = datetime.now()\n\n for step in range(abs(steps)):\n await self.send_volume(code)\n if self._volume_calls != volume_calls:\n _LOGGER.debug('Aborted volume change')\n\n # set correct level on abort\n self._volume_level = base_level + (\n self._volume_step * copysign(step + 1, steps))\n break\n\n _LOGGER.debug('Volume level %f(%f)',\n self._volume_level,\n convert_volume_to_device(config, self._volume_level))", "title": "" }, { "docid": "18f56f58b69b838caa5754cdba3d3477", "score": "0.44553939", "text": "def configure_as_primary():\n def replication_callback(auth_state):\n assert ndb.in_transaction()\n trigger_replication(auth_state.auth_db_rev, transactional=True)\n model.configure_as_primary(replication_callback)", "title": "" }, { "docid": "2f826679e5a3a59f73b00a983964e2ce", "score": "0.44528985", "text": "def _inherit_metadata(self):\n try:\n check_metadata()\n\n if not self.args.get('ramdisk'):\n self.args['ramdisk'] = get_metadata('ramdisk-id')\n self.log.debug(\"inheriting ramdisk: {0}\"\n .format(self.args.get('ramdisk')))\n if not self.args.get('kernel'):\n self.args['kernel'] = get_metadata('kernel-id')\n self.log.debug(\"inheriting kernel: {0}\"\n .format(self.args.get('kernel')))\n if not self.args.get('block_device_mappings'):\n self.args['block_device_mappings'] = \\\n get_metadata_dict('block-device-mapping')\n self.log.debug(\"inheriting block device mappings: {0}\".format(\n self.args.get('block_device_mappings')))\n #\n # Product codes and ancestor ids are special cases since they\n # aren't always there.\n #\n try:\n productcodes = get_metadata_list('product-codes')\n self.args['productcodes'].extend(productcodes)\n self.log.debug(\"inheriting product codes: {0}\"\n .format(productcodes))\n except (ClientError, ServerError):\n msg = 'unable to read product codes from metadata.'\n print sys.stderr, msg\n self.log.warn(msg)\n try:\n if not self.args.get('ancestor_image_ids'):\n self.args['ancestor_image_ids'] = []\n ancestor_ids = get_metadata_list('ancestor-ami-ids')\n self.args['ancestor_image_ids'].extend(ancestor_ids)\n self.log.debug(\"inheriting ancestor ids: {0}\"\n .format(ancestor_ids))\n except (ClientError, ServerError):\n msg = 'unable to read ancestor ids from metadata.'\n print sys.stderr, msg\n self.log.warn(msg)\n except (ClientError, ServerError):\n msg = ('Unable to read instance metadata. 
Use --no-inherit if '\n 'you want to proceed without the metadata service.')\n print >> sys.stderr, msg\n self.log.warn(msg)\n raise", "title": "" }, { "docid": "a55bad22cce213fd8166d6646c4b6955", "score": "0.44498876", "text": "def set_single_fvb(self):\n self.set_read_mode(0)\n self.set_acquisition_mode(1)", "title": "" }, { "docid": "0b42af3987a054ee2f2ee6da371f20f8", "score": "0.44469273", "text": "def start_replica_cmd(builddir, replica_id):\n global viewChangeTimeoutSec\n statusTimerMilli = \"100\"\n viewChangeTimeoutMilli = \"{}\".format(viewChangeTimeoutSec*1000)\n path = os.path.join(builddir, \"tests\", \"simpleKVBC\", \"TesterReplica\", \"skvbc_replica\")\n return [path,\n \"-k\", KEY_FILE_PREFIX,\n \"-i\", str(replica_id),\n \"-s\", statusTimerMilli,\n \"-v\", viewChangeTimeoutMilli,\n \"-e\", str(True)\n ]", "title": "" }, { "docid": "8871a29e070efe3a9c7dc3fdd66db655", "score": "0.44440526", "text": "def set_mute(self, zone: int, on: bool):\n raise NotImplementedError()", "title": "" }, { "docid": "c0aa70250bd6e3aab459e75917d04228", "score": "0.44409776", "text": "def set_speaker_volume(volume):\n if volume < 0 or volume > 9:\n raise ValueError('\\n***\\n*** Out of range value for volume\\n***')\n send_command('ATL%d' % volume)\n get_output()", "title": "" }, { "docid": "4a986b8c1ef93cddc765948d3d373763", "score": "0.44358006", "text": "def radio_reconfig_flag(self, flag=False):\n self.reconfig_flag = flag", "title": "" }, { "docid": "8fe0d049fa088f9195f519ada088321b", "score": "0.44353184", "text": "def on_set_volume(self, event):\n self.currentVolume = self.volumeCtrl.GetValue()\n self.mplayer.SetProperty(\"volume\", self.currentVolume)", "title": "" }, { "docid": "eb4c86dc22d8bc9a5fdea474b5f9526a", "score": "0.44282737", "text": "def _extend_composite_volume(self, volumeInstance, volumeName,\n newSize, additionalVolumeSize, extraSpecs):\n # Is the volume extendable.\n isConcatenated = self.utils.check_if_volume_is_extendable(\n self.conn, volumeInstance)\n if 'True' not in isConcatenated:\n exceptionMessage = (_(\n \"Volume: %(volumeName)s is not a concatenated volume. \"\n \"You can only perform extend on concatenated volume. 
\"\n \"Exiting...\")\n % {'volumeName': volumeName})\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(data=exceptionMessage)\n else:\n compositeType = self.utils.get_composite_type(CONCATENATED)\n\n LOG.debug(\"Extend Volume: %(volume)s New size: %(newSize)s GBs.\",\n {'volume': volumeName,\n 'newSize': newSize})\n\n deviceId = volumeInstance['DeviceID']\n storageSystemName = volumeInstance['SystemName']\n LOG.debug(\n \"Device ID: %(deviceid)s: Storage System: \"\n \"%(storagesystem)s.\",\n {'deviceid': deviceId,\n 'storagesystem': storageSystemName})\n\n storageConfigService = self.utils.find_storage_configuration_service(\n self.conn, storageSystemName)\n\n elementCompositionService = (\n self.utils.find_element_composition_service(\n self.conn, storageSystemName))\n\n # Create a volume to the size of the\n # newSize - oldSize = additionalVolumeSize.\n unboundVolumeInstance = self._create_and_get_unbound_volume(\n self.conn, storageConfigService, volumeInstance.path,\n additionalVolumeSize, extraSpecs)\n if unboundVolumeInstance is None:\n exceptionMessage = (_(\n \"Error Creating unbound volume on an Extend operation.\"))\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(data=exceptionMessage)\n\n # Add the new unbound volume to the original composite volume.\n rc, modifiedVolumeDict = (\n self._modify_and_get_composite_volume_instance(\n self.conn, elementCompositionService, volumeInstance,\n unboundVolumeInstance.path, volumeName, compositeType,\n extraSpecs))\n if modifiedVolumeDict is None:\n exceptionMessage = (_(\n \"On an Extend Operation, error adding volume to composite \"\n \"volume: %(volumename)s.\")\n % {'volumename': volumeName})\n LOG.error(exceptionMessage)\n raise exception.VolumeBackendAPIException(data=exceptionMessage)\n\n return rc, modifiedVolumeDict", "title": "" }, { "docid": "be702ee6654cebecd838d027452e7944", "score": "0.4425432", "text": "async def change_vol(m: Message, gc: XPlayer):\n if m.input_str and (vol := m.input_str.strip()).isdigit():\n gc.set_my_volume(int(vol))\n await m.edit(f\"🔈 Volume changed to **{vol}%**\")\n elif m.client.is_bot:\n text, btns = volume_pannel()\n await m.reply(text, reply_markup=InlineKeyboardMarkup(btns))", "title": "" }, { "docid": "d7eec7a5c3a807a0163e99a0f25173a9", "score": "0.44224995", "text": "def setCategoryInheritance( self, category_inheritance ):\n if category_inheritance:\n self._category_inheritance = 1\n else:\n self._category_inheritance = 0", "title": "" }, { "docid": "21ab77cb53408b0828d2bcc78e44fbdb", "score": "0.4419295", "text": "def volume_level(self):\n #return self._client.volume / 100\n return 100", "title": "" }, { "docid": "a24d8f2040cb4b464642377eca444bcb", "score": "0.44188046", "text": "def configure_replication():\n\n # WAT Should we consider support standalone mysql deployments?\n if not env.mysql_replication_user and env.mysql_replication_pass:\n raise Exception(\n \"You must define MYSQL_REPLICATION_USER and MYSQL_REPLICATION_PASS in our settings.\"\n )\n\n # WAT should probably also sanity-check MYSQL_AUTOINCREMENT_OFFSET\n i = env.mysql_autoincrement_increment\n if (not i) or i <= 1:\n raise Exception(\n \"It is exceedingly unwise to set up multi-master replication with an \"\n \"MYSQL_AUTOINCREMENT_INCREMENT of %s, and I refuse to do it. 
Sorry.\" % i\n )\n\n create_user(env.mysql_replication_user, env.mysql_replication_pass)\n run(\n \"GRANT REPLICATION SLAVE ON *.* TO '%s'@'%%' IDENTIFIED BY '%s';\" % (\n env.mysql_replication_username,\n env.mysql_replication_password,\n )\n )\n run(\"FLUSH PRIVILEGES;\")", "title": "" }, { "docid": "3c39682325de697edcc55b803bf7be5e", "score": "0.44177768", "text": "def create_conglomerate_volume(self, volume):\n return self._request(\"POST\", \"volume/{0}\".format(volume), {\"protocol_endpoint\": True})", "title": "" } ]
fcaca5e573437f485f9b4e4a15161943
r""" Import graphs from ``file`` into the database. This function is used to import new censuses of graphs and is not meant to be used by users of DiscreteZOO. To properly import the graphs, all graphs of the same order must be together in the file, and no graph of this order must be present in the database.
[ { "docid": "23e07053e4212c9cf56777e7c8f4894f", "score": "0.7322005", "text": "def import_graphs(file, cl=ZooGraph, db=None, format=\"sparse6\",\n index=\"index\", verbose=False):\n info = ZooInfo(cl)\n if db is None:\n db = info.getdb()\n info.initdb(db=db, commit=False)\n previous = 0\n i = 0\n cur = db.cursor()\n with open(file) as f:\n for line in f:\n data = line.strip()\n if format not in [\"graph6\", \"sparse6\"]:\n data = eval(data)\n g = Graph(data)\n n = g.order()\n if n > previous:\n if verbose and previous > 0:\n print \"Imported %d graphs of order %d\" % (i, previous)\n previous = n\n i = 0\n i += 1\n cl(graph=g, order=n, cur=cur, db=db, **{index: i})\n if verbose:\n print \"Imported %d graphs of order %d\" % (i, n)\n f.close()\n cur.close()\n db.commit()", "title": "" } ]
[ { "docid": "7139a8d85797a92497b30657ec0db74d", "score": "0.72724146", "text": "def load_graph(file):\n g = nx.DiGraph()\n mode = \"N\"\n for l in file:\n l = l.strip()\n if mode == \"N\":\n if l == \"// Nodes\":\n mode = \"LN\"\n elif mode == \"LN\":\n if l == \"// Edges\":\n mode = \"LE\"\n else: # LOAD NODES\n nparts = l.split(\" \", 2)\n g.add_node(int(nparts[0]),\n {'unixtime': int(nparts[1]), 'line': nparts[2]})\n pass\n elif mode == \"LE\" and len(l) > 0: # LOAD EDGES\n eparts = [int(x) for x in l.split(\" \", 1)]\n g.add_edge(eparts[0], eparts[1])\n return g", "title": "" }, { "docid": "2800319770396622dd53e2662464a05d", "score": "0.68732077", "text": "def import_graph(self, edge_file_path: str) -> nx.Graph:\r\n my_file = Path(edge_file_path)\r\n\r\n if my_file.exists() == False:\r\n self.write_edge_list(edge_file_path)\r\n g = nx.read_edgelist(edge_file_path, delimiter='\\t')\r\n self.graph = g", "title": "" }, { "docid": "4d63a8a6e553d505ad9ef00f73dd4569", "score": "0.67489487", "text": "def import_graph(self,edge_file_path: str) -> None:\r\n if os.path.exists(edge_file_path)==False:\r\n self.write_edge_list(edge_file_path)\r\n g = nx.read_edgelist(edge_file_path, delimiter='\\t')\r\n self.graph = g", "title": "" }, { "docid": "7b45a79affa1568c8befc102f191a8b5", "score": "0.6703414", "text": "def import_graph(self, fname, directed=False):\n self._import_dot(list(fileinput.input(fname)))", "title": "" }, { "docid": "326332e371bb09f7d93c8290f6ea4264", "score": "0.66457766", "text": "def construct_graph_from_file(self, filename):\n f = open(filename)\n for line in f:\n line = line.split()\n print(line)\n if len(line) == 1:\n source = line[0]\n if source.isnumeric():\n source = int(source)\n self.add_to_graph(source)\n elif len(line) == 2:\n source = line[0]\n if source.isnumeric():\n source = int(source)\n destination = line[1]\n if destination.isnumeric():\n destination = int(destination)\n self.add_to_graph(source, destination)\n else:\n source = line[0]\n if source.isnumeric():\n source = int(source)\n destination = line[1]\n if destination.isnumeric():\n destination = int(destination)\n weight = int(line[2])\n self.add_to_graph(source, destination, weight)", "title": "" }, { "docid": "4d2732acf4d94198d8f9c8427ef38a6b", "score": "0.65890557", "text": "def read_graph(filename):\n frame = read_tsv(filename)\n clean_frame = frame.dropna() # ignore missing data\n edgelist = [\n (int(src), int(dst)) # convert edges to integers\n for src, dst in clean_frame.values.tolist()\n ]\n return networkx.DiGraph(edgelist)", "title": "" }, { "docid": "e64efe349fb2652d0a35098c21a5f79f", "score": "0.64513195", "text": "def read_graph_from_file(filename):\n\n # Use 'open' to open the file\n f = open(filename, \"r\")\n\n # Use the first line (G or D) to determine whether graph is directed \n # and create a graph object\n first_line = f.readline().strip()\n graph = Graph(False)\n \n # If undirected\n if first_line == \"G\":\n graph = Graph(False)\n \n # If directed\n elif first_line == \"D\":\n graph = Graph()\n \n else:\n print(\"Invalid Input\")\n print(first_line)\n\n # Use the second line to add the vertices to the graph\n vertices = f.readline().strip()\n for _ in vertices:\n graph.add_vertex(_)\n\n # Use the 3rd+ line to add the edges to the graph\n for line in f:\n if line != '':\n print(line)\n curr = line.replace('(', '')\n curr = curr.replace(')', '').strip()\n curr = curr.split(\",\")\n print(\"Current line: {}\".format(curr))\n \n if curr:\n vert1 = graph.add_vertex(curr[0])\n vert2 = 
graph.add_vertex(curr[1])\n # print(\"Vert 1: {} Vert 2: {}\".format(vert1, vert2))\n \n graph.add_edge(vert1.get_id(), vert2.get_id())\n \n f.close()\n return graph", "title": "" }, { "docid": "3aae9ec14a7900593b604651f8beb00f", "score": "0.64367074", "text": "def read_graph(file_path):\n\n edge_list = []\n\n # input file\n\n input_file = open(file_path)\n\n # process graph edges\n for line in input_file.readlines() :\n line = line.strip()\n\n # ignore lines starting with '#'\n if line.startswith('#') :\n continue\n\n edge = tuple(line.split('\\t'))\n edge_list.append(edge)\n\n # make a new graph\n G = nx.Graph()\n\n # uncomment to add all egdes\n G.add_edges_from(edge_list)\n\n return G", "title": "" }, { "docid": "e7f636fbd4bf65e197e1926e981a735b", "score": "0.6353936", "text": "def read_graph_from_file(file_path: str) -> nx.DiGraph:\n if not path.exists(file_path):\n raise FileNotFoundError\n\n with open(file_path, 'r') as file:\n graph = nx.nx_agraph.read_dot(file)\n\n graph = nx.convert_node_labels_to_integers(graph)\n float_delay_dict = dict([(k, float(v)) for k, v in nx.get_node_attributes(graph, 'delay').items()])\n int_weight_dict = dict([(k, float(v.replace('\"', ''))) for k, v in nx.get_edge_attributes(graph, 'weight').items()])\n nx.set_node_attributes(graph, float_delay_dict, 'delay')\n nx.set_edge_attributes(graph, int_weight_dict, 'weight')\n\n return graph", "title": "" }, { "docid": "d7e831fff1ebd35ef427c339ebb54f99", "score": "0.6294365", "text": "def file2igraph(file):\n with open(file, 'r') as fi:\n v,e = fi.next().split()\n e_list = [(int(i.split()[0]), int(i.split()[1])) for i in list(fi)]\n assert (int(e) == len(e_list)),\\\n \"#edges mentioned and # of edges in file differ\"\n g = Graph()\n g.add_vertices(int(v))\n g.add_edges(e_list)\n return g", "title": "" }, { "docid": "33aab58a743bb96dfb115ec6442f6e76", "score": "0.62006456", "text": "def lines_to_graph(file_path):\n with open(file_path, \"rb\") as file:\n\n reader = csv.reader(file, delimiter=\",\")\n\n keys = {}\n nodes = []\n edges = []\n\n i = 0\n\n for row in reader:\n row0 = round(float(row[0]) / 10) * 10\n row1 = round(float(row[1]) / 10) * 10\n row2 = round(float(row[2]) / 10) * 10\n row3 = round(float(row[3]) / 10) * 10\n\n if not (row0, row1) in keys.keys():\n keys[(row0, row1)] = i\n nodes.append((row0, row1))\n i = i + 1\n\n if not (row2, row3) in keys.keys():\n keys[(row2, row3)] = i\n nodes.append((row2, row3))\n i = i + 1\n\n edges.append((keys[(row0, row1)], (keys[(row2, row3)])))\n\n print nodes, edges\n return nodes, edges", "title": "" }, { "docid": "e11373451b08e8ee17fe7f11b854ac11", "score": "0.61276406", "text": "def readFromFile(file):\n with open(file, mode=\"r\") as f:\n vertices, edges = f.readline().split()\n edgeList = list()\n for line in f:\n #add each line to edgelist as (a,b) where a and b are vertex ids\n edgeList.append((int(line.split()[0]), int(line.split()[1])))\n g = Graph(int(vertices))\n g.add_edges(edgeList)\n #makes sure that edges are equal to nubmer specified in file\n assert (int(edges) == len(edgeList))\n return g, int(vertices)", "title": "" }, { "docid": "016b9e346ff6c9bf6f94b7cabaa1c281", "score": "0.61168015", "text": "def load(file_path=\"./graph.csv\"):\n assert path.isfile(file_path), \"'{}' is not a file\".format(file_path)\n log.debug(\"loading graph from: {}\".format(file_path))\n graph = None\n\n with open(file_path, 'r', newline='', encoding='utf-8') as file:\n reader = csv.reader(file, quotechar='\"')\n for i, row in enumerate(reader):\n if not 
graph:\n graph = ActorsGraph(row)\n else:\n for j, movie_ids in enumerate(row):\n for movie_id in movie_ids.split(\",\"):\n graph.add_edge_by_indices(i - 1, j, movie_id)\n\n assert graph, \"graph has not been created, check '{}' file\".format(file_path)\n return graph", "title": "" }, { "docid": "74452f86119817a3a2bc104097eadc02", "score": "0.6114323", "text": "def from_file(file):\n adjacency_list = open(file).read().splitlines()\n E, size = [], 0\n for v, neighborhood in enumerate(adjacency_list):\n neighbors = map(int, neighborhood.split(\" \"))\n for n in neighbors:\n E.append((v, n))\n size = max(size, max(neighbors))\n\n return Graph(V=range(size + 1), E=E)", "title": "" }, { "docid": "5f2b242c33300281a9e6db95f09d55d6", "score": "0.608862", "text": "def load_graph_from_file(filename):\n with open(filename) as UK_cities:\n dict_uk_cities = json.load(UK_cities)\n return nx.Graph(dict_uk_cities)", "title": "" }, { "docid": "0b0a83f39375b78e2eb8611b01cb6808", "score": "0.6084015", "text": "def from_file_reg_format(file):\n import re\n\n lines = open(file).read().splitlines()\n graphs, E = [], []\n i = j = M = 0\n b = False\n\n for line in lines:\n if re.match(r'Gr', line):\n i += 1\n b = True\n if i > 1:\n graphs.append(Graph(E=E, V=range(M)))\n j = M = 0\n E[:] = []\n elif line == \"\":\n pass\n elif re.match(r'[GOT]', line):\n b = False\n elif b:\n for c in line[4:]:\n if c == ' ':\n continue\n elif int(c) - 1 > j:\n E.append((j, int(c) - 1))\n M = max(M, int(c))\n j += 1\n graphs.append(Graph(E=E, V=range(M))) # get last graph\n return graphs", "title": "" }, { "docid": "2ef437109bc2db39dad59e628543ff48", "score": "0.6069113", "text": "def load_graph(filename):\r\n with tf.gfile.FastGFile(filename, 'rb') as f:\r\n graph_def = tf.GraphDef()\r\n graph_def.ParseFromString(f.read())\r\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "6fd065fd91f96cb9dd1d42d09328f9f0", "score": "0.60630155", "text": "def loadFromIgraph(self, filename, gformat=\"graphml\"):\n self.graph = igraph.load(filename, format=gformat)", "title": "" }, { "docid": "f455f4d7f1073c9f70bf76e3c4d2bf0e", "score": "0.6022515", "text": "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "f455f4d7f1073c9f70bf76e3c4d2bf0e", "score": "0.6022515", "text": "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "4a88484ad307f67bdf62356e9eb1627e", "score": "0.6005507", "text": "def read_graph(filename, directed=True):\n if not directed:\n G = nx.Graph()\n else:\n G = nx.DiGraph()\n with open(filename) as f:\n for line in f:\n d = line.split()\n G.add_edge(int(d[0]), int(d[1]))\n return G", "title": "" }, { "docid": "f612bc245d36f71694ab05f863b289a7", "score": "0.6002649", "text": "def read_from_csv(self, filename):\n \n self.graph = []\n\n # Read the file into a string separated by newlines for each row\n try:\n with open(filename,'r') as readfile:\n lines = readfile.read().splitlines()\n except IOError, e:\n print \"I/O error: %s\" % e\n return\n\n # Read lines with csv.reader\n csvreader = csv.reader(lines)\n\n # Load the rows into self.graph matrix\n try:\n for row in csvreader:\n self.graph.append([])\n for i in range(csvreader.line_num):\n self.graph[csvreader.line_num-1].append(int(row[i]))\n 
except csv.Error, e:\n print \"CSV error: %s\" % e\n return\n except IndexError, e:\n print \"Index error: %s\" % e\n return\n \n # Make matrix symmetric\n # While increasing size, we do not have to check for\n # out of range when getting data from the graph\n for i in range(len(self.graph)):\n for j in range(i+1,len(self.graph)):\n self.graph[i].append(self.graph[j][i])", "title": "" }, { "docid": "ca19ce43e8d09bce1fd3ff91bfe62e1c", "score": "0.5987846", "text": "def load_graph(vertex_file, edge_file):\n G = nx.Graph()\n\n # Parse edges.\n with open(edge_file, \"r\") as edges:\n for e in edges.readlines():\n v1, v2 = e.strip().split()\n G.add_edge(v1, v2)\n return G", "title": "" }, { "docid": "5a322345a5973c3e69cea2b29d058ab9", "score": "0.5984751", "text": "def parse_instance(filepath: str) -> Graph:\n\n with open(filepath, \"r\") as file:\n\n # read first three lines\n num_tasks = int(file.readline())\n num_relations = int(file.readline())\n cycle_time = int(file.readline())\n\n # Create Tasks with id and processing times\n tasks: List[Task] = []\n for _ in range(num_tasks):\n task_id, processing_time = file.readline().split(\",\")\n tasks.append(Task(int(task_id), int(processing_time)))\n\n # Add the ids of the predecessor to each Task\n for _ in range(num_relations):\n predecessor_id, successor_id = file.readline().split(\",\")\n tasks[int(successor_id)].predecessors.append(int(predecessor_id))\n\n # Add the setup times from one Task to all other Tasks\n for i in range(num_tasks):\n setup_times_i = file.readline().split(\",\")\n tasks[i].setup_times = list(map(int, setup_times_i))\n\n print(f\"*Import of {filepath} successful!*\")\n\n name = filepath.split(\"/\")[-1][:-4]\n\n return Graph(tasks, cycle_time, name)", "title": "" }, { "docid": "e69f66c0561f1b2ee5774977aa10e2db", "score": "0.59591967", "text": "def load_graph(graph_file):\n with open(graph_file) as graph:\n graph_lines = graph.read().split('\\n')[:-1]\n \n print \"Loaded graph with\", len(graph_lines), \"nodes\"\n answer_graph = {}\n for line in graph_lines:\n neighbors = line.split(' ')\n node = int(neighbors[0])\n answer_graph[node] = set([])\n for neighbor in neighbors[1 : -1]:\n answer_graph[node].add(int(neighbor))\n return answer_graph", "title": "" }, { "docid": "b4e2e6e4c0c5ed5cc3cce5b9e23c0d41", "score": "0.5921765", "text": "def load_graphs(self):\r\n self.graphs = [Graph(table) for table in self.tables]", "title": "" }, { "docid": "bac7d6321e9e8b10f896797c4fd9c98f", "score": "0.5902526", "text": "def load_csv_into_graph(file_name: str) -> Graph:\n user_map = Graph()\n row_num = 0\n width_of_map = 0\n\n with open(file_name) as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n width_of_map = len(row)\n for i in range(len(row)):\n if row[i] == \"B\":\n user_map.add_vertex((i, row_num), \"blocked\")\n elif row[i] == \"S\":\n user_map.add_vertex((i, row_num), \"start\")\n elif row[i] == \"E\":\n user_map.add_vertex((i, row_num), \"end\")\n else:\n user_map.add_vertex((i, row_num))\n row_num += 1\n height_of_map = row_num\n # Knowing the dimensions of the graph, connect the graph\n for y in range(height_of_map):\n for x in range(width_of_map):\n if x == width_of_map - 1 and y == height_of_map - 1:\n pass\n elif x == width_of_map - 1:\n user_map.add_edge((x, y), (x, y + 1))\n elif y == height_of_map - 1:\n user_map.add_edge((x, y), (x + 1, y))\n else:\n user_map.add_edge((x, y), (x, y + 1))\n user_map.add_edge((x, y), (x + 1, y))\n\n return user_map", "title": "" }, { "docid": 
"e68e2073b553860a9ec3a8dd0bc87e5e", "score": "0.5865928", "text": "def read_file_draw_graph():\n global pos\n global G\n array2d = readFile()\n\n ROW, COLUMN = len(array2d), len(array2d[0])\n count = 0\n\n G = nx.Graph()\n\n for j in xrange(COLUMN):\n for i in xrange(ROW):\n if array2d[ROW - 1 - i][j] == 0:\n G.add_node(count, pos=(j, i))\n count += 1\n\n pos = nx.get_node_attributes(G, 'pos')\n\n for index in pos.keys():\n for index2 in pos.keys():\n if pos[index][0] == pos[index2][0] and pos[index][1] == pos[index2][1] - 1:\n G.add_edge(index, index2, weight=1)\n if pos[index][1] == pos[index2][1] and pos[index][0] == pos[index2][0] - 1:\n G.add_edge(index, index2, weight=1)\n\n return G", "title": "" }, { "docid": "6ea0fac672ad09018a858a0547f8b844", "score": "0.5842683", "text": "def CreateGraph( filename ):\n \n \"\"\"\n print('-----------CreateGraph-----------')\n \n #-------------------------------\n v = list()\n v.append(Vertex('A', 366))\n v.append(Vertex('B', 0))\n v.append(Vertex('C', 160))\n v.append(Vertex('D', 242))\n v.append(Vertex('E', 161))\n v.append(Vertex('F', 176))\n v.append(Vertex('G', 77))\n v.append(Vertex('H', 151))\n v.append(Vertex('I', 226))\n v.append(Vertex('J', 244) )\n v.append(Vertex('K', 241))\n v.append(Vertex('L', 234))\n v.append(Vertex('M', 380))\n \n \n #-----------------------------\n graph = Graph(directed=False)\n \n for i in range (0, 13):\n graph.AddVertex(v[i])\n \n graph.SetStartVertex(v[4])\n graph.SetGoalVertex(v[6])\n \n graph.AddEdge(v[4], v[9], 19)\n graph.AddEdge(v[4], v[8], 21)\n graph.AddEdge(v[4], v[5], 35)\n graph.AddEdge(v[4], v[7], 47)\n \n graph.AddEdge(v[9], v[10], 17)\n graph.AddEdge(v[9], v[8], 28)\n \n graph.AddEdge(v[10], v[11], 20)\n graph.AddEdge(v[10], v[12], 25)\n \n graph.AddEdge(v[11], v[12], 70)\n \n graph.AddEdge(v[8], v[2], 27)\n graph.AddEdge(v[8], v[3], 18)\n graph.AddEdge(v[8], v[5], 33)\n \n graph.AddEdge(v[5], v[4], 35)\n graph.AddEdge(v[5], v[8], 54)\n graph.AddEdge(v[5], v[3], 33)\n \n graph.AddEdge(v[3], v[6], 41)\n graph.AddEdge(v[3], v[5], 33)\n graph.AddEdge(v[3], v[8], 18)\n graph.AddEdge(v[3], v[2], 12)\n \n graph.AddEdge(v[2], v[0], 41)\n graph.AddEdge(v[2], v[1], 33)\n graph.AddEdge(v[2], v[3], 18)\n graph.AddEdge(v[2], v[8], 12)\n \n return graph\n \"\"\"\n \n print('-----------CreateGraph-----------')\n \n #-------------------------------\n v = list()\n v.append(Vertex('A', 5))\n v.append(Vertex('C', 4))\n v.append(Vertex('E', 2))\n v.append(Vertex('F', 2))\n v.append(Vertex('G', 4))\n v.append(Vertex('I', 3))\n v.append(Vertex('J', 2))\n v.append(Vertex('K', 1))\n v.append(Vertex('M', 0))\n \n \n #-----------------------------\n graph = Graph(directed=False)\n \n for i in range (0, 9):\n graph.AddVertex(v[i])\n \n graph.SetStartVertex(v[0])\n graph.SetGoalVertex(v[8])\n \n graph.AddEdge(v[0], v[1], 1)\n \n graph.AddEdge(v[1], v[5], 3)\n graph.AddEdge(v[1], v[2], 4)\n \n graph.AddEdge(v[5], v[4], 2)\n graph.AddEdge(v[5], v[3], 4)\n \n graph.AddEdge(v[2], v[3], 2)\n graph.AddEdge(v[2], v[7], 4)\n graph.AddEdge(v[2], v[6], 2)\n \n graph.AddEdge(v[3], v[7], 1)\n \n graph.AddEdge(v[7], v[3], 1)\n graph.AddEdge(v[7], v[8], 1)\n \n return graph", "title": "" }, { "docid": "5d5420377b332b7df83d2febe0b0acb0", "score": "0.5839412", "text": "def from_csv(self, file: str):\n with open(file) as f:\n for line in f:\n words = line.split(',')\n src = int(words[0])\n dest = int(words[1])\n self.add_edge(src, dest)", "title": "" }, { "docid": "1b9dbec525ece3404f2e800e168f0e7f", "score": "0.5810878", "text": "def 
load_traces(self, file_path):\n self.traces = gpd.GeoDataFrame(gpd.read_file(file_path))\n \n #filter none traces\n self.traces=self.traces[~self.traces.geom_type.isna()]\n\n print('Traces loaded')\n \n if self.show_figures:\n self.traces.plot()\n if self.save_figures:\n plt.savefig(self.output_path+'traces.pdf')\n plt.savefig(self.output_path+'traces.png')\n plt.show(block=False)", "title": "" }, { "docid": "b74856f8acedd40caf410f1cdb8d49ae", "score": "0.5784517", "text": "def graph_from_file(handle):\n lines = handle.read().split('\\n')\n # Check vertex count for floats\n if float(lines[0]) % 1 != 0.0:\n raise ValueError(\"Vertex count must be an integer\")\n vertex_count = int(lines[0])\n if vertex_count < 0:\n raise ValueError(\"Vertex count must be a non-negative integer\")\n # If we don't check for a 0 vertex count here, it will cause the parser\n # to look for edges starting at line 0.\n if vertex_count == 0:\n return graph.Graph([])\n vertices = []\n # Offset by 1 to account for the vertex count line\n for vertex_name in lines[1:vertex_count+1]:\n # Empty string is technically a valid Vertex name, but here an extra\n # newline will cause parsing problems (was it supposed to be a Vertex\n # name or just a formatting error?) so we will not allow it.\n if vertex_name == \"\":\n raise ValueError(\"Extra newline found in vertex names\")\n if \"|\" in vertex_name:\n raise ValueError(\"Vertex names may not contain pipes\")\n vertices.append(graph.Vertex(vertex_name))\n for edge_data in lines[vertex_count+1:]:\n line = edge_data.split(\"|\")\n # Also catches extra newlines\n if len(line) < 2:\n raise ValueError(\"Edge lines must contain at least one pipe\")\n # Find the Vertex objects that match these names\n from_vertex = None\n to_vertex = None\n for vertex in vertices:\n if vertex.name == line[0]:\n from_vertex = vertex\n # Not an elif because it could be a self loop\n if vertex.name == line[1]:\n to_vertex = vertex\n if not (from_vertex and to_vertex):\n raise ValueError(\"Edge data does not match Vertex names\")\n if len(line) > 2:\n new_edge = graph.Edge(to_vertex, line[2])\n else:\n new_edge = graph.Edge(to_vertex)\n from_vertex.add_edge(new_edge)\n return graph.Graph(vertices)", "title": "" }, { "docid": "ac479056f306df2d3483ce461ed954f6", "score": "0.576732", "text": "def load_diagram_from_csv_file(self, filepath):\n\n bpmn_csv_import.BpmnDiagramGraphCSVImport.load_diagram_from_csv(filepath, self)", "title": "" }, { "docid": "092fd955f325b1ada9e892050700075e", "score": "0.5764412", "text": "def import_connectivity_topology(self, filename):\n try:\n conn = sqlite3.connect(filename)\n except:\n print \"Error while connecting to the database \" + filename\n return -1\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM glycans\")\n glycans = cursor.fetchall()\n\n self.connect_topology = {}\n for glycan in glycans:\n name,tree = glycan\n residue = {}\n residue['UNIT'] = []\n nbr_unit = 0\n for unit in tree.split('|'):\n unit = unit.split(' ')\n nbr_unit += 1\n if len(unit) > 2:\n residue['UNIT'].append([unit[0],unit[1], unit[2:]])\n else:\n residue['UNIT'].append([unit[0], ' ', []])\n \n residue['#UNIT'] = nbr_unit\n self.connect_topology[name] = residue\n self.build_keys()", "title": "" }, { "docid": "8ac9d97dbe621dba86957ac1db654dd9", "score": "0.5750857", "text": "def load(self, filename: str):\n\n if not isinstance(filename, str):\n raise TypeError(\"filename must be a string\")\n self.graph = pickle.load(open(filename, \"rb\"))", "title": "" }, { "docid": 
"e0fa0311bb24f33dc0c989542811eb51", "score": "0.57448035", "text": "def load_ratings(filename):\n print(\"Loading Ratings\")\n\n Rating.query.delete()\n\n with open(filename) as file:\n for line in file:\n line = line.strip()\n user_id, movie_id, score, timestamp = line.split(\"\\t\") # split by tab\n # print(\"user_id: {} movie_id: {} score: {}\".format(user_id,movie_id,score))\n rating = Rating(movie_id=movie_id, user_id=user_id,score=score)\n\n db.session.add(rating)\n\n db.session.commit()", "title": "" }, { "docid": "646ca8b8548ac2e59a675c298e6f8dd5", "score": "0.5736857", "text": "def load_filename(self, filename):\n graph = {}\n with open(filename, 'rb') as csvfile:\n reader = csv.reader(csvfile)\n nodes = map(eval, next(reader)[1:])\n for line in reader:\n base = eval(line.pop(0))\n graph[base] = dict((n1, n2)\n for n1, n2 in zip(nodes, map(float, line))\n if n2 > 0)\n self.load(graph)", "title": "" }, { "docid": "ef49b5df37a7679133e4e172bc29aa25", "score": "0.573546", "text": "def read_graph_from_file(filename):\n\n fp = open(filename)\n\n edges = []\n\n max_node = 0\n\n edge_as_line = fp.readline()\n while edge_as_line != \"\":\n edge_as_line = edge_as_line.split()\n\n src_node = int(edge_as_line[0]) - 1\n dst_node = int(edge_as_line[1]) - 1\n\n if src_node > max_node:\n max_node = src_node\n\n if dst_node > max_node:\n max_node = dst_node\n\n edges.append([src_node, dst_node])\n\n edge_as_line = fp.readline()\n\n G = [[] for i in range(max_node + 1)]\n Grev = [[] for i in range(max_node + 1)]\n\n for i in range(len(edges)):\n\n src_node = edges[i][0]\n dst_node = edges[i][1]\n\n # building G\n if dst_node not in G[src_node]:\n G[src_node].append(dst_node)\n\n # building Grev\n if src_node not in Grev[dst_node]:\n Grev[dst_node].append(src_node)\n\n fp.close()\n\n return G, Grev", "title": "" }, { "docid": "cabe882495f4ebd5e6973e2b256fced5", "score": "0.5717964", "text": "def from_file(cls, filename):\n\n g = nx.MultiGraph()\n\n # filename can be the path to a file or the name of a local topology\n if os.path.isfile(filename):\n filepath=filename\n else:\n raise ValueError(\"Wrong file path\")\n\n with open(filepath) as f:\n\n data = json.load(f)\n\n for node in data[\"nodes\"]:\n g.add_node(\n node,\n cores=data[\"nodes\"][node].get(\"cores\", 0),\n memory=data[\"nodes\"][node].get(\"memory\", 0),\n )\n\n for link in data[\"links\"]:\n u,v = link\n devices = data[\"links\"][link][\"devices\"]\n rate = data[\"links\"][link][\"rate\"]\n\n g.add_edge(\n u,\n v,\n rate=rate,\n devices={u: devices[u], v: devices[v]},\n )\n\n return cls(nx.freeze(g))", "title": "" }, { "docid": "9cbf334b00bc2fe12e3f9f52d59200a9", "score": "0.5709398", "text": "def load_vert_traces(self, file_path):\n self.vert_traces = gpd.GeoDataFrame(gpd.read_file(file_path))\n print('Traces loaded')", "title": "" }, { "docid": "19dbbca7b0340de50cd2512ae800a1c1", "score": "0.5700052", "text": "def _build_graph_from_file(self, filename, sp):\n\n if filename is not None:\n with open(filename, 'rt') as fp:\n for i, line in enumerate(fp):\n if i == 0:\n for v in line.rstrip().split(sp):\n self._G.add_vertex(self.index_of(v))\n else:\n a = line.rstrip().split(sp)\n v = self._st[a[0]]\n j = 1\n while j < len(a):\n self._G.add_edge(v, self._st[a[j]])\n j += 1", "title": "" }, { "docid": "8cc4b7fc11c23a5dbf1374f250d7cea4", "score": "0.5696205", "text": "def load_diagram_from_xml_file(self, filepath):\n\n bpmn_import.BpmnDiagramGraphImport.load_diagram_from_xml(filepath, self)", "title": "" }, { "docid": 
"2594832cd0435b7be322dbb09c781dc7", "score": "0.5679725", "text": "def __constructGraphFromInputFile(self, input_file):\n file = open(input_file)\n graphs = {}\n for line in file:\n #print(line)\n edge = line.split(\" \")\n source = int(edge[0])\n dest = int(edge[1])\n # add vertex adjacency list \n if not (source in graphs):\n graphs[source] = [dest]\n elif not (dest in graphs[source]):\n graphs[source].append(dest)\n # need an empty list of adjacent neigbhors from each vertex\n if not (dest in graphs):\n graphs[dest] = []\n # return graphs\n #print(graphs)\n return graphs", "title": "" }, { "docid": "958d3a782cf908c9f803302872185507", "score": "0.56651694", "text": "def add_onnx_graph(\n self,\n onnx_model_file):\n self._get_file_writer().add_onnx_graph(load_onnx_graph(onnx_model_file))\n self._get_comet_logger().log_asset(onnx_model_file)", "title": "" }, { "docid": "71e5cedb80fd3b27b462cca127f3922e", "score": "0.5626393", "text": "def __read_graph(self, network_filename):\n self.G = igraph.Graph.Read_Ncol(network_filename, directed=False)", "title": "" }, { "docid": "831649d78a60b2732824eee0ca5a5bd1", "score": "0.5615621", "text": "def construct_grid_graph(file_path):\n return lambda g: utils.parse_grid_file(g, file_path)", "title": "" }, { "docid": "5eaaef7fe60202a6ae3d0b287bf17cde", "score": "0.56133604", "text": "def load_connectivity_matrix(self, filename, gangs):\n\n # Load csv file, make adjency matrix and determine the edges\n matrix = pd.read_excel(filename)\n matrix = matrix.values\n total_matrix = np.ones((matrix.shape[0], matrix.shape[1]))\n rows, cols = np.where(matrix == 1)\n edges = zip(rows.tolist(), cols.tolist())\n\n # Add nodes to graph\n for k, gang in gangs.items():\n self.observed_gr.add_node(k, pos=gang.coords)\n self.observed_gr.add_edges_from(edges)\n\n # Also generate a graph with all possible edges for now\n rows, cols = np.where(total_matrix == 1)\n all_edges = zip(rows.tolist(), cols.tolist())\n\n # Add nodes (gangs) to the graph with all possible edges\n for k, gang in gangs.items():\n self.all_gr.add_node(k, pos=gang.coords)\n self.all_gr.add_edges_from(all_edges)", "title": "" }, { "docid": "815550146504d3a465f262e12cc0ca5a", "score": "0.5597409", "text": "def loadfile(self, file):\n\n with open(file, 'r') as jf:\n for line in jf.readlines():\n\n # Remove newline character from line\n line = line.replace('\\n', '')\n\n # If line is not a comment, or empty\n if line.startswith('#') == False and line != '':\n\n # Create item\n self.queue_items[self.current_id] = {\n 'id': self.current_id,\n 'values': line.split(',')\n }\n\n self.current_id += 1 # Add one to the current id", "title": "" }, { "docid": "4b4bb53812cecdcc9c74fed0ef81f86a", "score": "0.5593402", "text": "def load_yago_core(file_name: str, graph: Graph, parse_line: Callable[[str], tuple]) -> Graph:\n print(f\"Reading graph from {file_name}\")\n dirty_chars = \"<>\"\n with open(file_name) as yago_file:\n total = len(yago_file.readlines())\n with open(file_name) as yago_file:\n for line in tqdm(yago_file, total=total):\n triple = parse_line(line.strip())\n cleaned_triple = []\n for element in triple:\n cleaned = \"\".join([char for char in element if char not in dirty_chars])\n cleaned = cleaned.replace('\"', \"''\")\n cleaned = cleaned.replace(\"`\", \"'\")\n cleaned = cleaned.replace(\"\\\\\", \"U+005C\")\n cleaned = cleaned.replace(\"^\", \"U+005E\")\n cleaned_triple.append(cleaned)\n graph.add(tuple([URIRef(element) for element in cleaned_triple]))\n print()\n print(f\"Created graph with 
{len(graph)} triples\")\n return graph", "title": "" }, { "docid": "3801db53f97551321e53d270eb2801a5", "score": "0.55860543", "text": "def read_graph(file_path: str):\n with open(file_path, 'r') as file:\n for line in file:\n node, _, neighbors = line.partition(DELIMITER)\n assert neighbors\n yield node, neighbors.replace(NODES_DELIMITER, '').split()", "title": "" }, { "docid": "65c9e9aa8b116c154d60fa2f1185c5cb", "score": "0.5584422", "text": "def load_map(self, filename):\n self._graph = Graph()\n self._location = {}\n self._streetnames = {}\n\n with open(filename, 'r') as f:\n for line in f:\n elements = line.split(\",\")\n if(elements[0] == \"V\"):\n self._graph.add_vertex(int(elements[1]))\n self._location[int(elements[1])] = (self.process_coord(elements[2]),\n self.process_coord(elements[3]))\n elif (elements[0] == \"E\"):\n self._graph.add_edge((int(elements[1]), int(elements[2])))\n self._streetnames[(int(elements[1]), int(elements[2]))] = elements[3]", "title": "" }, { "docid": "d2e2b499f48eef69da5ceea08ae057b7", "score": "0.55773145", "text": "def get_graph(filename):\n g = {}\n with open(filename, 'r') as f:\n n = int(f.readline().strip())\n for index in xrange(n):\n g[index] = map(int, f.readline().strip().split())\n return g", "title": "" }, { "docid": "2ff47bc6ed292cd8224c532f327b590c", "score": "0.5565647", "text": "def load(self):\n self.graphs = load_graphs(self.processed_path, num_parallel_workers=1)", "title": "" }, { "docid": "a9374e31b7575dc4b54dbf31d8f650c5", "score": "0.55485517", "text": "def import_data(file_name=\"data.sql\"):\n require('dbuser', 'dbpassword', 'dbhost')\n\n env.file_name = file_name\n\n print \"Importing data from file: \" + blue(file_name, bold=True) + \"...\"\n run(\"\"\"\n mysql -u {dbuser} -p\\\"{dbpassword}\\\" {dbname} --host={dbhost} <\\\n {public_dir}database/{file_name} \"\"\".format(**env))", "title": "" }, { "docid": "da41cb35073df863beef61c4f11e5bb6", "score": "0.5543137", "text": "def from_csv(self, file: str):\n with open(file) as f:\n for line in f:\n words = line.split(',')\n line_type = words[0]\n if line_type == 'c':\n self._add_node(int(words[1]), int(words[2]))\n elif line_type == 'e':\n self.add_edge(int(words[1]), int(words[2]))", "title": "" }, { "docid": "571f9546034a448ed91689f40134e88c", "score": "0.5542806", "text": "def load(self, filename):\n self.ids = []\n self.nodes = {}\n self.neigh = {}\n f = open(filename)\n for line in f:\n if not line.strip(): break\n i, l = map(str.strip, line.split(':'))\n if i in self.nodes:\n raise ValueError\n self.nodes[i] = Node(i, l)\n self.neigh[i] = []\n self.ids.append(i)\n\n for line in f:\n fr, l, to = map(str.strip, line.split(':'))\n if not l: l = None\n if fr not in self.nodes or to not in self.nodes:\n raise ValueError\n self.nodes[fr].add_neighbour(self.nodes[to], l)\n if l == None: l = ''\n self.neigh[fr].append((to, l))", "title": "" }, { "docid": "9980b870c7dd143a1ed4063be52b7d21", "score": "0.55365884", "text": "def buildGraph(G, file_, delimiter_):\n reader = csv.reader(open(file_), delimiter=delimiter_)\n for line in reader:\n if len(line) > 2: # for the format of cgraph.dat\n if float(line[2]) != 0.0: # if there are 3 edges\n G.add_edge(int(line[0]),int(line[1]),weight=float(line[2]))\n else: # 2 edges\n G.add_edge(int(line[0]),int(line[1]),weight=1.0)", "title": "" }, { "docid": "786248d459b18871b73fad9d627a986b", "score": "0.55353045", "text": "def read_input_file(file_name):\n\n graph_obj = graphs.graph_struct.Graph();\n obj_launches = utils.launch.Launches()\n 
count_valid_edges = 0\n\n try:\n with open(file_name, \"r\") as input_file:\n for line in input_file:\n splitted_line = line.split()\n\n if len(splitted_line) == 2:\n #vertex line\n if splitted_line[0][0] != \"V\":\n #invalid vertex line\n continue\n try:\n float(splitted_line[1])\n #valid vertex line\n graph_obj.add_vertex(splitted_line[0], float(splitted_line[1]))\n except ValueError:\n #invalid vertex line\n continue\n\n elif len(splitted_line) == 5:\n #launch line\n if splitted_line[0] != \"L\" or len(splitted_line[1]) != 8:\n #invalid launch line\n continue\n\n #valid launch line (still need to check date validity)\n launch_obj = utils.launch.Launch(splitted_line[1],float(splitted_line[2]),float(splitted_line[3]),float(splitted_line[4]))\n obj_launches.ordered_insertion_on_list(launch_obj)\n else:\n #invalid line\n continue\n\n input_file.seek(0,0)\n\n for line in input_file:\n splitted_line = line.split()\n if len(splitted_line) == 3:\n #edge line\n if splitted_line[0] != \"E\" or splitted_line[1][0] != \"V\" or splitted_line[2][0] != \"V\":\n #invalid edge line\n continue\n #valid edge line\n if graph_obj.add_edge(splitted_line[1], splitted_line[2]) == False:\n continue\n count_valid_edges += 1\n\n if count_valid_edges < len(graph_obj.vertices)-1:\n return False\n\n obj_launches.generate_dictionary_trough_list()\n\n input_file.close()\n return [graph_obj, obj_launches]\n\n except IOError:\n #error in opening the file\n return None", "title": "" }, { "docid": "7ef0f158801009491cac4f6a6c449ef9", "score": "0.5509414", "text": "def loadTAB(self, filename):\n\t\twith open(filename) as f: \n\t\t\t# GET COLUMNS NAMES\n\t\t\ttmp = f.readline().rstrip()\n\t\t\tattNames= tmp.split('\\t')\n\t\t\t# REMOVES FIRST TWO COLUMNS WHICH CORRESPONDS TO THE LABELS OF THE CONNECTED VERTICES\n\t\t\tattNames.pop(0)\n\t\t\tattNames.pop(0)\n\t\t\t# PROCESS THE REMAINING LINES\n\t\t\trow = f.readline().rstrip()\n\t\t\twhile row:\n\t\t\t\tvals = row.split('\\t')\n\t\t\t\tv1 = vals.pop(0)\n\t\t\t\tv2 = vals.pop(0)\n\t\t\t\tatt = {}\n\t\t\t\tfor i in xrange(len(attNames)):\n\t\t\t\t\tatt[ attNames[i] ] = vals[i]\n\t\t\t\tself.addEdge(v1, v2, att)\n\t\t\t\trow = f.readline().rstrip() # NEXT LINE", "title": "" }, { "docid": "7fa30ee8e99aabaffb424d8ed94b2af8", "score": "0.54903877", "text": "def load_Network(Filename):\n print(\"Loading Network map from file \", Filename)\n g = SmartDigraph()\n hash_table = {}\n counter = 1\n \n lines = open(Filename, 'r').readlines()\n \n for i in lines:\n for j in range(2):\n line = i.split()\n if line[j] not in hash_table:\n hash_table[line[j]] = counter\n counter += 1\n \n for i in hash_table:\n g.addNode(SmartNode(hash_table[i]))\n \n for i in lines:\n line = i.split()\n start_node = g.getNode(str(hash_table[line[0]]))\n end_node = g.getNode(str(hash_table[line[1]]))\n lead_time = int(line[2])\n\n edge = WeightedEdge(start_node, end_node,lead_time) # creates the edge\n g.addEdge(edge) # adds the edge to the smartdigrahp\n\n return g, hash_table", "title": "" }, { "docid": "988944e6677c9e66fdd2f6e5fb1f4e62", "score": "0.5484973", "text": "def import_file(self, filename):\n df = pd.read_csv(filename, sep=\"\\t\")\n\n # drop 'call' columns\n subset_keys = list(k for k in df.keys() if k[:4] != 'call')\n self.df = df[subset_keys]", "title": "" }, { "docid": "c5e69ec296f97c8246d9b163c5e7e74b", "score": "0.54765224", "text": "def graph_dict_bis(filename):\n # un dictionnaire vide normal\n g = {}\n\n with open(filename) as f:\n for line in f:\n begin, value, end = line.split()\n # 
c'est cette partie\n # qu'on économise avec un defaultdict\n if begin not in g:\n g[begin] = []\n # sinon c'est tout pareil\n g[begin].append((end, int(value)))\n return g", "title": "" }, { "docid": "1df95c482166d605790d28441cea1152", "score": "0.5476196", "text": "def upload_graph_file_to_env(graph_file_path, env_id):\n\n # Try to load graph data\n graph_data = helpers.load_yaml(graph_file_path)\n if not graph_data:\n raise Exception(\"Graph '{0}' is empty.\".format(graph_file_path))\n graph_name = os.path.splitext(os.path.basename(graph_file_path))[0]\n\n # Upload graph to Nailgun\n client = graph.GraphClient()\n client.upload(graph_data, \"clusters\", env_id, graph_name)\n LOG.info(\"Graph '%s' was uploaded for the environment '%s'.\",\n graph_name, env_id)", "title": "" }, { "docid": "1bd07fb7b3c235a0919c0cde17293d57", "score": "0.5452851", "text": "def load(file):\n global engine, session\n Base.metadata.create_all(engine)\n\n with open(file, \"r\") as export_file:\n export_data = json.load(export_file)\n\n for book in export_data['books']:\n new_book = Book(book)\n session.add(new_book)\n session.commit()\n\n for reading_session in book['sessions']:\n session.add(Reading_Session(reading_session, new_book.id))\n session.commit()", "title": "" }, { "docid": "7b98b7095b27202df848a065067732ca", "score": "0.5446055", "text": "def load_graph(graph_file):\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph", "title": "" }, { "docid": "af221a5016e70dde6a1f9ab05a98b94c", "score": "0.5443592", "text": "def read_graph(filename):\n try:\n return nx.read_edgelist(filename, nodetype=int, data=(('weight', int),))\n except IOError:\n sys.exit(\"El fitxer {} no existeix\".format(filename))", "title": "" }, { "docid": "7ebe6cdea308eba22bc12872969c66ad", "score": "0.54411924", "text": "def All_Graphs_By_File(n):\n #Look if graphs of order n-1 are already created \n L=[]\n if n==1:\n g=Graph(directed=False)\n g.add_vertex()\n L.append(g) \n elif n==2:\n NonAdjacent=Graph(directed=False)\n v1=NonAdjacent.add_vertex()\n v2=NonAdjacent.add_vertex()\n L.append(NonAdjacent)\n Adjacent=Graph(directed=False)\n v1A=Adjacent.add_vertex()\n v2A=Adjacent.add_vertex()\n Adjacent.add_edge(v2A,v1A)\n L.append(Adjacent) \n else : \n if Look_in_sommaire(n-1):\n lminusone=Read_Graph(n-1)\n else :\n lminusone=All_Graphs(n-1)\n print(\"I generate all graphs of order\", n-1)\n L=[]\n Dic = {}\n #Generate all ordered subset of integer from 1 to n of size m P, there are the vertex to attach \n LP=[]\n for m in range(1,n):\n LP=LP+subset_nonord_k(m,n-1)\n print(\"I need to add a vertex to \", len(lminusone), 'graphs')\n for i in range(len(lminusone)):\n if i%10==0:\n print(\"We are on the \",i+1,\"th graph under\", len(lminusone), \"graphs\")\n g=copy.copy(lminusone[i])\n GNotConnected=copy.copy(g)\n q=GNotConnected.add_vertex()\n if FreeC4O4(GNotConnected,q)==True:\n Dic[ListDegree(GNotConnected)]=[GNotConnected] \n for P in LP :\n G=copy.copy(g)\n v=G.add_vertex()\n #Add all edges between v and the vertex corresponding to index in P\n for i in P :\n w = G.vertex(i-1)\n G.add_edge(v,w)\n #Test if g in Free(C4,4K1) and isomorph to one already created\n if FreeC4O4(G,v)==True:\n LD=ListDegree(G)\n IsInDic=False\n #If the key exists, look for isomorphism\n if (LD in Dic)==True:\n ToCheck=Dic.get(LD)\n for g2 in 
ToCheck:\n if graph_tool.topology.isomorphism(G,g2)==True:\n IsInDic=True\n break\n if IsInDic==False:\n Dic[LD].append(G)\n #Otherwise create the key \n else: \n Dic[LD]=[G]\n #Create final list\n for key in Dic:\n L=L+Dic.get(key)\n #Put all in a file \n for i in range(len(L)):\n Graph_in_file(L[i],n)\n return L", "title": "" }, { "docid": "7fa7318c1ef2dd1c4a93c43ab09c25f3", "score": "0.54355556", "text": "def save_graph(graph, graph_file):\n #with open(graph_file, \"wt\") as save:\n # pickle.dump(graph, save)", "title": "" }, { "docid": "7f4fa34436b052f87a7d38827d56c29c", "score": "0.5424604", "text": "def load_graph(graph_file):\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph", "title": "" }, { "docid": "7f4fa34436b052f87a7d38827d56c29c", "score": "0.5424604", "text": "def load_graph(graph_file):\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph", "title": "" }, { "docid": "4f2d8f90f470d6a35c13776254c5ac65", "score": "0.5422862", "text": "def main(G, input_file):\n for line in open(input_file, 'r'):\n line = line.rstrip('\\n').replace(', ', ',').split(',')\n [v, w, cost] = line[0], line[1], int(line[2])\n G.add_edge(v, w, cost)", "title": "" }, { "docid": "a2ae921f8242cdcea7d298e39039c21b", "score": "0.54159564", "text": "def load_horiz_traces(self, file_path):\n self.horiz_traces = gpd.GeoDataFrame(gpd.read_file(file_path))\n print('Traces loaded')", "title": "" }, { "docid": "8f2bb00ad6cd6dabd448af1999c1d77c", "score": "0.54037195", "text": "def graphreader(self, filename):\r\n graph = RouteMap()\r\n file = open(filename, 'r')\r\n entry = file.readline() # either 'Node' or 'Edge'\r\n num = 0\r\n while entry == 'Node\\n':\r\n num += 1\r\n nodeid = int(file.readline().split()[1])\r\n l = file.readline().split() # split the line to get the latitude and longditude\r\n co_ordinates = tuple((l[1], l[2]))\r\n vertex = graph.add_vertex(nodeid, co_ordinates)\r\n entry = file.readline() # either 'Node' or 'Edge'\r\n print('Read', num, 'vertices and added into the graph')\r\n num = 0\r\n while entry == 'Edge\\n':\r\n num += 1\r\n source = int(file.readline().split()[1])\r\n sv = graph.get_vertex_by_label(source)\r\n target = int(file.readline().split()[1])\r\n tv = graph.get_vertex_by_label(target)\r\n file.readline()\r\n length = float(file.readline().split()[1])\r\n edge = graph.add_edge(sv, tv, length)\r\n file.readline() # read the one-way data\r\n entry = file.readline() # either 'Node' or 'Edge'\r\n print('Read', num, 'edges and added into the graph')\r\n print(graph)\r\n return graph", "title": "" }, { "docid": "09a7126cba5940bc66ed948222ca2a6a", "score": "0.5400464", "text": "def load_gtg_matrix(self, filename, gangs):\n matrix = pd.read_excel(filename)\n matrix = matrix.values\n total_matrix = np.ones((matrix.shape[0], matrix.shape[1]))\n rows, cols = np.where(matrix == 1)\n edges = zip(rows.tolist(), cols.tolist())\n\n # Add nodes to graph\n for k, gang in gangs.items():\n self.gtg_gr.add_node(k, pos=gang.coords)\n self.gtg_gr.add_edges_from(edges)", "title": "" }, { "docid": "38fac588e0fdc0f5799463cfa19c3b60", "score": "0.5396288", "text": "def 
process_graphs (sc, in_dir, partitions):\n sqlContext = SQLContext (sc)\n\n n3_dirs = [ os.path.join (in_dir, d) for d in [ \"drugbank\", \"pubchem\" ] ]\n\n vertices_path_posix = os.path.join (in_dir, \"vertices\")\n edges_path_posix = os.path.join (in_dir, \"edges\")\n vertices_path = \"file://{0}\".format (vertices_path_posix)\n edges_path = \"file://{0}\".format (edges_path_posix)\n\n triples = None\n vertices = None\n edges = None\n g = None\n\n if os.path.exists (vertices_path_posix) and os.path.exists (edges_path_posix):\n\n print (\"Loading existing vertices: {0}\".format (vertices_path))\n start = time.time ()\n vertices = sqlContext.read.parquet (vertices_path).repartition(partitions).cache ()\n print (\"Elapsed time for loading precomputed vertices: {0} seconds.\".format (\n time.time () - start))\n\n print (\"Loading existing edges: {0}\".format (edges_path))\n start = time.time ()\n edges = sqlContext.read.parquet (edges_path).repartition(partitions).cache ()\n print (\"Elapsed time for loading precomputed edges: {0} seconds.\".format (\n time.time () - start))\n\n else:\n print (\"Constructing vertices and edges from chem2bio2rdf data sources\")\n\n files = [ os.path.join (n3_dir, n3_file) for n3_dir in n3_dirs for n3_file in os.listdir (n3_dir) ]\n triples = sc.parallelize (files, numSlices=partitions). \\\n flatMap (lambda n3_file : process_chunk (n3_file))\n\n vertices = sqlContext.createDataFrame (\n data = triples.flatMap (lambda d : [\n ( trim_uri (d.S), \"attr0\" ),\n ( trim_uri (d.O), \"attr1\" ) ]),\n schema=[ \"id\", \"attr\" ]).\\\n cache () \n edges = sqlContext.createDataFrame (\n data = triples.map (lambda d : (\n trim_uri (d.S),\n trim_uri (d.O),\n trim_uri (d.P) )),\n schema = [ \"src\", \"dst\", \"relationship\" ]). \\\n cache ()\n \n print (\"Triples: {0}\".format (triples.count ()))\n\n if os.path.exists (vertices_path_posix):\n shutil.rmtree (vertices_path_posix)\n if os.path.exists (edges_path_posix):\n shutil.rmtree (edges_path_posix)\n vertices.write.parquet (vertices_path)\n edges.write.parquet (edges_path)\n\n if vertices is not None and edges is not None:\n start = time.time ()\n vertices.printSchema ()\n edges.printSchema ()\n print (\"Elapsed time for print schema: {0} seconds.\".format (\n time.time () - start))\n\n start = time.time ()\n print (\" Total of {0} edges.\".format (edges.count ()))\n print (\"Elapsed time for count edges: {0}\".format (time.time () - start))\n\n g = GraphFrame(vertices, edges)\n\n print (\"Query: Get in-degree of each vertex.\")\n start = time.time ()\n g.inDegrees.\\\n sort (\"inDegree\", ascending=False).\\\n show(n=3, truncate=False)\n print (\"Elapsed time for computing in-degree: {0} seconds.\".format (\n time.time () - start))\n\n start = time.time ()\n print (\"Query: Number of protein database relationships: {0}\".format (\n g.edges.\\\n filter(\"relationship LIKE '%resource/PDB_ID%' \").\\\n count ()))\n print (\"Elapsed time for edge filter and count query: {0} seconds.\".format (\n time.time () - start))\n \n edges.registerTempTable (\"edges\")\n\n sqlContext.sql (\"\"\"\n SELECT substring(src, length(src)-7, 6) as Drug,\n dst as Name\n FROM edges\n WHERE relationship LIKE '%resource/Name%'\n \"\"\").show (n=3, truncate=False)\n\n start = time.time ()\n sqlContext.sql (\"\"\"\n SELECT substring(src, length(src)-7, 6) as Compound,\n dst as SMILES\n FROM edges\n WHERE relationship LIKE '%open%_smiles%'\n \"\"\").show (n=3, truncate=False)\n print (\"Elapsed time for SQL query: {0} seconds.\".format (\n 
time.time () - start))\n\n start = time.time ()\n g.find (\"()-[Drug2PDB]->()\"). \\\n filter (\"Drug2PDB.relationship LIKE '%/PDB_ID' \"). \\\n show (n=3, truncate=False)\n print (\"Elapsed time for graph motif query: {0} seconds.\".format (\n time.time () - start))\n\n return g", "title": "" }, { "docid": "22972cfb0f81621d0c95084a35a75205", "score": "0.53908056", "text": "def load_from_json(self, file_name: str) -> bool:\n\n def get_pos(pos_s: str) -> tuple:\n i = pos_s.find(',')\n x = float(pos_s[:i])\n\n pos_s = pos_s[i + 1:]\n i = pos_s.find(',')\n y = float(pos_s[:i])\n\n pos_s = pos_s[i + 1:]\n z = float(pos_s)\n\n return (x, y, z)\n\n f = open(file_name)\n json_s = f.read()\n json_obj = json.loads(json_s)\n\n if len(json_obj) > 0:\n self.clear()\n\n for node in json_obj[\"Nodes\"]:\n if \"pos\" in node.keys():\n pos = get_pos(node[\"pos\"])\n self.__graph.add_node(node[\"id\"], pos)\n else:\n self.__graph.add_node(node[\"id\"])\n \n \n for edge in json_obj[\"Edges\"]:\n self.__graph.add_edge(edge[\"src\"], edge[\"dest\"], edge[\"w\"])\n \n f.close()", "title": "" }, { "docid": "efcd830aa775344932897d3c26e6176b", "score": "0.5385624", "text": "def loadGraph(graph_name):\n with open('./dataset/{}_node.csv'.format(graph_name), 'r',\n encoding='utf-8') as fp:\n reader = csv.reader(fp)\n nodes = list(int(_[0]) for _ in reader)\n with open('./dataset/{}_edge.csv'.format(graph_name), 'r',\n encoding='utf-8') as fp:\n reader = csv.reader(fp)\n edges = list((int(_[0]), int(_[1])) for _ in reader if _[0] != _[1])\n G = nx.Graph()\n G.add_nodes_from(nodes)\n G.add_edges_from(edges)\n return G", "title": "" }, { "docid": "d4eaa58fa5d95c2219d58b35ca214ad5", "score": "0.53576106", "text": "def load_visualization(self, fname):\n try:\n self.engine.load_visualization(fname)\n except:\n exception()", "title": "" }, { "docid": "73e6e49eee6c2c8a90c0c2e8532f282a", "score": "0.5344014", "text": "def loadfromfile(self, file):\n f = open(file, 'r')\n for line in [x.strip() for x in f]:\n m = re.match(r'^(\\S+)(\\d{4})(\\d{2})(\\S{4})\\s+.*$', line)\n if m:\n station_id = m.group(1)\n yyyy = m.group(2)\n mm = m.group(3)\n element = m.group(4)\n if element not in self.data:\n self.data[element] = []\n self.data[element].append({ 'year' : int(yyyy), 'month' : int(mm), 'values' : [ int(line[21+8*i:26+8*i].strip()) for i in range(0,31) ] })", "title": "" }, { "docid": "520738fed063de4ec9bf3d8db1025b77", "score": "0.534208", "text": "def export_connectivity_topology(self, filename):\n try:\n conn = sqlite3.connect(filename)\n except:\n print \"Error while connecting to the database \" + filename\n return -1\n cursor = conn.cursor()\n tn = 'glycans'\n gn = 'glycan_name'\n gt = 'glycan_tree'\n cursor.execute(\"DROP TABLE IF EXISTS {tn}\".format(tn = tn))\n cursor.execute(\"CREATE TABLE {tn} ({gn} text, {gt} text)\".format(tn = tn, gn = gn, gt = gt))\n\n for key in self.connect_topology.keys():\n units = self.connect_topology[key]['UNIT']\n glycan = []\n for unit in units:\n v = []\n v.extend(unit[0:2])\n v.extend(unit[2])\n glycan.append(' '.join(v))\n glycan = '|'.join(glycan)\n \n cursor.execute(\"INSERT INTO {tn} VALUES (\\'{gn}\\', \\'{gt}\\')\".format(tn = tn, gn = key, gt = glycan))\n\n conn.commit()\n conn.close()", "title": "" }, { "docid": "75590ff274fe8108063d9b55fb5759dc", "score": "0.53374285", "text": "def FromFileToDB(file_name, db, table_name):\n \n kegg = Kegg.getInstance()\n DissociationConstants._CreateDatabase(db, table_name)\n\n for i, row in enumerate(csv.DictReader(open(file_name, 'r'))):\n 
if 'pK' not in row and 'ddG' not in row:\n raise Exception(\"The CSV file is not in a recognized format: \"\n \"there should be a column named ddG or pK\")\n try:\n if not row['cid']:\n continue # without a CID we cannot match this to the dG0 table\n cid = int(row['cid'])\n name = row['name'] or kegg.cid2name(cid)\n logging.debug(\"Parsing row #%d, compound %s (C%05d)\" %\n (i, name, cid))\n \n nH_below = int(row['nH_below'])\n nH_above = int(row['nH_above'])\n nMg_below = int(row['nMg_below'])\n nMg_above = int(row['nMg_above'])\n mol_below = row['mol_below'] or None\n mol_above = row['mol_above'] or None\n ref = row['ref']\n \n if 'ddG' in row: # this is the 1st format type\n ddG = float(row['ddG'])\n elif 'pK' in row: # this is the 2nd format type\n pK = float(row['pK'] or 0)\n T = float(row['T'] or default_T)\n if row['type'] == 'acid-base':\n if nMg_below != nMg_above or nH_below != nH_above+1:\n raise Exception('wrong nMg and nH values')\n ddG = -R * T * np.log(10) * pK\n elif row['type'] == 'Mg':\n if nMg_below != nMg_above+1 or nH_below != nH_above:\n raise Exception('wrong nMg and nH values')\n ddG = -R * T * np.log(10) * pK + dG0_f_Mg\n elif row['type'] == '':\n if nMg_below != nMg_above or nH_below != nH_above:\n raise Exception('wrong nMg and nH values')\n ddG = None\n else:\n raise Exception('unknown dissociation type: ' + row['type'])\n\n except Exception as e:\n raise Exception(\"Row %i: %s\" % (i, str(e)))\n\n db.Insert(table_name, [cid, name, nH_below, nH_above, \n nMg_below, nMg_above, mol_below,\n mol_above, ddG, ref])\n \n db.Commit()", "title": "" }, { "docid": "0e71996c481c17db9b19cdc98cff9746", "score": "0.5334638", "text": "def parse_input(file):\n with open(file) as fin:\n n = int(fin.readline().strip())\n coordinates = []\n for e in range(n):\n x, y = [float(x) for x in fin.readline().split()]\n #Append coordinate pair\n coordinates.append((x, y))\n\n #Generate all possible edges of complete graph and distances between them\n edges = []\n #Calculate distances between vertices\n for u in range(len(coordinates)):\n for v in range(u, len(coordinates)):\n if u != v:\n p1 = coordinates[u]\n p2 = coordinates[v]\n edges.append((u, v, math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2))))\n\n return n, edges", "title": "" }, { "docid": "8d14b3199ee228a902ca5bbb03d942e0", "score": "0.5333962", "text": "def load_edge_file(file_path):\n if not file_path or not file_path.strip() or not os.path.exists(file_path):\n logger.logging.append('ERROR: Input file path is not valid: {}. Please provide a valid '\n 'input path.'.format(file_path))\n return None\n try:\n # loads input data\n input_df = pandas.read_csv(file_path, sep='\\t', header=None, index_col=None)\n if input_df.shape == (0, 0):\n logger.logging.append('ERROR: Input data {} is empty. Please provide a valid '\n 'input data.'.format(file_path))\n return None\n if input_df.shape[1] < 3:\n logger.logging.append('ERROR: Not enough columns in input data {}. 
Please provide '\n 'a valid input data.'.format(file_path))\n return None\n logger.logging.append(\n 'INFO: Successfully loaded input data: {} with {} row(s) and {} '\n 'column(s)'.format(file_path, input_df.shape[0], input_df.shape[1]))\n return input_df\n except Exception as err:\n logger.logging.append('ERROR: {}'.format(str(err)))\n return None", "title": "" }, { "docid": "2fb2e2a276356e5fd23e94d835cb27af", "score": "0.5323527", "text": "def load_edges(self, file_, src2index, dst2index, symmetry=False):\n edges = []\n with open(file_, 'r') as reader:\n for line in reader:\n items = line.strip().split()\n src, dst = src2index[items[0]], dst2index[items[1]]\n edges.append((src, dst))\n if symmetry:\n edges.append((dst, src))\n edges = list(set(edges))\n return edges", "title": "" }, { "docid": "14894f88dc008bccb0bd19f5cb8bd702", "score": "0.53213", "text": "def read_file(self, file_path: str) -> None:\n pattern = re.compile(r'([A-Za-z]{1,3})\\s*(-?\\d+(?:\\.\\d+)?)\\s*(-?\\d+(?:\\.\\d+)?)\\s*(-?\\d+(?:\\.\\d+)?)')\n with open(file_path) as file:\n for element, x, y, z in pattern.findall(file.read()):\n self.elements.append(element)\n self.x_coordinates.append(float(x))\n self.y_coordinates.append(float(y))\n self.z_coordinates.append(float(z))\n self.atomic_radii = [atomic_radii[element] for element in self.elements]\n self.atomic_numbers = [atomic_numbers[element] for element in self.elements]\n self._generate_adjacency_list()", "title": "" }, { "docid": "7e616c332a8381758f34355391576399", "score": "0.5316223", "text": "def load_npz_to_sparse_graph(file_name):\n file_name = \"data/{}\".format(file_name)\n with np.load(file_name, allow_pickle=True) as loader:\n loader = dict(loader)\n adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),\n shape=loader['adj_shape'])\n\n if 'attr_data' in loader:\n # Attributes are stored as a sparse CSR matrix\n attr_matrix = sp.csr_matrix((loader['attr_data'], loader['attr_indices'], loader['attr_indptr']),\n shape=loader['attr_shape'])\n elif 'attr_matrix' in loader:\n # Attributes are stored as a (dense) np.ndarray\n attr_matrix = loader['attr_matrix']\n else:\n attr_matrix = None\n\n if 'labels_data' in loader:\n # Labels are stored as a CSR matrix\n labels = sp.csr_matrix((loader['labels_data'], loader['labels_indices'], loader['labels_indptr']),\n shape=loader['labels_shape'])\n elif 'labels' in loader:\n # Labels are stored as a numpy array\n labels = loader['labels']\n else:\n labels = None\n\n node_names = loader.get('node_names')\n attr_names = loader.get('attr_names')\n class_names = loader.get('class_names')\n metadata = loader.get('metadata')\n\n return SparseGraph(adj_matrix, attr_matrix, labels, node_names, attr_names, class_names, metadata)", "title": "" }, { "docid": "51a76f8715f60766c6bbc9a15f2f982d", "score": "0.5310153", "text": "def load_kg_triple(self, file):\n triples = []\n with open(os.path.join(self._data_dir, file), \"r\") as f:\n for line in f.readlines():\n line_list = line.strip().split('\\t')\n assert len(line_list) == 3\n head = self._entity2id[line_list[0]]\n tail = self._entity2id[line_list[1]]\n relation = self._relation2id[line_list[2]]\n triples.append((head, relation, tail))\n return np.array(triples)", "title": "" }, { "docid": "73f2e2a23a754107bd7e316b2de1046b", "score": "0.5302283", "text": "def loadSIF(self, filename):\n\t\twith open(filename) as f:\n\t\t\t# SKIP COLUMNS NAMES\n\t\t\ttmp = f.readline()\n\t\t\t# PROCESS THE REMAINING LINES\n\t\t\trow = 
f.readline().rstrip()\n\t\t\twhile row:\n\t\t\t\tvals = row.split('\\t')\n\t\t\t\tself.addEdge(vals[0], vals[2])\n\t\t\t\trow = f.readline().rstrip()\n\t\treturn None", "title": "" }, { "docid": "f4eb2c83123bcfdb0e141b1de95812fa", "score": "0.529562", "text": "def read_graph(file_name):\n with open(file_name, 'r') as graph:\n line = graph.readline().split(\" \")\n k = None\n try:\n _, name, nVertices, nEdges = line\n except ValueError:\n _, name, nVertices, nEdges, k = line\n header = ['#', str(name), str(nVertices), str(nEdges), str(k[-2])]\n matrix = np.zeros([int(nVertices), int(nVertices)]) # slower than empty\n degrees = np.zeros(int(nVertices))\n d = defaultdict(list)\n n_lines = 0\n while True:\n line = graph.readline()\n if not line:\n break\n v0, v1 = line.split(\" \")\n v0, v1 = int(v0), int(v1)\n\n if v0 < v1:\n smaller, bigger = v0, v1\n else:\n smaller, bigger = v1, v0\n\n values = d.get(smaller)\n if values is None or bigger not in values:\n degrees[smaller] += 1\n degrees[bigger] += 1\n d[smaller].append(bigger)\n\n matrix[v0][v1] = 1\n matrix[v1][v0] = 1\n n_lines += 2\n\n d = None\n # assert np.sum(matrix) == n_lines # Check all lines read\n return matrix, np.diag(degrees), int(k), header", "title": "" }, { "docid": "50727d13cfa04bb896088b1610e1b853", "score": "0.5291847", "text": "def load(self, hyperlinks):\n \tif Path(hyperlinks).is_file():\n \t\tself.g = nx.DiGraph()\n\t \twith open(hyperlinks, \"rb\") as ifile:\n\t \t\tfor line in ifile.readlines():\n\t \t\t\tdoc, links = line.split(\":\")\n\t \t\t\tdoc = int(doc)\n\t \t\t\tfor link in links.split(\";\"):\n\t \t\t\t\tself.g.add_edge(doc, int(link))\n\t \tlog.debug(\"%d documents et %d hyperliens chargés\", len(self.g), self.g.size())\n\t else:\n\t \tlog.error(\"Erreur lors de l'ouverture du fichier %s\", hyperlinks)", "title": "" }, { "docid": "e9fcd4ada40475ce4d8c34ca302ba815", "score": "0.5291808", "text": "def load_map(map_filename):\n\n print(\"Loading map from file...\")\n \n # Open the file and assign to the variable file\n file = open(map_filename, 'r')\n \n # Create an empty node set to add all the different node names from file. 
By\n # using the set type we can omit any duplicate nodes in the file\n # Additionally, create a list of all the weighted edges, to be able to add them to the \n # graph later\n node_names = set([])\n weighted_edges = []\n for line in file:\n# print(line)\n a, b, c, d = tuple(line.split())\n# a = int(a)\n# b = int(b)\n c = int(c)\n d = int(d)\n node_names.add(a)\n node_names.add(b)\n weighted_edges.append(WeightedEdge(Node(a), Node(b), c, d))\n \n # Transform th node_names into a list of nodes and put them in a nodes list\n nodes = []\n for i in node_names:\n nodes.append(Node(i))\n \n # Create the graph and add all of the nodes and edges to it\n graph = Digraph()\n for i in nodes:\n graph.add_node(i)\n for j in weighted_edges:\n graph.add_edge(j)\n \n # Print a message saying how many edges and nodes were loaded\n print(' '+ str(len(nodes))+ ' nodes loaded.')\n print(' '+ str(len(weighted_edges))+ ' weighted edges loaded.\\n')\n\n # Close the file\n file.close()\n \n return graph", "title": "" }, { "docid": "54f473713f4028e705df022e7a664ce5", "score": "0.52863", "text": "def load_model_file(cls, data_file): # TODO - allow commas instead of tabs\n # TODO - allow user to add user-defined columns in NODES_TABLE and add that as an attribute to the Node\n\n interface_set = set()\n node_set = set()\n demand_set = set()\n lsp_set = set()\n\n # Open the file with the data, read it, and split it into lines\n with open(data_file, \"r\", encoding=\"utf-8-sig\") as f:\n data = f.read()\n\n lines = data.splitlines()\n\n # Define the Interfaces from the data and extract the presence of\n # Nodes from the Interface data\n int_info_begin_index = lines.index(\"INTERFACES_TABLE\") + 2\n int_info_end_index = find_end_index(int_info_begin_index, lines)\n\n # Check that each circuit_id appears exactly 2 times\n circuit_id_list = []\n for line in lines[int_info_begin_index:int_info_end_index]:\n try:\n circuit_id_item = line.split(\"\\t\")[5]\n circuit_id_list.append(circuit_id_item)\n except IndexError:\n pass\n\n bad_circuit_ids = [\n {\"circuit_id\": item, \"appearances\": circuit_id_list.count(item)}\n for item in set(circuit_id_list)\n if circuit_id_list.count(item) != 2\n ]\n\n if len(bad_circuit_ids) != 0:\n msg = (\n \"Each circuit_id value must appear exactly twice; the following circuit_id values \"\n \"do not meet that criteria: {}\".format(bad_circuit_ids)\n )\n raise ModelException(msg)\n\n interface_set, node_set = cls._extract_interface_data_and_implied_nodes(\n int_info_begin_index, int_info_end_index, lines\n )\n # Define the explicit nodes info from the file\n nodes_info_begin_index = lines.index(\"NODES_TABLE\") + 2\n nodes_info_end_index = find_end_index(nodes_info_begin_index, lines)\n node_lines = lines[nodes_info_begin_index:nodes_info_end_index]\n for node_line in node_lines:\n node_set = cls._add_node_from_data(\n demand_set, interface_set, lsp_set, node_line, node_set\n )\n\n # Define the demands info\n demands_info_begin_index = lines.index(\"DEMANDS_TABLE\") + 2\n demands_info_end_index = find_end_index(demands_info_begin_index, lines)\n # There may or may not be LSPs in the model, so if there are not,\n # set the demands_info_end_index as the last line in the file\n if not demands_info_end_index:\n demands_info_end_index = len(lines)\n\n demands_lines = lines[demands_info_begin_index:demands_info_end_index]\n\n for demand_line in demands_lines:\n try:\n cls._add_demand_from_data(demand_line, demand_set, lines, node_set)\n except ModelException as e:\n err_msg = e.args[0]\n raise 
ModelException(err_msg)\n\n # Define the LSP info (if present)\n try:\n lsp_info_begin_index = lines.index(\"RSVP_LSP_TABLE\") + 2\n cls._add_lsp_from_data(lsp_info_begin_index, lines, lsp_set, node_set)\n except ValueError:\n print(\"RSVP_LSP_TABLE not in file; no LSPs added to model\")\n except ModelException as e:\n err_msg = e.args[0]\n raise ModelException(err_msg)\n\n return cls(interface_set, node_set, demand_set, lsp_set)", "title": "" }, { "docid": "db16b06c01be6f29d0471c217fc1ee2d", "score": "0.5286203", "text": "def generate_graph(file_path, yticks_distance, increase_ylim=False):\n generate_graph_from_scores(log_to_data(file_path), yticks_distance, increase_ylim)", "title": "" }, { "docid": "3167f040a2a5c442bc2f8018ba7477e2", "score": "0.52859586", "text": "def load(fp):\n return BifGraph().load(fp)", "title": "" }, { "docid": "783f4c01bd00e2e61104efce038ed8d8", "score": "0.52847534", "text": "def read_input_file(filename):\n G=nx.DiGraph()\n f = open(filename)\n v_size = f.readline()\n while '.n ' not in v_size:\n v_size = f.readline()\n v_size = int(v_size.split()[1])\n v_data = f.readline().split()\n G.add_node(0)\n G.node[0] = 0\n for i in range(1, v_size+1):\n G.add_node(i)\n G.node[i] = int(v_data[i])\n f.readline()\n line = f.readline()\n area = 0\n while line and (not '.e' in line):\n line = line.split()\n w = int(line[2])\n area += w\n G.add_edge(int(line[0]), int(line[1]), weight=w)\n line = f.readline()\n f.close()\n return G, area", "title": "" }, { "docid": "187e5609a2943cafdc4bf5be72ba69c2", "score": "0.52702236", "text": "def load(self, graph):\n self.graph = graph", "title": "" }, { "docid": "416f0059835d689f8a07f34788ea609e", "score": "0.52699554", "text": "def graph_iter_from_file(filename: str, parser: tp.Callable[[str], ops.TRow]) -> ops.TRowsGenerator:\n\n with open(filename) as f:\n for line in f:\n yield parser(line)\n # break # read only one line for less time consumption\n print(\"file iteration finished\")", "title": "" }, { "docid": "0780c736af578ac2b6fb912cc7406dfd", "score": "0.5265657", "text": "def create_graph() -> nx.classes.graph.Graph:\n\n\tprint(\"Creating a NetworkX graph from .inp file (networkx_graph.py - create_graph())\")\n\n\t# get data from .inp file\n\tnodes, edges = get_node_data(data_file)\n\n\t# remove junctions with no edges\n\tnodes = [i for i in nodes if i.name not in junctions_with_no_edges]\n\n\t# create a graph object\n\tG = nx.Graph()\n\n\t# add junctions (nodes), specify their coordinates\n\tfor node in nodes:\n\t\tG.add_node(node.name, pos=(float(node.x), float(node.y)), measurement=node.measurement)\n\n\t# add pipes (edges)\n\tfor edge in edges:\n\t\tG.add_edge(edge.node1, edge.node2,\n\t\t\tweight=float(edge.length), measurement=edge.measurement, name=edge.name)\n\n\treturn G \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return nx.classes.graph.Graph", "title": "" }, { "docid": "a208da3580614bbc20179555c1931bff", "score": "0.5260279", "text": "def build_graph(path) -> nx.Graph:\n df = pd.read_csv(path, usecols=['uid1', 'uid2', 'target'])\n df.columns = ['uid1', 'uid2', 'weight']\n df['weight'] = 1 / df['weight']\n g = nx.from_pandas_edgelist(df, source='uid1', target='uid2', edge_attr=['weight'])\n return g", "title": "" }, { "docid": "e5841de3aadb1ce5556ba655c008662e", "score": "0.5255257", "text": "def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")\n self.floorGraph.addEdge(params[0],params[1],float(params[2]))\n 
self.floorGraph.addEdge(params[1],params[0],float(params[2]))", "title": "" } ]
3fa9d87096dc3fad08e3952a0081d446
Called after a property has changed.
[ { "docid": "719cdb4e44572b9c254ca4771693d64f", "score": "0.0", "text": "def _on_time_formatter_property_changed(self, evt=None):\n \n self._is_dirty = True", "title": "" } ]
[ { "docid": "d91682be3dcb80886000459abf2b8b65", "score": "0.71458226", "text": "def onPropertyStoreChanged(self):\n self.haschanged = True\n self.update()", "title": "" }, { "docid": "06f1063962948be0a25c6606187672d7", "score": "0.6950608", "text": "def property_changed(self, property_name):\n if (isinstance(self.properties[property_name], Entry)):\n # insert the new value in the entry\n self.properties[property_name].delete(0, END)\n self.properties[property_name].insert(\n 0,\n getattr(self.context, property_name)\n )\n\n elif (isinstance(self.properties[property_name], Checkbutton)):\n # check/uncheck the check button then run it's requisite function\n if (getattr(self.context, property_name)):\n self.properties[property_name].select()\n else:\n self.properties[property_name].deselect()\n self.properties[property_name].invoke()\n\n elif (isinstance(self.properties[property_name], Label)):\n # set the label text\n self.properties[property_name].config(\n text = str(getattr(self.context, property_name))\n )", "title": "" }, { "docid": "1eb89947a78b4204837d2811828e08ac", "score": "0.68234956", "text": "def onChanged(self, vp, prop):\n pass", "title": "" }, { "docid": "41d07facd53d5f360a96f0d50b75313d", "score": "0.68125093", "text": "def update_properties(self):\n pass", "title": "" }, { "docid": "9ad75f051f24decee17265328bbbe5c4", "score": "0.6650927", "text": "def set_property_value(self, current_value, new_value):\n if live_object_is_valid(self._parent) and current_value != new_value:\n setattr(self._parent, self._property_name, new_value)\n if self._display_callback:\n self._display_callback(new_value)", "title": "" }, { "docid": "ae7105481b62403ea924faba6854a138", "score": "0.65983623", "text": "def _update_property(self, prop):\n try:\n do_update = getattr(self, f\"_update_{prop}\")\n except AttributeError:\n _LOGGER.debug(\n \"Kumo %s: %s property updater not implemented\", self._name, prop\n )\n return\n do_update()", "title": "" }, { "docid": "761426ef8e0b801d1ec0421247c0cb23", "score": "0.65422606", "text": "def prop(self, prop):\n\n self._prop = prop", "title": "" }, { "docid": "417af3871c2a4e037248cf86dc80c534", "score": "0.6537468", "text": "def PropertiesChanged(self, interface_name, changed_properties,\r\n invalidated_properties):\r\n\r\n pass", "title": "" }, { "docid": "c088e71f9b0cea7d51457a23856f2ee8", "score": "0.6520824", "text": "def do_set_property(self, property, value):\n if property.name == 'plugin':\n self.__plugin = value\n else:\n raise AttributeError, 'unknown property %s' % property.name", "title": "" }, { "docid": "d9f8b31672a137d92cf200c111502fe1", "score": "0.6510544", "text": "def property_changed(self, property_name):\n # if property_changed(\"\") is called all properties should be updated\n single = ViewCollection()\n if (property_name == \"\"):\n self.all_properties_changed()\n return\n for view in single.views:\n if (property_name in view.properties):\n view.property_changed(property_name)", "title": "" }, { "docid": "9700bf28155a0aa658352d040ceaedc8", "score": "0.65050066", "text": "def update_properties(self) -> None:\n changes = {k: v[1] for k, v in self._altered_properties.items()}\n self._alter_properties(**changes)\n self._altered_properties.clear()", "title": "" }, { "docid": "ff4a8987ca2a2131309bb909cbf4db6a", "score": "0.6476606", "text": "def updated(self, subject=None, property=None, value=None):\n pass", "title": "" }, { "docid": "116c6ff4346b0ca3432025816d9f0687", "score": "0.6422893", "text": "def onPropertyChanged(self,node,feature):\n 
if feature=='value':\n self.onPropertyStoreChanged()", "title": "" }, { "docid": "f8c57e6024dad0b57c959fdbac6d2731", "score": "0.6416933", "text": "def _set_property(self, name: str, value: Any):\n if self._get_property(name) is not None:\n raise ValueError(\"Cannot reassign component\")\n super()._set_property(name, value)", "title": "" }, { "docid": "a36653e8482b9e3d69ba5e9b00ca9b52", "score": "0.6405068", "text": "def SetProperty(self, key, value):", "title": "" }, { "docid": "fe6a05d9cede7c1370af14e7f8366d7b", "score": "0.6369133", "text": "def property_modified_callback(self, property_object, value):\n if property_object is self.user_data:\n if self.slider.value != value:\n if self.is_boolean:\n self.slider.value = int(value)\n else:\n self.slider.value = value", "title": "" }, { "docid": "5563d0fc0f0e8a30d0f500a30f3776f6", "score": "0.6340812", "text": "def _add_property_listener(self):\n if live_object_is_valid(self._parent):\n self._on_parameter_changed.subject = self._parent\n self._on_property_value_changed()", "title": "" }, { "docid": "678469c10aefafd9a0da8bf64daf28cd", "score": "0.63296026", "text": "def cb_PropertyNotifyEvent(self, e):\n if not self.is_alive():\n return\n\n if (e.state == xcb.xproto.Property.NewValue and\n e.atom not in self.win.properties):\n self.win.properties.append(e.atom)\n\n a = aname(e.atom)\n\n if a in ('_NET_WM_NAME', 'WM_NAME'):\n self.update_title(a)\n elif a == 'WM_NORMAL_HINTS':\n self.win.normal_hints = icccm.get_wm_normal_hints(state.conn,\n self.win.id)\n elif a in ('_NET_WM_STRUT', '_NET_WM_STRUT_PARTIAL'):\n self.update_struts()\n\n if (e.state == xcb.xproto.Property.Delete and\n e.atom in self.win.properties):\n self.win.properties.remove(e.atom)", "title": "" }, { "docid": "4dfe47c2c0ab14c7b4189714588c8edc", "score": "0.63224787", "text": "def _set_property(self, serialized_key, new_value):\n if '$set' not in self._updates:\n self._updates['$set'] = {}\n self._updates['$set'][serialized_key] = new_value", "title": "" }, { "docid": "1c5abefb2839ca0965c3c715cd6e65ee", "score": "0.6292916", "text": "def on_entry_changed(entry, property_name):\r\n self.set_attr(property_name, entry.get_text())", "title": "" }, { "docid": "edb9c34a66193991a7ef1cff0d13f91a", "score": "0.6281513", "text": "def _on_json_canvas_property_changed(self, evt):\n \n # get value\n value = evt.new_value\n \n # convert UNDEF\n if value is UNDEF:\n value = str(UNDEF)\n \n # convert color\n elif isinstance(value, Color):\n value = value.hex\n \n # store command\n self._store_command('set_property', {\n 'name': evt.name,\n 'value': value,\n 'raise_error': False})", "title": "" }, { "docid": "298a3ee087fca1cfec5c60ee5b5deed4", "score": "0.627842", "text": "def props_changed_listener(self):\n self.spotify = self.bus.get_object(\"org.mpris.MediaPlayer2.spotify\",\n \"/org/mpris/MediaPlayer2\")\n self.spotify.connect_to_signal(\"PropertiesChanged\",\n self.handle_properties_changed)", "title": "" }, { "docid": "7460e836e1020669f4a6ddb402b75dd8", "score": "0.62428546", "text": "def onBotPropertyChanged(self, caller, property):\r\n\t\tself.updateControlStates()", "title": "" }, { "docid": "683c7eac055c593e62c22adb19e4815f", "score": "0.62249255", "text": "def do_set_property(self, pspec, val):\n if pspec.name == \"name\":\n self._activity.SetProperties({'name': val})\n self._name = val\n elif pspec.name == \"color\":\n self._activity.SetProperties({'color': val})\n self._color = val\n elif pspec.name == \"tags\":\n self._activity.SetProperties({'tags': val})\n self._tags = val\n 
elif pspec.name == \"private\":\n self._activity.SetProperties({'private': val})\n self._private = val", "title": "" }, { "docid": "21c5f770ba509dc11ab81a340b6e1f94", "score": "0.62147284", "text": "def update_property(self, property_info):\n validate_property_schema(property_info)\n self.schema[\"@graph\"].append(property_info)\n validate_schema(self.schema)\n print(\"Updated the property {} successfully!\".format(property_info[\"rdfs:label\"]))", "title": "" }, { "docid": "0d8fec452957998d5ddb139835765ebd", "score": "0.6195203", "text": "def _on_properties_changed(self, iface_name, props_changed,\n props_invalidated, *, object_path):\n # Apply changes\n self._objects[object_path]._change_properties(\n iface_name, props_changed, props_invalidated)\n # Notify users\n self._event_cb(\n self, 'object-changed',\n object_path, iface_name, props_changed, props_invalidated)", "title": "" }, { "docid": "82b266657f7e55ac6f7e763e92ac1199", "score": "0.6175951", "text": "def setvaluechanged(self, changed):\n self.value_changed = changed", "title": "" }, { "docid": "810b8bb57416b7c6b35aecb2ea213108", "score": "0.61611396", "text": "def _on_clear_properties(self):\n\n self._properties_widget.clear_properties()", "title": "" }, { "docid": "fd4d829d3b35664ecb21b05f09f02d04", "score": "0.6138033", "text": "def update_properties(self):\n self._state = self.values.primary.data", "title": "" }, { "docid": "f9bb682fc5b991813b04d45061b3688b", "score": "0.61316526", "text": "def listen_properties_changed(bus, loop, callback):\n return bus.addMatch(\n callback,\n interface=PROPERTIES_INTERFACE,\n member=\"PropertiesChanged\",\n path_namespace=\"/org/bluez\"\n ).asFuture(loop)", "title": "" }, { "docid": "13b43dd4bc4abc1c2a27bbd5ffbfd813", "score": "0.6119713", "text": "def setPropertyValue(self, QString, QVariant, bool_changed=True): # real signature unknown; restored from __doc__\r\n pass", "title": "" }, { "docid": "1e153e95b0a53490b41b2109f39986fb", "score": "0.6112475", "text": "def test_properties(self):\n\n node, sources = self._prepare_node()\n\n properties = node.get_properties()\n\n for prop in properties:\n gotten_value = prop.value\n\n prop.callback(str(gotten_value))\n\n # Check at least that the setter did not store a different value\n assert gotten_value == prop.value", "title": "" }, { "docid": "97f439e91c5aeffaf501faa76a8825d6", "score": "0.60861915", "text": "def redo(self):\n super(SetPropertyValueCommand, self).redo()\n for c in self._components:\n self._property.fset(c, self._value)", "title": "" }, { "docid": "df59a3f00e9cbe26400b55d9d23b480f", "score": "0.6082186", "text": "async def update(self, changed: dict[str, Any] | None = None) -> None:\n if changed and self.properties:\n self.properties.update(changed)\n else:\n self.properties = await self.dbus.get_properties(self.properties_interface)", "title": "" }, { "docid": "b9ef3cae643fc4865126c7dfa8358861", "score": "0.6078112", "text": "def pchanged(self):\n self._callbacks.process(\"pchanged\")", "title": "" }, { "docid": "c83dae61cf6c0088ed6a1ae1093a5e9d", "score": "0.6073866", "text": "def markChanged(self):\n self.changed = True", "title": "" }, { "docid": "1ab4f65038cbd7ab27e5314d8f9fc1e1", "score": "0.6073135", "text": "def update_properties(self, props):\n pass", "title": "" }, { "docid": "90a0b9f55d1326243267301af772e801", "score": "0.60675174", "text": "def _yeild_node_property(self, new_property):\n pass", "title": "" }, { "docid": "d69ed4c532e965b75af5c21fd221bbf4", "score": "0.605638", "text": "def changed_callback(self, attr, 
oldval, newval):", "title": "" }, { "docid": "aef48dae8a058278e7b613d1147e341c", "score": "0.6045831", "text": "def update_properties(self):\n self._state = self.values.primary.data\n _LOGGER.debug(\"self._state=%s\", self._state)", "title": "" }, { "docid": "4a94af3c34a09b67773fdc15e8c925c0", "score": "0.60228544", "text": "def do_set_property(self, property, value):\n\t\tif property.name == \"node\":\n\t\t\tself.node = value\n\t\telif property.name == \"in-lines\":\n\t\t\tself.in_lines = value\n\t\telif property.name == \"out-lines\":\n\t\t\tself.out_lines = value\n\t\telse:\n\t\t\traise AttributeError, \"no such property: '%s'\" % property.name", "title": "" }, { "docid": "fd61589aeea3a22dda7c7c7b88a85165", "score": "0.60208523", "text": "def property_set(self, prop, value):\n # See ttbl.test_target.to_dict(): these properties are\n # generated, not allowed to set them\n if prop in ( 'timestamp', '_alloc.timestamp' ):\n raise RuntimeError(\"property '%s' cannot be set\" % prop)\n\n self.fsdb.set(prop, value)\n for key in self.fsdb.keys(prop + \".*\"):\n self.fsdb.set(key, None)", "title": "" }, { "docid": "4f5ffe7a8663f282a39c959733b7143d", "score": "0.6013442", "text": "def on_config_set(self, instance, key, value):\n super().on_config_set(instance, key, value)\n\n if key not in ('values', 'units', 'limits', 'get_funcs', 'set_funcs'):\n return\n\n for _, subprop in self._subproperties.items():\n setattr(subprop, key, value)", "title": "" }, { "docid": "7df7cbd812e3456fea6dc88576a6e3d1", "score": "0.59867907", "text": "def _set_property_data(self,results):\n \n for key, property_instance in self._properties.items():\n value = results.get(key,None)\n #if property_instance.method is None:\n # this is a normal attribute, not a derived one so set its value\n try:\n setattr(self,key,value)\n except:\n # TODO: log/warn/email regarding type mismatch\n setattr(self,key,None)", "title": "" }, { "docid": "6018a13ab9363c89fd7e61da9cd81c42", "score": "0.59823585", "text": "def set_property_value(self, current_value, new_value):\n if live_object_is_valid(self._parent) and current_value != new_value:\n self._parent.value = new_value", "title": "" }, { "docid": "d31b13db04f7d4644b5f656c32fa238d", "score": "0.5975362", "text": "def _remove_property_listener(self):\n self._on_parameter_changed.subject = None\n return", "title": "" }, { "docid": "0926057d2e590742c10a54fc6f8c7fb1", "score": "0.59640753", "text": "def prop(self, value):", "title": "" }, { "docid": "5805691fc9acb2bfb07ab9c157d09bfa", "score": "0.5962179", "text": "def set_properties(self, dictval):\n self.properties = dictval", "title": "" }, { "docid": "ca72ad38a19f35db7b23b3e4bbd5a569", "score": "0.59547806", "text": "def properties(self, properties):\n\n\n self._properties = properties", "title": "" }, { "docid": "4936a8250082f4d4a1ddf6516d264eeb", "score": "0.5948748", "text": "def property_set(self, prop, value):\n # See ttbl.test_target.to_dict(): these properties are\n # generated, not allowed to set them\n if prop in self.properties_forbidden:\n raise RuntimeError(\"property '%s' cannot be set\" % prop)\n\n self.fsdb.set(prop, value)\n for key in self.fsdb.keys(prop + \".*\"):\n self.fsdb.set(key, None)", "title": "" }, { "docid": "1e5698e391485e3f88aa675889fe6d0f", "score": "0.5943585", "text": "def changed(self):\n raise NotImplementedError()", "title": "" }, { "docid": "c170215371f8a0c0beb0282e28579cb4", "score": "0.5943476", "text": "def listen_propchanged(self, func):\n self.conn.listen_signal(path = 
\"/org/mpris/MediaPlayer2\",\n fallback = False,\n interface = \"org.freedesktop.DBus.Properties\",\n name = \"PropertiesChanged\",\n func = func)", "title": "" }, { "docid": "463bff698ba27678a10789144e2c5237", "score": "0.5932273", "text": "def set_property(self, key, value):\r\n return self.set_properties({key: value})", "title": "" }, { "docid": "463bff698ba27678a10789144e2c5237", "score": "0.5932273", "text": "def set_property(self, key, value):\r\n return self.set_properties({key: value})", "title": "" }, { "docid": "3b420ab04e645f244c580ae1d9b0c207", "score": "0.59117573", "text": "def _on_changed(self, old):\n pass", "title": "" }, { "docid": "03904aee59f76b857496fb563ffca566", "score": "0.5887672", "text": "def _on_property_change(self):\n self._screen_buffer.clear()\n if self._running.isSet():\n self.stop_acquire()\n self.start_acquire()", "title": "" }, { "docid": "f0de3a3882aac7a852dd98e65fa4d075", "score": "0.5876568", "text": "def all_properties_changed(self):\n single = ViewCollection()\n for view in single.views:\n for prop in view.properties:\n view.property_changed(prop)", "title": "" }, { "docid": "35bd72be4f8f1cfc0040ab33caa2d6bf", "score": "0.5874417", "text": "def _fire_data_changed(self, *args):\r\n self.data_changed = True", "title": "" }, { "docid": "8fb1ec16792a3c89c851ff955d2a17e4", "score": "0.5874355", "text": "def update_cont_properties(self, cont_prop):\n self.container.properties.value = cont_prop", "title": "" }, { "docid": "5a6c32eb1c49aad3c5f73850a3c87895", "score": "0.5872436", "text": "def _updateProperty(id, value):", "title": "" }, { "docid": "0349fa145d0917991d07ca77ee1adfa6", "score": "0.58709574", "text": "def _properties_changed(self,\n interface_name,\n changed_properties,\n invalidated_properties,\n object_path):\n # update device state:\n old_state = deepcopy(self._objects[object_path])\n for property_name in invalidated_properties:\n del self._objects[object_path][interface_name][property_name]\n for key,value in changed_properties.items():\n self._objects[object_path][interface_name][key] = value\n new_state = self._objects[object_path]\n # detect changes and trigger events:\n if interface_name == Interface['Drive']:\n self._detect_toggle(\n 'has_media',\n self.get(object_path, old_state),\n self.get(object_path, new_state),\n 'media_added', 'media_removed')\n elif interface_name == Interface['Filesystem']:\n self._detect_toggle(\n 'is_mounted',\n self.get(object_path, old_state),\n self.get(object_path, new_state),\n 'device_mounted', None)", "title": "" }, { "docid": "bc13fdecdaf832a3b330b95ec8bf0e1c", "score": "0.5866119", "text": "def notifyChange(self):\n pass", "title": "" }, { "docid": "bc13fdecdaf832a3b330b95ec8bf0e1c", "score": "0.5866119", "text": "def notifyChange(self):\n pass", "title": "" }, { "docid": "9f3defc8fd0244d4f5b3a05bcb6b4f0b", "score": "0.5856028", "text": "def edit_property(self, property_info):\n for i, schema_property in enumerate(self.schema[\"@graph\"]):\n if schema_property[\"rdfs:label\"] == property_info[\"rdfs:label\"]:\n validate_property_schema(property_info)\n self.schema[\"@graph\"][i] = property_info\n \n #TODO: check if properties are added/edited multiple times (e.g. 
look at explore_property)\n break\n \n validate_schema(self.schema)\n print(\"Edited the property {} successfully!\".format(property_info[\"rdfs:label\"]))\n self.schema_nx = load_schema_into_networkx(self.schema)", "title": "" }, { "docid": "3944f26e40aa264ba86ea334c13f6cb3", "score": "0.58490294", "text": "def state(self) -> PropertyState:\n pass", "title": "" }, { "docid": "07656d9fb49c0b2cdd6c510880b507ff", "score": "0.5847571", "text": "def _reset_property_notification(self, prop_name):\n\n self.register_property(prop_name)\n\n for observer in self.__observers:\n self.__remove_observer_notification(observer, prop_name)\n self.__add_observer_notification(observer, prop_name)\n pass\n return", "title": "" }, { "docid": "6ec7018a59393175f846a92543d59d32", "score": "0.5846329", "text": "def update(self, prop_changed_event):\n\n changed_entity = prop_changed_event.obj\n if not self.is_entity_in_bounds(changed_entity):\n raise utils.OutOfBoundsError(changed_entity, self)\n\n self.notify_subscribers(event.PropertyChangeEvent(self))", "title": "" }, { "docid": "85f2f1929b28b624b8f3d1472ab12106", "score": "0.58319557", "text": "def manage_changeProperties(REQUEST=None, **kw):", "title": "" }, { "docid": "cd864fff7fbba0c715abc10eb100f232", "score": "0.5829081", "text": "def _alter_properties(self, **properties) -> None:\n if not properties:\n if config.verbose:\n print(f\"No changes specified for {type(self).__name__} '{self.name}'.\")\n return None\n\n changed = self._send_proper_patch_request(properties)\n\n if config.verbose and all(changed):\n msg = (f\"{type(self).__name__} '{self.name}' has been modified on the server. \"\n f\"Your changes are saved locally.\")\n print(msg)", "title": "" }, { "docid": "3746e7ade072bfdf0261db5ac36ead5e", "score": "0.5822236", "text": "def _onParamValueChange(self, param):\n self.onUseDone = False\n self.onParamChange", "title": "" }, { "docid": "1f3f08bf36f8281f26d577cc708f0af9", "score": "0.5815437", "text": "def properties(self, properties):\n self._properties = properties", "title": "" }, { "docid": "1f3f08bf36f8281f26d577cc708f0af9", "score": "0.5815437", "text": "def properties(self, properties):\n self._properties = properties", "title": "" }, { "docid": "1ca23293ab45b1fca14b229aafc213c7", "score": "0.58105063", "text": "def _setProperty(self, prop: str, val: Any):\n mo = self.metaObject()\n idx = mo.indexOfProperty(prop)\n if idx < 0:\n raise ValueError(f\"property `{prop}` does not exist\")\n mp = mo.property(idx)\n if mp.isWritable():\n setattr(self, prop, val)\n else:\n pName = f\"_{prop}\"\n # Do nothing if old and new values are none (crude optimization)\n if val is not None or getattr(self, pName) is not None:\n setattr(self, pName, val)\n getNotifySignal(self, prop).emit()", "title": "" }, { "docid": "e676290591a503b8066010d1378a5bd2", "score": "0.5786053", "text": "def _property_change_event_handler(self, signal, transport, *args):\n current_state = self.source.State\n if (self.state == 'connected' and current_state == 'playing'):\n self._acquire_media_transport(transport, 'r')\n elif (self.state == 'playing' and current_state == 'connected'):\n self._release_media_transport(transport, 'r')\n self.state = current_state", "title": "" }, { "docid": "e94cd3f00b181de99b186ea6e1507b95", "score": "0.5783029", "text": "def setChanged(self, type: int, oldValue: object, newValue: object) -> None:\n ...", "title": "" }, { "docid": "6eb73f416ae42082162105185bdb8a2d", "score": "0.57825935", "text": "def registerPropertyCallback(self, callback):\n\n 
self.__callbacks.append( callback )", "title": "" }, { "docid": "fd7b98b67d501466a9bae572b9e1c79a", "score": "0.57764876", "text": "def set_property(self, prop, val):\n if hasattr(self, prop):\n setattr(self, prop, val)\n self.__create_text_surf__()\n self.__render_text__()\n else:\n raise Exception('Button has no property named \"%s\"' % prop)", "title": "" }, { "docid": "6d55b9461f82605173cff84a80b07408", "score": "0.5756201", "text": "def onSettingUpdated(self):\n if self._translator is not None:\n self._translator.trigger()", "title": "" }, { "docid": "1c97a25e7e952b87c3ace778ba88a23a", "score": "0.5753159", "text": "def Forget(self):\n map(ProfileProperty.Forget, self.props)", "title": "" }, { "docid": "5351f408f3c55c9dbd7d4f045d6f60c7", "score": "0.57407975", "text": "def writeDeadProperty(self, property):\n self.deadProperties().set(property)", "title": "" }, { "docid": "edc34c5fd64fbfb7cda86971d65f0053", "score": "0.57377434", "text": "def updated(self):\n ...", "title": "" }, { "docid": "97f9026c8a89b1e8c5356b3fb199bf2e", "score": "0.5735381", "text": "def _mark_as_changed(self):\r\n if self.instance:\r\n self.instance._mark_as_changed(self.key)", "title": "" }, { "docid": "b83eaeaf1c93a8c2eb628d4871fafd6c", "score": "0.5735117", "text": "def Set(self, interface, prop, value):\r\n my_prop = self.__getattribute__(prop)\r\n my_prop = value", "title": "" }, { "docid": "daeddb0bb981b15f542a9331a6910822", "score": "0.5730776", "text": "def after_update(self) -> None:", "title": "" }, { "docid": "50bd1116494a5e9583cdb108f863f33f", "score": "0.5725219", "text": "def _property_change_event_handler(self, signal, transport, *args):\n current_state = self.sink.State\n if ((self.state == 'disconnected' and current_state == 'connected') or\n (self.state == 'connecting' and\n current_state == 'connected')):\n self._acquire_media_transport(transport, 'w')\n elif (self.state == 'connected' and current_state == 'disconnected'):\n self._release_media_transport(transport, 'w')\n self.state = current_state", "title": "" }, { "docid": "c7a528f24e072d3ec132a18a98881832", "score": "0.5722519", "text": "def properties(self, properties):\n # type: (dict) -> None\n # Keep a copy of the new properties\n self.__properties = properties.copy() if properties else {}", "title": "" }, { "docid": "614348028c58e4b157611b8f7d07153f", "score": "0.5721096", "text": "def on_pg_changed (self, event):\r\n prop_info = event.get_value()\r\n data = self.pg.get_property_data(prop_info.get_pid())\r\n if data is not None:\r\n prop_name = data['name']\r\n if self.pg.get_property_type(prop_info.get_pid()) != propgrid.Type_Subgroup:\r\n if not data['multiparam']:\r\n self.model.set_property(prop_name, prop_info.get_value())\r\n self.refresh_colours(prop_info.get_pid(), prop_name)\r\n else:\r\n parent = self.pg.get_parent(prop_info.get_pid())\r\n self.model.set_property(\r\n self.pg.get_property_name(parent), \r\n self.pg.get_property_value(parent)\r\n )\r\n self.refresh_colours(prop_info.get_pid(), prop_name)\r\n else:\r\n if data['multiparam']:\r\n self.model.set_property(prop_name, prop_info.get_value())\r\n self.refresh_colours(prop_info.get_pid(), prop_name)", "title": "" }, { "docid": "66ba6fe32dc83f9ce2ba0057ba761ed5", "score": "0.57179", "text": "def register_for_changed_values(self):\n pass", "title": "" }, { "docid": "326beda3bcaa60b4640428d5bf5af0ac", "score": "0.5714013", "text": "def do_set_property(self, property, value):\n\t\tif property.name == 'apiAccessible':\n\t\t\tself.apiAccessible = 
value\n\t\telse:\n\t\t\traise AttributeError, 'unknown property %s' % property.name", "title": "" }, { "docid": "1543300527010036cc69a067ff8f8faa", "score": "0.5696176", "text": "def setprop(self, key, val):\n if not self.propdict.has_key(key):\n self.proplist.append(key)\n self.propdict[key] = val\n self.hasprop = True", "title": "" }, { "docid": "a3969d532c6bec7ce308ec17ce6c57cb", "score": "0.5676459", "text": "def on_fire_change_event(self, props):\n dsbl = props['disabled']\n if dsbl is not None:\n self._disabled = dsbl\n val = props['value']\n if val is not None:\n self._value = val\n max = props['max']\n if max is not None:\n self._max = max\n min = props['min']\n if min is not None:\n self._min = min\n rdonly = props['readonly']\n if rdonly is not None:\n self._readonly = rdonly\n try:\n if self._change_callback is not None:\n self._change_callback(self._name, props)\n emit('success', {'status': True, 'message': 'success'})\n else:\n emit('warning', {'status': False, 'message': 'No callback registered'})\n except Exception as e:\n print(\"Error: \" + str(e))\n emit('failed', {'status': False, 'message': 'Method failed during callback execution: ' + str(e)})", "title": "" }, { "docid": "ba3e137f3523a0f7df8a1bff739efafc", "score": "0.567505", "text": "def property_name(self, property_name):\n\n self._property_name = property_name", "title": "" }, { "docid": "fb49f45f16bb74599be9ed29097077fb", "score": "0.56745285", "text": "def property_value(self, property_value: str):\n\n self._property_value = property_value", "title": "" }, { "docid": "eb6e190c773a3a02df062846cf79d620", "score": "0.5672088", "text": "def _change_properties(self, iface_name, props_changed, props_invalidated):\n self._interfaces_and_properties[iface_name].update(props_changed)\n for prop_name in props_invalidated:\n self._interfaes_and_properties[iface_name][prop_name] = Invalidated", "title": "" }, { "docid": "f23d569432847c96fed98a8209082346", "score": "0.5668726", "text": "def properties(self, properties):\n\n self._properties = properties", "title": "" }, { "docid": "f23d569432847c96fed98a8209082346", "score": "0.5668726", "text": "def properties(self, properties):\n\n self._properties = properties", "title": "" }, { "docid": "f23d569432847c96fed98a8209082346", "score": "0.5668726", "text": "def properties(self, properties):\n\n self._properties = properties", "title": "" }, { "docid": "f23d569432847c96fed98a8209082346", "score": "0.5668726", "text": "def properties(self, properties):\n\n self._properties = properties", "title": "" }, { "docid": "f23d569432847c96fed98a8209082346", "score": "0.5668726", "text": "def properties(self, properties):\n\n self._properties = properties", "title": "" }, { "docid": "f23d569432847c96fed98a8209082346", "score": "0.5668726", "text": "def properties(self, properties):\n\n self._properties = properties", "title": "" }, { "docid": "f23d569432847c96fed98a8209082346", "score": "0.5668726", "text": "def properties(self, properties):\n\n self._properties = properties", "title": "" } ]
dfcac79a457a0f57ceddfe715a787505
basic stability test of a Numba CPUDispatcher function (i.e., function compiled via @jit / @njit)
[ { "docid": "a3918b70ff5560c9c9829ff65beb3349", "score": "0.0", "text": "def stability_test(func, func_kw, ref_path, ignore_fails=False, define_as_ref=False):\n func_name = func.py_func.__name__\n logging.info(\"stability testing `%s`\", func_name)\n ref_path = expand(ref_path)\n\n test = execute_func(func=func, func_kw=func_kw)\n\n if define_as_ref:\n to_file(test, ref_path)\n\n # Even when we define the test case as ref, round-trip to/from file to\n # ensure that doesn't corrupt the values\n ref = from_file(ref_path)\n\n check(test=test, ref=ref, label=func_name, ignore_fails=ignore_fails)\n\n return test, ref", "title": "" } ]
[ { "docid": "b7dd18680ed15bae6aa27388de7f3f2a", "score": "0.6352842", "text": "def test_basic(self):\n a = 1\n\n @njit\n def foo(x):\n return x + 1\n\n foo(a)\n int_int_fc = types.FunctionType(types.int64(types.int64,))\n\n @njit(types.int64(int_int_fc))\n def bar(fc):\n return fc(a)\n\n self.assertEqual(bar(foo), foo(a))", "title": "" }, { "docid": "1d6b302c2e1e3a2cacf9baa0dbd1a739", "score": "0.6279966", "text": "def test_basic4(self):\n a = 1\n\n @njit\n def foo1(x):\n return x + 1\n\n @njit\n def foo2(x):\n return x + 2\n\n tup = (foo1, foo2)\n int_int_fc = types.FunctionType(types.int64(types.int64,))\n\n @njit(types.int64(types.UniTuple(int_int_fc, 2)))\n def bar(fcs):\n x = 0\n for i in range(2):\n x += fcs[i](a)\n return x\n self.assertEqual(bar(tup), foo1(a) + foo2(a))", "title": "" }, { "docid": "03a7ab675b5de6ad392479f878897f3f", "score": "0.62623435", "text": "def test_basic3(self):\n a = 1\n\n @njit\n def foo1(x):\n return x + 1\n\n @njit\n def foo2(x):\n return x + 2\n\n int_int_fc = types.FunctionType(types.int64(types.int64,))\n\n @njit(types.int64(int_int_fc))\n def bar(fc):\n return fc(a)\n\n self.assertEqual(bar(foo1) + 1, bar(foo2))", "title": "" }, { "docid": "82776cb5f0669e74ae7dbf44ef5e8fb9", "score": "0.6225936", "text": "def test_basic2(self):\n a = 1\n\n @njit\n def foo(x):\n return x + 1\n\n int_int_fc = types.FunctionType(types.int64(types.int64,))\n\n @njit(types.int64(int_int_fc))\n def bar(fc):\n return fc(a)\n\n self.assertEqual(bar(foo), foo(a))", "title": "" }, { "docid": "49787bddf8bf3d4b541592b51aa88466", "score": "0.6150174", "text": "def test_typeof_pure(arg):\n return numba.typeof(arg)", "title": "" }, { "docid": "660381f84e1311d44eae1f59c3f0da24", "score": "0.6087743", "text": "def test_typeof_numba3(arg):\n return numba.typeof(arg)", "title": "" }, { "docid": "d02daacf267bc1eaf72f7876eec97780", "score": "0.5932847", "text": "def test_ns_call(self):\n\n def a(i):\n return i + 1\n\n def mkfoo(a_):\n def foo():\n return a_(123)\n return foo\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig), njit_func,\n mk_njit_with_sig_func(sig), mk_wap_func(sig)]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)())", "title": "" }, { "docid": "08de50ec69817aef81277fc230f43d09", "score": "0.5880727", "text": "def test_typeof_numba(a, b):\n return numba.typeof(a)(a + b)", "title": "" }, { "docid": "5e9179fa1db4e8aba40d3ee2e012a83d", "score": "0.5819886", "text": "def test_ccall():\r\n return ccall_sqr(5)", "title": "" }, { "docid": "2232c392043f77be7b7ea9ba1fc3887f", "score": "0.58064526", "text": "def compile_jit(self):", "title": "" }, { "docid": "40f0084febcf2c245a09c66db81e11e3", "score": "0.5758869", "text": "def c_func(n, a):\n\n #unpack the pointer into an array\n args = numba.carray(a,n)\n\n return jitted_func(args)", "title": "" }, { "docid": "40286628a1d69fd4b3f2d4e72676a3a4", "score": "0.57537764", "text": "def _jit(function):\n import sys\n\n compiled = numba.jit(function)\n\n if hasattr(sys, '_called_from_test'):\n return function\n else: # pragma: no cover\n return compiled", "title": "" }, { "docid": "9c737724780e33880a4e14f1f37d642b", "score": "0.57416886", "text": "def wrap_for_numba(func):\n \n #First need to jit the function so cfunc can handle it.\n jitted_func = numba.jit(\"float64(float64[:])\", nopython=True)(func)\n\n\n def c_func(n, a):\n \"\"\"Simple wrapper to convert (int, 
pointer-to-array) args to (list) args.\n\n Parameters\n ----------\n n : C-language integer\n\n a : C-language pointer to array of type double.\n\n Returns\n -------\n func : python function\n Function signature is float(float[:])\"\"\"\n\n #unpack the pointer into an array\n args = numba.carray(a,n)\n\n return jitted_func(args)\n\n #Function signature required by numba\n #arguments are integer denoting length of array and pointer to array\n c_sig = numba.types.double(numba.types.intc, numba.types.CPointer(numba.types.double))\n\n #Use numba to create a c-callback\n new_cfunc = numba.cfunc(c_sig)(c_func)\n\n if LLC_EXISTS == True:\n #convert this into something that scipy.integrate.quad can work with\n return LowLevelCallable(new_cfunc.ctypes)\n else:\n warnings.warn(\"Falling back on legacy scipy behavior. Should upgrade to verion 0.19 or greater.\", DeprecationWarning)\n #This is a hack to address a bug in scipy.integrate.quad for scipy versions < 0.19\n new_cfunc.ctypes.argtypes = (ct.c_int, ct.c_double)\n\n return new_cfunc.ctypes", "title": "" }, { "docid": "59e1375a5f13cb639449dccf58c0589f", "score": "0.571599", "text": "def test_call_generated(self):\n cfunc = jit(nopython=True)(call_generated)\n self.assertPreciseEqual(cfunc(1, 2), (-4, 2))\n self.assertPreciseEqual(cfunc(1j, 2), (1j + 5, 2))", "title": "" }, { "docid": "7cf2b0201da12501dcb84e6da16ea62a", "score": "0.56078917", "text": "def test_thread_response(self):\n\n @vectorize('float64(float64, float64)', target='parallel')\n def fnv(a, b):\n return a + b\n\n sleep_time = 1 # 1 second\n while sleep_time > 0.00001: # 10us\n time.sleep(sleep_time)\n a = b = np.arange(10**5)\n np.testing.assert_equal(a + b, fnv(a, b))\n # Reduce sleep time\n sleep_time /= 2", "title": "" }, { "docid": "bf5bc64bfd9f91b15571e447b1259341", "score": "0.5586428", "text": "def _get_numba_ufunc(expr):\n if isinstance(expr, Broadcast):\n leaves = expr._scalars\n expr = expr._scalar_expr\n else:\n leaves = expr._leaves()\n\n s, scope = funcstr(leaves, expr)\n\n scope = dict((k, numba.jit(nopython=True)(v) if callable(v) else v)\n for k, v in scope.items())\n # get the func\n func = eval(s, scope)\n # get the signature\n sig = compute_signature(expr)\n # vectorize is currently not thread safe. 
So lock the thread.\n # TODO FIXME remove this when numba has made vectorize thread safe.\n with lock:\n ufunc = numba.vectorize([sig], nopython=True)(func)\n return ufunc", "title": "" }, { "docid": "c0d4523f38af36ab6e7a1fed7565825a", "score": "0.5579696", "text": "def test_ns_call_out(self):\n def a(i):\n return i + 1\n\n def mkfoo(a_):\n def foo():\n a_(123)\n return a_\n return foo\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig), njit_func,\n mk_njit_with_sig_func(sig), mk_wap_func(sig),\n mk_ctypes_func(sig)]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)())", "title": "" }, { "docid": "c4e3033ee92d3e59bcf6178260fcf2c8", "score": "0.5565246", "text": "def _func_alt(self , uc , address , size , user_data):\n self.debug_func = False\n if self.debug_func:\n # print('>>> Tracing instruction at 0x%x, instruction size = 0x%x' %(address, size))\n pass\n if address in self.func_alt_addr.keys():\n \"\"\"\n keep balance may be need to \n consider in the future\n fake func fmt : (func , args)\n \"\"\"\n func = self.func_alt_addr[address]\n if self.debug_func:\n print \"func : {0} \".format(func)\n # fc <==> firmcorn class <--- Uc class\n # 1 , get return address\n reg_sp = self.fc.reg_read(self.REG_SP)\n if self.REG_RA == 0: \n _value = str(self.fc.mem_read(reg_sp , self.size)).encode(\"hex\")\n if self.fc.endian == \"little\":\n _value = LITTLE2BIG(_value)\n else:\n _value = int( _value , 16)\n ret_addr = _value\n else:\n ret_addr = self.fc.reg_read(self.REG_RA)\n \n # 2 , get args by argc\n if self.debug_func:\n print \"ret_addr : {}\".format(hex(ret_addr))\n \n # 3 , execute custom function and get return value\n res = func() \n if self.debug_func:\n print \"ret_addr : {}; res : {}\".format(hex(ret_addr) , res)\n if type(res) != int: res = 0 # return value is not very import for fuzz , easpically return value is not int type\n # multiple return values should to be considered, maybe \n self.fc.reg_write(self.REG_RES[0] , res)\n self.fc.reg_write(self.REG_PC , ret_addr)\n '''\n # maybe need to keep stack balace\n pass \n '''", "title": "" }, { "docid": "0cccffd58034efc4e4fdde7a7e0d34d9", "score": "0.5562532", "text": "def test_in_ns_seq_call(self):\n\n def a(i):\n return i + 1\n\n def b(i):\n return i + 2\n\n def mkfoo(b_):\n def foo(f):\n r = 0\n for f_ in (f, b_):\n r = r + f_(r)\n return r\n return foo\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig),\n mk_njit_with_sig_func(sig), mk_wap_func(sig),\n mk_ctypes_func(sig)][:-1]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n b_ = decor(b)\n self.assertEqual(jit_(mkfoo(b_))(a_), mkfoo(b)(a))", "title": "" }, { "docid": "c07e9781666ca222b2cfc912f7015db8", "score": "0.5481509", "text": "def test_ns_overload(self):\n def a(i):\n return i + 1\n\n def mkfoo(a_):\n def foo():\n r1 = a_(123)\n r2 = a_(123.45)\n return (r1, r2)\n return foo\n\n for decor in [njit_func]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)())", "title": "" }, { "docid": "37029b4f2a0420963a35cb66512173c6", "score": "0.54654795", "text": "def test_subroutine_call_to_externally_visible_subroutine():\n\n @I.ir_module\n class before:\n @T.prim_func\n def main(A: 
T.Buffer(1, \"float32\")):\n T.func_attr({\"global_symbol\": \"main\", \"target\": T.target(\"llvm\", host=\"llvm\")})\n before.subroutine(A.data)\n\n @T.prim_func\n def subroutine(A_data: T.handle(\"float32\")):\n T.func_attr({\"global_symbol\": \"subroutine\", \"target\": T.target(\"llvm\", host=\"llvm\")})\n T.evaluate(A_data)\n\n after = tvm.tir.transform.MakePackedAPI()(before)\n\n main_compute_scope = _find_compute_scope(after[\"main\"])\n assert main_compute_scope is not None\n subroutine_compute_scope = _find_compute_scope(after[\"subroutine\"])\n assert subroutine_compute_scope is not None\n\n subroutine_call_op = main_compute_scope.body.value.op\n assert (\n isinstance(subroutine_call_op, tvm.ir.Op)\n and subroutine_call_op.name == \"tir.tvm_call_cpacked\"\n ), (\n f\"The main function's CallNode should be lowered to the builtin 'tir.tvm_call_cpacked', \"\n f\"but instead has an operation of type {subroutine_call_op}\"\n )", "title": "" }, { "docid": "bf3c8ae871a6de4a0564615b5eb6a36c", "score": "0.5462659", "text": "def test_ns_out(self):\n def a(i):\n return i + 1\n\n def mkfoo(a_):\n def foo():\n return a_\n return foo\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig), njit_func,\n mk_njit_with_sig_func(sig), mk_wap_func(sig),\n mk_ctypes_func(sig)][:-1]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)())", "title": "" }, { "docid": "588313c5992cc9fc53c73ee9f035ef26", "score": "0.54603344", "text": "def numba_supported_reductions(request):\n return request.param", "title": "" }, { "docid": "e6ced0518649ff063db40a44ce302ad5", "score": "0.54475284", "text": "def test_internal_subroutine_call():\n\n @I.ir_module\n class before:\n @T.prim_func\n def main(A: T.Buffer(1, \"float32\")):\n T.func_attr({\"target\": T.target(\"llvm\", host=\"llvm\")})\n before.subroutine(A.data)\n\n # this test fails if it's made public\n @T.prim_func(private=True)\n def subroutine(A_data: T.handle(\"float32\")):\n T.func_attr({\"target\": T.target(\"llvm\")})\n T.evaluate(A_data)\n\n after = tvm.tir.transform.MakePackedAPI()(before)\n tvm.ir.assert_structural_equal(before[\"subroutine\"], after[\"subroutine\"])\n\n compute_scope = _find_compute_scope(after[\"main\"])\n subroutine_call_op = compute_scope.body.value.op\n assert isinstance(subroutine_call_op, tvm.ir.GlobalVar), (\n f\"The main function's CallNode should use the subroutine's GLobalVar as the operation, \"\n f\"but instead has an operation of type {subroutine_call_op}\"\n )", "title": "" }, { "docid": "a21597594fbde2898b357959a2dbc71e", "score": "0.5434059", "text": "def _func_alt_dbg(self , uc , address , size , user_data):\n if self.debug_func:\n # print('>>> Tracing instruction at 0x%x, instruction size = 0x%x' %(address, size))\n pass\n if address in self.func_alt_addr.keys():\n \"\"\"\n keep balance may be need to \n consider in the future\n fake func fmt : (func , args)\n \"\"\"\n func = self.func_alt_addr[address]\n if self.debug_func:\n print \"func : {0} ; argc : {1}\".format(func, self.argcs)\n # fc <==> firmcorn class <--- Uc class\n # 1 , get return address\n reg_sp = self.fc.reg_read(self.REG_SP)\n # we should get address value from stack\n print hex(reg_sp)\n if self.REG_RA == 0: \n ret_addr = unpack(self.pack_fmt , str(self.fc.mem_read(reg_sp , self.size)))[0]\n else:\n ret_addr = self.fc.reg_read(self.REG_RA)\n \n # 2 , get args by argc\n if self.debug_func:\n print 
\"ret_addr : {}\".format(hex(ret_addr))\n \n # 3 , execute custom function and get return value\n res = func() \n if self.debug_func:\n print \"ret_addr : {}; args : {}; res : {}\".format(ret_addr , self.argcs , res)\n if type(res) != int: res = 0 # return value is not very import for fuzz , easpically return value is not int type\n # multiple return values should to be considered, maybe \n self.fc.reg_write(self.REG_RES[0] , res)\n self.fc.reg_write(self.REG_PC , ret_addr)\n '''\n # maybe need to keep stack balace\n pass \n '''", "title": "" }, { "docid": "b7595c7e1adc3c4bc0b233b83d1eb552", "score": "0.5412646", "text": "def cpython_only(test):\r\n return impl_detail(cpython=True)(test)", "title": "" }, { "docid": "22d8cf29b9730cddab0757362fbbf6a8", "score": "0.53926724", "text": "def test_in__(self):\n\n def a(i):\n return i + 1\n\n def foo(f):\n return 0\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig),\n njit_func,\n mk_njit_with_sig_func(sig),\n mk_ctypes_func(sig),\n mk_wap_func(sig)]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__, jit=jit_opts):\n a_ = decor(a)\n self.assertEqual(jit_(foo)(a_), foo(a))", "title": "" }, { "docid": "ae843b795bb00e358eb5252013c75d0f", "score": "0.5386802", "text": "def test_compilation_results(self):\n\n @jit(nopython=True)\n def add_template(x, y):\n return x + y\n\n # Trigger compilations\n self.assertEqual(add_template(1, 2), 3)\n self.assertEqual(add_template(1.2, 3.4), 4.6)\n\n cres1, cres2 = add_template.overloads.values()\n\n # Turn compilation results into first-class functions\n iadd = types.CompileResultWAP(cres1)\n fadd = types.CompileResultWAP(cres2)\n\n @jit(nopython=True)\n def foo(add, x, y):\n return add(x, y)\n\n @jit(forceobj=True)\n def foo_obj(add, x, y):\n return add(x, y)\n\n self.assertEqual(foo(iadd, 3, 4), 7)\n self.assertEqual(foo(fadd, 3.4, 4.5), 7.9)\n\n self.assertEqual(foo_obj(iadd, 3, 4), 7)\n self.assertEqual(foo_obj(fadd, 3.4, 4.5), 7.9)", "title": "" }, { "docid": "5f9d15727a9c6d627210b8ea2e9be78d", "score": "0.53786474", "text": "def test_in_call__(self):\n\n def a_i64(i):\n return i + 1234567\n\n def a_f64(i):\n return i + 1.5\n\n def a_str(i):\n return \"abc\"\n\n def foo(f):\n return f(123)\n\n for f, sig in [(a_i64, int64(int64)), (a_f64, float64(int64))]:\n for decor in [mk_cfunc_func(sig), njit_func,\n mk_njit_with_sig_func(sig),\n mk_wap_func(sig)]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(\n sig=sig, decor=decor.__name__, jit=jit_opts):\n f_ = decor(f)\n self.assertEqual(jit_(foo)(f_), foo(f))", "title": "" }, { "docid": "cbea4eb86c1124526ea96a6ed0861d61", "score": "0.53761166", "text": "def test_err10b(self):\n\n @functor_api(namespace='foo')\n def err10b(data):\n pass\n\n @functor_api(namespace='foo')\n def err10b(data):\n pass", "title": "" }, { "docid": "e1be7ece9f19f1972fd281047053818f", "score": "0.53507966", "text": "def test_in_call_out(self):\n\n def a(i):\n return i + 1\n\n def foo(f):\n f(123)\n return f\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig), njit_func,\n mk_njit_with_sig_func(sig), mk_wap_func(sig)]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n r1 = jit_(foo)(a_).pyfunc\n r2 = foo(a)\n self.assertEqual(r1, r2)", "title": "" }, { "docid": "43f6803944bdaa499a5f9d5b0a3b634f", "score": "0.53125364", "text": "def 
test_udp_udf_reduce_bands_with_parameter(api100, user_defined_process_registry, set_parameters, udf_code):\n udf_code = textwrap.dedent(udf_code)\n udp_id = random_name(\"udp\")\n udp_spec = {\n \"id\": udp_id,\n \"parameters\": [\n {\"name\": \"data\", \"schema\": {\"type\": \"object\", \"subtype\": \"raster-cube\"}},\n {\"name\": \"l_scale\", \"default\": 1000, \"optional\": True, \"schema\": {\"type\": \"number\"}},\n {\"name\": \"d_scale\", \"default\": 2, \"optional\": True, \"schema\": {\"type\": \"number\"}},\n ],\n \"process_graph\": {\n \"reduce\": {\n \"process_id\": \"reduce_dimension\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"dimension\": \"bands\",\n \"reducer\": {\"process_graph\": {\"udf\": {\n \"process_id\": \"run_udf\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"udf\": udf_code,\n \"runtime\": \"Python\",\n \"context\": {\n \"l_scale\": {\"from_parameter\": \"l_scale\"},\n \"d_scale\": {\"from_parameter\": \"d_scale\"}\n }\n },\n \"result\": True\n }}}\n },\n \"result\": True\n }\n }\n }\n user_defined_process_registry.save(user_id=TEST_USER, process_id=udp_id, spec=udp_spec)\n\n udp_args = {\"data\": {\"from_node\": \"lc\"}}\n if set_parameters:\n udp_args[\"l_scale\"] = 100000\n udp_args[\"d_scale\"] = 3\n\n response = api100.check_result({\n \"lc\": {\n \"process_id\": \"load_collection\",\n \"arguments\": {\n \"id\": \"TestCollection-LonLat4x4\",\n \"temporal_extent\": [\"2021-01-01\", \"2021-02-01\"],\n \"spatial_extent\": {\"west\": 0.0, \"south\": 0.0, \"east\": 1.0, \"north\": 1.0},\n \"bands\": [\"Longitude\", \"Day\"]\n },\n },\n \"udp\": {\"process_id\": udp_id, \"arguments\": udp_args},\n \"save\": {\n \"process_id\": \"save_result\",\n \"arguments\": {\"data\": {\"from_node\": \"udp\"}, \"format\": \"json\"},\n \"result\": True,\n }\n })\n result = response.assert_status_code(200).json\n _log.info(repr(result))\n\n assert result[\"dims\"] == [\"t\", \"x\", \"y\"]\n data = result[\"data\"]\n\n if set_parameters:\n expected = np.array([\n np.array([[15, 25015, 50015, 75015]] * 4).T,\n np.array([[45, 25045, 50045, 75045]] * 4).T,\n np.array([[75, 25075, 50075, 75075]] * 4).T,\n ])\n else:\n expected = np.array([\n np.array([[10, 10 + 250, 10 + 500, 10 + 750]] * 4).T,\n np.array([[30, 30 + 250, 30 + 500, 30 + 750]] * 4).T,\n np.array([[50, 50 + 250, 50 + 500, 50 + 750]] * 4).T,\n ])\n\n assert_equal(data, expected)", "title": "" }, { "docid": "de02c8781ec7754c5962c91afa34e38c", "score": "0.5309724", "text": "def test_guvectorize_scalar_objectmode(self):\n ufunc = guvectorize(['(int32[:,:], int32, int32[:,:])'],\n \"(x,y),()->(x,y)\")(guadd_scalar_obj)\n a = np.arange(10, dtype='int32').reshape(2, 5)\n b = ufunc(a, 3)\n self.assertPreciseEqual(a + 3, b)", "title": "" }, { "docid": "6a545f06814c05180432b6a15d910bfc", "score": "0.5296906", "text": "def test_c_functions(self):\n self.module_name = \"_aacgmv2\"\n self.test_module_functions()", "title": "" }, { "docid": "1c43e7933f429bbf081db8d554446e48", "score": "0.5252882", "text": "def test_in_overload(self):\n def a(i):\n return i + 1\n\n def foo(f):\n r1 = f(123)\n r2 = f(123.45)\n return (r1, r2)\n\n for decor in [njit_func]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n self.assertEqual(jit_(foo)(a_), foo(a))", "title": "" }, { "docid": "0ad1c9e3c03075005e9ea951c140d518", "score": "0.5249242", "text": "def c_call(x):\r\n return x", "title": "" }, { "docid": 
"84ea510fe5080f6867af3e27ec2ac737", "score": "0.5246428", "text": "def test_err10a(self):\n\n @functor_api\n def err10a(data):\n pass\n\n @functor_api\n def err10a(data):\n pass", "title": "" }, { "docid": "e0243f9d04a2f3ec325f8152713bef90", "score": "0.52407086", "text": "def test_issue_1265(self):\n\n def py_gen(rmin, rmax, nr):\n a = np.linspace(rmin, rmax, nr)\n yield a[0]\n yield a[1]\n\n c_gen = jit(nopython=True)(py_gen)\n\n py_res = list(py_gen(-2, 2, 100))\n c_res = list(c_gen(-2, 2, 100))\n\n self.assertEqual(py_res, c_res)\n\n def py_driver(args):\n rmin, rmax, nr = args\n points = np.empty(nr, dtype=np.complex128)\n for i, c in enumerate(py_gen(rmin, rmax, nr)):\n points[i] = c\n\n return points\n\n @jit(nopython=True)\n def c_driver(args):\n rmin, rmax, nr = args\n points = np.empty(nr, dtype=np.complex128)\n for i, c in enumerate(c_gen(rmin, rmax, nr)):\n points[i] = c\n\n return points\n\n n = 2\n patches = (-2, -1, n)\n\n py_res = py_driver(patches)\n # The error will cause a segfault here\n c_res = c_driver(patches)\n\n np.testing.assert_equal(py_res, c_res)", "title": "" }, { "docid": "226af2187ffa43a5b23c40582e316cc6", "score": "0.5232464", "text": "def test_array_scalar_ufunc_equivalence(op, arr1, arr2):\n check_ufunc_scalar_equivalence(op, arr1, arr2)", "title": "" }, { "docid": "127a7994108b21e3c23cad9b02779480", "score": "0.5229372", "text": "def test_in_pick_func_call(self):\n def a(i):\n return i + 1\n\n def b(i):\n return i + 2\n\n def foo(funcs, i):\n f = funcs[i]\n r = f(123)\n return r\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig), mk_wap_func(sig),\n mk_njit_with_sig_func(sig)]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n b_ = decor(b)\n self.assertEqual(jit_(foo)((a_, b_), 0), foo((a, b), 0))\n self.assertEqual(jit_(foo)((a_, b_), 1), foo((a, b), 1))\n self.assertNotEqual(jit_(foo)((a_, b_), 0), foo((a, b), 1))", "title": "" }, { "docid": "8bde0b723c8dc2981eb1000ed2e96bd0", "score": "0.5220389", "text": "def refcount_test(test):\r\n return no_tracing(cpython_only(test))", "title": "" }, { "docid": "1f4ae011561291ce18fedd69679a26d4", "score": "0.5209112", "text": "def mem_ncsb():\n _min3p.f90wrap_mem_ncsb()", "title": "" }, { "docid": "73f17b0e140f51f4db3b558d2a62653d", "score": "0.520812", "text": "def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):\n if method != '__call__' or kwargs:\n return NotImplemented\n if func in matrix._numpy_to_systeml_mapping:\n fn = matrix._numpy_to_systeml_mapping[func]\n else:\n return NotImplemented\n if len(inputs) == 2:\n return fn(inputs[0], inputs[1])\n elif len(inputs) == 1:\n return fn(inputs[0])\n else:\n raise ValueError('Unsupported number of inputs')", "title": "" }, { "docid": "09728e4f559856dc2f3880fd08f248a2", "score": "0.51905775", "text": "def test_udp_udf_reduce_temporal(api100, user_defined_process_registry, udf_code):\n udf_code = textwrap.dedent(udf_code)\n udp_id = random_name(\"udp\")\n udp_spec = {\n \"id\": udp_id,\n \"parameters\": [\n {\"name\": \"data\", \"schema\": {\"type\": \"object\", \"subtype\": \"raster-cube\"}},\n ],\n \"process_graph\": {\n \"reduce\": {\n \"process_id\": \"reduce_dimension\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"dimension\": \"t\",\n \"reducer\": {\"process_graph\": {\"udf\": {\n \"process_id\": \"run_udf\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"udf\": udf_code,\n \"runtime\": 
\"Python\",\n },\n \"result\": True\n }}}\n },\n \"result\": True\n }\n }\n }\n user_defined_process_registry.save(user_id=TEST_USER, process_id=udp_id, spec=udp_spec)\n\n response = api100.check_result({\n \"lc\": {\n \"process_id\": \"load_collection\",\n \"arguments\": {\n \"id\": \"TestCollection-LonLat4x4\",\n \"temporal_extent\": [\"2021-01-01\", \"2021-02-01\"],\n \"spatial_extent\": {\"west\": 0.0, \"south\": 0.0, \"east\": 1.0, \"north\": 2.0},\n \"bands\": [\"Longitude\", \"Day\"]\n },\n },\n \"udp\": {\n \"process_id\": udp_id, \"arguments\": {\"data\": {\"from_node\": \"lc\"}}\n },\n \"save\": {\n \"process_id\": \"save_result\",\n \"arguments\": {\"data\": {\"from_node\": \"udp\"}, \"format\": \"json\"},\n \"result\": True,\n }\n })\n result = response.assert_status_code(200).json\n _log.info(repr(result))\n\n assert result[\"dims\"] == [\"bands\", \"x\", \"y\"]\n data = result[\"data\"]\n assert_equal(data, np.array([\n np.array([[0, .25, .5, .75]] * 8).T,\n np.full((4, 8), fill_value=25)\n ]))", "title": "" }, { "docid": "62674bf27cb5a2cca6517ce4937d332f", "score": "0.51755303", "text": "def test_fromfunction_complicated(self):\n def f(*global_inds):\n return sum(global_inds)\n \n try:\n comm = create_comm_of_size(4)\n except InvalidCommSizeError:\n pass\n else:\n try:\n a = densedistarray.fromfunction(f, (16,16), dtype='int64', dist=('b','c'), comm=comm)\n except NullCommError:\n pass\n else:\n self.assertEquals(a.shape, (16,16))\n self.assertEquals(a.dtype, np.dtype('int64'))\n for global_inds, value in densedistarray.ndenumerate(a):\n self.assertEquals(sum(global_inds), value)\n comm.Free()", "title": "" }, { "docid": "7a182c2a529460c16bd8222bf0a11930", "score": "0.5174457", "text": "def test_transform_scope(self):\n def get_info(pipeline):\n @njit(pipeline_class=pipeline)\n def foo():\n acc = 0\n for i in [1, 2, 3]:\n acc += i\n return acc\n\n self.assertEqual(foo(), foo.py_func())\n cres = foo.overloads[foo.signatures[0]]\n func_ir = cres.metadata['preserved_ir']\n return func_ir, cres.fndesc\n\n ignore_loops_ir, ignore_loops_fndesc = \\\n get_info(self.LoopIgnoringCompiler)\n canonicalise_loops_ir, canonicalise_loops_fndesc = \\\n get_info(self.LoopCanonicalisingCompiler)\n\n # check CFG is the same\n def compare_cfg(a, b):\n a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks))\n b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks))\n self.assertEqual(a_cfg, b_cfg)\n\n compare_cfg(ignore_loops_ir, canonicalise_loops_ir)\n\n # check there's three more call types in the canonicalised one:\n # len(literal list)\n # range(of the len() above)\n # getitem(literal list arg, index)\n self.assertEqual(len(ignore_loops_fndesc.calltypes) + 3,\n len(canonicalise_loops_fndesc.calltypes))\n\n def find_getX(fd, op):\n return [x for x in fd.calltypes.keys()\n if isinstance(x, ir.Expr) and x.op == op]\n\n il_getiters = find_getX(ignore_loops_fndesc, \"getiter\")\n self.assertEqual(len(il_getiters), 1) # list iterator\n\n cl_getiters = find_getX(canonicalise_loops_fndesc, \"getiter\")\n self.assertEqual(len(cl_getiters), 1) # loop range iterator\n\n cl_getitems = find_getX(canonicalise_loops_fndesc, \"getitem\")\n self.assertEqual(len(cl_getitems), 1) # list getitem induced by loop\n\n # check the value of the untransformed IR getiter is now the value of\n # the transformed getitem\n self.assertEqual(il_getiters[0].value.name, cl_getitems[0].value.name)\n\n # check the type of the transformed IR getiter is a range iter\n range_inst = 
canonicalise_loops_fndesc.calltypes[cl_getiters[0]].args[0]\n self.assertTrue(isinstance(range_inst, types.RangeType))", "title": "" }, { "docid": "db8e082f1390972596fe83cdbb59d54e", "score": "0.5163115", "text": "def run_test(*args, **kwargs):\n epsilon = args[0]\n A = args[1]\n B = args[2]\n C = args[3]\n vcl_A = args[4]\n vcl_B = args[5]\n vcl_C = args[6]\n\n dtype = np.result_type(kwargs['dtype']).type\n\n alpha = p.Scalar(dtype(3.1415)) ## TODO SHOULD BE GPU SCALAR\n beta = p.HostScalar(dtype(2.718))\n\n ###\n ### TODO MISSING:\n ### + cpu / gpu combos\n ### + elementwise power function?\n ###\n\n # Test initialisers\n # + GPU scalar TODO\n #X = p.Vector(A.shape, alpha)\n #if not (X == (np.ones(A.shape, dtype = dtype) * alpha.value)).all():\n # raise RuntimeError(\"Failed: GPU scalar vector init\")\n #print(\"Test: initialisation of vector with GPU scalar passed\")\n\n # + CPU scalar TODO\n Y = p.Vector(A.shape[0], beta.value) # TODO\n if not (Y == (np.ones(A.shape, dtype = dtype) * beta.value)).all():\n raise RuntimeError(\"Failed: CPU scalar vector init\")\n print(\"Test: initialisation of vector with CPU scalar passed\")\n\n # + ndarray\n X = p.Vector(np.ones(A.shape, dtype = dtype) * beta.value)\n if not (X == (np.ones(A.shape, dtype = dtype) * beta.value)).all():\n raise RuntimeError(\"Failed: ndarray vector init\")\n print(\"Test: initialisation of vector with ndarray passed\")\n\n # + Vector\n X = p.Vector(Y)\n if not (X == Y).all():\n raise RuntimeError(\"Failed: Vector Vector init\")\n print(\"Test: initialisation of vector with Vector passed\")\n\n # Negation\n X = -vcl_A\n Y = -vcl_A.value\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: negation\")\n print(\"Test: negation passed\")\n\n # Inner product\n X = vcl_A.dot(vcl_B)\n Y = vcl_A.value.dot(vcl_B.value)\n act_diff = math.fabs(X - Y)\n if act_diff > 0.01: # NB: numpy seems to be imprecise here\n raise RuntimeError(\"Failed: inner product of vectors\")\n print(\"Test: inner product of vectors passed\")\n\n # In-place scaling (multiplication by scalar)\n X = vcl_A.value\n X *= beta.value\n vcl_A *= beta\n act_diff = math.fabs(diff(X, vcl_A))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: in-place scale (multiplication)\")\n print(\"Test: in-place scale (multiplication) passed\")\n\n # In-place scaling (division by scalar)\n X = vcl_A.value\n X /= alpha.value\n vcl_A /= alpha\n act_diff = math.fabs(diff(X, vcl_A))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: in-place scale (division)\")\n print(\"Test: in-place scale (division) passed\")\n\n # In-place add\n X = vcl_A.value\n X += vcl_B.value\n vcl_A += vcl_B\n act_diff = math.fabs(diff(X, vcl_A))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: in-place add\")\n print(\"Test: in-place add passed\")\n\n # Scaled in-place add\n X += alpha.value * vcl_B.value\n vcl_A += alpha * vcl_B\n act_diff = math.fabs(diff(X, vcl_A))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: scaled in-place add\")\n print(\"Test: scaled in-place add passed\")\n\n # Add\n Y = vcl_A.value + vcl_B.value\n Z = vcl_A + vcl_B\n act_diff = math.fabs(diff(Y, Z))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: add\")\n print(\"Test: add passed\")\n\n # Scaled add (left)\n Y = dtype(alpha.value) * vcl_B.value + vcl_C.value\n Z = alpha * vcl_B + vcl_C\n act_diff = math.fabs(diff(Y, Z))\n if act_diff > epsilon:\n print(act_diff)\n print(Y, type(Y))\n print(Z, type(Z))\n print(Z - Y)\n raise RuntimeError(\"Failed: scaled add 
(left)\")\n print(\"Test: scaled add (left) passed\")\n\n # Scaled add (right)\n Y = vcl_B.value + dtype(alpha.value) * vcl_C.value\n Z = vcl_B + alpha * vcl_C\n act_diff = math.fabs(diff(Y, Z))\n if act_diff > epsilon: # (Z == Y).all():\n pass\n raise RuntimeError(\"Failed: scaled add (left)\")\n print(\"Test: scaled add (right) passed\")\n\n # Scaled add (both)\n Y = alpha.value * vcl_B.value + alpha.value * vcl_C.value\n Z = alpha * vcl_B + alpha * vcl_C\n act_diff = math.fabs(diff(Y, Z))\n if act_diff > epsilon:\n pass\n raise RuntimeError(\"Failed: scaled add (both)\")\n print(\"Test: scaled add (both) passed\")\n\n # In-place sub\n X = vcl_A.value\n X -= vcl_B.value\n vcl_A -= vcl_B\n if not (vcl_A == X).all():\n raise RuntimeError(\"Failed: in-place sub\")\n print(\"Test: in-place sub passed\")\n\n # Scaled in-place sub\n X -= alpha.value * vcl_B.value\n vcl_A -= alpha * vcl_B\n if not (vcl_A == X).all():\n raise RuntimeError(\"Failed: scaled in-place sub\")\n print(\"Test: scaled in-place sub passed\")\n\n # Sub\n Y = vcl_A.value - vcl_B.value\n Z = vcl_A - vcl_B\n if not (Y == Z).all():\n raise RuntimeError(\"Failed: sub\")\n print(\"Test: sub passed\")\n\n # Scaled sub (left)\n Y = alpha.value * vcl_B.value - vcl_C.value\n Z = alpha * vcl_B - vcl_C\n act_diff = math.fabs(diff(Y, Z))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: scaled sub (left)\")\n print(\"Test: scaled sub (left) passed\")\n\n # Scaled sub (right)\n Y = vcl_B.value - alpha.value * vcl_C.value\n Z = vcl_B - alpha * vcl_C\n act_diff = math.fabs(diff(Y, Z))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: scaled sub (right)\")\n print(\"Test: scaled sub (right) passed\")\n\n # Scaled sub (both)\n Y = alpha.value * vcl_B.value - alpha.value * vcl_C.value\n Z = alpha * vcl_B - alpha * vcl_C\n act_diff = math.fabs(diff(Y, Z))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: scaled sub (both)\")\n print(\"Test: scaled sub (both) passed\")\n\n # Scalar multiplication (CPU scalar) -- not supported yet\n #gamma_py = beta.value * beta.value\n #gamma_vcl = beta * beta\n # ...\n # Scalar multiplication (GPU scalar)\n\n # Matrix divided by scalar\n X = vcl_A.value / alpha.value\n Y = vcl_A / alpha\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: matrix-scalar division\")\n print(\"Test: matrix-scalar division passed\")\n\n # Binary elementwise operations -- prod and div\n X = vcl_A.value * vcl_B.value\n Y = p.ElementProd(vcl_A, vcl_B)\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise matrix-matrix multiplication\")\n print(\"Test: elementwise matrix-matrix multiplication passed\")\n\n X = vcl_A.value / vcl_B.value\n Y = p.ElementDiv(vcl_A, vcl_B)\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise matrix-matrix division\")\n print(\"Test: elementwise matrix-matrix division passed\")\n\n # Unary elementwise operations\n # - abs TODO\n #X = abs(vcl_A.value)\n #Y = p.ElementAbs(vcl_A)\n #act_diff = math.fabs(diff(X, Y))\n #if act_diff > epsilon:\n # raise RuntimeError(\"Failed: elementwise abs\")\n #print(\"Test: elementwise abs passed\")\n\n # - acos\n X = np.arccos(vcl_A.value)\n Y = p.ElementAcos(vcl_A).result # TODO THIS SHOULDN'T BE REQUIRED\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise acos\")\n print(\"Test: elementwise acos passed\")\n\n # - asin\n X = np.arcsin(vcl_A.value)\n Y = 
p.ElementAsin(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise asin\")\n print(\"Test: elementwise asin passed\")\n\n # - atan\n X = np.arctan(vcl_A.value)\n Y = p.ElementAtan(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise atan\")\n print(\"Test: elementwise atan passed\")\n\n # - ceil\n X = np.ceil(vcl_A.value)\n Y = p.ElementCeil(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise ceil\")\n print(\"Test: elementwise ceil passed\")\n\n # - cos\n X = np.cos(vcl_A.value)\n Y = p.ElementCos(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise cos\")\n print(\"Test: elementwise cos passed\")\n\n # - cosh\n X = np.cosh(vcl_A.value)\n Y = p.ElementCosh(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise cosh\")\n print(\"Test: elementwise cosh passed\")\n\n # - exp\n X = np.exp(vcl_A.value)\n Y = p.ElementExp(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise exp\")\n print(\"Test: elementwise exp passed\")\n\n # - fabs\n X = np.fabs(vcl_A.value)\n Y = p.ElementFabs(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise fabs\")\n print(\"Test: elementwise fabs passed\")\n\n # - floor\n X = np.floor(vcl_A.value)\n Y = p.ElementFloor(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise floor\")\n print(\"Test: elementwise floor passed\")\n\n # - log\n X = np.log(vcl_A.value)\n Y = p.ElementLog(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise log\")\n print(\"Test: elementwise log passed\")\n\n # - log10\n X = np.log10(vcl_A.value)\n Y = p.ElementLog10(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise log10\")\n print(\"Test: elementwise log10 passed\")\n\n # - sin\n X = np.sin(vcl_A.value)\n Y = p.ElementSin(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise sin\")\n print(\"Test: elementwise sin passed\")\n\n # - sinh\n X = np.sinh(vcl_A.value)\n Y = p.ElementSinh(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise sinh\")\n print(\"Test: elementwise sinh passed\")\n\n # - sqrt\n X = np.sqrt(vcl_A.value)\n Y = p.ElementSqrt(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise sqrt\")\n print(\"Test: elementwise sqrt passed\")\n\n # - tan\n X = np.tan(vcl_A.value)\n Y = p.ElementTan(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise tan\")\n print(\"Test: elementwise tan passed\")\n\n # - tanh\n X = np.tanh(vcl_A.value)\n Y = p.ElementTanh(vcl_A).result\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: elementwise tanh\")\n print(\"Test: elementwise tanh passed\")\n\n # - norm1\n X = np.linalg.norm(vcl_A.value, 1)\n Y = p.norm(vcl_A, 1) # or vcl_A.norm(1)\n act_diff = math.fabs(X - Y)\n if act_diff > epsilon:\n print(vcl_A)\n #raise 
RuntimeError(\"Failed: norm(1)\")\n print(\"Test: norm(1) passed\")\n\n # - norm2\n X = np.linalg.norm(vcl_A.value, 2)\n Y = vcl_A.norm(2) # or vcl_A.norm(1)\n act_diff = math.fabs(X - Y)\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: norm(2)\")\n print(\"Test: norm(2) passed\")\n\n # - norm_inf\n X = np.linalg.norm(vcl_A.value, np.inf)\n Y = vcl_A.norm(np.inf)\n act_diff = math.fabs(X - Y)\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: norm(inf)\")\n print(\"Test: norm(inf) passed\")\n\n # in-place multiply-division-add\n X = vcl_C.value\n X += alpha.value * vcl_A.value + vcl_B.value / beta.value\n vcl_C += alpha * vcl_A + vcl_B / beta\n act_diff = math.fabs(diff(X, vcl_C))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: in-place multiply-division-add\")\n print(\"Test: in-place multiply-division-add passed\")\n\n # lengthy sum of scaled vectors\n X = alpha.value * vcl_A.value - vcl_B.value / beta.value + vcl_A.value * beta.value - vcl_B.value / alpha.value + vcl_C.value\n Y = alpha * vcl_A - vcl_B / beta + vcl_A * beta - vcl_B / alpha + vcl_C\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: lengthy sum of scaled vectors\")\n print(\"Test: lengthy sum of scaled vectors passed\")\n\n # sub-expression\n X = vcl_A.value + (((vcl_C.value + vcl_B.value) * alpha.value) - vcl_B.value) / beta.value\n Y = vcl_A + (((vcl_C + vcl_B) * alpha) - vcl_B) / beta\n act_diff = math.fabs(diff(X, Y))\n if act_diff > epsilon:\n raise RuntimeError(\"Failed: vector sub-expression test %s\")\n print(\"Test: vector sub-expression passed\")\n\n # plane rotation\n V = (alpha * vcl_A + beta * vcl_B).result\n W = (alpha * vcl_B - beta * vcl_A).result\n p.plane_rotation(vcl_A, vcl_B, alpha, beta)\n act_diffB = math.fabs(diff(W, vcl_B))\n act_diffA = math.fabs(diff(V, vcl_A))\n act_diffA = math.fabs(diff(V.value, vcl_A.value))\n if act_diffA > epsilon or act_diffB > epsilon:\n print(act_diffA, act_diffB)\n print(vcl_A)\n print(V)\n print(p.ElementFabs(V - vcl_A))\n #print(W, vcl_B)\n raise RuntimeError(\"Failed: plane rotation\")\n print(\"Test: plane rotation passed\")\n\n return os.EX_OK", "title": "" }, { "docid": "d556a15552c6832f338f8411f257788b", "score": "0.5123618", "text": "def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if method == \"__call__\":\n # only support unary functions in simple calls\n\n # check the input\n arrs = []\n for arg in inputs:\n if isinstance(arg, numbers.Number):\n arrs.append(arg)\n elif isinstance(arg, np.ndarray):\n if arg.shape != self.data.shape:\n print(arg.shape, self.data.shape)\n raise RuntimeError(\"Data shapes incompatible\")\n arrs.append(arg)\n elif isinstance(arg, self.__class__):\n self.assert_field_compatible(arg)\n arrs.append(arg.data)\n else:\n # unsupported type\n return NotImplemented\n\n if \"out\" in kwargs:\n # write to given field\n out = kwargs.pop(\"out\")[0]\n self.assert_field_compatible(out)\n kwargs[\"out\"] = (out.data,)\n ufunc(*arrs, **kwargs)\n return out\n else:\n # return new field\n return self.__class__(self.grid, data=ufunc(*arrs, **kwargs))\n else:\n return NotImplemented", "title": "" }, { "docid": "ea4cd2ab2bd75e5f1e7aa9a52826dd47", "score": "0.51178366", "text": "def _call_numpy(func, args, kwargs):\n\n args, kwargs = _get_cupy_args(args, kwargs)\n numpy_args, numpy_kwargs = _get_numpy_args(args, kwargs)\n numpy_res = func(*numpy_args, **numpy_kwargs)\n cupy_res = _get_cupy_result(numpy_res)\n\n return _get_fallback_result(cupy_res)", "title": "" }, { "docid": 
"d4bf6e6c38d79c037c3dc0597b7e1347", "score": "0.51174337", "text": "def test_ptrb(self):\n self.work_testc2(\n input = [4.0],\n outtype = 'double *',\n )", "title": "" }, { "docid": "cf72fb4d6b25a99154334df994ed625f", "score": "0.51038694", "text": "def test_compact(self):\n \n nbkeypoints = 1000 #constant value\n keypoints = numpy.random.rand(nbkeypoints,4).astype(numpy.float32)\n for i in range(0,nbkeypoints):\n if ((numpy.random.rand(1))[0] < 0.75):\n keypoints[i]=(-1,-1,-1,-1)\n \n self.gpu_keypoints = pyopencl.array.to_device(queue, keypoints)\n self.output = pyopencl.array.empty(queue, (nbkeypoints,4), dtype=numpy.float32, order=\"C\")\n self.output.fill(-1.0,queue)\n self.counter = pyopencl.array.zeros(queue, (1,), dtype=numpy.int32, order=\"C\")\n wg = max(self.wg),\n shape = calc_size((keypoints.shape[0],), wg)\n nbkeypoints = numpy.int32(nbkeypoints)\n \n t0 = time.time()\n k1 = self.program.compact(queue, shape, wg, \n \tself.gpu_keypoints.data, self.output.data, self.counter.data, nbkeypoints)\n res = self.output.get()\n count = self.counter.get()[0]\n t1 = time.time()\n ref, count_ref = my_compact(keypoints,nbkeypoints)\n \n t2 = time.time()\n\n res_sort_arg = res[:,0].argsort(axis=0) \n res_sort = res[res_sort_arg]\n ref_sort_arg = ref[:,0].argsort(axis=0) \n ref_sort = ref[ref_sort_arg]\n delta = abs((res_sort - ref_sort)).max()\n self.assert_(delta < 1e-5, \"delta=%s\" % (delta))\n logger.info(\"delta=%s\" % delta)\n if PROFILE:\n logger.info(\"Global execution time: CPU %.3fms, GPU: %.3fms.\" % (1000.0 * (t2 - t1), 1000.0 * (t1 - t0)))\n logger.info(\"Compact operation took %.3fms\" % (1e-6 * (k1.profile.end - k1.profile.start)))", "title": "" }, { "docid": "ed7d0e51080031fd8ddbac91ba6eed8a", "score": "0.50997454", "text": "def test_qubit_unitary_jax_jit(self, U, num_wires):\n import jax\n from jax import numpy as jnp\n\n U = jnp.array(U)\n f = lambda m: qml.QubitUnitary(m, wires=range(num_wires)).matrix()\n out = jax.jit(f)(U)\n assert qml.math.allclose(out, qml.QubitUnitary(U, wires=range(num_wires)).matrix())", "title": "" }, { "docid": "b36fb1861f7ff8329ae48c853b164ce6", "score": "0.50941974", "text": "def test_fromfunction(self):\n def f(*global_inds):\n return 1.0\n \n try:\n comm = create_comm_of_size(4)\n except InvalidCommSizeError:\n pass\n else:\n try:\n a = densedistarray.fromfunction(f, (16,16), dtype='int64', dist=('b','c'), comm=comm)\n except NullCommError:\n pass\n else:\n self.assertEquals(a.shape, (16,16))\n self.assertEquals(a.dtype, np.dtype('int64'))\n for global_inds, value in densedistarray.ndenumerate(a):\n self.assertEquals(1.0, value)\n comm.Free()", "title": "" }, { "docid": "4e26d3ae5dbd5e81c6608d13301d7514", "score": "0.5093251", "text": "def test_profiling_without_blosc_and_numexpr(tmpdir):\n import sys\n\n sys.modules[\"blosc\"] = None\n sys.modules[\"numexpr\"] = None\n try:\n with disable_thread_pools():\n pass\n finally:\n del sys.modules[\"blosc\"]\n del sys.modules[\"numexpr\"]", "title": "" }, { "docid": "664f9eda918c876c3e77b603fc740549", "score": "0.50916165", "text": "def test_vectorize_output_kwarg(self):\n def check(ufunc):\n a = np.arange(10, 16, dtype='int32')\n out = np.zeros_like(a)\n got = ufunc(a, a, out=out)\n self.assertIs(got, out)\n self.assertPreciseEqual(out, a + a)\n with self.assertRaises(TypeError):\n ufunc(a, a, zzz=out)\n\n # With explicit sigs\n ufunc = vectorize(['int32(int32, int32)'], nopython=True)(add)\n check(ufunc)\n # With implicit sig\n ufunc = vectorize(nopython=True)(add)\n check(ufunc) # compiling\n 
check(ufunc) # after compiling", "title": "" }, { "docid": "dd9393b752da41dfb5f3bce162b93074", "score": "0.50913954", "text": "def warmup():\n # Initialize a feature_matrix\n feat_mat = np.array([[0.1, 0.2], [0.3, 0.4]])\n\n # Warmup the chaotic sampler\n cs.warmup()\n\n # Execute extract features\n out = transform(\n feat_mat, initial_cond=0.1, trajectory_len=100, epsilon=0.01, threshold=0.2\n )\n\n # Check if output matches expected value\n if out.shape == (2, 8) and out[0, 5] == 12:\n print(\"> Numba JIT warmup successful for transform ...\")\n else:\n print(\"> Numba JIT warmup failed for transform ...\")", "title": "" }, { "docid": "8aeb4739ee01bec4d23b094d4595e2f2", "score": "0.5090371", "text": "def numba_jit(*args, **kwargs): # pylint: disable=unused-argument\n def decorator(func):\n \"\"\"Decorator that smply returns the function being decorated\"\"\"\n return func\n return decorator", "title": "" }, { "docid": "1947828b3ed91dd11e3c4f8c39750d96", "score": "0.50802976", "text": "def test_ptra(self):\n self.work_testc2(\n input = None,\n outtype = 'double *',\n )", "title": "" }, { "docid": "f32a9e5dfca71a178fd9d1f8ae61dc8d", "score": "0.5066096", "text": "def is_numba_compat_strict() -> bool:\n return STRICT_NUMBA_COMPAT_CHECK", "title": "" }, { "docid": "b6c43ec1790e174b29f69947c4564968", "score": "0.50514966", "text": "def test_err6(self):\n\n @functor_api\n def err6():\n pass\n\n f(FunctorAPIData())", "title": "" }, { "docid": "5bc9763d35c403cdb4c5fccbabfb2c93", "score": "0.50390375", "text": "def test_in_seq_call(self):\n def a(i):\n return i + 1\n\n def b(i):\n return i + 2\n\n def foo(f, g):\n r = 0\n for f_ in (f, g):\n r = r + f_(r)\n return r\n\n sig = int64(int64)\n\n for decor in [mk_cfunc_func(sig), mk_wap_func(sig),\n mk_njit_with_sig_func(sig)]:\n for jit_opts in [dict(nopython=True), dict(forceobj=True)]:\n jit_ = jit(**jit_opts)\n with self.subTest(decor=decor.__name__):\n a_ = decor(a)\n b_ = decor(b)\n self.assertEqual(jit_(foo)(a_, b_), foo(a, b))", "title": "" }, { "docid": "25d957d764650756e3e3f36911563824", "score": "0.50347453", "text": "def test_lots_of_loops(self):\n\n def get_info(pipeline):\n @njit(pipeline_class=pipeline)\n def foo(tup):\n acc = 0\n for i in tup:\n acc += i\n for j in tup + (4, 5, 6):\n acc += 1 - j\n if j > 5:\n break\n else:\n acc -= 2\n for i in tup:\n acc -= i % 2\n\n return acc\n\n x = (1, 2, 3)\n self.assertEqual(foo(x), foo.py_func(x))\n cres = foo.overloads[foo.signatures[0]]\n func_ir = cres.metadata['preserved_ir']\n return func_ir, cres.fndesc\n\n ignore_loops_ir, ignore_loops_fndesc = \\\n get_info(self.LoopIgnoringCompiler)\n canonicalise_loops_ir, canonicalise_loops_fndesc = \\\n get_info(self.LoopCanonicalisingCompiler)\n\n # check CFG is the same\n def compare_cfg(a, b):\n a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks))\n b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks))\n self.assertEqual(a_cfg, b_cfg)\n\n compare_cfg(ignore_loops_ir, canonicalise_loops_ir)\n\n # check there's three * N more call types in the canonicalised one:\n # len(tuple arg)\n # range(of the len() above)\n # getitem(tuple arg, index)\n self.assertEqual(len(ignore_loops_fndesc.calltypes) + 3 * 3,\n len(canonicalise_loops_fndesc.calltypes))", "title": "" }, { "docid": "9429e4c993e8e196545bf32b868bda87", "score": "0.50087196", "text": "def test_instantiated_global_resolve_functions():\n A = np.random.rand(10)\n reg_A = np.copy(A)\n reg_A[cfg.q] = (reg_A[cfg.p // 2] * 4 + cfg.p) + val\n\n instantiated_global_with_funcs(A)\n\n assert 
np.allclose(A, reg_A)", "title": "" }, { "docid": "a7d3bf4becb700cfb793cfef9493ebb2", "score": "0.5000231", "text": "def testprimitives(self):", "title": "" }, { "docid": "e85dca732125097b6130725104f559d7", "score": "0.49917844", "text": "def test_acs_small_pure_python(self):\n self.test_acs_small(cython=False)", "title": "" }, { "docid": "aa144ed62786d06305a7536fcafe6cf0", "score": "0.49886647", "text": "def test_method():\r\n x = PureFoo(2)\r\n print(x.puremeth(2))\r\n if cython.compiled:\r\n print(isinstance(x(), float))\r\n else:\r\n print(True)\r\n return", "title": "" }, { "docid": "dc4e8a476c6cdfd10bc10a4f7a5d7a0b", "score": "0.49883735", "text": "def test_inlined_loops(self):\n\n def get_info(pipeline):\n @njit(pipeline_class=pipeline)\n def foo(tup):\n def bar(n):\n acc = 0\n for i in range(n):\n acc += 1\n return acc\n\n acc = 0\n for i in tup:\n acc += i\n acc += bar(i)\n\n return acc\n\n x = (1, 2, 3)\n self.assertEqual(foo(x), foo.py_func(x))\n cres = foo.overloads[foo.signatures[0]]\n func_ir = cres.metadata['preserved_ir']\n return func_ir, cres.fndesc\n\n ignore_loops_ir, ignore_loops_fndesc = \\\n get_info(self.LoopIgnoringCompiler)\n canonicalise_loops_ir, canonicalise_loops_fndesc = \\\n get_info(self.LoopCanonicalisingCompiler)\n\n # check CFG is the same\n def compare_cfg(a, b):\n a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks))\n b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks))\n self.assertEqual(a_cfg, b_cfg)\n\n compare_cfg(ignore_loops_ir, canonicalise_loops_ir)\n\n # check there's 2 * N - 1 more call types in the canonicalised one:\n # The -1 comes from the closure being inlined and and the call removed.\n # len(tuple arg)\n # range(of the len() above)\n # getitem(tuple arg, index)\n self.assertEqual(len(ignore_loops_fndesc.calltypes) + 5,\n len(canonicalise_loops_fndesc.calltypes))", "title": "" }, { "docid": "726366baa975fbd59a0e173804b951df", "score": "0.49869093", "text": "def test_nested_call(self):\n builder = UFuncBuilder(outer,\n targetoptions={'nopython': True})\n builder.add(\"(int64, int64)\")\n ufunc = builder.build_ufunc()\n self.assertEqual(ufunc(-1, 3), 2)", "title": "" }, { "docid": "6c1c8410b2b71f41357fbf727c776604", "score": "0.4984698", "text": "def pre_numba_checks(label, *args):\n if label == \"criterion_full_in\":\n v, q, beta, lambda_ = args\n np.testing.assert_equal(beta >= 0, True)\n np.testing.assert_equal(lambda_ >= 0, True)\n np.testing.assert_almost_equal(sum(q), 1)\n np.testing.assert_equal(np.all(q > 0), True)\n np.testing.assert_equal(np.all(np.isfinite(v)), True)\n elif label == \"criterion_full_out\":\n (rslt,) = args\n # We do encounter the case that the criterion function is infinite. 
However,\n # this is not a problem, since it is only called in a minimization which\n # discards this evaluation point.\n np.testing.assert_equal(np.isfinite(rslt) or rslt == np.inf, True)\n elif label == \"calculate_p_in\":\n v, q, lambda_ = args\n np.testing.assert_equal(lambda_ >= 0, True)\n np.testing.assert_almost_equal(sum(q), 1)\n np.testing.assert_equal(np.all(q > 0), True)\n np.testing.assert_equal(np.all(np.isfinite(v)), True)\n elif label == \"calculate_p_out\":\n (p,) = args\n np.testing.assert_equal(np.all(p >= 0.0), True)\n np.testing.assert_almost_equal(sum(p), 1.0)\n elif label in [\"get_worst_case_in\", \"get_worst_case_outcome_in\"]:\n v, q, beta = args\n np.testing.assert_equal(np.all(np.isfinite(v)), True)\n np.testing.assert_almost_equal(sum(q), 1)\n np.testing.assert_equal(np.all(q > 0), True)\n np.testing.assert_equal(beta >= 0.0, True)\n elif label == \"get_worst_case_out\":\n p, q, beta, rslt = args\n np.testing.assert_almost_equal(sum(p), 1)\n np.testing.assert_equal(np.all(p >= 0.0), True)\n np.testing.assert_almost_equal(sum(q), 1)\n np.testing.assert_equal(np.all(q > 0), True)\n np.testing.assert_equal(beta >= 0.0, True)\n np.testing.assert_equal(rslt[2] == 0, True)\n np.testing.assert_equal(entropy(p, q) - beta < 0.0001, True)\n elif label == \"get_worst_case_outcome_out\":\n v, q, beta, is_cost, rslt = args\n np.testing.assert_equal(\n min(v) - SMALL_FLOAT <= rslt <= max(v) + SMALL_FLOAT, True\n )\n else:\n raise NotImplementedError", "title": "" }, { "docid": "35734db44b3db9d6a7913050ca58dbc7", "score": "0.49725357", "text": "def test_err4(self):\n\n @functor_api\n def err4(data):\n data.a = 2\n data.b[0] = 2\n\n test1()", "title": "" }, { "docid": "a94bf30e592b10d2f98e1fb61d5faf28", "score": "0.49700972", "text": "def test_acs_large_pure_python(self):\n self.test_acs_large(cython=False)", "title": "" }, { "docid": "f097a32b51de5fee266490514ff72242", "score": "0.49660826", "text": "def test_err1(self):\n\n @functor_api\n def err1(*args):\n pass", "title": "" }, { "docid": "03c012e52aeee6198793e4c707cb87e4", "score": "0.496195", "text": "def mem_nsb():\n _min3p.f90wrap_mem_nsb()", "title": "" }, { "docid": "0f469ffaa9afed0207fef7d9433c54a5", "score": "0.49597725", "text": "def test_CompiledProgram1():\n train_program = fluid.Program()\n startup_program = fluid.Program()\n train_program.random_seed = 33\n startup_program.random_seed = 33\n place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n if not fluid.is_compiled_with_cuda():\n os.environ['CPU_NUM'] = str(2)\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n exe = fluid.Executor(place)\n data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n test_program = train_program.clone(for_test=True)\n fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)\n\n exe.run(startup_program)\n build_strategy = fluid.BuildStrategy()\n build_strategy.fuse_all_reduce_ops = True\n compiled_train_prog = fluid.CompiledProgram(\n train_program).with_data_parallel(\n loss_name=loss.name, build_strategy=build_strategy)\n # 注意:如果此处不设置share_vars_from=compiled_train_prog,测试过程中用的参数与训练使用的参数是不一致\n compiled_test_prog = fluid.CompiledProgram(\n test_program).with_data_parallel(\n share_vars_from=compiled_train_prog)\n\n train_data = np.ones(shape=(10, 1)).astype('float32')\n loss_data, = exe.run(compiled_train_prog,\n feed={\"X\": train_data},\n fetch_list=[loss.name])\n 
print(loss_data)\n if platform.system() == \"Darwin\" or platform.system() == \"Linux\":\n tools.compare(loss_data, [0.09116864, 0.09116864])\n else:\n tools.compare(loss_data, [0.09116864])\n test_data = np.ones(shape=(10, 1)).astype('float32')\n loss_data, = exe.run(compiled_test_prog,\n feed={\"X\": test_data},\n fetch_list=[loss.name])\n\n print(loss_data)\n if platform.system() == \"Darwin\" or platform.system() == \"Linux\":\n tools.compare(loss_data, [0.08916866, 0.08916866])\n else:\n tools.compare(loss_data, [0.08916866])", "title": "" }, { "docid": "4e3ef083cace23eb37bbd3812ff665d0", "score": "0.49452928", "text": "def _in_jit() -> bool:\n return core.cur_sublevel().level > 0", "title": "" }, { "docid": "732ec7930e66d6657b16344f8aa5a945", "score": "0.49450707", "text": "def _func_91_90_fp32(args):\n tvm_ib, data, dst, data_ub, data_res, data_tail, reg, device_core_num, \\\n block_index, n_begin, ub_ele_eight, cp_align_len, num_g = args\n\n _, col_len, row_in = data.shape\n row_out = dst.shape[2]\n row_zu_in = 8\n row_out_block = _ceil_div(row_out, cp_align_len)\n row_out_align = _ceil_fill(row_out, cp_align_len)\n num_col_ub = ub_ele_eight // row_out_align\n num_ub_loop = col_len // 8 // num_col_ub\n num_ub_mod = col_len % (8 * num_col_ub)\n n_index = num_g * device_core_num + block_index\n\n with tvm_ib.for_range(0, num_ub_loop, name=\"num_u\") as num_u:\n for num_zi in range(row_zu_in):\n data_offset = n_index * col_len * row_in\\\n + num_u * 8 * num_col_ub * row_in\\\n + num_zi * row_in + n_begin - 2 * (num_zi % 4)\n ub_offset = 0\n n_burst = num_col_ub\n burst_len_data = row_out_block\n src_stride = (8 * row_in) // cp_align_len - burst_len_data\n dst_stride = 0\n args = tvm_ib, data, data_ub[num_zi], data_offset, ub_offset, \\\n n_burst, burst_len_data, src_stride, \\\n dst_stride, cp_align_len\n _func_gm_to_ub_align(args)\n\n mask1, mask2 = _set_mask_slice(0 + 2 * (num_zi % 4), 0)\n tvm_ib.emit(tvm.call_extern(\n dst.dtype, \"set_vector_mask\",\n tvm.const(mask1, dtype=\"uint64\"),\n tvm.const(mask2, dtype=\"uint64\")))\n ub_offset = 0\n res_offset = num_zi * row_out - 2 * (num_zi % 4)\n repeat = num_col_ub\n srcm0 = 1\n dstm0 = 1\n srcm1 = row_out_block\n dstm1 = row_out * row_zu_in // cp_align_len\n args = tvm_ib, data_ub[num_zi], data_res, \\\n ub_offset, res_offset, \\\n repeat, srcm0, dstm0, srcm1, dstm1, cp_align_len\n _func_vadds(args)\n\n mask1, mask2 = _set_mask_slice(0, 128 - 90 - 2 * (num_zi % 4))\n tvm_ib.emit(tvm.call_extern(\n dst.dtype, \"set_vector_mask\",\n tvm.const(mask1, dtype=\"uint64\"),\n tvm.const(mask2, dtype=\"uint64\")))\n ub_offset = 64\n res_offset = num_zi * row_out - 2 * (num_zi % 4) + 64\n repeat = num_col_ub\n srcm0 = 1\n dstm0 = 1\n srcm1 = row_out_block\n dstm1 = row_out * row_zu_in // cp_align_len\n args = tvm_ib, data_ub[num_zi], data_res, \\\n ub_offset, res_offset, \\\n repeat, srcm0, dstm0, srcm1, dstm1, cp_align_len\n _func_vadds(args)\n\n tvm_ib.emit(tvm.call_extern(\n dst.dtype, \"set_vector_mask\",\n tvm.const(-1, dtype=\"uint64\"),\n tvm.const(-1, dtype=\"uint64\")))\n\n dst_offset = n_index * col_len * row_out \\\n + num_u * 8 * num_col_ub * row_out\n burst_len_dst = (8 * num_col_ub * row_out) // cp_align_len\n tvm_ib.emit(\n tvm.call_extern(\n dst.dtype, \"copy_ubuf_to_gm\",\n dst.access_ptr('w', offset=dst_offset),\n data_res.access_ptr(\"r\", offset=0),\n 0, 1, burst_len_dst, 0, 0))\n\n with tvm_ib.if_scope(num_ub_mod > 0):\n col_group = num_ub_mod // row_zu_in\n col_mod = num_ub_mod % row_zu_in\n\n for num_zi in 
range(row_zu_in):\n with tvm_ib.if_scope(num_zi < num_ub_mod):\n data_offset = n_index * col_len * row_in\\\n + num_ub_loop * 8 * num_col_ub * row_in\\\n + num_zi * row_in + n_begin - 2 * (num_zi % 4)\n ub_offset = 0\n n_burst = tvm.select(num_zi < col_mod, col_group + 1,\n col_group)\n burst_len_data = row_out_block\n src_stride = (8 * row_in) // cp_align_len - burst_len_data\n dst_stride = 0\n args = tvm_ib, data, data_ub[num_zi], data_offset, ub_offset, \\\n n_burst, burst_len_data, src_stride, \\\n dst_stride, cp_align_len\n _func_gm_to_ub_align(args)\n\n mask1, mask2 = _set_mask_slice(0 + 2 * (num_zi % 4), 0)\n tvm_ib.emit(tvm.call_extern(\n dst.dtype, \"set_vector_mask\",\n tvm.const(mask1, dtype=\"uint64\"),\n tvm.const(mask2, dtype=\"uint64\")))\n ub_offset = 0\n res_offset = num_zi * row_out - 2 * (num_zi % 4)\n repeat = tvm.select(num_zi < col_mod, col_group + 1,\n col_group)\n srcm0 = 1\n dstm0 = 1\n srcm1 = row_out_block\n dstm1 = row_out * row_zu_in // cp_align_len\n args = tvm_ib, data_ub[num_zi], data_res, \\\n ub_offset, res_offset, \\\n repeat, srcm0, dstm0, srcm1, dstm1, cp_align_len\n _func_vadds(args)\n\n mask1, mask2 = _set_mask_slice(0, 128 - 90 - 2 * (num_zi % 4))\n tvm_ib.emit(tvm.call_extern(\n dst.dtype, \"set_vector_mask\",\n tvm.const(mask1, dtype=\"uint64\"),\n tvm.const(mask2, dtype=\"uint64\")))\n ub_offset = 64\n res_offset = num_zi * row_out - 2 * (num_zi % 4) + 64\n repeat = tvm.select(num_zi < col_mod, col_group + 1,\n col_group)\n srcm0 = 1\n dstm0 = 1\n srcm1 = row_out_block\n dstm1 = row_out * row_zu_in // cp_align_len\n args = tvm_ib, data_ub[num_zi], data_res, \\\n ub_offset, res_offset, \\\n repeat, srcm0, dstm0, srcm1, dstm1, cp_align_len\n _func_vadds(args)\n\n tvm_ib.emit(tvm.call_extern(\n dst.dtype, \"set_vector_mask\",\n tvm.const(-1, dtype=\"uint64\"),\n tvm.const(-1, dtype=\"uint64\")))\n\n dst_offset = n_index * col_len * row_out \\\n + num_ub_loop * 8 * num_col_ub * row_out\n mod_len = num_ub_mod * row_out\n with tvm_ib.if_scope(mod_len % cp_align_len == 0):\n burst_len_dst = mod_len // cp_align_len\n tvm_ib.emit(\n tvm.call_extern(\n dst.dtype, \"copy_ubuf_to_gm\",\n dst.access_ptr('w', offset=dst_offset),\n data_res.access_ptr(\"r\", offset=0),\n 0, 1, burst_len_dst, 0, 0))\n with tvm_ib.else_scope():\n mod_len_align = mod_len - cp_align_len\n burst_len_dst_a = _ceil_div(mod_len_align, cp_align_len)\n tvm_ib.emit(\n tvm.call_extern(\n dst.dtype, \"copy_ubuf_to_gm\",\n dst.access_ptr('w', offset=dst_offset),\n data_res.access_ptr(\"r\", offset=0),\n 0, 1, burst_len_dst_a, 0, 0))\n with tvm_ib.for_range(0, cp_align_len, name=\"num_a\") as num_a:\n tvm_ib.emit(tvm.call_extern(\n data_res.dtype, \"reg_mov\",\n tvm.call_extern(reg.dtype, \"reg\", reg[0]),\n data_res.access_ptr('r',\n offset=mod_len_align + num_a)\n ))\n tvm_ib.emit(tvm.call_extern(\n data_tail.dtype, \"reg_mov\",\n data_tail.access_ptr('w',\n offset=num_a),\n tvm.call_extern(reg.dtype, \"reg\", reg[0])\n ))\n tvm_ib.emit(\n tvm.call_extern(\n dst.dtype, \"copy_ubuf_to_gm\",\n dst.access_ptr('w', offset=dst_offset + mod_len_align),\n data_tail.access_ptr(\"r\", offset=0),\n 0, 1, 1, 0, 0))", "title": "" }, { "docid": "231515e002e8c29c42db057c6ce256b6", "score": "0.49420968", "text": "def jit_to_python_function(node, speedup):\n logger.debug(\n 'Translate C function %s into its python version', node.op_type)\n if node.op_type not in trans_from_jit_to_python:\n logger.error(\n '%s is not Supported! Please report an issue at https://github.com/microsoft/nni. 
Thanks~', node.op_type)\n # return None to skip the mask inference for this node\n return None\n return trans_from_jit_to_python[node.op_type](node, speedup)", "title": "" }, { "docid": "f5171b8c0421ab0d05ffa471a06a5062", "score": "0.49389973", "text": "def test_udp_udf_apply_neighborhood_with_parameter(api100, user_defined_process_registry, set_parameters, udf_code):\n udf_code = textwrap.dedent(udf_code)\n udp_id = random_name(\"udp\")\n udp_spec = {\n \"id\": udp_id,\n \"parameters\": [\n {\"name\": \"data\", \"schema\": {\"type\": \"object\", \"subtype\": \"raster-cube\"}},\n {\"name\": \"offset\", \"default\": 10, \"optional\": True, \"schema\": {\"type\": \"number\"}},\n ],\n \"process_graph\": {\n \"apply_neighborhood\": {\n \"process_id\": \"apply_neighborhood\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"process\": {\"process_graph\": {\"udf\": {\n \"process_id\": \"run_udf\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"udf\": udf_code,\n \"runtime\": \"Python\",\n \"context\": {\n \"offset\": {\"from_parameter\": \"offset\"},\n }\n },\n \"result\": True\n }}},\n \"size\": [{'dimension': 'x', 'unit': 'px', 'value': 32},\n {'dimension': 'y', 'unit': 'px', 'value': 32}],\n \"overlap\": [{'dimension': 'x', 'unit': 'px', 'value': 8},\n {'dimension': 'y', 'unit': 'px', 'value': 8}],\n },\n \"result\": True\n }\n }\n }\n user_defined_process_registry.save(user_id=TEST_USER, process_id=udp_id, spec=udp_spec)\n\n udp_args = {\"data\": {\"from_node\": \"lc\"}}\n if set_parameters:\n udp_args[\"offset\"] = 20\n\n response = api100.check_result({\n \"lc\": {\n \"process_id\": \"load_collection\",\n \"arguments\": {\n \"id\": \"TestCollection-LonLat4x4\",\n \"temporal_extent\": [\"2021-01-01\", \"2021-02-01\"],\n \"spatial_extent\": {\"west\": 0.0, \"south\": 0.0, \"east\": 1.0, \"north\": 1.0},\n \"bands\": [\"Longitude\", \"Day\"]\n },\n },\n \"udp\": {\"process_id\": udp_id, \"arguments\": udp_args},\n \"save\": {\n \"process_id\": \"save_result\",\n \"arguments\": {\"data\": {\"from_node\": \"udp\"}, \"format\": \"json\"},\n \"result\": True,\n }\n })\n result = response.assert_status_code(200).json\n _log.info(repr(result))\n\n assert result[\"dims\"] == [\"t\", \"bands\", \"x\", \"y\"]\n data = result[\"data\"]\n\n expected = np.array([\n [[[.0] * 4, [.25] * 4, [.5] * 4, [.75] * 4], [[5] * 4] * 4],\n [[[.0] * 4, [.25] * 4, [.5] * 4, [.75] * 4], [[15] * 4] * 4],\n [[[.0] * 4, [.25] * 4, [.5] * 4, [.75] * 4], [[25] * 4] * 4],\n ]) + (20 if set_parameters else 10)\n\n assert_equal(data, expected)", "title": "" }, { "docid": "5fcaf5728f8e254c49547469bd9c203d", "score": "0.4934049", "text": "def binmatevap():\n _min3p.f90wrap_binmatevap()", "title": "" }, { "docid": "8162a479b4390634b212fe136c5df5d1", "score": "0.4932546", "text": "def mem_nc():\n _min3p.f90wrap_mem_nc()", "title": "" }, { "docid": "bd8e681334a1d94ed3c046010c219651", "score": "0.492062", "text": "def test_return_from_subroutine(self):\n for address in range(0x200, 0xFFFF, 0x10):\n self.cpu.memory[self.cpu.registers['sp']] = address & 0x00FF\n self.cpu.memory[self.cpu.registers['sp'] + 1] = \\\n (address & 0xFF00) >> 8\n self.cpu.registers['sp'] += 2\n self.cpu.registers['pc'] = 0\n self.cpu.return_from_subroutine()\n self.assertEqual(self.cpu.registers['pc'], address)", "title": "" }, { "docid": "5e25a30213dc9aa0a12b88327e07da7e", "score": "0.49152178", "text": "def dummy():\n pass", "title": "" }, { "docid": "e4a42e16d20786608db179d563520091", "score": "0.49000862", "text": 
"def numba_cpu_is_supported(min_version: str) -> bool:\n module_available, _ = check_lib_version(\"numba\", checked_version=min_version, operator=operator.ge)\n\n # If numba is not installed\n if module_available is None:\n return False\n return True", "title": "" }, { "docid": "086c01ccce36058a5c91e84c6372916d", "score": "0.489969", "text": "def test_dynamic_functions(self):\r\n out = self.oc.ones(1, 2)\r\n assert np.allclose(out, np.ones((1, 2)))\r\n\r\n U, S, V = self.oc.svd([[1, 2], [1, 3]], nout=3)\r\n assert np.allclose(U, ([[-0.57604844, -0.81741556], [-0.81741556, 0.57604844]]))\r\n assert np.allclose(S, ([[3.86432845, 0.0], [0.0, 0.25877718]]))\r\n assert np.allclose(V, ([[-0.36059668, -0.93272184], [-0.93272184, 0.36059668]]))\r\n out = self.oc.roundtrip(1)\r\n assert out == 1\r\n with pytest.raises(Oct2PyError):\r\n self.oc.eval(\"_spam\")", "title": "" }, { "docid": "853718f53dee06524521ab49804a0e50", "score": "0.48861194", "text": "def test(self):\n par = [self.draw_from_prior() for i in xrange(100)]\n numexpr_temp = self.numexpr\n for self.numexpr in [True, False]:\n t0 = systemtime()\n for i in par:\n dump = self.__call__(i)\n print \"numexpr: %s. %f seconds\" % (self.numexpr, (systemtime() - t0)/100)\n self.numexpr, self.simple = numexpr_temp, simple_temp", "title": "" }, { "docid": "ed0d4e4851db3dbed987a3b15e4f71e1", "score": "0.48782396", "text": "def cuda(self, block_x, thread_x):", "title": "" }, { "docid": "ed0d4e4851db3dbed987a3b15e4f71e1", "score": "0.48782396", "text": "def cuda(self, block_x, thread_x):", "title": "" }, { "docid": "4dee04e6de6761d84274a78b066f8f6d", "score": "0.48730534", "text": "def test_basic(self, func_name, ndim, keepdims):\n shape = (5,) * ndim\n size = prod(shape)\n in_np = np.random.random(shape)\n\n # set an element to nan\n index_nan = np.random.randint(low=0, high=size)\n index_nan = np.unravel_index(index_nan, shape)\n in_num = num.array(in_np)\n in_num[index_nan] = num.nan\n in_np[index_nan] = np.nan\n\n func_np = getattr(np, func_name)\n func_num = getattr(num, func_name)\n\n # make sure numpy and cunumeric give the same out array and max val\n out_np = np.unravel_index(func_np(in_np, keepdims=keepdims), shape)\n out_num = np.unravel_index(func_num(in_num, keepdims=keepdims), shape)\n\n index_array_np = in_np[out_np]\n index_array_num = in_num[out_num]\n\n assert np.array_equal(out_num, out_np)\n assert np.array_equal(index_array_num, index_array_np)", "title": "" }, { "docid": "7bcff47568183d80ef50ca90ab5bde28", "score": "0.4868857", "text": "def test_usng_proxy(self):\r\n\r\n pass", "title": "" }, { "docid": "c97213e406bcdf3574bb3cf18b614c58", "score": "0.4865445", "text": "def test_broadcast_jax_jit(self, cost_fn, expected_res, expected_grad, tol):\n x = jnp.array([0.0, 1e-7, 0.456, np.pi / 2 - 1e-7, np.pi / 2])\n\n jitted_cost = jax.jit(cost_fn)\n res = jitted_cost(x)\n grad = qml.math.diag(jax.jacobian(jitted_cost)(x))\n\n assert qml.math.allclose(res, expected_res(x), tol)\n assert qml.math.allclose(grad, expected_grad(x), tol)", "title": "" }, { "docid": "11973900832d6932899eba14c062d301", "score": "0.48623976", "text": "def test_udp_udf_reduce_temporal_with_parameter(api100, user_defined_process_registry, set_offset, udf_code):\n udf_code = textwrap.dedent(udf_code)\n udp_id = random_name(\"udp\")\n udp_spec = {\n \"id\": udp_id,\n \"parameters\": [\n {\"name\": \"data\", \"schema\": {\"type\": \"object\", \"subtype\": \"raster-cube\"}},\n {\"name\": \"offset\", \"default\": 12, \"optional\": True, \"schema\": {\"type\": 
\"number\"}},\n ],\n \"process_graph\": {\n \"reduce\": {\n \"process_id\": \"reduce_dimension\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"dimension\": \"t\",\n \"reducer\": {\"process_graph\": {\"udf\": {\n \"process_id\": \"run_udf\",\n \"arguments\": {\n \"data\": {\"from_parameter\": \"data\"},\n \"udf\": udf_code,\n \"runtime\": \"Python\",\n \"context\": {\"offset\": {\"from_parameter\": \"offset\"}}\n },\n \"result\": True\n }}}\n },\n \"result\": True\n }\n }\n }\n user_defined_process_registry.save(user_id=TEST_USER, process_id=udp_id, spec=udp_spec)\n\n udp_args = {\"data\": {\"from_node\": \"lc\"}}\n if set_offset:\n udp_args[\"offset\"] = 56\n response = api100.check_result({\n \"lc\": {\n \"process_id\": \"load_collection\",\n \"arguments\": {\n \"id\": \"TestCollection-LonLat4x4\",\n \"temporal_extent\": [\"2021-01-01\", \"2021-02-01\"],\n \"spatial_extent\": {\"west\": 0.0, \"south\": 0.0, \"east\": 1.0, \"north\": 1.0},\n \"bands\": [\"Longitude\", \"Day\"]\n },\n },\n \"udp\": {\"process_id\": udp_id, \"arguments\": udp_args},\n \"save\": {\n \"process_id\": \"save_result\",\n \"arguments\": {\"data\": {\"from_node\": \"udp\"}, \"format\": \"json\"},\n \"result\": True,\n }\n })\n result = response.assert_status_code(200).json\n _log.info(repr(result))\n\n assert result[\"dims\"] == [\"bands\", \"x\", \"y\"]\n data = result[\"data\"]\n expected_offset = 56 if set_offset else 12\n assert_equal(data, expected_offset + np.array([\n np.array([[0, .25, .5, .75]] * 4).T,\n np.full((4, 4), fill_value=25)\n ]))", "title": "" }, { "docid": "14721bc1ed15b23119fedde2fb6b3f33", "score": "0.4859281", "text": "def test_fromfunction_complicated(self):\n def f(*global_inds):\n return sum(global_inds)\n\n d = Distribution.from_shape(comm=self.comm,\n shape=(16, 16), dist=('b', 'c'))\n a = localarray.fromfunction(f, d, dtype='int64')\n self.assertEqual(a.global_shape, (16,16))\n self.assertEqual(a.dtype, np.dtype('int64'))\n for global_inds, value in localarray.ndenumerate(a):\n self.assertEqual(sum(global_inds), value)", "title": "" }, { "docid": "fc04bac43016f0e610e05d2ceac44353", "score": "0.48557413", "text": "def test_unsigned_byte_args(self):\n from _rawffi.alt import CDLL, types\n libfoo = CDLL(self.libfoo_name)\n sum_xy = libfoo.getfunc('sum_xy_us', [types.ubyte, types.ubyte],\n types.ubyte)\n assert sum_xy(100, 40) == 140\n assert sum_xy(200, 60) == 260 % 256", "title": "" }, { "docid": "d6c651d4bc97735261cc29de3232a887", "score": "0.48512462", "text": "def numba_cuda_is_supported(min_version: str) -> bool:\n module_available = numba_cpu_is_supported(min_version)\n\n # If numba is not installed\n if module_available is None:\n return False\n\n if module_available is not True:\n return False\n from numba import cuda\n\n if not hasattr(cuda, \"is_supported_version\"):\n # assume cuda is supported, but it may fail due to CUDA incompatibility\n return False\n\n try:\n cuda_available = cuda.is_available()\n cuda_compatible = cuda.is_supported_version() if cuda_available else False\n if is_numba_compat_strict():\n return cuda_available and cuda_compatible\n return cuda_available\n\n except OSError:\n # dlopen(libcudart.dylib) might fail if CUDA was never installed in the first place.\n return False", "title": "" }, { "docid": "e4163ba7ba7ba25a354f8c85fe6e8b43", "score": "0.4850991", "text": "def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # noqa: C901\n in_place = OUT in kwargs\n for x in inputs + kwargs.get(OUT, ()):\n # Use GlobalOffsetArray 
instead of type(self) for isinstance to\n # allow subclasses that don't override __array_ufunc__ to\n # handle GlobalOffsetArray objects.\n if not isinstance(x, self._HANDLED_TYPES + (GlobalOffsetArray,)):\n return NotImplemented\n\n # global offset to use in the result\n result = global_offset = None\n if len(inputs) == 2:\n left = inputs[0]\n right = inputs[1]\n\n try:\n smaller = larger = None\n left_in_right = left.is_contained_within(right)\n right_in_left = right.is_contained_within(left)\n\n if left_in_right and right_in_left:\n # same bounds/size\n global_offset = left.global_offset\n else:\n smaller = left if left_in_right else right\n larger = right if left_in_right else left\n\n sub_left = left[smaller.bounds()]\n sub_right = right[smaller.bounds()]\n\n sub_inputs = (sub_left, sub_right)\n sub_kwargs = {}\n\n if in_place:\n # only perform op if there are values to operate on\n if sub_left.size and sub_right.size:\n sub_kwargs[OUT] = tuple(o[right.bounds()] for o in kwargs[OUT])\n getattr(ufunc, method)(*sub_inputs, **sub_kwargs)\n\n result = left\n global_offset = left.global_offset\n else:\n if not left_in_right and not right_in_left:\n raise ValueError(\"Non-in-place operations on overlapping GlobalOffsetArrays unsupported. \"\n \"Left bounds: %s, Right bounds: %s\" % (left.bounds(), right.bounds()))\n # Return a copy of the larger operand and perform in place on the sub_array of that copy\n sample_type = type(getattr(ufunc, method)(sub_left.item(0), sub_right.item(1)))\n\n result = larger.astype(sample_type)\n sub_kwargs[OUT] = (result[smaller.bounds()])\n sub_result = getattr(ufunc, method)(*sub_inputs, **sub_kwargs)\n result[smaller.bounds()] = sub_result\n\n global_offset = larger.global_offset\n except AttributeError: # At least one of arguments is not a GlobalOffsetArray\n try:\n global_offset = left.global_offset\n except AttributeError: # Left is not a GlobalOffsetArray\n global_offset = right.global_offset\n\n inputs = (left, right)\n\n # Must defer to the implementation of the ufunc on unwrapped values to avoid infinite loop\n inputs = tuple(i.view(np.ndarray) if isinstance(i, GlobalOffsetArray) else i for i in inputs)\n if in_place:\n kwargs[OUT] = tuple(o.view(np.ndarray) if isinstance(o, GlobalOffsetArray) else o for o in kwargs[OUT])\n\n if result is None:\n result = getattr(ufunc, method)(*inputs, **kwargs)\n\n if type(result) is tuple:\n # multiple return values\n return tuple(type(self)(x, global_offset=global_offset) for x in result)\n elif method == 'at':\n # no return value\n return None\n else:\n # one return value\n return type(self)(result, global_offset=global_offset)", "title": "" }, { "docid": "cba3ffa3c4a883ae37da0460fb8545dd", "score": "0.48429114", "text": "def dummy_function():\n return", "title": "" }, { "docid": "43d8d8abcba09b7d0d5aecacd25f7c13", "score": "0.48396683", "text": "def test_gpukmeansumap_cudf():\n _create_context()\n\n n_molecules, dao, mol_df = _fetch_chembl_test_dataset()\n\n wf = GpuKmeansUmap(n_molecules=n_molecules,\n dao=dao, pca_comps=64)\n mol_df = mol_df.compute()\n wf.cluster(df_mol_embedding=mol_df)", "title": "" }, { "docid": "840c602699f933712748046d54c0281f", "score": "0.48286018", "text": "def test_basics_c( self ):\n self.do_gradually( _crc32 )\n self.do_basics( _crc32 )", "title": "" }, { "docid": "289dc2b90a17b6f4b4ac02d96b976540", "score": "0.48283702", "text": "def test_prob3numba(ignore_fails=False, define_as_ref=False):\n\n # Pull first test case to test calling `propagate_array`\n tc_name, tc = 
next(iter(TEST_CASES.items()))\n tc_ = deepcopy(tc)\n logging.info(\n \"Testing call and return shape of `propagate_array` with test case '%s'\",\n tc_name,\n )\n\n # Test simple broadcasting over `nubars` and `energies` where both have\n # same shape, as this is the \"typical\" use case\n input_shape = (4, 5)\n\n # Without broadcasting, a single probability matrix is 3x3\n prob_array_shape = (3, 3)\n\n # Broadcasted shape\n out_shape = input_shape + prob_array_shape\n\n nubars = np.full(shape=input_shape, fill_value=tc_[\"nubar\"], dtype=IX)\n energies = np.full(shape=input_shape, fill_value=tc_[\"energy\"], dtype=FX)\n\n # Fill with NaN to ensure all elements are assinged a value\n probabilities = np.full(shape=out_shape, fill_value=np.nan, dtype=FX)\n\n propagate_array(\n tc_[\"dm\"].astype(FX),\n tc_[\"pmns\"].astype(CX),\n tc_[\"mat_pot\"].astype(CX),\n nubars,\n energies,\n tc_[\"layer_densities\"].astype(FX),\n tc_[\"layer_distances\"].astype(FX),\n # output:\n probabilities,\n )\n\n # Check that all probability matrices have no NaNs and are equal to one\n # another\n ref_probs = probabilities[0, 0]\n for i in range(input_shape[0]):\n for j in range(input_shape[1]):\n probs = probabilities[i, j]\n assert np.all(np.isfinite(probs))\n assert np.all(probs == ref_probs)\n\n # Run all test cases\n for tc_name, tc in TEST_CASES.items():\n run_test_case(\n tc_name, tc, ignore_fails=ignore_fails, define_as_ref=define_as_ref\n )", "title": "" }, { "docid": "93f110550533402b9df7174bcd7f92c5", "score": "0.4826382", "text": "def test_ptrg(self):\n self.work_testc2(\n input = None,\n outtype = 'double **',\n )", "title": "" }, { "docid": "686d59700796b9ff03893f657cafd174", "score": "0.48228624", "text": "def test_err3(self):\n\n @functor_api\n def err3(data):\n return 1\n\n f(FunctorAPIData())", "title": "" } ]
afa6717e6fb11a1ff402ef058e0f7c37
Create binary array to pick out which idxs are accepted.
[ { "docid": "7b15995a47c10cea86fcd8a454f84d0c", "score": "0.0", "text": "def _get_accept_masks(accept_prob: tf.Tensor):\n accept_mask = tf.cast(\n accept_prob > tf.random.uniform(tf.shape(accept_prob)),\n dtype=TF_FLOAT,\n )\n reject_mask = 1. - accept_mask\n\n return accept_mask, reject_mask", "title": "" } ]
[ { "docid": "2239fb1e619ab3b14d59c8d1c33c740e", "score": "0.585864", "text": "def bit_ids(self):\r\n return self.bit_IFT[:, 0]", "title": "" }, { "docid": "46d00f8d3b3120319105711da9cbabac", "score": "0.57368064", "text": "def encode_bin(data: list, cat: set) -> np.ndarray:\n import numpy as np\n\n cat = list(cat)\n enc_list = np.zeros(len(cat), dtype='uint8')\n for e in data:\n enc_list[cat.index(e)] = 1\n \n return enc_list", "title": "" }, { "docid": "b236a6f1676cd24f844b86e349c8586f", "score": "0.5716696", "text": "def playoffID(year):\n ids = np.array([])\n for i in range(1, 5):\n for j in range(1, 8 // 2**(i - 1) + 1):\n for k in range(1, 8): # This is the only part where there can be false IDs\n ids = np.append(ids, str(year) + '03' + '0' + str(i) + str(j) + str(k))\n return ids", "title": "" }, { "docid": "1bc46d790e7061ce0d35dbd61989377b", "score": "0.5716071", "text": "def list_binary(length):\n return np.array([\"\".join(seq) for seq in it.product(\"01\", repeat=length)])", "title": "" }, { "docid": "0c85a730fcb4b1cedbe0ab814d8b7643", "score": "0.56233704", "text": "def __binary_data_array(self):\n self.data_array = []\n __is_down_to_up = True\n j = len(self.__unmask_matrix[0]) - 1 # cols\n while j > 0:\n if __is_down_to_up:\n for i in range(len(self.__unmask_matrix) - 1, -1, -1): # rows\n self.__fill_array(i, j)\n __is_down_to_up = False\n else:\n for i in range(len(self.__unmask_matrix)):\n self.__fill_array(i, j)\n __is_down_to_up = True\n if j is 8:\n j = j - 3\n else:\n j = j - 2\n return self.data_array", "title": "" }, { "docid": "054de8f71308db1b4d942bd31b158a80", "score": "0.55675966", "text": "def ids(self):\n if len(self) == 0:\n return np.array([])\n ids = []\n for (_, _, _, ids_b) in self.itershards():\n ids.append(np.atleast_1d(np.squeeze(ids_b)))\n return np.concatenate(ids)", "title": "" }, { "docid": "b8fce4a443a57000d08ececfe3d3e27f", "score": "0.55412513", "text": "def bitarray(bytes_array):\n ba = []\n index = 1\n for byte in bytes_array:\n for bit in range(7):\n if byte & 1 << bit:\n ba.append(index)\n index += 1\n return ba", "title": "" }, { "docid": "29d07fb370077bbf301b6f5e52e8ec37", "score": "0.54997665", "text": "def toBitArray(self):\n\t\treturn self._toBitArray(BitArray())", "title": "" }, { "docid": "f5e37a2240e7b45137f3c86cee9ebc5c", "score": "0.5469191", "text": "def gen_inputs(self):\n\n # compute all possible input bit arrays\n inputs = [list(seq) for seq in \n itertools.product([False, True], repeat=self.num_bits)]\n\n return inputs", "title": "" }, { "docid": "f3f24667614b2167ee83a2ab57877d93", "score": "0.54678345", "text": "def create_selectors(active_stations, n_stations):\n selectors = np.zeros(n_stations, dtype=int)\n for i in active_stations:\n selectors[i] = 1\n return selectors.tolist()", "title": "" }, { "docid": "0585b4532532508fbfc1492f6ea3931a", "score": "0.54325104", "text": "def ids(self):\n if len(self) == 0:\n return np.array([])\n ids = []\n for (_, _, _, ids_b) in self.itershards():\n ids.append(np.atleast_1d(np.squeeze(ids_b)))\n return np.concatenate(ids)", "title": "" }, { "docid": "e33cda2f76245bdf2fdbefe9cb8fface", "score": "0.54246455", "text": "def hand_to_binary(card_strs: Sequence[str]) -> list[int]:\n bhand = []\n for c in card_strs:\n bhand.append(Card.new(c))\n return bhand", "title": "" }, { "docid": "08d03d445f1492ff93ec429466944b20", "score": "0.5406121", "text": "def uniqe_binary_words(self):\n for word in self.word_list:\n #create new arr\n b_word=[[0]*self.q for _ in range(self.n)]\n for i,num in 
enumerate(word):\n #some nums are 0.0 which is float which is bad\n b_word[i][int(num)]=1\n one_list=[item for sublist in b_word for item in sublist] \n self.binary_word_list.append(one_list)", "title": "" }, { "docid": "34fb945f3d69d5bd6c24d08dc9bbe796", "score": "0.5398654", "text": "def get_input_collections_as_binary_arrays(input_collections: Iterable[Iterable[any]]) -> Tuple[List, List]:\n # First get complete set of input elements\n all_input_elements = list(sorted(set([el for row in input_collections for el in row])))\n # For each input collection, create array with 1 for element exists or 0 for not exists\n # (extension: count the number of elements - for use with multiple element counting version)\n binarised_input_collections = [tf.Variable([1 if el in row else 0 for el in all_input_elements], dtype=tf.int32)\n for row in input_collections]\n print('Binarised input rows')\n return all_input_elements, binarised_input_collections", "title": "" }, { "docid": "c50aa972d63c3e44c90453fc91faaca6", "score": "0.5391031", "text": "def getDecisionBytes(self):\n return []", "title": "" }, { "docid": "0e632825ba4d3339c97635ea385abffd", "score": "0.537347", "text": "def encode_binary_arr(input_arr):\n # the first bit\n b = input_arr[0]\n # the runs\n runs = count_binary_runs(input_arr)\n # build the bit array\n a = bitarray()\n a.append(b)\n for run in runs:\n run = int(run)\n if run == 1:\n a.extend(B00)\n continue\n if run == 2:\n a.extend(B01)\n continue\n if run == 3:\n a.extend(B10)\n continue\n # run is 4 or more\n a.extend(B11)\n # now record number of bits for the run\n bl = run.bit_length()\n a.extend(int2ba(bl, NUM_BITS_RUN_LEN))\n # now record the run\n a.extend(int2ba(run))\n return a", "title": "" }, { "docid": "ca0711c8ad53e882b118bfea44579a39", "score": "0.53690076", "text": "def create_n(N):\n\n all_n = np.array([])\n max_bin_len = len(bin(2 ** N - 1)[2:]) # need this for 2 -> 010 instead of 10\n\n for i in range(2**N):\n all_n = np.append(all_n, bin(i)[2:].zfill(max_bin_len))\n\n return all_n", "title": "" }, { "docid": "b8d78f4bd8a62a7960efd111fdc0de99", "score": "0.53677636", "text": "def bits_to_array(bits: str, n: int, minv: float, maxv: float):\n arr = np.zeros(n)\n k = len(bits) // n\n for i in range(n):\n num = int(bits[i * k:(i + 1) * k], 2)\n num /= (2**k-1)\n arr[i] = num*(maxv-minv) + minv\n return arr", "title": "" }, { "docid": "bf180d9fe5bac15dc9f46fbd740b2d8a", "score": "0.53379965", "text": "def IntArr(prefix, sz, N):\n return [ BitVec('%s__%s' % (prefix, i), sz) for i in range(N) ]", "title": "" }, { "docid": "bac7eac3710f0b57b7afcaa165ab5027", "score": "0.53350055", "text": "def as_input(self) -> np.ndarray:\n output = np.zeros(98, dtype=np.uint8)\n s = self.state\n for i in range(42):\n if s[i] == 1:\n output[2*i] = 1\n elif s[i] == -1:\n output[2*i + 1] = 1\n return output", "title": "" }, { "docid": "01519bdc39599a55a072124eab0f3d9d", "score": "0.53345585", "text": "def bytearray_to_many_hot(array):\n masks = [2**7, 2**6, 2**5, 2**4, 2**3, 2**2, 2**1, 1]\n bit_vector = []\n for a in array:\n for mask in masks:\n bit_vector.append((a & mask) > 0)\n return np.array(bit_vector, dtype=np.uint8)", "title": "" }, { "docid": "bc7f19e009210db03bf52fb7352a9935", "score": "0.5334291", "text": "def regularID(year):\n ids = np.array([])\n if year > 2016:\n num = 1271\n else:\n num = 1230\n for i in range(1, num + 1):\n ids = np.append(ids, str(year) + '02' + str(i).zfill(4))\n return ids", "title": "" }, { "docid": "bda979e98065da6b420242d742aef426", "score": 
"0.5303551", "text": "def generate_binary(n):\r\n\tbin_arr = range(0, int(np.power(2,n)))\r\n\tbin_arr = [bin(i)[2:] for i in bin_arr]\r\n\tmax_len = len(max(bin_arr, key=len))\r\n\tbin_arr = [i.zfill(max_len) for i in bin_arr]\r\n\treturn bin_arr", "title": "" }, { "docid": "34425993530a76c7e008b38c22cd6150", "score": "0.5253228", "text": "def binaryArray(self, num, m):\n return (np.array(list(np.binary_repr(num).zfill(m))).astype(np.int8)).tolist()", "title": "" }, { "docid": "0e685402a81cb64b22a938d66cf4e9bb", "score": "0.5250867", "text": "def subsets(self, nums: List[int]) -> List[List[int]]:\n n = len(nums)\n res = []\n for i in range(2 ** n, 2 ** (n + 1)):\n mask = bin(i)[3:]\n res.append([nums[j] for j in range(n) if mask[j] == '1'])\n return res", "title": "" }, { "docid": "dea0d3e64a0f61c88389ec4de957c383", "score": "0.5248047", "text": "def bitwise_array(value):\n if np.isscalar(value):\n return value\n val = np.asarray(value)\n return [base64.b64encode(val), val.shape, val.dtype.str]", "title": "" }, { "docid": "0a0b14c44cb521986200032787778822", "score": "0.52476174", "text": "def createGameID(year, gameTypes):\n ids = np.array([])\n for y in year:\n for g in gameTypes:\n ids = np.append(ids, createGameNum(y, g))\n return ids", "title": "" }, { "docid": "1f4896ec180165f5d24e3f7c1232a704", "score": "0.5207223", "text": "def eight():\n return [\n [0, 1, 1, 0],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0],\n ]", "title": "" }, { "docid": "67f3e2258c19e112baebca9a51285aa9", "score": "0.5177024", "text": "def seq_to_bitlist(seq):\n if not seq:\n return []\n if isinstance(seq, str):\n return map(is_nonzero_string_int, seq)\n else:\n return map(is_nonzero_int, seq)", "title": "" }, { "docid": "92aec596265535e4739e3bdadf04d872", "score": "0.51719564", "text": "def create_array():\n \n thefile = open('C:/Documents and Settings/steven/repos/Python-Codes/Algo_One_Stanford/assignment_1_IntegerArray.txt'\n, 'r')\n \n ans = []\n \n for line in thefile:\n if len(line[:-1])!= 0:\n ans.append(int(line[:-1]))\n \n return ans", "title": "" }, { "docid": "e65742abb33cb73b61663da5e30e2264", "score": "0.5166966", "text": "def binary_var_list(size, name=None):\n return integer_var_list(size, BINARY_DOMAIN, name=name)", "title": "" }, { "docid": "f230639e4c6656bc478d4b44120313e7", "score": "0.5151411", "text": "def make_bitarray_seq(self, n_0, n_m):\n seq_bits = bitarray()\n for site in xrange(0, self.nbss):\n for indiv in xrange(n_0, n_m):\n seq_bits.extend(self.sim.getSite(site,indiv))\n return seq_bits", "title": "" }, { "docid": "34464d21c8f6001aaa8c8c2a22d77ad0", "score": "0.51510555", "text": "def vb_decode(arr):\n n_list = []\n n = 0\n for b in arr:\n n = (n << 7) | (b & 127)\n if b & 128:\n n_list.append(n)\n n = 0\n return n_list", "title": "" }, { "docid": "263c35e9d8148e6806a187e7507271d7", "score": "0.51446116", "text": "def bytes2bits(d):\n\n # Note that the comprehension returns a list of bools; use array\n # to cast it to ints. 
The bools work without the cast since Pyton\n # treats True == 1, but just to play it safe....\n return array([(d & mask) != 0 for mask in masks], dtype=int)", "title": "" }, { "docid": "abf46ffb3c058e3533fdc309f13ba1e7", "score": "0.51430213", "text": "def gen_all_holds(hand):\r\n ans = set([()])\r\n if len(hand) == 0:\r\n return ans\r\n \r\n masks = gen_all_sequences([0,1], len(hand))\r\n for mask in masks:\r\n idx = 0\r\n new_seq = list()\r\n for dummy_item in mask:\r\n if dummy_item != 0:\r\n new_seq.append(hand[idx])\r\n idx += 1\r\n else:\r\n idx += 1\r\n ans.add(tuple(new_seq))\r\n \r\n \r\n return ans\r\n #return set([()])\r", "title": "" }, { "docid": "272557c6cc176140c51c768ae6754c14", "score": "0.5128276", "text": "def ReadIDsForAllSignedBinaries(self):", "title": "" }, { "docid": "7eaed453824eadb49f17530b5817c641", "score": "0.5124349", "text": "def hand_to_binary(card_strs):\r\n bhand = []\r\n for c in card_strs:\r\n bhand.append(Card.new(c))\r\n return bhand", "title": "" }, { "docid": "214b7ec11fc0a001f636c455686ff96d", "score": "0.51189196", "text": "def create_candidates(dataset):\n log.info('create_candidates')\n\n c1 = set([])\n for transaction in dataset:\n c1.update(transaction)\n\n return [frozenset([_]) for _ in c1]", "title": "" }, { "docid": "59c6b9eeffcdc805b7ff092791f3923a", "score": "0.5118251", "text": "def generateCardArr(self):\n\n dk = []\n for i, x in enumerate(self.partition):\n dk.extend([i] * x)\n return dk", "title": "" }, { "docid": "937c1b2a1f1fe0abd12dce6599056965", "score": "0.51127744", "text": "def encode_recessive(self) -> pd.arrays.IntegerArray:\n # TODO: Return multiple arrays for multiple alternate alleles?\n if len(self.variant.alleles) > 2:\n raise ValueError(\"Recessive encoding can only be used with one allele\")\n\n all_minor = (self.allele_idxs == 1).all(axis=1).astype(\"float\")\n all_minor[(self.allele_idxs == MISSING_IDX).any(axis=1)] = np.nan\n result = pd.array(data=all_minor, dtype=\"UInt8\")\n return result", "title": "" }, { "docid": "6ebf6ed9d17a6396cad6731878c11480", "score": "0.5108371", "text": "def binary_data():\n pool = range(0, 256)\n return random_data(pool)", "title": "" }, { "docid": "427e6bbaf6a5f3ec8f20a647fa5fb512", "score": "0.5102198", "text": "def get_bin_array(f_name):\n raw_signal = get_raw_signal(f_name)\n bit_length, raw_signal = get_bit_length(raw_signal)\n bin_arr = sample_signal(raw_signal, bit_length)\n\n if not check_preamble(bin_arr[:preamble_length]):\n print(\"Preamble incorrect...\")\n sys.exit()\n bin_arr = bin_arr[preamble_length:] # remove preamble\n\n return bin_arr", "title": "" }, { "docid": "6c9d5e4f54f065b0ba980a147df3c600", "score": "0.5091926", "text": "def get_bits(num, gen):\n out = 0\n for i in range(num):\n out <<= 1\n val = gen.next()\n if val != []:\n out += val & 0x01\n else:\n return []\n\n return out", "title": "" }, { "docid": "3886dd09a81d5088a29de4c4ad57a8f2", "score": "0.50887614", "text": "def parse_bits(self) -> list[bool]:\n bits = f\"{int(self.data, 16):016b}\"\n return [bool(int(x)) for x in bits]", "title": "" }, { "docid": "1f3b7256344bd149c414d60a94588da5", "score": "0.50688004", "text": "def bin(n):\n result = [];\n while n != 0:\n result = [1 & n] + result;\n n = n >> 1;\n return result;", "title": "" }, { "docid": "fd150f67f0650f64f34326d0634550e9", "score": "0.50611687", "text": "def idx_array(self):\n idx, cnts = self.split_data()\n start = 0\n idx_data = []\n for end in idx:\n if np.sum(cnts[start:end]):\n # indices for sample regions containing ANY neutral data 
(final column is a boolean flag)\n idx_data.append([start, end, 1])\n else:\n # indices for sample regions with NO data\n idx_data.append([start, end, 0])\n start = end\n return np.array(idx_data)", "title": "" }, { "docid": "363b70b07de516b0cfda09da1c5bf65f", "score": "0.50541806", "text": "def createCode(self):\n root = heapq.heappop(self.freqHeap) # remove from tree\n code = bitarray()\n self.traverse(root, code) # recursion to leaf, adding 0s and 1s\n # print(\"SELF.CODES = \", self.codes)\n return self.codes", "title": "" }, { "docid": "0d4e022f6840e65bedc4ef4b4785b309", "score": "0.50489426", "text": "def encode_as_binary_array(msg):\n msg = msg.encode(\"utf-8\")\n msg = msg.hex()\n msg = [msg[i:i + 2] for i in range(0, len(msg), 2)]\n msg = [bin(int(el, base=16))[2:] for el in msg]\n msg = [\"0\" * (8 - len(el)) + el for el in msg]\n return \"\".join(msg)", "title": "" }, { "docid": "4d642f95e62013b85c0d06ee8d02010e", "score": "0.5028773", "text": "def deconstruct_flag(flagval):\n bit_list = [1, 2, 4, 8, 16, 32, 64, 128]\n flagval = int(flagval)\n #out_bit_list = []\n out_idx_list = np.zeros(9,dtype=int)\n if flagval == 0:\n #out_bit_list = [0]\n out_idx_list[0] =1\n if flagval > 0:\n idx =1\n for bit in bit_list:\n if flagval & bit > 0:\n #out_bit_list.append(bit)\n out_idx_list[idx] = 1\n if bit > flagval: break\n idx += 1\n return (out_idx_list)", "title": "" }, { "docid": "fd820121e60d427e6d6e21b70807f6c7", "score": "0.50272053", "text": "def ToArray(self):\n pass", "title": "" }, { "docid": "c5b27235fbd20e6d3bce1436f2631f26", "score": "0.50209373", "text": "def generate_bit(text: str) -> List[str]:\n text = list(bytes(str(text), 'utf-8'))\n b_text = []\n for x in text:\n #code here\n b = bin(x)[2:]\n if len(b) < 8:\n for _ in range(8 - len(b)):\n b = '0' + b\n #print(len(b), b)\n byte = list(b)\n for bit in byte:\n b_text.append(bit)\n return b_text", "title": "" }, { "docid": "f0a0484de4607a7b824b5bb9b1aa7685", "score": "0.50160867", "text": "def _get_all_ids(target_id: str) -> list:\n rvalue = {target_id: True, \"*\": True}\n length = len(target_id)\n # Compute all N-grams\n for N in range(length):\n # Compute N-grams\n size = length - N\n span = N + 1\n rvalue[\"*\" + target_id[:span] + \"*\"] = True\n rvalue[target_id[:span] + \"*\"] = True\n for i in range(1, size - 1):\n rvalue[\"*\" + target_id[i:i + span] + \"*\"] = True\n rvalue[\"*\" + target_id[size - 1:size - 1 + span]] = True\n rvalue[\"*\" + target_id[size - 1:size - 1 + span] + \"*\"] = True\n\n return list(rvalue.keys())", "title": "" }, { "docid": "3665fde1f9b172eb493f22e817afec93", "score": "0.5007764", "text": "def get_array(self):\n arr = np.zeros(self.length)\n for elem in self.cars:\n arr[elem.position] = 1\n return arr", "title": "" }, { "docid": "4ac4e1250cf20c3114ac1497bf22413a", "score": "0.50035816", "text": "def get_case_data(self):\n return [\n # i32x4\n ['#', 'i32x4'],\n [\"not\", ['0'], ['-1'], ['i32x4', 'i32x4']],\n [\"not\", ['-1'], ['0'], ['i32x4', 'i32x4']],\n [\"not\", [['-1', '0', '-1', '0']], [['0', '-1', '0', '-1']], ['i32x4', 'i32x4']],\n [\"not\", [['0', '-1', '0', '-1']], [['-1', '0', '-1', '0']], ['i32x4', 'i32x4']],\n [\"not\", ['0x55555555'], ['0xAAAAAAAA'], ['i32x4', 'i32x4']],\n [\"not\", ['3435973836'], ['858993459'], ['i32x4', 'i32x4']],\n ['not', ['01_234_567_890'], ['3060399405'], ['i32x4', 'i32x4']],\n ['not', ['0x0_1234_5678'], ['0xedcba987'], ['i32x4', 'i32x4']],\n [\"and\", [['0', '-1'], ['0', '-1', '0', '-1']], [['0', '0', '0', '-1']], ['i32x4', 'i32x4', 'i32x4']],\n 
[\"and\", ['0', '0'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['0', '-1'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['0', '0xFFFFFFFF'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['1', '1'], ['1'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['255', '85'], ['85'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['255', '128'], ['128'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['2863311530', ['10', '128', '5', '165']], [['10', '128', '0', '160']],\n ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['0xFFFFFFFF', '0x55555555'], ['0x55555555'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['0xFFFFFFFF', '0xAAAAAAAA'], ['0xAAAAAAAA'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['0xFFFFFFFF', '0x0'], ['0x0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"and\", ['0x55555555', ['0x5555', '0xFFFF', '0x55FF', '0x5FFF']], ['0x5555'],\n ['i32x4', 'i32x4', 'i32x4']],\n ['and', ['01_234_567_890', '01_234_567_890'], ['1234567890'], ['i32x4', 'i32x4', 'i32x4']],\n ['and', ['0x0_1234_5678', '0x0_90AB_cdef'], ['0x10204468'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", [['0', '0', '-1', '-1'], ['0', '-1', '0', '-1']], [['0', '-1', '-1', '-1']],\n ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['0', '0'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['0', '-1'], ['-1'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['0', '0xFFFFFFFF'], ['0xFFFFFFFF'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['1', '1'], ['1'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['255', '85'], ['255'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['255', '128'], ['255'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['2863311530', ['10', '128', '5', '165']], [['2863311530', '2863311535']],\n ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['0xFFFFFFFF', '0x55555555'], ['0xFFFFFFFF'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['0xFFFFFFFF', '0xAAAAAAAA'], ['0xFFFFFFFF'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['0xFFFFFFFF', '0x0'], ['0xFFFFFFFF'], ['i32x4', 'i32x4', 'i32x4']],\n [\"or\", ['0x55555555', ['0x5555', '0xFFFF', '0x55FF', '0x5FFF']],\n [['0x55555555', '0x5555ffff', '0x555555ff', '0x55555fff']],\n ['i32x4', 'i32x4', 'i32x4']],\n ['or', ['01_234_567_890', '01_234_567_890'], ['1234567890'], ['i32x4', 'i32x4', 'i32x4']],\n ['or', ['0x0_1234_5678', '0x0_90AB_cdef'], ['0x92bfdfff'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", [['0', '0', '-1', '-1'], ['0', '-1', '0', '-1']], [['0', '-1', '-1', '0']],\n ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['0', '0'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['0', '-1'], ['-1'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['0', '0xFFFFFFFF'], ['0xFFFFFFFF'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['1', '1'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['255', '85'], ['170'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['255', '128'], ['127'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['2863311530', ['10', '128', '5', '165']],\n [['2863311520', '2863311402', '2863311535', '2863311375']],\n ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['0xFFFFFFFF', '0x55555555'], ['0xAAAAAAAA'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['0xFFFFFFFF', '0xAAAAAAAA'], ['0x55555555'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['0xFFFFFFFF', '0x0'], ['0xFFFFFFFF'], ['i32x4', 'i32x4', 'i32x4']],\n [\"xor\", ['0x55555555', ['0x5555', '0xFFFF', '0x55FF', '0x5FFF']],\n [['0x55550000', '0x5555AAAA', '0x555500AA', '0x55550AAA']],\n ['i32x4', 'i32x4', 'i32x4']],\n ['xor', ['01_234_567_890', '01_234_567_890'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n ['xor', ['0x0_1234_5678', '0x0_90AB_cdef'], ['0x829f9b97'], ['i32x4', 'i32x4', 'i32x4']],\n 
[\"bitselect\", ['0xAAAAAAAA', '0xBBBBBBBB',\n ['0x00112345', '0xF00FFFFF', '0x10112021', '0xBBAABBAA']],\n [['0xBBAABABA', '0xABBAAAAA', '0xABAABBBA', '0xAABBAABB']],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n [\"bitselect\", ['0xAAAAAAAA', '0xBBBBBBBB', '0x00000000'], ['0xBBBBBBBB'],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n [\"bitselect\", ['0xAAAAAAAA', '0xBBBBBBBB', '0x11111111'], ['0xAAAAAAAA'],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n [\"bitselect\", ['0xAAAAAAAA', '0xBBBBBBBB',\n ['0x01234567', '0x89ABCDEF', '0xFEDCBA98', '0x76543210']],\n [['0xBABABABA', '0xABABABAB']],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n [\"bitselect\", ['0xAAAAAAAA', '0x55555555',\n ['0x01234567', '0x89ABCDEF', '0xFEDCBA98', '0x76543210']],\n [['0x54761032', '0xDCFE98BA', '0xAB89EFCD', '0x23016745']],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n [\"bitselect\", ['0xAAAAAAAA', '0x55555555',\n ['0x55555555', '0xAAAAAAAA', '0x00000000', '0xFFFFFFFF']],\n [['0x00000000', '0xFFFFFFFF', '0x55555555', '0xAAAAAAAA']],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n ['bitselect', ['01_234_567_890', '03_060_399_406', '0xcdefcdef'], ['2072391874'],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n ['bitselect', ['0x0_1234_5678', '0x0_90AB_cdef', '0xcdefcdef'], ['0x10244468'],\n ['i32x4', 'i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", [['0', '-1'], ['0', '-1', '0', '-1']], [['0', '0', '-1', '0']], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['0', '0'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['0', '-1'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['0', '0xFFFFFFFF'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['1', '1'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['255', '85'], ['170'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['255', '128'], ['127'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['2863311530', ['10', '128', '5', '165']], [['2863311520', '2863311402', '2863311530', '2863311370']],\n ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['0xFFFFFFFF', '0x55555555'], ['0xAAAAAAAA'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['0xFFFFFFFF', '0xAAAAAAAA'], ['0x55555555'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['0xFFFFFFFF', '0x0'], ['0xFFFFFFFF'], ['i32x4', 'i32x4', 'i32x4']],\n [\"andnot\", ['0x55555555', ['0x5555', '0xFFFF', '0x55FF', '0x5FFF']], ['0x55550000'],\n ['i32x4', 'i32x4', 'i32x4']],\n ['andnot', ['01_234_567_890', '01_234_567_890'], ['0'], ['i32x4', 'i32x4', 'i32x4']],\n ['andnot', ['0x0_1234_5678', '0x0_90AB_cdef'], ['0x02141210'], ['i32x4', 'i32x4', 'i32x4']],\n\n ['#', 'for float special data [e.g. 
-nan nan -inf inf]'],\n [\"not\", ['-nan'], ['5.87747e-39'], ['f32x4', 'f32x4']],\n [\"not\", ['nan'], ['-5.87747e-39'], ['f32x4', 'f32x4']],\n [\"not\", ['-inf'], ['0x007fffff'], ['f32x4', 'i32x4']],\n [\"not\", ['inf'], ['0x807fffff'], ['f32x4', 'i32x4']],\n\n [\"and\", ['-nan', '-nan'], ['0xffc00000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"and\", ['-nan', 'nan'], ['nan'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['-nan', '-inf'], ['-inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['-nan', 'inf'], ['inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['nan', 'nan'], ['nan'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['nan', '-inf'], ['inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['nan', 'inf'], ['inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['-inf', '-inf'], ['-inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['-inf', 'inf'], ['inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"and\", ['inf', 'inf'], ['inf'], ['f32x4', 'f32x4', 'f32x4']],\n\n [\"or\", ['-nan', '-nan'], ['0xffc00000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"or\", ['-nan', 'nan'], ['0xffc00000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"or\", ['-nan', '-inf'], ['0xffc00000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"or\", ['-nan', 'inf'], ['0xffc00000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"or\", ['nan', 'nan'], ['nan'], ['f32x4', 'f32x4', 'f32x4']],\n [\"or\", ['nan', '-inf'], ['0xffc00000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"or\", ['nan', 'inf'], ['nan'], ['f32x4', 'f32x4', 'f32x4']],\n [\"or\", ['-inf', '-inf'], ['-inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"or\", ['-inf', 'inf'], ['-inf'], ['f32x4', 'f32x4', 'f32x4']],\n [\"or\", ['inf', 'inf'], ['inf'], ['f32x4', 'f32x4', 'f32x4']],\n\n [\"xor\", ['-nan', '-nan'], ['0'], ['f32x4', 'f32x4', 'f32x4']],\n [\"xor\", ['-nan', 'nan'], ['-0'], ['f32x4', 'f32x4', 'f32x4']],\n [\"xor\", ['-nan', '-inf'], ['0x00400000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"xor\", ['-nan', 'inf'], ['0x80400000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"xor\", ['nan', 'nan'], ['0'], ['f32x4', 'f32x4', 'f32x4']],\n [\"xor\", ['nan', '-inf'], ['0x80400000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"xor\", ['nan', 'inf'], ['0x00400000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"xor\", ['-inf', '-inf'], ['0'], ['f32x4', 'f32x4', 'f32x4']],\n [\"xor\", ['-inf', 'inf'], ['0x80000000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"xor\", ['inf', 'inf'], ['0'], ['f32x4', 'f32x4', 'f32x4']],\n\n [\"bitselect\", ['-nan', '-nan','0xA5A5A5A5'], ['0xffc00000'], ['f32x4', 'f32x4', 'f32x4', 'i32x4']],\n [\"bitselect\", ['-nan', 'nan','0xA5A5A5A5'], ['nan'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['-nan', '-inf','0xA5A5A5A5'], ['-inf'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['-nan', 'inf','0xA5A5A5A5'], ['inf'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['nan', 'nan','0xA5A5A5A5'], ['nan'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['nan', '-inf','0xA5A5A5A5'], ['-inf'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['nan', 'inf','0xA5A5A5A5'], ['inf'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['-inf', '-inf','0xA5A5A5A5'], ['-inf'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['-inf', 'inf','0xA5A5A5A5'], ['inf'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n [\"bitselect\", ['inf', 'inf','0xA5A5A5A5'], ['inf'], ['f32x4', 'f32x4', 'f32x4', 'f32x4']],\n\n [\"andnot\", ['-nan', '-nan'], ['0x00000000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"andnot\", ['-nan', 'nan'], ['-0'], ['f32x4', 'f32x4', 'f32x4']],\n [\"andnot\", ['-nan', '-inf'], ['0x00400000'], ['f32x4', 
'f32x4', 'i32x4']],\n [\"andnot\", ['-nan', 'inf'], ['0x80400000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"andnot\", ['nan', 'nan'], ['0x00000000'], ['f32x4', 'f32x4', 'f32x4']],\n [\"andnot\", ['nan', '-inf'], ['0x00400000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"andnot\", ['nan', 'inf'], ['0x00400000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"andnot\", ['-inf', '-inf'], ['0x00000000'], ['f32x4', 'f32x4', 'f32x4']],\n [\"andnot\", ['-inf', 'inf'], ['0x80000000'], ['f32x4', 'f32x4', 'i32x4']],\n [\"andnot\", ['inf', 'inf'], ['0x00000000'], ['f32x4', 'f32x4', 'i32x4']]\n ]", "title": "" }, { "docid": "c1817be91922e2277b0dcf8d9e0445e4", "score": "0.5003252", "text": "def encode(self, plain:List[int], seed:int) -> List[int]:\n\t\tencoded = [0]*256\n\t\tfor i in range(256):\n\t\t\tindexVar = i*8+seed\n\t\t\tfor b in range(8):\n\t\t\t\tif ((plain[i]) & (1<<b)):\n\t\t\t\t\tindex = self.encodeMap[(b+indexVar)%2048]\n\t\t\t\t\tindex8 = int(index/8)\n\t\t\t\t\tencoded[index8] = encoded[index8] + (1<<(index%8))\n\t\treturn encoded", "title": "" }, { "docid": "bb733f08ab2012bcad797bef5b3843cd", "score": "0.4981728", "text": "def id_list(self):\n return numpy.array(self.analog_signals.keys())", "title": "" }, { "docid": "3466061f63385b8fcde861e900f9830b", "score": "0.49754524", "text": "def _validation_set(self) -> List[str]:\n return (\n \"A00001,A00002,A00003,A00004,A00005,A00006,A00007,A00008,A00009,A00010,\"\n \"A00011,A00012,A00013,A00014,A00015,A00016,A00017,A00018,A00019,A00020,\"\n \"A00021,A00022,A00023,A00024,A00025,A00026,A00027,A00028,A00029,A00030,\"\n \"A00031,A00032,A00033,A00034,A00035,A00036,A00037,A00038,A00039,A00040,\"\n \"A00041,A00042,A00043,A00044,A00045,A00046,A00047,A00048,A00049,A00050,\"\n \"A00051,A00052,A00053,A00054,A00055,A00056,A00057,A00058,A00059,A00060,\"\n \"A00061,A00062,A00063,A00064,A00065,A00066,A00067,A00068,A00069,A00070,\"\n \"A00071,A00072,A00073,A00074,A00075,A00076,A00077,A00078,A00079,A00080,\"\n \"A00081,A00082,A00083,A00084,A00085,A00086,A00087,A00088,A00089,A00090,\"\n \"A00091,A00092,A00093,A00094,A00095,A00096,A00097,A00098,A00099,A00100,\"\n \"A00101,A00102,A00103,A00104,A00105,A00106,A00107,A00108,A00109,A00110,\"\n \"A00111,A00112,A00113,A00114,A00115,A00116,A00117,A00118,A00119,A00120,\"\n \"A00121,A00122,A00123,A00124,A00125,A00126,A00127,A00128,A00129,A00130,\"\n \"A00131,A00132,A00133,A00134,A00135,A00136,A00137,A00138,A00139,A00140,\"\n \"A00141,A00142,A00143,A00144,A00145,A00146,A00147,A00148,A00149,A00150,\"\n \"A00151,A00152,A00153,A00154,A00155,A00156,A00157,A00158,A00159,A00160,\"\n \"A00161,A00162,A00163,A00164,A00165,A00166,A00167,A00168,A00169,A00170,\"\n \"A00171,A00172,A00173,A00174,A00175,A00176,A00177,A00178,A00179,A00180,\"\n \"A00181,A00182,A00183,A00184,A00185,A00186,A00187,A00188,A00189,A00190,\"\n \"A00191,A00192,A00193,A00194,A00195,A00196,A00197,A00198,A00199,A00200,\"\n \"A00201,A00202,A00203,A00204,A00205,A00206,A00207,A00208,A00209,A00210,\"\n \"A00211,A00212,A00213,A00214,A00215,A00216,A00217,A00218,A00219,A00220,\"\n \"A00221,A00222,A00223,A00224,A00225,A00226,A00227,A00228,A00229,A00230,\"\n \"A00231,A00232,A00233,A00234,A00235,A00236,A00237,A00238,A00239,A00240,\"\n \"A00241,A00242,A00244,A00245,A00247,A00248,A00249,A00253,A00267,A00271,\"\n \"A00301,A00321,A00375,A00395,A00397,A00405,A00422,A00432,A00438,A00439,\"\n \"A00441,A00456,A00465,A00473,A00486,A00509,A00519,A00520,A00524,A00542,\"\n \"A00551,A00585,A01006,A01070,A01246,A01299,A01521,A01567,A01707,A01727,\"\n 
\"A01772,A01833,A02168,A02372,A02772,A02785,A02833,A03549,A03738,A04086,\"\n \"A04137,A04170,A04186,A04216,A04282,A04452,A04522,A04701,A04735,A04805\"\n ).split(\",\")", "title": "" }, { "docid": "8c346313213e10d6330e295e1629ef43", "score": "0.49739546", "text": "def _scheme_matches_base_array(array):", "title": "" }, { "docid": "6c9734a98eb60daba658d664b7dfa61b", "score": "0.49625647", "text": "def Bits(seq: int, length: int) -> array.array:\n b = bytes(format(seq, \"b\"), \"ascii\")\n tab = bytes.maketrans(b\"01\", b\"\\xff\\x01\")\n res = array.array(\"b\", [-1]) * (length - len(b))\n res.frombytes(b.translate(tab))\n res.reverse()\n return res", "title": "" }, { "docid": "2dac1e4ae85cbb565b2c1eb6a88cc549", "score": "0.49570727", "text": "def filter_valid(self, keys):\n return np.fromiter(filter(lambda id: id in self.embed.ind, keys), dtype=np.int32)\n # return np.fromiter((key for key in keys if key in self.embed.ind), dtype=np.int32)", "title": "" }, { "docid": "0a70bf51e790e3d2ee306ee9bd7f7e5f", "score": "0.4953", "text": "def _create_id_array(dataobject, attributeType):\n\tif not dataobject:\n\t\traise RuntimeError (\"dataobject cannot be None\")\n\tif dataobject.IsA(\"vtkCompositeDataSet\"):\n\t\tids = []\n\t\tfor ds in dataobject:\n\t\t\tids.append(_create_id_array(ds, attributeType))\n\t\treturn dsa.VTKCompositeDataArray(ids)\n\telse:\n\t\treturn dsa.VTKArray(\\\n\t\t\t\tnp.arange(dataobject.GetNumberOfElements(attributeType)))", "title": "" }, { "docid": "ce15fa769d05dd5961e06c1276dd85f0", "score": "0.4952757", "text": "def gen_x_arr(self):\n arr = np.array([\n self.x11(),\n self.x12(),\n self.x13(),\n self.x14(),\n self.x15(),\n self.x16()])\n\n return arr", "title": "" }, { "docid": "5592ebf75cbc54080d4eaf025624defb", "score": "0.494987", "text": "def get_array_ids(self, num):\n return np.arange(num)", "title": "" }, { "docid": "3592ae0b40510a4080bb61570597d4e7", "score": "0.49389845", "text": "def create_mask(head_size, n_heads, selected_heads):\n mask = np.zeros(n_heads)\n for h in selected_heads:\n mask[int(h)] = 1\n \n return np.repeat(mask, head_size)", "title": "" }, { "docid": "78ad3ccc72db7d45ac0152a30df7614a", "score": "0.49345985", "text": "def bits(self):\n results = {}\n for i in self._bits:\n # Hack to filter out EPR pairs\n if len(i) < 15:\n results[i] = self._bits[i]\n return results", "title": "" }, { "docid": "76637c74dea7a1f6ddc0c2255230f82b", "score": "0.49299204", "text": "def gen_all_holds(hand):\n\n binary_mask = []\n dice = len(hand)\n\n # creating a binary mask\n for idx in range(2**dice):\n binary_mask.append(str(bin(idx))[2:].rjust(dice, '0'))\n\n # applying the binary mask to the hand\n all_holds = set([()])\n for mask in binary_mask:\n hold = []\n for die in range(dice):\n if mask[die] == '1':\n hold.append(hand[die])\n hold.sort()\n all_holds.add(tuple(hold))\n\n return all_holds", "title": "" }, { "docid": "0fa166d84e012dd628f72cf2775ba16c", "score": "0.49288845", "text": "def extend(self, iterable):\n super(BitArray, self).extend([int(bool(item)) for item in iterable])", "title": "" }, { "docid": "fb919936740bed900d08ca4faf7b0c7d", "score": "0.49260423", "text": "def Encoding(y_vect):\n return np.array([0 if i == y_vect[0] else 1 for i in y_vect])", "title": "" }, { "docid": "d20271617388959b8db5cf6e7955f1d4", "score": "0.49243528", "text": "def ids(self, inp):\n if isinstance(inp, (str, bytes)):\n return self._token2id.get(inp, self.unk)\n else:\n return [self.ids(xi) for xi in inp]", "title": "" }, { "docid": "52b9e38a72a87d7121860b9dc686a3e8", 
"score": "0.49230495", "text": "def pnt_ids(self):\r\n return np.concatenate([np.repeat(i[0], i[2] - i[1]) for i in self.IFT])", "title": "" }, { "docid": "8683c2eae5bc93662d82bf464d7113f8", "score": "0.49217698", "text": "def _get_detector_mask(packets):\n detector_masks = np.array([\n [bool(int(x)) for x in format(packets.get('NIX00407')[i], '032b')][::-1] # reverse ind\n for i in range(len(packets.get('NIX00407')))], np.ubyte)\n\n return detector_masks", "title": "" }, { "docid": "c80ccb3e27399119b19d7b58552197b7", "score": "0.49197975", "text": "def encode_matrix_bin(data: list, cat: list) -> np.ndarray:\n import numpy as np\n\n return np.array([ encode_bin(s, cat) for s in data ], dtype='uint8')", "title": "" }, { "docid": "7f7ea9f9a3cb93205c85bdb1d62f6bb6", "score": "0.4916686", "text": "async def np_from_bits(self, x):\n # TODO: also handle negative numbers with sign bit (NB: from_bits() in random.py)\n *shape, l = x.shape\n await self.returnType((type(x), True, tuple(shape)))\n x = await self.gather(x)\n shifts = np.arange(l)\n s = np.sum(x.value << shifts, axis=x.ndim-1)\n return type(x).field.array(s)", "title": "" }, { "docid": "2d5a0a102c79bce92df9465bf7f90eda", "score": "0.4915359", "text": "def make_binary(length):\n if length == 0:\n return ['']\n \n prev_binary = make_binary(length - 1)\n new_binary = []\n for binary in prev_binary:\n new_binary.append(binary + '0')\n new_binary.append(binary + '1')\n return new_binary", "title": "" }, { "docid": "7303ad0c7968f1884eb89b569efba427", "score": "0.4900981", "text": "def checkio2(array):", "title": "" }, { "docid": "ef9f184d52e80b4e2c5e10ffbf7df3c4", "score": "0.48998168", "text": "def to_array(self, one_hot_encoded=False, states=False):\n seq = np.asarray(self.data, dtype=np.uint8)\n # TODO\n n_symbols = N_AA_SYMBOLS if not states else N_STATES\n if one_hot_encoded:\n data = np.zeros((self.__len__(), n_symbols), dtype=np.uint8)\n data[np.arange(self.__len__()), seq] = 1\n seq = data\n return seq", "title": "" }, { "docid": "2876c34c5f8cf8108bd9a670979eaa0a", "score": "0.48953593", "text": "def _ints_arr_to_bits(ints_arr, out):\n m = ints_arr.shape[0]\n out[0] = 0\n for i in range(m):\n out[0] |= np.uint64(1) << np.uint64(ints_arr[i])", "title": "" }, { "docid": "524d85a33ec4eef6ebda631ce04b9401", "score": "0.48894444", "text": "def _generate_random_ints(n_sids):\n symbol_ids = []\n\n for _ in range(n_sids):\n\n symbol_ids.append(np.random.randint(low = 1, high = n_sids * 100))\n \n return symbol_ids", "title": "" }, { "docid": "c6aefcddd369f29b33606ca0ffc9866b", "score": "0.4887505", "text": "def make_known_categories_bitsets(self):\n\n categorical_features_indices = np.flatnonzero(self.is_categorical_)\n\n n_features = self.is_categorical_.size\n n_categorical_features = categorical_features_indices.size\n\n f_idx_map = np.zeros(n_features, dtype=np.uint32)\n f_idx_map[categorical_features_indices] = np.arange(\n n_categorical_features, dtype=np.uint32\n )\n\n known_categories = self.bin_thresholds_\n\n known_cat_bitsets = np.zeros(\n (n_categorical_features, 8), dtype=X_BITSET_INNER_DTYPE\n )\n\n # TODO: complexity is O(n_categorical_features * 255). 
Maybe this is\n # worth cythonizing\n for mapped_f_idx, f_idx in enumerate(categorical_features_indices):\n for raw_cat_val in known_categories[f_idx]:\n set_bitset_memoryview(known_cat_bitsets[mapped_f_idx], raw_cat_val)\n\n return known_cat_bitsets, f_idx_map", "title": "" }, { "docid": "8d3bb092df773f0b1b00604f89ee5654", "score": "0.48840278", "text": "def encode_additive(self) -> pd.arrays.IntegerArray:\n # TODO: Return multiple arrays for multiple alternate alleles?\n if len(self.variant.alleles) > 2:\n raise ValueError(\"Additive encoding can only be used with one allele\")\n\n allele_sum = self.allele_idxs.sum(axis=1).astype(\"float\")\n allele_sum[(self.allele_idxs == MISSING_IDX).any(axis=1)] = np.nan\n result = pd.array(data=allele_sum, dtype=\"UInt8\")\n return result", "title": "" }, { "docid": "f3cff7342b5881c5b82b94dfdc661baf", "score": "0.4879144", "text": "def bytes2bitlist(data):\n return [1 if data[i//8] & 1 << (7-i) % 8 else 0 for i in range(len(data)*8)]", "title": "" }, { "docid": "d1fb8864715d3ee58862739c227c7871", "score": "0.48759228", "text": "def _binary_flags_to_list(self, flags, n_models):\n items = []\n for row in range(flags.shape[0]):\n current = []\n\n # We loop in reverse (check left-most bits first)\n for i in range(n_models, -1, -1):\n # Check if bit `i` is == 1\n if ((1 << i) & int(flags[row])) > 0:\n current.append(i)\n items.append(current)\n return items", "title": "" }, { "docid": "92b7b664a5f221e337c3b4111e1d8df5", "score": "0.4864887", "text": "def parse_array(x):\n\n return [c.strip() == '1' for c in x]", "title": "" }, { "docid": "6216e9df3d27eeb4554c107121272344", "score": "0.4864712", "text": "def test_decode_bitwise_pids_durango(self):\n supported_pids = bitwise_pids(DURANGO_SUPPORTED_PIDS_RESPONSE)\n assert supported_pids == {\n '01': True,\n '02': False,\n '03': True,\n '04': True,\n '05': True,\n '06': True,\n '07': True,\n '08': False,\n '09': False,\n '0A': False,\n '0B': True,\n '0C': True,\n '0D': True,\n '0E': True,\n '0F': True,\n '10': False,\n '11': True,\n '12': False,\n '13': True,\n '14': True,\n '15': True,\n '16': False,\n '17': False,\n '18': False,\n '19': False,\n '1A': False,\n '1B': False,\n '1C': True,\n '1D': False,\n '1E': False,\n '1F': False,\n '20': False\n }", "title": "" }, { "docid": "7d16c64fd295dcea08fea7758e5e0b7d", "score": "0.48646784", "text": "def encode(s: str) -> np.ndarray:\n # Convert to bitarray\n b = bytes(s, \"utf-8\")\n bits = ba()\n bits.frombytes(b)\n\n # Convert to ndarray of size (4, n)\n flat_array = np.frombuffer(bits.unpack(), dtype=bool).astype(int) # ik this is weird, i think i need it\n array = flat_array.reshape(4, int(len(flat_array)/4), order='F').T\n\n # Encode each 4 bit message\n out = list()\n for message in array:\n out.append(list(encode_743(message)))\n\n return np.array(out)", "title": "" }, { "docid": "555ccaba8ead6d6736f689ebf0352ba7", "score": "0.4864027", "text": "def extractIds(self, data=None, positive=True):\n\n # use self.data if data is not given\n if data is None:\n data = self.data\n\n # get all unique elements and keep those that are positive if required\n ids = numpy.unique(data)\n if positive:\n ids = ids.compress(ids>0)\n ids = numpy.asarray(ids, dtype='int')\n\n # return ids \n return ids", "title": "" }, { "docid": "f1b7e60ca3faf5525201b6100724efec", "score": "0.4859808", "text": "def prep_answer_gen_target_data(padded_seqs,max_nb_words,vocab_size): \r\n answer_gen_target_data = np.zeros((len(padded_seqs), max_nb_words,vocab_size),dtype='float32')\r\n for i,seq 
in enumerate(padded_seqs):\r\n for j,index in enumerate(seq):\r\n answer_gen_target_data[i,j,index] = 1\r\n \r\n return answer_gen_target_data", "title": "" }, { "docid": "2a335f362e112e90d734a53eb7678381", "score": "0.4856086", "text": "def sbyte_array_parser(buffer):\n return tuple(n & 255 for n in buffer)", "title": "" }, { "docid": "9428ccc736e31bc284ebd295a60079fc", "score": "0.4848803", "text": "def input_ids(self):\n return [x.id for x in self.input_nodes]", "title": "" }, { "docid": "c10d8a2905f1371c5c042f0af24a3aba", "score": "0.48481876", "text": "def encode_repetition(bits: list) -> list:\n new_bits = []\n for bit in bits:\n for i in range(4):\n new_bits.append(bit)\n return new_bits", "title": "" }, { "docid": "fe01489dbebf2809b65b54ec2ed07e72", "score": "0.4848043", "text": "def _pack_bits(data, model_ids, sfr_ids):\n # change the outputs of the define sources to boolean and pack into bits\n assert isinstance(data, list), 'data must be dict'\n assert len(data) == len(model_ids), 'len data must equal len of model ids'\n assert (all([isinstance(e, dict) for e in data])), 'all entries must be dict'\n assert (all([np.in1d(sfr_ids, e.keys()).all() for e in data])), 'all sfr_ids must be present in data'\n\n outdata = {}\n for i, mid in enumerate(model_ids):\n temp = np.concatenate([data[i][e][np.newaxis] for e in sfr_ids], 0).astype(bool)\n outdata[mid] = np.packbits(temp)\n return outdata", "title": "" }, { "docid": "5e0544595d9cbc3088f1b0f152333103", "score": "0.48478276", "text": "def merge_conditions_array(conds):\n merged = []\n if not conds:\n return merged\n # Group by sample index, and get a single common bit for all conds on that sample\n conds.sort(key=itemgetter(0))\n for idx,group in itertools.groupby(conds, itemgetter(0)):\n genbits = [x[1] for x in group] # only the genotype bit\n common_bits = reduce(__and__, genbits)\n merged.append((idx, common_bits))\n return merged", "title": "" }, { "docid": "20769fb968ea407a0e519fa78e699162", "score": "0.48426878", "text": "def mask_legal_actions(self,state):\n actions = np.zeros(self.action_size)\n legal_actions = state.get_symbol_legal_actions()\n for a in legal_actions:\n actions[self.actionstr_to_idx[a]] = 1\n return actions", "title": "" }, { "docid": "68acf63c721170b347baf474db92cc9f", "score": "0.4842304", "text": "def create_ints(floating: int, one_mask: int, loc: int) -> List[int]:\n res = [loc | one_mask]\n for i in range(36):\n if bit_mask := ((1 << i) & floating):\n nres = [bit_mask | x for x in res] + [~bit_mask & x for x in res]\n res = nres[:]\n return res", "title": "" }, { "docid": "c23157fed5086f2ed4af7caf8f15d318", "score": "0.48321867", "text": "def ip2arr(ip_str: str) -> np.ndarray:\n result = np.array([])\n for seg in ip_str.split('.'):\n result = np.concatenate((result, dec2arr(int(seg), 8)), axis=0)\n return result", "title": "" }, { "docid": "5cdb35c6a37072dd8fb236f77ac4543c", "score": "0.48309064", "text": "def _get_1d_boolean_mask(size, true_ids):\n mask = np.zeros(size, dtype=bool)\n mask[true_ids] = np.ones(len(true_ids), dtype=bool)\n return mask", "title": "" }, { "docid": "952d79dcc826a8653bcd3ee1e3ddf584", "score": "0.48305756", "text": "def test_id_array(self):\n b4 = Base([1, 2, 3])\n self.assertEqual(b4.id, [1, 2, 3])", "title": "" }, { "docid": "737c3236a469fbfc447a603334d01056", "score": "0.48304775", "text": "def transform_to_binary(ascii_list):\n binary_list = []\n for element in ascii_list:\n i = bin(element).replace('b', '')\n binary_list.append(i)\n return binary_list", "title": "" }, { 
"docid": "a43945d1f8f0d6cb5faed7312930fa42", "score": "0.4830392", "text": "def embed_ids(all_ids, ids):\n row_vec = np.zeros(len(all_ids))\n missing = []\n for id in ids:\n try:\n index = all_ids.index(id)\n row_vec[index] = 1\n except ValueError:\n missing.append(id)\n\n row_vec_sparse = sp.csc_matrix(row_vec)\n return (row_vec_sparse, missing)", "title": "" }, { "docid": "202f9cbbe5e266b073a600bd9116cfe0", "score": "0.48301378", "text": "def to_bits(self) -> BitArray:\n if self.data is None:\n return BitArray('0b1') + self.left.to_bits() + self.right.to_bits()\n else:\n return BitArray('0b0') + BitArray(bytes(self.data, encoding='utf8'))", "title": "" }, { "docid": "e7167f5a4f58bdc2739c32122f25d878", "score": "0.48301348", "text": "def od_pairs(self):\r\n return np.asarray([np.c_[p[:-1], p[1:]] for p in self.bits])", "title": "" } ]
819ec84c2c0ec3aff03438ab3c1dd5fc
Delete Vserver's VLAN configuration from ports
[ { "docid": "c8810e698d34c56b1cabdec3c2ee9b0d", "score": "0.7316536", "text": "def _delete_vserver_vlans(self, network_interfaces_on_vlans):\n for interface in network_interfaces_on_vlans:\n try:\n home_port = interface['home-port']\n port, vlan = home_port.split('-')\n node = interface['home-node']\n self._client.delete_vlan(node, port, vlan)\n except exception.NetAppException:\n LOG.exception(\"Deleting Vserver VLAN failed.\")", "title": "" } ]
[ { "docid": "8dc58697abd63fd1230d85da89499814", "score": "0.66775584", "text": "def _delete_vsx_interface_vlan_v1(vlan_id, **kwargs):\n ports_list = port.get_all_ports(**kwargs)\n vlan_name = \"vlan\" + str(vlan_id)\n\n if \"/rest/v1/system/ports/%s\" % vlan_name not in ports_list:\n logging.warning(\"FAIL: Deleting VSX information from VLAN Interface '%d' failed \"\n \"because VLAN Interface doesn't exist\" % vlan_id)\n return False\n else:\n\n port_data = port.get_port(vlan_name, depth=0, selector=\"configuration\", **kwargs)\n\n port_data[\"vsx_active_forwarding_enable\"] = False\n port_data[\"vsx_sync\"] = []\n port_data[\"vsx_virtual_ip4\"] = []\n port_data.pop('vsx_virtual_gw_mac_v4', None)\n\n port_data.pop('name', None) # must remove this item from the json since name can't be modified\n port_data.pop('origin', None) # must remove this item from the json since origin can't be modified\n\n target_url = kwargs[\"url\"] + \"system/ports/%s\" % vlan_name\n put_data = json.dumps(port_data, sort_keys=True, indent=4)\n response = kwargs[\"s\"].put(target_url, data=put_data, verify=False)\n\n if not common_ops._response_ok(response, \"PUT\"):\n logging.warning(\"FAIL: Deleting VSX information from VLAN Interface '%d' failed with status code %d: %s\"\n % (vlan_id, response.status_code, response.text))\n return False\n else:\n logging.info(\"SUCCESS: Deleting VSX information from VLAN Interface '%d' succeeded\" % vlan_id)\n return True", "title": "" }, { "docid": "4945016ee60e239f9935b3289de9d4d1", "score": "0.6584275", "text": "def clear_vlan(self, ports=None):\n\n # validate ports and state\n self.set_vlan(ports=ports, vlan=[])", "title": "" }, { "docid": "8a0e4e765908e640b7cc3b24db069ba8", "score": "0.6464528", "text": "def delete_vlan(self, node, port, vlan):\n query = {\n 'vlan.base_port.name': port,\n 'node.name': node,\n 'vlan.tag': vlan,\n }\n\n try:\n self.send_request('/network/ethernet/ports/', 'delete',\n query=query)\n except netapp_api.api.NaApiError as e:\n if e.code == netapp_api.EREST_ENTRY_NOT_FOUND:\n LOG.debug('VLAN %(vlan)s on port %(port)s node %(node)s '\n 'was not found')\n elif (e.code == netapp_api.EREST_INTERFACE_BOUND or\n e.code == netapp_api.EREST_PORT_IN_USE):\n LOG.debug('VLAN %(vlan)s on port %(port)s node %(node)s '\n 'still used by LIF and cannot be deleted.',\n {'vlan': vlan, 'port': port, 'node': node})\n else:\n msg = _('Failed to delete VLAN %(vlan)s on '\n 'port %(port)s node %(node)s: %(err_msg)s')\n msg_args = {\n 'vlan': vlan,\n 'port': port,\n 'node': node,\n 'err_msg': e.message\n }\n raise exception.NetAppException(msg % msg_args)", "title": "" }, { "docid": "8fa767ae9fcbb144d30acdf35eae5385", "score": "0.6395742", "text": "def delete_vsx_interface_vlan(vlan_id, **kwargs):\n if kwargs[\"url\"].endswith(\"/v1/\"):\n return _delete_vsx_interface_vlan_v1(vlan_id, **kwargs)\n else: # Updated else for when version is v10.04\n return _delete_vsx_interface_vlan(vlan_id, **kwargs)", "title": "" }, { "docid": "b0b96d65f58d43cfa73fc7bd2e0d8c89", "score": "0.6390189", "text": "def ex_delete_vlan(self, vlan):\n delete_node = ET.Element(\"deleteVlan\", {\"xmlns\": TYPES_URN})\n delete_node.set(\"id\", vlan.id)\n result = self.connection.request_with_orgId_api_2(\n \"network/deleteVlan\", method=\"POST\", data=ET.tostring(delete_node)\n ).object\n\n response_code = findtext(result, \"responseCode\", TYPES_URN)\n return response_code in [\"IN_PROGRESS\", \"OK\"]", "title": "" }, { "docid": "9daf4adbc1952981e54b5b3622a3c89a", "score": "0.63019174", "text": "def 
delete_default_vlan(self):\n\n if int(self.mgm_vlan) != int(1):\n print('Delete default VLAN')\n self.make_instruction('config vlan default delete 1-6')", "title": "" }, { "docid": "e15b847d981ef4aa3107bb12cd30a6b9", "score": "0.62913823", "text": "def del_vlan(self, vlan):\n ofmsgs = []\n for table in (self.vlan_acl_table, self.egress_acl_table):\n if table is not None:\n ofmsgs.append(table.flowdel(match=table.match(vlan=vlan)))\n return ofmsgs", "title": "" }, { "docid": "9f02f1d7a1fd7d9a007e81a421cbd178", "score": "0.62837595", "text": "def delete_dev_vlans(vlanid, auth, url, devid=None, devip=None):\n if devip is not None:\n devid = get_dev_details(devip, auth, url)['id']\n remove_dev_vlan_url = \"/imcrs/vlan/delvlan?devId=\" + str(devid) + \"&vlanId=\" + str(vlanid)\n f_url = url + remove_dev_vlan_url\n response = requests.delete(f_url, auth=auth, headers=HEADERS)\n try:\n if response.status_code == 204:\n print('Vlan deleted')\n return response.status_code\n elif response.status_code == 409:\n print('Unable to delete VLAN.\\nVLAN does not Exist\\nDevice does not support VLAN '\n 'function')\n return response.status_code\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + \" delete_dev_vlans: An Error has occured\"", "title": "" }, { "docid": "772c0a1fc1df3e31657f6e86b992c2bc", "score": "0.6187439", "text": "def _delete_vserver(self, vserver, security_services=None,\n needs_lock=True):\n\n ipspace_name = self._client.get_vserver_ipspace(vserver)\n\n vserver_client = self._get_api_client(vserver=vserver)\n network_interfaces = vserver_client.get_network_interfaces()\n snapmirror_policies = self._client.get_snapmirror_policies(vserver)\n\n interfaces_on_vlans = []\n vlans = []\n for interface in network_interfaces:\n if '-' in interface['home-port']:\n interfaces_on_vlans.append(interface)\n vlans.append(interface['home-port'])\n\n if vlans:\n vlans = '-'.join(sorted(set(vlans))) if vlans else None\n vlan_id = vlans.split('-')[-1]\n else:\n vlan_id = None\n\n def _delete_vserver_without_lock():\n # NOTE(dviroel): always delete all policies before deleting the\n # vserver\n for policy in snapmirror_policies:\n vserver_client.delete_snapmirror_policy(policy)\n\n # NOTE(dviroel): Attempt to delete all vserver peering\n # created by replication\n self._delete_vserver_peers(vserver)\n\n self._client.delete_vserver(vserver,\n vserver_client,\n security_services=security_services)\n ipspace_deleted = False\n if (ipspace_name and ipspace_name not in CLUSTER_IPSPACES\n and not self._client.ipspace_has_data_vservers(\n ipspace_name)):\n self._client.delete_ipspace(ipspace_name)\n ipspace_deleted = True\n\n if not ipspace_name or ipspace_deleted:\n # NOTE(dviroel): only delete vlans if they are not being used\n # by any ipspaces and data vservers.\n self._delete_vserver_vlans(interfaces_on_vlans)\n\n @utils.synchronized('netapp-VLAN-%s' % vlan_id, external=True)\n def _delete_vserver_with_lock():\n _delete_vserver_without_lock()\n\n if needs_lock:\n return _delete_vserver_with_lock()\n else:\n return _delete_vserver_without_lock()", "title": "" }, { "docid": "970c4a115a1c1bcfc72d582b8db0dd27", "score": "0.6123552", "text": "def unconfigure_switchport_vlan_mapping(device, interface, vlan):\n cmd = [f\"interface {interface}\", f\"no switchport vlan mapping {vlan}\"]\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n log.error(e)\n raise SubCommandFailure(\"Could not unconfigure switchport vlan mapping\")", "title": "" }, { "docid": 
"6070b88472df97d81eb2c6250589c2f7", "score": "0.6047876", "text": "def _delete_vsx_interface_vlan(vlan_id, **kwargs):\n ints_list = interface.get_all_interfaces(**kwargs)\n vlan_name = \"vlan\" + str(vlan_id)\n\n if vlan_name not in ints_list:\n logging.warning(\"FAIL: Deleting VSX information to VLAN Interface '%d' failed because \"\n \"VLAN Interface doesn't exist\" % vlan_id)\n return False\n else:\n interface_vsx_data = interface.get_interface(vlan_name, depth=2, selector=\"writable\", **kwargs)\n\n interface_vsx_data[\"vsx_active_forwarding_enable\"] = None\n interface_vsx_data[\"vsx_sync\"] = None\n interface_vsx_data[\"vsx_virtual_gw_mac_v4\"] = None\n interface_vsx_data[\"vsx_virtual_ip4\"] = []\n\n target_url = kwargs[\"url\"] + \"system/interfaces/\" + vlan_name\n put_data = json.dumps(interface_vsx_data, sort_keys=True, indent=4)\n response = kwargs[\"s\"].put(target_url, data=put_data, verify=False)\n\n if not common_ops._response_ok(response, \"PUT\"):\n logging.warning(\"FAIL: Deleting VSX information from VLAN Interface '%d' failed with status code %d: %s\"\n % (vlan_id, response.status_code, response.text))\n return False\n else:\n logging.info(\"SUCCESS: Deleting VSX information from VLAN Interface '%d' succeeded\" % vlan_id)\n return True", "title": "" }, { "docid": "655f582c89dfe568c46b7fab8b970fa3", "score": "0.6039648", "text": "def test_PortMirrorToVlanAddRemove(self, dvs, testlog):\n dvs.setup_db()\n pmap = dvs.counters_db.get_entry(\"COUNTERS_PORT_NAME_MAP\", \"\")\n pmap = dict(pmap)\n\n session = \"TEST_SESSION\"\n src_ports = \"Ethernet12,Ethernet16\"\n src_asic_ports = [\"Ethernet12\", \"Ethernet16\"]\n vlan_id = \"10\"\n vlan = \"Vlan10\"\n\n # create mirror session\n self.dvs_mirror.create_erspan_session(session, \"5.5.5.5\", \"6.6.6.6\", \"0x6558\", \"8\", \"100\", \"0\", None, src_ports, direction=\"TX\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # create vlan; create vlan member\n self.dvs_vlan.create_vlan(vlan_id)\n self.dvs_vlan.create_vlan_member(vlan_id, \"Ethernet4\")\n\n # bring up vlan and member\n dvs.set_interface_status(vlan, \"up\")\n dvs.set_interface_status(\"Ethernet4\", \"up\")\n\n # add ip address to vlan 6\n dvs.add_ip_address(vlan, \"6.6.6.0/24\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # create neighbor to vlan 6\n dvs.add_neighbor(vlan, \"6.6.6.6\", \"66:66:66:66:66:66\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # create fdb entry to ethernet4\n dvs.create_fdb(vlan_id, \"66-66-66-66-66-66\", \"Ethernet4\")\n self.dvs_mirror.verify_session_status(session)\n\n src_mac = dvs.runcmd(\"bash -c \\\"ip link show eth0 | grep ether | awk '{print $2}'\\\"\")[1].strip().upper()\n expected_asic_db = {\"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT\": pmap.get(\"Ethernet4\"),\n \"SAI_MIRROR_SESSION_ATTR_TYPE\": \"SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE\",\n \"SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE\": \"SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL\",\n \"SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION\": \"4\",\n \"SAI_MIRROR_SESSION_ATTR_TOS\": \"32\",\n \"SAI_MIRROR_SESSION_ATTR_TTL\": \"100\",\n \"SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS\": \"5.5.5.5\",\n \"SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS\": \"6.6.6.6\",\n \"SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS\": \"66:66:66:66:66:66\",\n \"SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS\": src_mac,\n \"SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE\": \"25944\",\n \"SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID\": \"true\",\n 
\"SAI_MIRROR_SESSION_ATTR_VLAN_TPID\": \"33024\",\n \"SAI_MIRROR_SESSION_ATTR_VLAN_ID\": vlan_id,\n \"SAI_MIRROR_SESSION_ATTR_VLAN_PRI\": \"0\",\n \"SAI_MIRROR_SESSION_ATTR_VLAN_CFI\": \"0\"}\n self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db, src_ports=src_asic_ports, asic_size=16, direction=\"TX\")\n\n # remove fdb entry\n dvs.remove_fdb(vlan_id, \"66-66-66-66-66-66\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # remove neighbor\n dvs.remove_neighbor(vlan, \"6.6.6.6\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # remove ip address\n dvs.remove_ip_address(vlan, \"6.6.6.0/24\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # bring down vlan and member\n dvs.set_interface_status(\"Ethernet4\", \"down\")\n dvs.set_interface_status(vlan, \"down\")\n\n # remove vlan member; remove vlan\n self.dvs_vlan.remove_vlan_member(vlan_id, \"Ethernet4\")\n self.dvs_vlan.get_and_verify_vlan_member_ids(0)\n self.dvs_vlan.remove_vlan(vlan_id)\n\n # remove mirror session\n self.dvs_mirror.remove_mirror_session(session)\n self.dvs_mirror.verify_no_mirror()", "title": "" }, { "docid": "97ae43fa85ae8c03c755c70ea941f107", "score": "0.6019906", "text": "def pop_vlan(graph):\n\tfor switch in graph.switches:\n\t\t# table 201\n\t\t# pop vlan if direct switch\n\t\tprint commands.getoutput(\"ovs-ofctl -O OpenFlow15 add-flow \" +switch.name + \" \\\"table=201, priority=50,dl_vlan=1, reg2=3, action=strip_vlan,resubmit(,202)\\\"\")\n\t\tprint commands.getoutput(\"ovs-ofctl -O OpenFlow15 add-flow \" +switch.name + \" \\\"table=201, priority=30, action=resubmit(,202)\\\"\")", "title": "" }, { "docid": "58baefc403cfd00db5df26259a9ebbb6", "score": "0.5998295", "text": "def do_delete_virtual_link(self, account, link_id):\n drv = self._use_driver(account)\n try:\n port_list = drv.neutron_port_list(**{'network_id': link_id})\n for port in port_list:\n if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):\n self.do_delete_port(account, port['id'], no_rwstatus=True)\n self.do_delete_network(account, link_id, no_rwstatus=True)\n except Exception as e:\n self.log.exception(\"Exception %s occured during virtual-link deletion\", str(e))\n raise", "title": "" }, { "docid": "69cff4242a1d428afe4d4c4e9bfbe1f2", "score": "0.59257895", "text": "def del_port(self, port, bridge=None, if_exists=True):", "title": "" }, { "docid": "9349725ea7c009308d53122cf99492f7", "score": "0.5890672", "text": "def delete_vlink(session, name):\n url = \"http://\" + host + \"/rest/conf\"\n response = session.post(url)\n logger.debug('POST rest/conf response headers %s', response.headers)\n\n new_uri = response.headers['location']\n url = \"http://\" + host + \"/\" + new_uri + \"/delete/service/dhcp-server/shared-network-name/\" + name\n response = session.put(url)\n logger.debug('PUT delete vlink %s response text %s', name, response.text)\n\n url = \"http://\" + host + \"/\" + new_uri + \"/commit\"\n response = session.post(url)\n logger.debug('POST commit vlink %s response text %s', name, response.text)\n\n url = \"http://\" + host + \"/\" + new_uri\n response = session.delete(url)\n logger.debug('DELETE rest/conf response headers %s', response.headers)\n\n return response.text", "title": "" }, { "docid": "7793dd55a4a4f73b527cffeaecab38e7", "score": "0.58887994", "text": "def delete_port_postcommit(self, context):\n if self._is_supported_deviceowner(context.current):\n vlan_segment, vxlan_segment = self._get_segments(\n 
context.top_bound_segment,\n context.bottom_bound_segment)\n vni = self._port_action_vxlan(context.current, vxlan_segment,\n self._delete_nve_member) if vxlan_segment else 0\n self._port_action_vlan(context.current, vlan_segment,\n self._delete_switch_entry, vni)", "title": "" }, { "docid": "7793dd55a4a4f73b527cffeaecab38e7", "score": "0.58887994", "text": "def delete_port_postcommit(self, context):\n if self._is_supported_deviceowner(context.current):\n vlan_segment, vxlan_segment = self._get_segments(\n context.top_bound_segment,\n context.bottom_bound_segment)\n vni = self._port_action_vxlan(context.current, vxlan_segment,\n self._delete_nve_member) if vxlan_segment else 0\n self._port_action_vlan(context.current, vlan_segment,\n self._delete_switch_entry, vni)", "title": "" }, { "docid": "171ff0a7121db6af1128940e100851d2", "score": "0.58538145", "text": "def delete_host_from_vlan(self, eth_src, vlan):\n ofmsgs = [\n self.eth_src_table.flowdel(\n self.eth_src_table.match(vlan=vlan, eth_src=eth_src)\n )\n ]\n for table in (self.eth_dst_table, self.eth_dst_hairpin_table):\n if table:\n ofmsgs.append(table.flowdel(table.match(vlan=vlan, eth_dst=eth_src)))\n return ofmsgs", "title": "" }, { "docid": "db9863977aad1b53ae012221731a1ce9", "score": "0.5831912", "text": "def interface_detach(self, server, port_id):\n self._delete('/servers/%s/os-interface/%s' % (base.getid(server),\n port_id))", "title": "" }, { "docid": "06514105a7ba0ea731a59b55312f8f42", "score": "0.57897705", "text": "def remove_vlan(self, action: ConnectivityActionModel, target: Any) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "da9403d6e810adfd181a47efcf9c019e", "score": "0.57702273", "text": "def unconfigure_evpn_l3_instance_vlan_association(device,vlan_id,vni_id):\n\n configs = []\n configs.append(\"vlan configuration {vlan_id}\".format(vlan_id = vlan_id))\n configs.append(\"no member vni {vni_id}\".format(vni_id = vni_id))\n try:\n device.configure(configs)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Failed to unconfigure VLAN association to EVPN L3 instance on device \"\n 'Error:{e}'.format(e=e)\n )", "title": "" }, { "docid": "50d4801da6ce4d02725256de01977f8c", "score": "0.5765059", "text": "def _remove_vlan_from_nb(self, req_nb, vlan_id):\n raise NotImplementedError()", "title": "" }, { "docid": "d79762e0a09e5e57132297ad01cf2c89", "score": "0.5734335", "text": "def pop_vlan_for_hosts(graph):\n\thosts = []\n\tfor node in graph.nodes:\n\t\tif node.name in graph.topo.hosts():\n\t\t\thosts.append(node)\n\n\tfor host in hosts:\n\t\thost_ip = graph.topo.nodeInfo(host.name)[\"ip\"].split(\"/\")[0]\n\t\tfor link in host.links:\n\t\t\tswitch = link.next_node\n\t\t\tport = link.next_port\n\t\t\t# table 0\n\t\t\t# removed resubmit to table 2\n\t\t\tprint commands.getoutput(\"ovs-ofctl -O OpenFlow15 add-flow \" +switch.name + \" \\\"table=200, priority=50,ip, nw_dst=\"+host_ip + \", actions=load:3->NXM_NX_REG2[0..15]\\\"\")", "title": "" }, { "docid": "f8147f75a1ac9f5b0244ce102e9eabfc", "score": "0.57335097", "text": "def cleanup_segment_ports(self, segment_id):\n segment_ports = self.get_os_nsx_segment_ports(segment_id)\n for p in segment_ports:\n try:\n self.nsxpolicy.segment_port_security_profiles.delete(\n segment_id, p['id'])\n except Exception:\n pass\n try:\n self.nsxpolicy.segment_port_discovery_profiles.delete(\n segment_id, p['id'])\n except Exception:\n pass\n try:\n self.nsxpolicy.segment_port_qos_profiles.delete(\n segment_id, p['id'])\n except Exception:\n pass\n try:\n 
self.nsxpolicy.segment_port.delete(segment_id, p['id'])\n except exceptions.ManagerError as e:\n print(\"Failed to delete segment port %s: %s\" % (p['id'], e))", "title": "" }, { "docid": "e3f86c919cb22a369f8947bb143d4b83", "score": "0.5732334", "text": "def _remove_vhost_net():\n _DPDK_MODULE_MANAGER.remove_module('vhost-net')\n try:\n tasks.run_task(['sudo', 'rm', '-f', '/dev/vhost-net'], _LOGGER,\n 'Removing \\'/dev/vhost-net\\' directory...', True)\n except subprocess.CalledProcessError:\n _LOGGER.error('Unable to remove directory \\'/dev/vhost-net\\'.')", "title": "" }, { "docid": "e2e3ae0fc5f1a64ecefc08ff4230230e", "score": "0.56914115", "text": "def remove_vm_firewall(self, firewall):\n pass", "title": "" }, { "docid": "b2d8ce355ea33f9baa6fcaf3e0fc6473", "score": "0.56601435", "text": "def _sync_delete_ports(self, combined_res_info, vsm_ip):\n (vsm_vmn_dict, neutron_ports) = combined_res_info\n vsm_port_uuids = set()\n for port_dict in vsm_vmn_dict.values():\n port_props = port_dict['properties']\n port_ids = set(port_props['portId'].split(','))\n vsm_port_uuids = vsm_port_uuids.union(port_ids)\n neutron_port_uuids = set(self._get_uuids(n1kv_const.PORTS,\n neutron_ports))\n for (vmnetwork_name, port_dict) in vsm_vmn_dict.items():\n port_props = port_dict['properties']\n port_ids = port_props['portId'].split(',')\n extra_ports = [port for port in port_ids if port not in\n neutron_port_uuids]\n for port_id in extra_ports:\n # delete these ports from VSM\n try:\n self.n1kvclient.delete_n1kv_port(vmnetwork_name, port_id,\n vsm_ip=vsm_ip)\n except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):\n LOG.warning(_LW('Sync Exception: Port delete failed for %s'\n '.') % port_id)", "title": "" }, { "docid": "c71ee05c93ae3f767cdf90f02575c9fe", "score": "0.564027", "text": "def _delete_switch_entry(self, port, vlan_id, device_id, host_id, vni,\n is_provider_vlan):\n connections = self._get_active_port_connections(port, host_id)\n\n # (nexus_port,switch_ip) will be unique in each iteration.\n # But switch_ip will repeat if host has >1 connection to same switch.\n # So track which switch_ips already have vlan removed in this loop.\n vlan_already_removed = []\n for switch_ip, intf_type, nexus_port, is_native in connections:\n\n # if there are no remaining db entries using this vlan on this\n # nexus switch port then remove vlan from the switchport trunk.\n port_id = self.format_interface_name(intf_type, nexus_port)\n auto_create = True\n auto_trunk = True\n if is_provider_vlan:\n auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create\n auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk\n\n try:\n nxos_db.get_port_vlan_switch_binding(port_id, vlan_id,\n switch_ip)\n except excep.NexusPortBindingNotFound:\n pass\n else:\n continue\n\n if auto_trunk:\n self.driver.disable_vlan_on_trunk_int(\n switch_ip, vlan_id, intf_type, nexus_port,\n is_native)\n\n # if there are no remaining db entries using this vlan on this\n # nexus switch then remove the vlan.\n if auto_create:\n try:\n nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)\n except excep.NexusPortBindingNotFound:\n # Do not perform a second time on same switch\n if switch_ip not in vlan_already_removed:\n self.driver.delete_vlan(switch_ip, vlan_id)\n vlan_already_removed.append(switch_ip)", "title": "" }, { "docid": "00bb2f3c11cbe80d21057768a6dd1e35", "score": "0.5615145", "text": "def del_interface(self, br_name, iface_name):\n self.cli_send_command(command=\"ovs-vsctl del-port {0} {1} \".format(br_name, iface_name))\n 
self.update_map(iface_name, delete=True)", "title": "" }, { "docid": "286caf16474c18d04532ddc0d5e95b65", "score": "0.5610358", "text": "def _delete_switch_entry(self, vlan_id, device_id, host_id, vni,\n is_provider_vlan):\n host_connections = self._get_switch_info(host_id)\n\n # (nexus_port,switch_ip) will be unique in each iteration.\n # But switch_ip will repeat if host has >1 connection to same switch.\n # So track which switch_ips already have vlan removed in this loop.\n vlan_already_removed = []\n for switch_ip, intf_type, nexus_port in host_connections:\n\n # if there are no remaining db entries using this vlan on this\n # nexus switch port then remove vlan from the switchport trunk.\n port_id = '%s:%s' % (intf_type, nexus_port)\n auto_create = True\n auto_trunk = True\n if is_provider_vlan:\n auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create\n auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk\n\n try:\n nxos_db.get_port_vlan_switch_binding(port_id, vlan_id,\n switch_ip)\n except excep.NexusPortBindingNotFound:\n pass\n else:\n continue\n\n if auto_trunk:\n self.driver.disable_vlan_on_trunk_int(\n switch_ip, vlan_id, intf_type, nexus_port)\n\n # if there are no remaining db entries using this vlan on this\n # nexus switch then remove the vlan.\n if auto_create:\n try:\n nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)\n except excep.NexusPortBindingNotFound:\n # Do not perform a second time on same switch\n if switch_ip not in vlan_already_removed:\n self.driver.delete_vlan(switch_ip, vlan_id)\n vlan_already_removed.append(switch_ip)", "title": "" }, { "docid": "5a68d158fe9d383935e434748a35f1ce", "score": "0.56015086", "text": "def delete_port_precommit(self, context):\n if self._is_supported_deviceowner(context.current):\n vlan_segment, vxlan_segment = self._get_segments(\n context.top_bound_segment,\n context.bottom_bound_segment)\n vni = self._port_action_vxlan(context.current, vxlan_segment,\n self._delete_nve_db) if vxlan_segment else 0\n self._port_action_vlan(context.current, vlan_segment,\n self._delete_nxos_db, vni)", "title": "" }, { "docid": "5a68d158fe9d383935e434748a35f1ce", "score": "0.56015086", "text": "def delete_port_precommit(self, context):\n if self._is_supported_deviceowner(context.current):\n vlan_segment, vxlan_segment = self._get_segments(\n context.top_bound_segment,\n context.bottom_bound_segment)\n vni = self._port_action_vxlan(context.current, vxlan_segment,\n self._delete_nve_db) if vxlan_segment else 0\n self._port_action_vlan(context.current, vlan_segment,\n self._delete_nxos_db, vni)", "title": "" }, { "docid": "710db63fbc6083fee77b7a14634853db", "score": "0.5584715", "text": "def unconfigure_evpn_instance_vlan_based(device, instance):\n try:\n device.configure(\n \"no l2vpn evpn instance {instance} vlan-based\".format(\n instance=instance)\n )\n except SubCommandFailure as e:\n log.error(\"Could not unconfig l2vpn evpn instance {instance},\"\n \"Error:\\n{error}\".format(instance=instance, error=e)\n )\n raise", "title": "" }, { "docid": "9bfc4653eeb870dbe4ae37b61238e979", "score": "0.55569756", "text": "def deletehostGavel(macaddr):\n driver = GraphDatabase.driver(\"bolt://localhost\", auth=basic_auth(\"neo4j\", \"gavel\"))\n session = driver.session()\n result = session.run(\"match (n: Host {mac:{switchdpid}}) Detach delete n;\",{\"switchdpid\":macaddr})", "title": "" }, { "docid": "e982cde709eb0fdb48e2dbea114edfa2", "score": "0.55529416", "text": "def 
unconfigure_evpn_l2_instance_vlan_association(device,vlan_id,evpn_instance,vni_id):\n\n configs = []\n configs.append(\"vlan configuration {vlan_id}\".format(vlan_id = vlan_id))\n configs.append(\"no member evpn-instance {evpn_instance} vni {vni_id}\".format(evpn_instance=evpn_instance,vni_id = vni_id))\n\n try:\n device.configure(configs)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Failed to unconfigure VLAN association to EVPN L2 instance on device \"\n 'Error:{e}'.format(e=e)\n )", "title": "" }, { "docid": "f1dfabe5bbe7bc302e3cb5f740df030f", "score": "0.5532891", "text": "def cleanup_os_dhcp_logical_ports(self):\n os_lports = self.get_os_dhcp_logical_ports()\n print(\"Number of OS Logical Ports to be deleted: %s\" % len(os_lports))\n for p in os_lports:\n try:\n self.nsxlib.logical_port.update(\n p['id'], None, attachment_type=None)\n self.nsxlib.logical_port.delete(p['id'])\n except Exception as e:\n print(\"ERROR: Failed to delete logical port %s, error %s\" %\n (p['id'], e))\n else:\n print(\"Successfully deleted logical port %s\" % p['id'])", "title": "" }, { "docid": "ee3e43d3b6463f29e513bdbce99e57b6", "score": "0.5519092", "text": "def xmlrpc_del_port_from_bridge(self, port):\n ret = vswitch.ovs_vsctl_del_port_from_bridge(port)\n if ret == 0:\n self.ports.discard(port)\n return ret", "title": "" }, { "docid": "ddf57b878f79a066ce343dde2f161fbd", "score": "0.54949164", "text": "def delete_web_interface(port: int) -> None:\n ...", "title": "" }, { "docid": "9ed04f3f16f47252ba1151b853f53049", "score": "0.54796225", "text": "def set_vlan(self, ports=None, vlan=None):\n\n # validate ports and state\n ports = ports if ports is not None else self.get_acquired_ports()\n ports = self.psv.validate(\n 'VLAN', ports, (PSV_ACQUIRED, PSV_SERVICE, PSV_IDLE))\n\n vlan = VLAN(vlan)\n\n if vlan:\n self.logger.pre_cmd(\n \"Setting port(s) {0} with {1}: \".format(ports, vlan.get_desc()))\n else:\n self.logger.pre_cmd(\n \"Clearing port(s) {0} VLAN configuration: \".format(ports))\n\n rc = self.__set_vlan(ports, vlan)\n self.logger.post_cmd(rc)\n\n if not rc:\n raise STLError(rc)", "title": "" }, { "docid": "14c601f232c19395db0dd02974968822", "score": "0.54728156", "text": "def cleanup(self):\n for port in self.ports:\n # Remove ports that were added to existing bridges\n vswitch.ovs_vsctl_del_port_from_bridge(port)\n\n for bridge in self.bridges:\n # Remove bridges that were added for L3 tests\n vswitch.ovs_vsctl_del_bridge(bridge)\n\n for pbridge in self.pbridges:\n # Remove bridges that were added for VLAN tests\n vswitch.ovs_vsctl_del_pbridge(pbridge[0], pbridge[1])", "title": "" }, { "docid": "1c4a896c4f5451782ef8456899383839", "score": "0.5453675", "text": "def _delete_nxos_db(self, vlan_id, device_id, host_id, vni,\n is_provider_vlan):\n try:\n rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id)\n for row in rows:\n nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id,\n row.vni, row.switch_ip, row.instance_id,\n row.is_provider_vlan)\n except excep.NexusPortBindingNotFound:\n return", "title": "" }, { "docid": "6911fb28d3bb6fc92fbf60a4472c9d6a", "score": "0.5433101", "text": "def clean_virtual_service_from_avi_config(avi_config, controller_version):\n vs_list = copy.deepcopy(avi_config['VirtualService'])\n avi_config['VirtualService'] = []\n if parse_version(controller_version) >= parse_version('17.1'):\n avi_config['VirtualService'] = \\\n [vs for vs in vs_list\n if vs['vip'][0]['ip_address']['addr'] != '0.0.0.0']\n else:\n avi_config['VirtualService'] = \\\n [vs 
for vs in vs_list\n if vs['ip_address']['addr'] != '0.0.0.0']", "title": "" }, { "docid": "8ad9d2171b02184c8ac478738d71573f", "score": "0.5430895", "text": "def delete_vserver(self, vserver_name, vserver_client,\n security_services=None):\n vserver_info = self.get_vserver_info(vserver_name)\n if vserver_info is None:\n LOG.error(\"Vserver %s does not exist.\", vserver_name)\n return\n svm_uuid = self._get_unique_svm_by_name(vserver_name)\n\n is_dp_destination = vserver_info.get('subtype') == 'dp_destination'\n root_volume_name = self.get_vserver_root_volume_name(vserver_name)\n volumes_count = vserver_client.get_vserver_volume_count()\n\n # NOTE(dviroel): 'dp_destination' vservers don't allow to delete its\n # root volume. We can just call vserver-destroy directly.\n if volumes_count == 1 and not is_dp_destination:\n try:\n vserver_client.offline_volume(root_volume_name)\n except netapp_api.api.NaApiError as e:\n if e.code == netapp_api.EREST_ENTRY_NOT_FOUND:\n LOG.error(\"Cannot delete Vserver %s. \"\n \"Failed to put volumes offline. \"\n \"Entry doesn't exist.\", vserver_name)\n else:\n raise\n vserver_client.delete_volume(root_volume_name)\n\n elif volumes_count > 1:\n msg = _(\"Cannot delete Vserver. Vserver %s has shares.\")\n raise exception.NetAppException(msg % vserver_name)\n\n if security_services and not is_dp_destination:\n self._terminate_vserver_services(vserver_name, vserver_client,\n security_services)\n\n self.send_request(f'/svm/svms/{svm_uuid}', 'delete')", "title": "" }, { "docid": "e84c1db15af6c40af33122e097b07633", "score": "0.5397548", "text": "def vlan(debug, sh):\n\n tok = sh.get_token()\n if \"\" == tok:\n return\n if 1 == debug:\n print \"vlan: \" + tok\n #\n # We have just entered vlan configuration.\n # We need a vlan name , network number and mask.\n # This information is either on the vlan line or we need to generate\n # them.\n #\n\n vlan_name = \"\"\n network_number = \"\"\n \n # We are looking for two optional fields. 
The vlan name and the network.\n # Note that \"net\" is a keyword.\n #\n # vlan[vlan1]\n # vlan[net 10.0.0.1]\n # vlan[vlan1 net 10.0.0.1/24]\n # vlan[vlan1 net 10.0.0.1/255.255.255.0]\n #\n if \"[\" == tok:\n list = [\"\", \"\"]\n offset = 0\n while 1:\n tok = sh.get_token()\n if 1 == debug:\n print \"vlan: \" + tok\n if \"\" == tok or \"]\" == tok:\n break\n if \"net\" == tok:\n offset = 1\n else:\n list[offset] = tok\n\n vlan_name = list[0]\n network_number = list[1]\n\n if \"=\" != tok:\n tok = sh.get_token()\n if 1 == debug:\n print \"vlan: \" + tok\n\n if 1 == debug:\n print \"vlan name: \" + vlan_name + \" \" + network_number\n\n #\n # This token must be an \"=\"\n #\n if \"=\" != tok:\n raise MyError, sh.error_leader() + \"expected \" + '\"=\"' + \" got \" + tok\n\n hosts = []\n hosts = host(debug, sh)\n if 1 == debug:\n print \"vlan: \", hosts\n # Place holder for netmask\n return [[vlan_name, network_number, \"\"]] + hosts", "title": "" }, { "docid": "51c4a8b5bc6f90b0d1aa2538a21f0c57", "score": "0.53908324", "text": "def remove_vlans(self, actions: Collection[ConnectivityActionModel]) -> None:\n self._execute_actions(self.remove_vlan, actions)", "title": "" }, { "docid": "f721f2b5ea4fb72bd2da4d0b2b1560db", "score": "0.53588355", "text": "def remove_vlan_from_nb(adapter, host_uuid, nb_uuid, vlan_id,\n fail_if_pvid=False, existing_nbs=None):\n _get_bridger(adapter, host_uuid).remove_vlan_from_nb(nb_uuid, vlan_id,\n fail_if_pvid,\n existing_nbs)", "title": "" }, { "docid": "21356d3080f1f48951eea9f74a83a843", "score": "0.5344984", "text": "def delete_port(self, context, details):\n port_id = details['port_id']\n devices = self._get_devices()\n self.fdb_tracker.delete_port(devices, port_id)", "title": "" }, { "docid": "11e7b9027f9dd8c6b7a41424881a2fc0", "score": "0.53422105", "text": "def verify_vlan_change(self):\n _, host_port_maps, _ = self.topo.create_port_maps()\n yaml_config = yaml_load(self.CONFIG)\n intf_config = yaml_config[\"dps\"][self.topo.switches_by_id[1]][\"interfaces\"]\n\n for host_i in host_port_maps:\n # Find a host on the second switch\n if 1 in host_port_maps[host_i]:\n port = host_port_maps[host_i][1][0]\n if \"native_vlan\" in intf_config[port]:\n prev_name = intf_config[port][\"native_vlan\"]\n for v_i in range(self.NUM_VLANS):\n # Make sure that the new VLAN will be different\n new_name = self.topo.vlan_name(v_i)\n if new_name != prev_name:\n intf_config[port][\"native_vlan\"] = new_name\n break\n else:\n # Keep on searching for a host VLAN to change\n continue\n # Created a different VLAN so now stop searching\n break\n\n new_config = yaml_dump(yaml_config)\n self.update_and_revert_config(self.CONFIG, new_config, None)", "title": "" }, { "docid": "e9f9fa175d1883d41b467374502126cc", "score": "0.5332795", "text": "def delete_port_postcommit(self, context):\n port = context.current\n profile_id = port.get(n1kv_const.N1KV_PROFILE, None)\n # If profile UUID is not present in the port object, we need\n # not send the port delete notification to the VSM since the port\n # does not exist on the VSM due to failure in create_port_precommit.\n if not profile_id:\n return\n\n vmnetwork_name = \"%s%s_%s\" % (n1kv_const.VM_NETWORK_PREFIX,\n profile_id,\n port['network_id'])\n try:\n self.n1kvclient.delete_n1kv_port(vmnetwork_name, port['id'])\n except(n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):\n raise ml2_exc.MechanismDriverError()\n LOG.info(_LI(\"Delete port(postcommit) succeeded for port: \"\n \"%(id)s on network: %(network_id)s with policy \"\n \"profile ID: 
%(profile_id)s\"),\n {\"network_id\": port['network_id'],\n \"id\": port['id'],\n \"profile_id\": profile_id})", "title": "" }, { "docid": "b805e7edaafc8336b8ba1261c022d628", "score": "0.5310383", "text": "def configure_mgm_vlan(self):\n\n print('Configure management VLAN')\n if int(self.mgm_vlan) == int(1):\n self.make_instruction('config vlan default delete 1-4')\n self.make_instruction('config vlan default add tagged 5-6')\n else:\n self.make_instruction('create vlan mgm_' + self.mgm_vlan + ' tag ' + self.mgm_vlan)\n self.make_instruction('config vlan mgm_' + self.mgm_vlan + ' add tagged 5-6')", "title": "" }, { "docid": "0f466f40195c99c870bd0f8af0b8e249", "score": "0.53082585", "text": "def __vlan_intf_config(config, port_config_list, duthost, ixia_ports):\n mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']\n if 'minigraph_vlans' in mg_facts:\n vlan_facts = mg_facts['minigraph_vlans']\n else:\n return True\n\n if len(vlan_facts) == 0:\n return True\n\n vlan_member = {}\n for k, v in list(vlan_facts.items()):\n vlan_member[k] = v['members']\n\n vlan_intf_facts = mg_facts['minigraph_vlan_interfaces']\n vlan_intf = {}\n for v in vlan_intf_facts:\n if __valid_ipv4_addr(v['addr']):\n vlan_intf[v['attachto']] = v\n\n dut_mac = str(duthost.facts['router_mac'])\n\n \"\"\" For each Vlan \"\"\"\n for vlan in vlan_member:\n phy_intfs = vlan_member[vlan]\n gw_addr = str(vlan_intf[vlan]['addr'])\n prefix = str(vlan_intf[vlan]['prefixlen'])\n vlan_subnet = '{}/{}'.format(gw_addr, prefix)\n vlan_ip_addrs = get_addrs_in_subnet(vlan_subnet, len(phy_intfs))\n\n \"\"\" For each physical interface attached to this Vlan \"\"\"\n for i in range(len(phy_intfs)):\n phy_intf = phy_intfs[i]\n vlan_ip_addr = vlan_ip_addrs[i]\n\n port_ids = [id for id, ixia_pot in enumerate(ixia_ports)\n if ixia_pot['peer_port'] == phy_intf]\n if len(port_ids) != 1:\n return False\n\n port_id = port_ids[0]\n mac = __gen_mac(port_id)\n ethernet = Ethernet(name='Ethernet Port {}'.format(port_id),\n mac=Pattern(mac))\n\n ip_stack = Ipv4(name='Ipv4 Port {}'.format(port_id),\n address=Pattern(vlan_ip_addr),\n prefix=Pattern(prefix),\n gateway=Pattern(gw_addr),\n ethernet=ethernet)\n\n device = Device(name='Device Port {}'.format(port_id),\n device_count=1,\n container_name=config.ports[port_id].name,\n choice=ip_stack)\n\n config.devices.append(device)\n\n port_config = IxiaPortConfig(id=port_id,\n ip=vlan_ip_addr,\n mac=mac,\n gw=gw_addr,\n gw_mac=dut_mac,\n prefix_len=prefix,\n port_type=IxiaPortType.VlanMember,\n peer_port=phy_intf)\n\n port_config_list.append(port_config)\n\n return True", "title": "" }, { "docid": "7ff3ea792d4d0621eb21cb58f44ae553", "score": "0.53043056", "text": "def delete(self):\n self.__load()\n\n self.__port_forward.clear()\n\n for _, ovs in self.__openvswitch.items():\n ovs.del_vswitch()\n\n for _, ns in self.__namespace.items():\n ns.del_namespace()\n ns.del_interface_d()", "title": "" }, { "docid": "fb329e89b769bee30ed4d9659962ca56", "score": "0.5250586", "text": "def unplug(self, instance, vif):\n LOG.debug(_('Unplug'))\n iface_id = vif['id']\n dev = self.get_vif_devname(vif)\n br_name = self._get_br_name(dev)\n\n import socket\n from nova_contrail_vif.gen_py.instance_service import ttypes\n port = ttypes.Port(self._convert_to_bl(iface_id), \n self._convert_to_bl(instance['uuid']), \n dev, \n vif['network']['subnets'][0]['ips'][0]['address'],\n self._convert_to_bl(vif['network']['id']),\n vif['address'],\n instance['display_name'],\n instance['hostname'],\n instance['host'],\n 
self._convert_to_bl(instance['project_id']))\n\n self._agent_inform(port, iface_id, False)\n linux_net.LinuxBridgeInterfaceDriver.remove_bridge(br_name)\n linux_net.delete_net_dev(dev)", "title": "" }, { "docid": "1bd92285d6b0cd0f49a6e34caa9f5cf7", "score": "0.52376753", "text": "def delete_hp_switch_port(context, record_dict):\n session = context.session\n with session.begin(subtransactions=True):\n if(record_dict['id']):\n session.query(models.HPSwitchPort).filter_by(\n id=record_dict['id']).delete()", "title": "" }, { "docid": "200312f0f4d7b05d2917a86a535fad6f", "score": "0.52341753", "text": "def delete_network_interface(self, vserver_name, interface_name):\n self.disable_network_interface(vserver_name, interface_name)\n\n query = {\n 'svm.name': vserver_name,\n 'name': interface_name\n }\n self.send_request('/network/ip/interfaces', 'delete', query=query)", "title": "" }, { "docid": "d034dd56fccd94267037f6c604037bae", "score": "0.5230417", "text": "def _delete_nxos_db(self, unused, vlan_id, device_id, host_id, vni,\n is_provider_vlan):\n try:\n rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id)\n for row in rows:\n nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id,\n row.vni, row.switch_ip, row.instance_id,\n row.is_provider_vlan)\n except excep.NexusPortBindingNotFound:\n return", "title": "" }, { "docid": "5b2951239fd7d4406f04ee5387b178b5", "score": "0.52080804", "text": "def _vlan_all_ports(vlan, exclude_unicast):\n return list(vlan.flood_ports(vlan.get_ports(), exclude_unicast))", "title": "" }, { "docid": "9df868bda989b596c5eeadeb928f7eee", "score": "0.51932406", "text": "def AptUninstall(vm):\n del vm", "title": "" }, { "docid": "97de4a07d42489db6ac79ac91c16ab17", "score": "0.519324", "text": "def test_MirrorToVlanAddRemove(self, dvs, testlog):\n self.setup_db(dvs)\n\n session = \"TEST_SESSION\"\n\n marker = dvs.add_log_marker()\n # create mirror session\n self.create_mirror_session(session, \"5.5.5.5\", \"6.6.6.6\", \"0x6558\", \"8\", \"100\", \"0\")\n assert self.get_mirror_session_state(session)[\"status\"] == \"inactive\"\n self.check_syslog(dvs, marker, \"Attached next hop observer .* for destination IP 6.6.6.6\", 1)\n\n # create vlan; create vlan member\n self.create_vlan(dvs, \"6\")\n self.create_vlan_member(\"6\", \"Ethernet4\")\n\n # bring up vlan and member\n self.set_interface_status(dvs, \"Vlan6\", \"up\")\n self.set_interface_status(dvs, \"Ethernet4\", \"up\")\n\n # add ip address to vlan 6\n self.add_ip_address(\"Vlan6\", \"6.6.6.0/24\")\n assert self.get_mirror_session_state(session)[\"status\"] == \"inactive\"\n\n # create neighbor to vlan 6\n self.add_neighbor(\"Vlan6\", \"6.6.6.6\", \"66:66:66:66:66:66\")\n assert self.get_mirror_session_state(session)[\"status\"] == \"inactive\"\n\n # create fdb entry to ethernet4\n self.create_fdb(\"6\", \"66-66-66-66-66-66\", \"Ethernet4\")\n assert self.get_mirror_session_state(session)[\"status\"] == \"active\"\n\n # check asic database\n tbl = swsscommon.Table(self.adb, \"ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION\")\n mirror_entries = tbl.getKeys()\n assert len(mirror_entries) == 1\n\n (status, fvs) = tbl.get(mirror_entries[0])\n assert status == True\n assert len(fvs) == 16\n for fv in fvs:\n if fv[0] == \"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT\":\n assert dvs.asicdb.portoidmap[fv[1]] == \"Ethernet4\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_TYPE\":\n assert fv[1] == \"SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE\":\n assert fv[1] == 
\"SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION\":\n assert fv[1] == \"4\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_TOS\":\n assert fv[1] == \"32\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_TTL\":\n assert fv[1] == \"100\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS\":\n assert fv[1] == \"5.5.5.5\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS\":\n assert fv[1] == \"6.6.6.6\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS\":\n assert fv[1] == dvs.runcmd(\"bash -c \\\"ip link show eth0 | grep ether | awk '{print $2}'\\\"\")[1].strip().upper()\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS\":\n assert fv[1] == \"66:66:66:66:66:66\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE\":\n assert fv[1] == \"25944\" # 0x6558\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID\":\n assert fv[1] == \"true\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_VLAN_TPID\":\n assert fv[1] == \"33024\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_VLAN_ID\":\n assert fv[1] == \"6\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_VLAN_PRI\":\n assert fv[1] == \"0\"\n elif fv[0] == \"SAI_MIRROR_SESSION_ATTR_VLAN_CFI\":\n assert fv[1] == \"0\"\n else:\n assert False\n\n # remove fdb entry\n self.remove_fdb(\"6\", \"66-66-66-66-66-66\")\n assert self.get_mirror_session_state(session)[\"status\"] == \"inactive\"\n\n # remove neighbor\n self.remove_neighbor(\"Vlan6\", \"6.6.6.6\")\n assert self.get_mirror_session_state(session)[\"status\"] == \"inactive\"\n\n # remove ip address\n self.remove_ip_address(\"Vlan6\", \"6.6.6.0/24\")\n assert self.get_mirror_session_state(session)[\"status\"] == \"inactive\"\n\n # bring down vlan and member\n self.set_interface_status(dvs, \"Ethernet4\", \"down\")\n self.set_interface_status(dvs, \"Vlan6\", \"down\")\n\n # remove vlan member; remove vlan\n self.remove_vlan_member(\"6\", \"Ethernet4\")\n self.remove_vlan(\"6\")\n\n marker = dvs.add_log_marker()\n # remove mirror session\n self.remove_mirror_session(session)\n self.check_syslog(dvs, marker, \"Detached next hop observer for destination IP 6.6.6.6\", 1)", "title": "" }, { "docid": "66b31354ff4fd2c5087263e7ce186e8d", "score": "0.51905274", "text": "def test_macvlan(self):\n\n p = {\n \"macvlan\": {\n \"eth0\": {\"macvlan.mode\": \"bridge\", \"link\": \"eth0\", \"type\": \"macvlan\"}\n }\n }\n\n self.run_function(\"grains.setval\", [\"lxc.nic\", p])\n\n self.run_function(\n \"lxc.init\",\n [self.prefix],\n profile=\"sshd\",\n nic=\"macvlan\",\n seed=False,\n start=False,\n )\n\n f = \"/var/lib/lxc/{}/config\".format(self.prefix)\n conf = self.run_function(\"lxc.read_conf\", [f])\n\n # Due to a segfault in lxc-destroy caused by invalid configs,\n # truncate the config.\n self.run_function(\"cmd.run\", [\"truncate -s 0 {}\".format(f)])\n\n self.assertEqual(conf.get(\"lxc.network.type\"), \"macvlan\")", "title": "" }, { "docid": "3267b00be68041c036143e27379c6719", "score": "0.5152567", "text": "def destroy(self, configuration: str) -> None:", "title": "" }, { "docid": "3fe3b877b5a896fbd62410146c7aa8ab", "score": "0.5136925", "text": "def test_vhost(self):\n self.logger.info(\"Vhost User add interfaces\")\n\n # create interface 1 (VirtualEthernet0/0/0)\n vhost_if1 = VppVhostInterface(self, sock_filename=\"/tmp/sock1\")\n vhost_if1.add_vpp_config()\n vhost_if1.admin_up()\n\n # create interface 2 (VirtualEthernet0/0/1)\n vhost_if2 = VppVhostInterface(self, sock_filename=\"/tmp/sock2\")\n vhost_if2.add_vpp_config()\n 
vhost_if2.admin_up()\n\n # verify both interfaces in the show\n ifs = self.vapi.cli(\"show interface\")\n self.assertIn(\"VirtualEthernet0/0/0\", ifs)\n self.assertIn(\"VirtualEthernet0/0/1\", ifs)\n\n # verify they are in the dump also\n if_dump = self.vapi.sw_interface_vhost_user_dump()\n self.assertTrue(vhost_if1.is_interface_config_in_dump(if_dump))\n self.assertTrue(vhost_if2.is_interface_config_in_dump(if_dump))\n\n # delete VirtualEthernet0/0/1\n self.logger.info(\"Deleting VirtualEthernet0/0/1\")\n vhost_if2.remove_vpp_config()\n\n self.logger.info(\"Verifying VirtualEthernet0/0/1 is deleted\")\n\n ifs = self.vapi.cli(\"show interface\")\n # verify VirtualEthernet0/0/0 still in the show\n self.assertIn(\"VirtualEthernet0/0/0\", ifs)\n\n # verify VirtualEthernet0/0/1 not in the show\n self.assertNotIn(\"VirtualEthernet0/0/1\", ifs)\n\n # verify VirtualEthernet0/0/1 is not in the dump\n if_dump = self.vapi.sw_interface_vhost_user_dump()\n self.assertFalse(vhost_if2.is_interface_config_in_dump(if_dump))\n\n # verify VirtualEthernet0/0/0 is still in the dump\n self.assertTrue(vhost_if1.is_interface_config_in_dump(if_dump))\n\n # delete VirtualEthernet0/0/0\n self.logger.info(\"Deleting VirtualEthernet0/0/0\")\n vhost_if1.remove_vpp_config()\n\n self.logger.info(\"Verifying VirtualEthernet0/0/0 is deleted\")\n\n # verify VirtualEthernet0/0/0 not in the show\n ifs = self.vapi.cli(\"show interface\")\n self.assertNotIn(\"VirtualEthernet0/0/0\", ifs)\n\n # verify VirtualEthernet0/0/0 is not in the dump\n if_dump = self.vapi.sw_interface_vhost_user_dump()\n self.assertFalse(vhost_if1.is_interface_config_in_dump(if_dump))", "title": "" }, { "docid": "05aec9efe5e51c9466e162df8894c2b2", "score": "0.5132742", "text": "def sophos_firewall_ip_host_delete_command(client: Client, name: str) -> CommandResults:\n return generic_delete(client, name, IP_HOST['endpoint_tag']) # type: ignore", "title": "" }, { "docid": "9100199948c0845b8eb15a9f3e35b63c", "score": "0.5124472", "text": "def _remove_vlan_from_nb(self, req_nb, vlan_id):\n # Find the matching trunk adapter.\n matching_tas = None\n for trunk in req_nb.seas[0].addl_adpts:\n if vlan_id in trunk.tagged_vlans:\n matching_tas = self._trunk_list(req_nb, trunk)\n break\n\n # A load balanced SEA requires at least a primary adapter and at least\n # one additional adapter. 
We can't remove a trunk from the SEA if it\n # is load balanced, but only has a single additional\n can_remove_for_lb = (len(req_nb.seas[0].addl_adpts) > 1\n if req_nb.load_balance else True)\n\n for matching_ta in matching_tas:\n if len(matching_ta.tagged_vlans) == 1 and can_remove_for_lb:\n # Last VLAN, so it can be removed from the SEA.\n for sea in req_nb.seas:\n if matching_ta in sea.addl_adpts:\n sea.addl_adpts.remove(matching_ta)\n break\n else:\n # Otherwise, we just remove it from the list.\n matching_ta.tagged_vlans.remove(vlan_id)", "title": "" }, { "docid": "dee82c419a7897959d14dcbbf6417420", "score": "0.5122543", "text": "def deallocate_for_instance(self, context, instance, **kwargs):\n LOG.debug('deallocate_for_instance()', instance=instance)\n # This used to get a list of ports matching this device from Neutron and free them all.\n\t# We could instead list the port IDs of the VIFs and unbound the ones we know about.\n #search_opts = {'device_id': instance.uuid}\n client = self.client\n data = client.list_ports(owner=SERVICE_NAME, device=instance.uuid)\n\tports = data.keys()\n\n # Reset device_id and device_owner for ports\n self._unbind_ports(context, ports)\n\n # NOTE(arosen): This clears out the network_cache only if the instance\n # hasn't already been deleted. This is needed when an instance fails to\n # launch and is rescheduled onto another compute node. If the instance\n # has already been deleted this call does nothing.\n base_api.update_instance_cache_with_nw_info(self, context, instance,\n network_model.NetworkInfo([]))", "title": "" }, { "docid": "d180728e7ccdbbd4a3c5f8d06c4d1568", "score": "0.511118", "text": "def vm_delete(server_host, hostname):\n existed = True\n active = True\n state = server_host.sh_virsh_dominfo_state(hostname)\n if state is None:\n existed = False\n active = False\n elif state == \"shut off\":\n active = False\n\n if active:\n command = (\"virsh destroy %s\" % hostname)\n retval = server_host.sh_run(command)\n if retval.cr_exit_status:\n logging.error(\"failed to run command [%s] on host [%s], \"\n \"ret = [%d], stdout = [%s], stderr = [%s]\",\n command,\n server_host.sh_hostname,\n retval.cr_exit_status,\n retval.cr_stdout,\n retval.cr_stderr)\n return -1\n\n if existed:\n command = (\"virsh undefine %s\" % hostname)\n retval = server_host.sh_run(command)\n if retval.cr_exit_status:\n logging.error(\"failed to run command [%s] on host [%s], \"\n \"ret = [%d], stdout = [%s], stderr = [%s]\",\n command,\n server_host.sh_hostname,\n retval.cr_exit_status,\n retval.cr_stdout,\n retval.cr_stderr)\n return -1\n\n return 0", "title": "" }, { "docid": "6ad40117cdbde3ca12d2c3fd2159c6c0", "score": "0.51094055", "text": "def _delete_nve_member(self, vni, device_id, mcast_group, host_id):\n host_nve_connections = self._get_switch_nve_info(host_id)\n for switch_ip in host_nve_connections:\n if not nxos_db.get_nve_vni_switch_bindings(vni, switch_ip):\n self.driver.delete_nve_member(switch_ip,\n const.NVE_INT_NUM, vni)\n if (cfg.CONF.ml2_cisco.vxlan_global_config and\n not nxos_db.get_nve_switch_bindings(switch_ip)):\n self.driver.disable_vxlan_feature(switch_ip)", "title": "" }, { "docid": "43e704d78d28d98226fe4ef482078add", "score": "0.51067436", "text": "def _disconnect_container(self):\n br_idx = self.ip.link_lookup(ifname=self.bridge)\n for link in self.ip.get_links():\n if br_idx[0] == link.get_attr('IFLA_MASTER'):\n link_name = link.get_attr('IFLA_IFNAME')\n if 'veth' in link_name:\n self.log.debug('Bringing down veth pair {} on bridge: {}'\n 
.format(link_name, self.bridge))\n self.ip.link('set', index=link['index'], state='down')", "title": "" }, { "docid": "b421f2871c1d80a2fad6991589d8909a", "score": "0.50907063", "text": "def _unbind_ports(self, context, ports):\n\n for port_id in ports:\n try:\n\t\tself.client.unbind(port_id)\n except Exception:\n LOG.exception(_LE(\"Unable to clear device ID \"\n \"for port '%s'\"), port_id)", "title": "" }, { "docid": "b69493bf84093b03310ded6d8a2ac6f7", "score": "0.5089561", "text": "def destroy(logger, interface):\n con = getXenConnection(interface.getHost().getLocation().getID(), logger)\n if not con:\n return\n xen, session, _ = con\n\n reference = findInterface(interface, logger, xen, session)\n if reference:\n result = xen.VIF.destroy(session, reference)\n if result['Status'] == 'Failure':\n logger.error(\"Error deleting VIF #\" + str(interface.getID()) + \" of VM \" + interface.getHost().getID() + \" failed. Error: \" + str(result['ErrorDescription']))\n else:\n logger.error(\"No interface found to delete\")", "title": "" }, { "docid": "72429fdbe2f38b8482cc57fd01cd37d3", "score": "0.5081305", "text": "def del_dot1x_flow_pair(self, port_num, nfv_sw_port_num, mac):\n ofmsgs = [\n self.port_acl_table.flowdel(\n match=self.port_acl_table.match(\n in_port=nfv_sw_port_num,\n eth_type=valve_packet.ETH_EAPOL,\n eth_src=mac,\n ),\n priority=self.override_priority,\n ),\n self.port_acl_table.flowdel(\n match=self.port_acl_table.match(\n in_port=port_num, eth_type=valve_packet.ETH_EAPOL\n ),\n priority=self.override_priority,\n ),\n ]\n return ofmsgs", "title": "" }, { "docid": "bd325f191740e91fb912e48e650a2b58", "score": "0.50763696", "text": "def test_vip_delete_lbaasdriver(self):\n\n self._test_lbaasdriver('DELETE_VIP')", "title": "" }, { "docid": "96eea4941080949f03c8a30fe77af139", "score": "0.50739443", "text": "def delete(self, *args, **kwargs):\n\n try:\n\n if len(args) != 2:\n raise ValueError(\"Invalid url\")\n\n tenant_id = UUID(args[0])\n addr = EtherAddress(args[1])\n\n tenant = RUNTIME.tenants[tenant_id]\n tenant_pnfdevs = getattr(tenant, self.server.PNFDEV.ALIAS)\n pnfdev = tenant_pnfdevs[addr]\n\n tenant.remove_pnfdev(pnfdev)\n\n except ValueError as ex:\n self.send_error(400, message=ex)\n except KeyError as ex:\n self.send_error(404, message=ex)\n self.set_status(204, None)", "title": "" }, { "docid": "56731d138ff024261511f790835651c6", "score": "0.50672835", "text": "def _delete_nve_member(self, vni, device_id, mcast_group, host_id):\n host_nve_connections = self._get_switch_nve_info(host_id)\n for switch_ip in host_nve_connections:\n\n if not nxos_db.get_nve_vni_switch_bindings(vni, switch_ip):\n self.driver.delete_nve_member(switch_ip,\n const.NVE_INT_NUM, vni)\n if (cfg.CONF.ml2_cisco.vxlan_global_config and\n not nxos_db.get_nve_switch_bindings(switch_ip)):\n self.driver.disable_vxlan_feature(switch_ip)", "title": "" }, { "docid": "1deaa8be4df1b811ecacd950ef79efcc", "score": "0.5057544", "text": "def remover_container_por_porta(porta=\"\"):\n try:\n client = docker.from_env()\n lista_container = client.containers.list()\n #client.containers.list(filters={\"ancestor\": \"nginx\"})\n #conectando.attrs['HostConfig']\n for cada_container in lista_container:\n conectando = client.containers.get(cada_container.id)\n print(\"Removendo o container %s\" % str(conectando.short_id))\n #print(conectando.attrs['HostConfig']['PortBindings'])\n if (conectando.attrs['HostConfig']['PortBindings'] != \"\"):\n conectando.remove(force=True)\n except docker.errors.NotFound as e:\n 
logando('Erro!! Esse comando nao existe', e)\n except Exception as e:\n logando('Erro!!! Favor verificar o comando digitado', e)\n finally:\n print(\"Containers removidos com sucesso\")", "title": "" }, { "docid": "f17e5f2807a37f01208309433e2909e7", "score": "0.50543135", "text": "def _parseVlan(self, vlan):\n return {'iSCSIPortInfo': {'vlan': vlan}}", "title": "" }, { "docid": "8e9835ce6eb924299fcb79cb6287a43a", "score": "0.50541824", "text": "def remove_veth(endpoint):\n try:\n netns.remove_veth(endpoint.name)\n except CalledProcessError:\n app.logger.warn(\"Failed to delete veth %s\", endpoint.name)", "title": "" }, { "docid": "9f185b9544f3b8eff4313b8ad8f2d990", "score": "0.5053817", "text": "def configure_users_vlan(self):\n\n print('Configure users VLAN')\n self.make_instruction('create vlan vlan' + self.user_vlan + ' tag ' + self.user_vlan)\n self.make_instruction('config vlan vlan' + self.user_vlan + ' add tagged 5-6')\n self.make_instruction('config vlan vlan' + self.user_vlan + ' add untagged 1-4')", "title": "" }, { "docid": "c13ce3d0648735d65daa935aff8dfdd8", "score": "0.5053767", "text": "def test_disable_different_vn_on_same_vlan_across_vpgs_in_enterprise(self):\n proj_obj, fabric_obj, pr_obj = self._create_prerequisites(\n disable_vlan_vn_uniqueness_check=True)\n\n # Create Physical Interface for VPG-1\n esi_id = '00:11:22:33:44:55:66:77:88:99'\n pi_name = self.id() + '_physical_interface1'\n pi = PhysicalInterface(name=pi_name,\n parent_obj=pr_obj,\n ethernet_segment_identifier=esi_id)\n pi_uuid = self._vnc_lib.physical_interface_create(pi)\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuid)\n\n fabric_name = fabric_obj.get_fq_name()\n pi_fq_name_1 = pi_obj.get_fq_name()\n\n # Create Physical Interface for VPG-2\n esi_id = '00:11:22:33:44:55:66:77:88:99'\n pi_name = self.id() + '_physical_interface2'\n pi = PhysicalInterface(name=pi_name,\n parent_obj=pr_obj,\n ethernet_segment_identifier=esi_id)\n pi_uuid_2 = self._vnc_lib.physical_interface_create(pi)\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuid_2)\n\n fabric_name = fabric_obj.get_fq_name()\n pi_fq_name_2 = pi_obj.get_fq_name()\n\n # Create VPG-1\n vpg_name = \"vpg-1\"\n vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj)\n vpg_uuid = self.api.virtual_port_group_create(vpg)\n vpg_obj_1 = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)\n vpg_name_1 = vpg_obj_1.get_fq_name()\n\n # Create VPG-2\n vpg_name = \"vpg-2\"\n vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj)\n vpg_uuid = self.api.virtual_port_group_create(vpg)\n vpg_obj_2 = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)\n vpg_name_2 = vpg_obj_2.get_fq_name()\n\n # Create VN-1\n vn1 = VirtualNetwork('vn1-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn1)\n\n # Create VN-2\n vn2 = VirtualNetwork('vn2-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn2)\n\n # Create a VMI that's attached to vpg-1 and having reference\n # to vn1\n vmi_obj = VirtualMachineInterface(self.id() + \"1\",\n parent_obj=proj_obj)\n vmi_obj.set_virtual_network(vn1)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name_1,\n fabric_name,\n vpg_name_1)\n\n vmi_obj.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(sub_interface_vlan_tag=42))\n vmi_uuid_1 = self.api.virtual_machine_interface_create(vmi_obj)\n vpg_obj_1.add_virtual_machine_interface(vmi_obj)\n 
self.api.virtual_port_group_update(vpg_obj_1)\n\n # Create a VMI that's attached to vpg-2 and having reference\n # to vn2, but with same vlan_tag=42, this should fail\n vmi_obj_2 = VirtualMachineInterface(self.id() + \"2\",\n parent_obj=proj_obj)\n vmi_obj_2.set_virtual_network(vn2)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name_2,\n fabric_name,\n vpg_name_2)\n\n vmi_obj_2.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj_2.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(sub_interface_vlan_tag=42))\n\n # since disable_vlan_vn_uniqueness_check=True\n # validations at second VPG do not happen\n vmi_uuid_2 = self.api.virtual_machine_interface_create(vmi_obj_2)\n\n self.api.virtual_machine_interface_delete(id=vmi_uuid_2)\n self.api.virtual_machine_interface_delete(id=vmi_uuid_1)\n self.api.virtual_port_group_delete(id=vpg_obj_1.uuid)\n self.api.virtual_port_group_delete(id=vpg_obj_2.uuid)\n self.api.physical_interface_delete(id=pi_uuid)\n self.api.physical_interface_delete(id=pi_uuid_2)\n self.api.physical_router_delete(id=pr_obj.uuid)\n self.api.fabric_delete(id=fabric_obj.uuid)", "title": "" }, { "docid": "ad297599e8e788cda643818403cc0724", "score": "0.505302", "text": "def fix_vrouter_configs():\n execute(\"fix_vrouter_configs_node\", env.host_string)", "title": "" }, { "docid": "6fe3fe9b5084e6c6b34e41fccb2c7eb1", "score": "0.50497806", "text": "def RouteDelete(self, route_config):\n pass", "title": "" }, { "docid": "25302050c5b9a0a58c245a517d8cfec3", "score": "0.5040378", "text": "def delete_hp_switch_lag_port(context, record_dict):\n session = context.session\n with session.begin(subtransactions=True):\n if(record_dict['id']):\n session.query(models.HPSwitchLAGPort).filter_by(\n id=record_dict['id']).delete()", "title": "" }, { "docid": "13267e1202941be1a670942e2f1279e9", "score": "0.50309986", "text": "def delete_port(context, port_id):\n\n session = context.session\n with session.begin(subtransactions=True):\n port = get_port(context, port_id)\n if port:\n session.delete(port)\n return port", "title": "" }, { "docid": "fb685e40b835d74d202c51acec6bca2a", "score": "0.50248146", "text": "def delete_kvs(self):", "title": "" }, { "docid": "35135063333641c3329b6a228f61b46b", "score": "0.5017005", "text": "def delete(request):\n try:\n ids = request.DATA.get('ids')\n delete_pools = request.DATA.get('delete_pools', True)\n\n if delete_pools:\n vports_pool = VipPortToPool.objects.filter(\n requisicao_vip__id__in=ids\n )\n\n for vport in vports_pool:\n\n server_pool = vport.server_pool\n\n related = VipPortToPool.objects.filter(\n server_pool=server_pool\n ).exclude(\n requisicao_vip=vport.requisicao_vip\n )\n\n if related:\n raise pool_exceptions.PoolConstraintVipException()\n\n vport.delete(request.user)\n\n for member in server_pool.serverpoolmember_set.all():\n member.delete(request.user)\n\n server_pool.delete(request.user)\n\n vips_request = RequisicaoVips.objects.filter(id__in=ids)\n for vrequest in vips_request:\n \"\"\" if vip has DSRl3 \"\"\"\n # traffic=OptionVip.objects.filter(nome_opcao_txt='DSRL3')\n # traffic.id should be equal 48\n # if vrequest.trafficreturn.id == 48:\n # try:\n # dsrl3= DsrL3_to_Vip.get_by_vip_id(ids)\n # dsrl3.delete(request.user)\n # except RequisicaoVipsMissingDSRL3idError, e:\n # log.error(u'Requisao Vip nao possui id DSRL3 correspondente cadastrado no banco')\n # raise RequisicaoVipsMissingDSRL3idError(\n # e, 'Requisao Vip com id %s possui DSRl3 id não foi encontrado' %\n # 
ids)\n vrequest.remove(request.user, vrequest.id)\n\n syncs.delete_new(ids)\n\n except Exception, exception:\n log.error(exception)\n raise api_exceptions.NetworkAPIException()", "title": "" }, { "docid": "d45ca82bd654374d2d2873d043d3b14a", "score": "0.5013724", "text": "def configure_switchport_vlan_mapping(device, interface, vlan):\n cmd = [f\"interface {interface}\", f\"switchport vlan mapping {vlan}\"]\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n log.error(e)\n raise SubCommandFailure(\"Could not configure switchport vlan mapping\")", "title": "" }, { "docid": "4810b7ddf6661cfcdb61f703cdbcc89f", "score": "0.5013675", "text": "def _configure_port_binding(self, is_provider_vlan, duplicate_type,\n switch_ip, vlan_id,\n intf_type, nexus_port, vni):\n\n # This implies VLAN, VNI, and Port are all duplicate.\n # Then there is nothing to configure in Nexus.\n if duplicate_type == const.DUPLICATE_PORT:\n return\n\n if is_provider_vlan:\n vlan_name = cfg.CONF.ml2_cisco.provider_vlan_name_prefix\n auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create\n auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk\n else:\n vlan_name = cfg.CONF.ml2_cisco.vlan_name_prefix\n auto_create = True\n auto_trunk = True\n vlan_name_max_len = const.NEXUS_MAX_VLAN_NAME_LEN - len(str(vlan_id))\n if len(vlan_name) > vlan_name_max_len:\n vlan_name = vlan_name[:vlan_name_max_len]\n LOG.warn(_LW(\"Nexus: truncating vlan name to %s\"), vlan_name)\n vlan_name = vlan_name + str(vlan_id)\n\n # if type DUPLICATE_VLAN, don't create vlan\n if duplicate_type == const.DUPLICATE_VLAN:\n auto_create = False\n\n if auto_create and auto_trunk:\n LOG.debug(\"Nexus: create & trunk vlan %s\", vlan_name)\n self.driver.create_and_trunk_vlan(\n switch_ip, vlan_id, vlan_name, intf_type, nexus_port,\n vni)\n elif auto_create:\n LOG.debug(\"Nexus: create vlan %s\", vlan_name)\n self.driver.create_vlan(switch_ip, vlan_id, vlan_name, vni)\n elif auto_trunk:\n LOG.debug(\"Nexus: trunk vlan %s\", vlan_name)\n self.driver.enable_vlan_on_trunk_int(switch_ip, vlan_id,\n intf_type, nexus_port)", "title": "" }, { "docid": "c1ea690e7c2fbe57c343f3227de971ce", "score": "0.5012373", "text": "def process_tv_config_delete_request(request_context,\n client_single_request,\n single_server_response,\n async_client_factory):\n async_kvstore_client = async_client_factory.kvstore_client()\n\n tv_config_delete_request = client_single_request.tvConfigDeleteRequest\n\n device_ids = tv_config_delete_request.deviceId\n\n LOGGER.debug('device_ids to delete=%s', device_ids)\n yield validate_devices(set(device_ids), request_context, async_kvstore_client)\n # need to fetch configs before deleting to make sure we're not deleting the captain\n # and orphaning the workers, if so, for each one that is a captain, \n # we need to fetch the other configs in the grid\n # delete the to be deleted captain from the device_id list, and update\n # the existing members of the grid\n\n tvs = yield get_drone_mode_tvs(request_context, async_kvstore_client, device_ids=device_ids)\n # fetch configs from kvstore\n # check to see if they are currently captain\n for tv in tvs:\n raw_id = base64.urlsafe_b64decode(str(tv[constants.KEY]))\n encoded_id = b64encode_to_str(raw_id)\n tv[constants.DEVICE_ID] = encoded_id\n\n if (has_grid(tv, is_json=True)):\n tv_config = TVConfig(**tv)\n grid_ids = tv.get(constants.TV_GRID, {}).get(constants.DEVICE_IDS, [])\n yield remove_from_grid(tv_config=tv_config,\n device_ids=grid_ids,\n request_context=request_context,\n 
async_kvstore_client=async_kvstore_client)\n\n entries_to_delete = []\n\n for device_id in device_ids:\n kvstore_key = b64_to_urlsafe_b64(device_id)\n post_data = {constants.KEY: kvstore_key}\n entries_to_delete.append(post_data)\n\n deleted_ids = yield async_kvstore_client.async_batch_save_request(\n request_context.system_auth_header,\n constants.DRONE_MODE_TVS_COLLECTION_NAME,\n entries_to_delete,\n owner=request_context.current_user)\n deleted_device_ids = [urlsafe_b64_to_b64(deleted_id) for deleted_id in deleted_ids]\n if deleted_device_ids:\n single_server_response.tvConfigDeleteResponse.deletedIds.extend(deleted_device_ids)\n\n if not deleted_ids:\n raise SpacebridgeApiRequestError('None of the device_ids={} were deleted'.format(device_ids), status_code=http.INTERNAL_SERVER_ERROR)\n\n elif len(deleted_ids) != len(device_ids):\n kvstore_keys = {b64_to_urlsafe_b64(device_id) for device_id in device_ids}\n LOGGER.error('TV configs with these ids: %s were deleted, '\n 'while TV configs with these ids: %s were not.',\n deleted_ids, list(kvstore_keys-set(deleted_ids)))\n # update current deleted tv subscriptions and\n # all ipads registered to current user\n\n yield process_subscriptions(request_context,\n async_client_factory,\n tv_device_ids=device_ids)\n\n\n LOGGER.info('Successful TV Config Delete Request for device_ids=%s', deleted_device_ids)", "title": "" }, { "docid": "f0d5789af7408d323b2248462c19ba62", "score": "0.5011434", "text": "def remove_config(self):\n shutil.rmtree(self.config_dir, ignore_errors=True)\n if self.vpnservice and self.vpnservice.get('pptpconnections'):\n creds = self.vpnservice['pptpconnections'][0]['credentials']\n self.delete_user(self.vpnservice['id'], creds)", "title": "" }, { "docid": "a89387140f8a47f6cdd4496209b7770f", "score": "0.49973664", "text": "def delete_ports(self, ports, check=True):\n for port in ports:\n self._client.port.delete(port.uuid)\n\n if check:\n self.check_ports_presence(ports, must_present=False)", "title": "" }, { "docid": "3c083345b142aa4072e57105a64e5650", "score": "0.49961054", "text": "def test_PortMirrorDestMoveVlan(self, dvs, testlog):\n dvs.setup_db()\n pmap = dvs.counters_db.get_entry(\"COUNTERS_PORT_NAME_MAP\", \"\")\n pmap = dict(pmap)\n\n session = \"TEST_SESSION\"\n src_ports = \"Ethernet0\"\n src_asic_ports = [\"Ethernet0\"]\n\n # create mirror session\n self.dvs_mirror.create_erspan_session(session, \"7.7.7.7\", \"8.8.8.8\", \"0x6558\", \"8\", \"100\", \"0\", None, src_ports)\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # bring up port; add ip; add neighbor; add route\n dvs.set_interface_status(\"Ethernet32\", \"up\")\n dvs.add_ip_address(\"Ethernet32\", \"80.0.0.0/31\")\n dvs.add_neighbor(\"Ethernet32\", \"80.0.0.1\", \"02:04:06:08:10:12\")\n dvs.add_route(\"8.8.0.0/16\", \"80.0.0.1\")\n\n self.dvs_mirror.verify_session_status(session)\n\n # check monitor port\n expected_asic_db = {\"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT\": pmap.get(\"Ethernet32\")}\n self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db)\n\n # mirror session move round 1\n # create vlan; create vlan member; bring up vlan and member\n self.dvs_vlan.create_vlan(\"9\")\n self.dvs_vlan.create_vlan_member(\"9\", \"Ethernet48\")\n dvs.set_interface_status(\"Vlan9\", \"up\")\n dvs.set_interface_status(\"Ethernet48\", \"up\")\n\n self.dvs_mirror.verify_session_status(session)\n\n # add ip address to vlan 9\n dvs.add_ip_address(\"Vlan9\", \"8.8.8.0/24\")\n self.dvs_mirror.verify_session_status(session, 
status=\"inactive\")\n\n # create neighbor to vlan 9\n dvs.add_neighbor(\"Vlan9\", \"8.8.8.8\", \"88:88:88:88:88:88\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # create fdb entry to ethernet48\n dvs.create_fdb(\"9\", \"88-88-88-88-88-88\", \"Ethernet48\")\n self.dvs_mirror.verify_session_status(session)\n\n # check monitor port\n expected_asic_db = {\"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT\": pmap.get(\"Ethernet48\"),\n \"SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID\": \"true\",\n \"SAI_MIRROR_SESSION_ATTR_VLAN_TPID\": \"33024\",\n \"SAI_MIRROR_SESSION_ATTR_VLAN_ID\": \"9\",\n \"SAI_MIRROR_SESSION_ATTR_VLAN_PRI\": \"0\",\n \"SAI_MIRROR_SESSION_ATTR_VLAN_CFI\": \"0\"}\n\n self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db, src_ports=src_asic_ports)\n\n # mirror session move round 2\n # remove fdb entry\n dvs.remove_fdb(\"9\", \"88-88-88-88-88-88\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # remove neighbor\n dvs.remove_neighbor(\"Vlan9\", \"8.8.8.8\")\n self.dvs_mirror.verify_session_status(session, status=\"inactive\")\n\n # remove ip address\n dvs.remove_ip_address(\"Vlan9\", \"8.8.8.0/24\")\n self.dvs_mirror.verify_session_status(session)\n\n expected_asic_db = {\"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT\": pmap.get(\"Ethernet32\")}\n self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db)\n\n # bring down vlan and member; remove vlan member; remove vlan\n dvs.set_interface_status(\"Ethernet48\", \"down\")\n dvs.set_interface_status(\"Vlan9\", \"down\")\n self.dvs_vlan.remove_vlan_member(\"9\", \"Ethernet48\")\n self.dvs_vlan.get_and_verify_vlan_member_ids(0)\n self.dvs_vlan.remove_vlan(\"9\")\n\n # remove route; remove neighbor; remove ip; bring down port\n dvs.remove_route(\"8.8.8.0/24\")\n dvs.remove_neighbor(\"Ethernet32\", \"80.0.0.1\")\n dvs.remove_ip_address(\"Ethernet32\", \"80.0.0.0/31\")\n dvs.set_interface_status(\"Ethernet32\", \"down\")\n\n # remove mirror session\n self.dvs_mirror.remove_mirror_session(session)\n self.dvs_mirror.verify_no_mirror()", "title": "" }, { "docid": "32253efe68131ba0f59db9f2a86a6d88", "score": "0.49798825", "text": "def remove_tacacs_server(device, remove_config=None, keyword='tacacs'):\r\n config_list = []\r\n if not remove_config:\r\n remove_config = get_running_config_section(device=device, keyword=keyword)\r\n\r\n for line in remove_config:\r\n line = line.strip()\r\n if line.startswith(keyword):\r\n config_list.append(line)\r\n\r\n unconfig_list = list(map(lambda conf: \"no \" + conf, config_list))\r\n\r\n try:\r\n device.configure(unconfig_list)\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Failed to remove {config} configuration on device \"\r\n \"{device}\".format(device=device.name, config=config_list)\r\n ) from e", "title": "" }, { "docid": "12b7e80f94dbd04f7f35d68445987e4d", "score": "0.49782634", "text": "def destroy_server(self, serverid: int) -> None:\n sql = \"DELETE FROM server WHERE id = :id LIMIT 1\"\n self.execute(sql, {'id': serverid})", "title": "" }, { "docid": "58e310b9d63c3d33dc3f3174c63ee3cd", "score": "0.49580336", "text": "def delete_config(self, key):\n self.delete((\"config\", key))", "title": "" }, { "docid": "6c7176c6feaba5ea720a851af2c8ee26", "score": "0.4955531", "text": "def config_set_destroy(context, config_set_id):\n session = get_session()\n with session.begin():\n config_set_ref = _config_set_get(context,\n config_set_id,\n session=session)\n query_role = session.query(models.Host).filter_by(\\\n 
config_set_id=config_set_id).filter_by(deleted=False)\n if query_role.all():\n msg = \"config_set %s is being used by other host\"\\\n % config_set_id\n raise exception.Forbidden(msg)\n query_role = session.query(models.Role).filter_by(\\\n config_set_id=config_set_id).filter_by(deleted=False)\n if query_role.all():\n msg = \"config_set %s is being used by other role\"\\\n % config_set_id\n raise exception.Forbidden(msg)\n\n config_set_ref.delete(session=session)\n\n config_item_refs =\\\n _config_item_get_by_config_set_id(context,\n config_set_id,\n session=session)\n for config_item_ref in config_item_refs:\n config_id = config_item_ref.config_id\n config_item_ref.delete(session=session)\n if not _config_item_set_get_by_config_id(context,\n config_id,\n session=session):\n config_ref = _config_get(context,\n config_id,\n session=session)\n config_ref.delete(session=session)\n\n return config_set_ref", "title": "" }, { "docid": "13b1dac69c2d13b11a937f234099d7c4", "score": "0.49542674", "text": "def delete(self, port_id):\n LOG.debug(\"API CALL: %s DELETE\" % str(self.__class__.__name__))\n try:\n port = self.api.compute.find_port_by_name_or_id(port_id)\n if port is None:\n return Response(\"Port with id %s does not exists.\\n\" %\n port_id, status=404)\n stack = None\n for s in self.api.compute.stacks.values():\n for p in s.ports.values():\n if p.id == port_id:\n stack = s\n if stack is not None:\n if port.net_name in stack.nets:\n stack.nets[port.net_name].withdraw_ip_address(\n port.ip_address)\n for server in stack.servers.values():\n try:\n server.port_names.remove(port.name)\n except ValueError:\n pass\n\n # delete the port\n self.api.compute.delete_port(port.id)\n\n return Response('', status=204, mimetype='application/json')\n\n except Exception as ex:\n LOG.exception(\"Neutron: Delete port exception.\")\n return Response(str(ex), status=500,\n mimetype='application/json')", "title": "" }, { "docid": "4241646a2c52566a800d9c3a7dcfd482", "score": "0.4947514", "text": "def cleanup(self):\n byteblower_instance = api.ByteBlower.InstanceGet()\n if self.port_1:\n self.server.PortDestroy(self.port_1)\n self.port_1 = None\n\n if self.port_2:\n self.server.PortDestroy(self.port_2)\n self.port_2 = None\n\n if self.server is not None:\n byteblower_instance.ServerRemove(self.server)\n self.server = None", "title": "" }, { "docid": "7ab7de2136b1c0af26db4ce4e3895e7e", "score": "0.49399748", "text": "def editconfig_delete_operation(self, **kwargs):\n global xmlstr\n global req_ingress_ports\n global req_egress_ports\n\n self.create_box('test_editconfig_delete_operation')\n self.split_port_list(kwargs['ingress_ports'], kwargs['egress_ports'])\n s = time.time()\n LOG.info('-----[ create xml for edit-config DELETE operation ]-----\\n')\n xmlstr = self.create_xml('delete')\n result = self.edit_config_opr()\n\n if result == 'PASS':\n LOG.info('-----[ create xml for get-config operation ]-----\\n')\n crossconnects = Element(\n 'opsw:cross-connects', {'xmlns:plts': \"http://www.polatis.com/yang/polatis-switch\",\n 'xmlns:opsw': \"http://www.polatis.com/yang/optical-switch\"})\n\n xmlstr = tostring(crossconnects)\n LOG.info('xmlstr : \\n%s' % xmlstr)\n xmlout = self.getconfig_rpc_request(xmlstr, kwargs['file_name'])\n req_ingress_ports = []\n req_egress_ports = []\n result = self.compare_list(xmlout)\n LOG.info('req_ingress_ports : %s\\n' % req_ingress_ports)\n LOG.info('req_egress_ports : %s\\n' % req_egress_ports)\n e = time.time()\n d = int(round((e - s) * 1000))\n csvOutput(\n 'cross-connects',\n 
'editconfig_delete_operation',\n d,\n result)\n else:\n LOG.info('getting error from switch : FAIL')\n nose.tools.assert_equals('PASS', result)", "title": "" }, { "docid": "d244ff35870787676d1fc1aa2f81bfed", "score": "0.4937491", "text": "def handle_link_down(self, port):\n self.ports.remove_port(port)\n\n # TODO: fill this in!", "title": "" } ]
27d6330cd2ac3e15327c1b9ecb2d6a37
return query for insert/update/delete our attributes; preferably this method should return an IBSQuery instance, which is important when the query can be large; this method may be overridden to customize the behaviour
[ { "docid": "e79cc87d22407219176bb96d2e160572", "score": "0.0", "text": "def getQuery(self,ibs_query,src,action,**args):\n\tself.checkInput(src,action,args)\n\tif self.query_funcs.has_key(src+\"_\"+action):\n\t return self.__callQueryFunc(ibs_query,src,action,args)\n\telse:\n\t return \"\"", "title": "" } ]
[ { "docid": "fdbd1a3ebbdc263a93c58c51a21926da", "score": "0.6932648", "text": "def create_query(self):\n return db.models.sql.Query(self.model, connection)", "title": "" }, { "docid": "08defddebdbbeebcc24c766343365faf", "score": "0.675028", "text": "def query(self):\n query = Query(self.data_connector, self.model)\n return query", "title": "" }, { "docid": "c96e559b67e523adb8113a2d358c82f6", "score": "0.6665941", "text": "def create_query(self):\n return Query()", "title": "" }, { "docid": "cacb03b4338f2675f7d6f6b500139502", "score": "0.6468284", "text": "def get_query_object(self):\n return self.session_helper.Session.query(self.get_table())", "title": "" }, { "docid": "43e4106c5bca60d1812c4f51ab52db16", "score": "0.6376609", "text": "def query(self):\n if self._query:\n return self._query\n\n if not self.load:\n operations = LoadOp('')\n else:\n operations = self.load\n\n if self.qualifiers:\n qualifiers = iter(self.qualifiers)\n bool_op = query_parser.parse(next(qualifiers))\n for qualifier in qualifiers:\n bool_op = And(bool_op, query_parser.parse(qualifier))\n\n operations = SelectionOp(operations, bool_op)\n #operations.append(SelectionOp(bool_op))\n\n\n operations = query_parser.parse_select(operations, self.column_exps)\n\n\n if self.grouping or self.has_aggregates(operations):\n operations = GroupByOp(operations, *self.grouping)\n \n if self.ordering:\n # todo: eleminate ordering if it matches\n # the grouping since we already sort\n #operations.append(self.ordering)\n operations = OrderByOp(operations, *self.ordering)\n\n if self.stop is not None or self.start is not None:\n if self.start and self.stop:\n stop = self.start + self.stop\n else:\n stop = self.stop \n operations = SliceOp(operations, self.start, stop)\n\n self._query = Query(self.dataset, operations)\n return self._query", "title": "" }, { "docid": "42203bdfa9558b3afce00a393e65b64f", "score": "0.6357215", "text": "def db_query():\r\n return Post.query", "title": "" }, { "docid": "278ec3aa046e257429e5901f73f801d1", "score": "0.6339401", "text": "def GetQuery(self):\n return self.dbHandlerModule.Query(self.dbHandler)", "title": "" }, { "docid": "333de52a471ba9d4a67d711ab0f86e88", "score": "0.63304245", "text": "def statement(self) -> 'dsl.Statement':\n return self.query", "title": "" }, { "docid": "58814c1f0e6537e20b5e44617f4f85fa", "score": "0.6285863", "text": "def q(self):\n return Q(db=self)", "title": "" }, { "docid": "9205783d6234221640758ce3a5a16e32", "score": "0.6245928", "text": "def get_query(self):\n return self.model_class.objects.all()", "title": "" }, { "docid": "4ce527a47020b0ab10f9fc96c5fb3ee3", "score": "0.62158376", "text": "def generate_query(self):\n self.query = (self._add_select_statement()\n + self._add_case_statement()\n + self._add_from_statement()\n + self._add_group_by_statement())\n return self.query", "title": "" }, { "docid": "a2c70b6faf4c45d24435240deeca3867", "score": "0.62008256", "text": "def query(self):\n return self.__structure.query", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.615041", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "25f23b5256305fc7b8d85c3fa43ee53e", "score": "0.61282086", "text": "def sql(self):\n return self.get_sql()", "title": "" }, { "docid": "d496fd3d1aed43c353ba5b0ce16a39ed", "score": "0.606959", "text": "def _query(self, *criterion):\n query = select(self.model_class)\n\n if criterion:\n query = query.where(*criterion)\n\n return query", "title": "" }, { "docid": "285223688f08c60eef80defa013d16c7", 
"score": "0.6051768", "text": "def query(self,*args, **kwargs):\n\n return self.database.query(*args, **kwargs)", "title": "" }, { "docid": "2c1ca06021ca8d303157265c776be5e2", "score": "0.6025383", "text": "def query(self, query):\n pass", "title": "" }, { "docid": "531f41981d69b50593906aa440b42854", "score": "0.60249907", "text": "def _query(self):\n return self.sa_wrapper.session.query(self.mapper_class)", "title": "" }, { "docid": "68073b38e65b556a77f7eada990359e4", "score": "0.6010646", "text": "def query(self):\r\n raise RuntimeError(\"Must implement query!\")", "title": "" }, { "docid": "1faa4bd0038a9ad1d6882b2c4ed25ed9", "score": "0.59930897", "text": "def copy(self):\n q = Query(self.client, self.entity)\n q.attribute = self.attribute\n q.aggregate = self.aggregate\n q.order = list(self.order)\n q.conditions = self.conditions.copy()\n q.includes = self.includes.copy()\n q.limit = self.limit\n return q", "title": "" }, { "docid": "32b941b2e75aaf138f44e8a890e365cc", "score": "0.5981838", "text": "def sql(self):\n return self._sql", "title": "" }, { "docid": "09e24660cdb376bd8f3d80721215e2c3", "score": "0.5968768", "text": "def _sql(self):\n if self.definition:\n return self.definition, []\n if self.manager_name and self.manager_method:\n manager = getattr(get_user_model(), self.manager_name)\n fn = getattr(manager, self.manager_method)\n return fn().query.sql_with_params()", "title": "" }, { "docid": "b950d931a74e6088396fb71a47139f51", "score": "0.5965743", "text": "def dbGenerateSaveQuery(self, env):\n\t\tmode = 'insert'\n\n\t\targs = []\n\t\tfor x in self.fields:\n\t\t\tif (not self.values.has_key(x)) or \\\n\t\t\t self.allFields[x].dbIdentity:\n\t\t\t\tcontinue\n\t\t\tif self.allFields[x].maxlength and \\\n\t\t\t type(self.values[x]) == type(''):\n\t\t\t\targs.append((self.values[x])[ \\\n\t\t\t\t :self.allFields[x].maxlength])\n\t\t\telse:\n\t\t\t\targs.append(self.values[x])\n\n\t\twhere = []\n\n\t\tPK = self.findPK()\n\n\t\t# We use list(PK) here because deleting keys from a list while\n\t\t# looping over it causes incorrect results.\n\t\tfor key in list(PK):\n\t\t\tif (not self.values.has_key(key)) or \\\n\t\t (self.allFields[key].dbIdentity and \\\n\t\t self.values[key] == None \\\n\t\t ) or \\\n\t\t (self.values[key] != None and \\\n\t\t\t (self.allFields[key].dbType == 'datetime' or \\\n\t\t self.allFields[key].dbType == 'image' or \\\n\t\t self.allFields[key].dbType == 'ntext' or \\\n\t\t self.allFields[key].dbType == 'text' \\\n\t\t ) \\\n\t\t ):\n\t\t\t\tPK.remove(key)\n\t\t\t\n\n\t\tif len(PK) > 0:\n\t\t\tmode = 'update'\n\n\t\t\tfor key in PK:\n\t\t\t\tif where != []:\n\t\t\t\t\twhere.append(' AND ')\n\t\t\t\twhere.append('\"')\n\t\t\t\twhere.append(self.allFields[key].dbName)\n\t\t\t\twhere.append('\"')\n\n\t\t\t\tif self.values[key] == None:\n\t\t\t\t\twhere.append(' IS NULL')\n\t\t\t\telse:\n\t\t\t\t\twhere.append('=?')\n\t\t\t\t\targs.append(self.values[key])\n\n\t\tsql = []\n\t\tif mode == 'update':\n\t\t\tsql.append('UPDATE \"')\n\t\t\tsql.append(self.dbTable)\n\t\t\tsql.append('\" SET \"')\n\t\t\tsql.append('\"=?,\"'.join([self.allFields[x].dbName \\\n\t\t\t for x in self.fields \\\n\t\t\t if (not self.allFields[x].dbIdentity) \\\n\t\t\t and self.values.has_key(x)]))\n\t\t\tsql.append('\"=? 
WHERE ')\n\t\t\tsql.extend(where)\n\t\telif mode == 'insert':\n\t\t\tsql.append('INSERT INTO \"')\n\t\t\tsql.append(self.dbTable)\n\t\t\tsql.append('\" (\"')\n\t\t\tsql.append('\",\"'.join([self.allFields[x].dbName \\\n\t\t\t for x in self.fields \\\n\t\t\t if (not self.allFields[x].dbIdentity) \\\n\t\t\t and self.values.has_key(x)]))\n\t\t\tsql.append('\") VALUES (')\n\t\t\tsql.append(','.join(['?' \\\n\t\t\t for x in self.fields \\\n\t\t\t if (not self.allFields[x].dbIdentity) \\\n\t\t\t and self.values.has_key(x)]))\n\t\t\tsql.append(')')\n\n\t\treturn (''.join(sql), args)", "title": "" }, { "docid": "07c1e0a778a2a3b40d22b194b082d715", "score": "0.5961476", "text": "def create_query_set(self):\n return self.model_class.objects.create(**self.input_req_data)", "title": "" }, { "docid": "5e50a0933961bd878d370cd123419605", "score": "0.59613794", "text": "def augment_query(self):\n pass", "title": "" }, { "docid": "e0a9e47148514d6a74998baf9e1e6c95", "score": "0.595517", "text": "def get_query(self):\n return self.model.objects", "title": "" }, { "docid": "05a3dc3ef025a66b7ab9851fbd557a4c", "score": "0.5939954", "text": "def generateQuery(self,ibs_query,src,action,**args):\n\tif action==\"delete\":\n\t if src==\"user\":\n\t\treturn self.__deleteUserAttr(ibs_query,args[\"attr_updater_attrs\"],args[\"users\"])\n\t elif src==\"group\":\n\t\treturn self.__deleteGroupAttr(ibs_query,args[\"attr_updater_attrs\"],args[\"group_obj\"])\n\telif action==\"change\":\n\t if src==\"user\":\n\t\treturn self.__changeUserAttr(ibs_query,args[\"attr_updater_attrs\"],args[\"users\"])\n\t elif src==\"group\":\n\t\treturn self.__changeGroupAttr(ibs_query,args[\"attr_updater_attrs\"],args[\"group_obj\"])", "title": "" }, { "docid": "dec9e986ab9a389da997ed46d5aa6424", "score": "0.5926493", "text": "def query(self):\n pass", "title": "" }, { "docid": "18e8538bd799afc57689b62d66446798", "score": "0.59074384", "text": "def get_query_set(self):\r\n return QuerySet(self.model, using=self._db)", "title": "" }, { "docid": "36ad7368c356595dea2685bb032c27e1", "score": "0.5837833", "text": "def getQuery(self):\n return self.query", "title": "" }, { "docid": "05f043ef7ddb5fd826f7b65ec6c90a61", "score": "0.5806789", "text": "def get_sql(self):\n return self.__sql", "title": "" }, { "docid": "4f5d9ad449ab53b88ec4adeefe444b20", "score": "0.580268", "text": "def get_query_set(self):\n try:\n return self.model_class.objects.filter(**self.filter_req_data)\n except Exception as e:\n print('G45')\n return None", "title": "" }, { "docid": "ffd3374cf0e077d0a7abc12aa8930a83", "score": "0.5791093", "text": "def query(self):\n return self.__p['query']", "title": "" }, { "docid": "4cbf9a40f1842a78b33ebe42ee7f74f4", "score": "0.5784289", "text": "def get_query(self):\n self._assert_active()\n query_entities = []\n join_entities = []\n join_conditions = []\n query_options = []\n for col, callback in zip(self._columns, self._columns_query_callback):\n results = callback(col)\n if len(results)<3 or len(results)>4:\n raise ValueError(\"Internal error: incorrect number of results returned from a query callback\")\n query_entity, join_entity, join_condition = results[:3]\n if len(results)==4:\n query_options.append(results[3])\n\n if query_entity is not None:\n query_entities.append(query_entity)\n if join_entity is not None:\n assert join_condition is not None\n join_entities.append(join_entity)\n join_conditions.append(join_condition)\n else:\n assert join_entity is None\n assert join_condition is None\n\n q = 
self._session.query(*query_entities).select_from(self.get_table())\n\n for table, condition in zip(join_entities, join_conditions):\n q = q.outerjoin(table, condition)\n # use outer join so that null IDs translate to null in output, rather than disappearing\n\n q = q.options(*query_options)\n\n return q", "title": "" }, { "docid": "014d6c82783d302a51c49767fc1fe375", "score": "0.5776344", "text": "def aut_query(self) -> AutSingleSEQuery:\n aut_const = self._get_automata_constraints()\n return AutSingleSEQuery(self.eq, aut_const)", "title": "" }, { "docid": "cc43a19dcf1572059d1d6dd21acef2a6", "score": "0.57734346", "text": "def query(cls):\n return session.query(cls).filter()", "title": "" }, { "docid": "116cd02460d32398e210abb3d6ff2089", "score": "0.57177114", "text": "def query(self):\n hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n conn = hook.get_conn()\n cursor = conn.cursor()\n cursor.execute(self.sql, self.parameters)\n return cursor", "title": "" }, { "docid": "92f2a3743f235868f8c5263a5f6aba91", "score": "0.5713828", "text": "def get_query(self):\n return self.session.query(self.model) if is_super_admin() else super(UserAdmin, self).get_query()", "title": "" }, { "docid": "76d4903b11eef35263e9042e6261ccbf", "score": "0.57129556", "text": "def query(self) -> 'dsl.Query':\n return Query(self)", "title": "" }, { "docid": "2fe887f61c7d6cb9fe83bedfabc33189", "score": "0.57127523", "text": "def attr_query(self, *args, **kwargs):\r\n\r\n kwargs['entity'] = self.entity\r\n\r\n return self.do_attr_query(*args, **kwargs)", "title": "" }, { "docid": "aa9e57773984b4dbc4b94ad0950a6acd", "score": "0.5691214", "text": "def sql(self):\n return self._sql_", "title": "" }, { "docid": "48ec566283b1999a588864afd917d090", "score": "0.56751853", "text": "def build_query(self) -> str:\n pass", "title": "" }, { "docid": "c48cb6204131db00fac128603bfc73ff", "score": "0.56740475", "text": "def get_bank_query(self):\n raise errors.Unimplemented()", "title": "" }, { "docid": "c48cb6204131db00fac128603bfc73ff", "score": "0.56740475", "text": "def get_bank_query(self):\n raise errors.Unimplemented()", "title": "" }, { "docid": "fc7d04302bb10080b82f0ed08ac62851", "score": "0.5667871", "text": "def params(self):\n return self.query", "title": "" }, { "docid": "f06c2759082206c6c9258538714ce5fb", "score": "0.56661457", "text": "def create_query(self):\n # this must be overridden to return a Query subclass appropriate for \n # the service type\n return DalQuery(self.baseurl, self.protocol, self.version)", "title": "" }, { "docid": "0ff92dde857cfb997f8b883c03dcbf23", "score": "0.5665864", "text": "def query_transaction(self):\n pass", "title": "" }, { "docid": "29dbf4249bf8145b404e29c1c7010c82", "score": "0.56547195", "text": "def get_query_instance(self):\n try:\n return self.model_class.objects.get(**self.filter_req_data)\n except Exception as e:\n print(e)\n print('G29')\n return None", "title": "" }, { "docid": "d28c9bd9684980d5dbdf6c913298ade3", "score": "0.5630514", "text": "def update_query_set(self):\n return self.model_class.objects.filter(**self.filter_req_data).update(**self.input_req_data)", "title": "" }, { "docid": "5bfcefd33c699def1630fb2239740306", "score": "0.56261474", "text": "def get_set_info(self, attributes=None, subset=None, element_class='superblock'):\n # Fetch the requested data class ORM\n data_class = self.get('class', element_class)\n \n # Populate attribute with column names if not supplied\n if attributes is None:\n c = data_class.__table__.columns\n attributes = 
c.keys()\n\n # Fetch columns from class\n columns = self.get_columns(attributes, data_class)\n query = self.query(*columns)\n\n # Filter for subset\n if subset:\n query = query.filter(columns[0].in_(subset))\n\n # Aggregate result by first attribute\n query = query.group_by(columns[0]) \n \n # Return query\n return query", "title": "" }, { "docid": "46b374e6d870639218135517235746bd", "score": "0.56063306", "text": "def _query(self, *args, **kwargs):", "title": "" }, { "docid": "6c0f54fd6f6bf4ccde523b6a5cf28fef", "score": "0.56037515", "text": "def get_query_set(self):\n return MultiDBQuerySet(self.model, query=self.create_query())", "title": "" }, { "docid": "780a25d33c8fb32620ef200b44fed984", "score": "0.5597257", "text": "def _all_query(self):\r\n return xapian.Query('')", "title": "" }, { "docid": "439c651fbe8a4f9bccc2c7e9c5df9e68", "score": "0.5585572", "text": "def _setup_query(self):\r\n self.query.clear_deferred_loading()\r\n self.query.clear_select_fields()\r\n\r\n if self._fields:\r\n self.extra_names = []\r\n self.aggregate_names = []\r\n if not self.query.extra and not self.query.aggregates:\r\n # Short cut - if there are no extra or aggregates, then\r\n # the values() clause must be just field names.\r\n self.field_names = list(self._fields)\r\n else:\r\n self.query.default_cols = False\r\n self.field_names = []\r\n for f in self._fields:\r\n # we inspect the full extra_select list since we might\r\n # be adding back an extra select item that we hadn't\r\n # had selected previously.\r\n if f in self.query.extra:\r\n self.extra_names.append(f)\r\n elif f in self.query.aggregate_select:\r\n self.aggregate_names.append(f)\r\n else:\r\n self.field_names.append(f)\r\n else:\r\n # Default to all fields.\r\n self.extra_names = None\r\n self.field_names = [f.attname for f in self.model._meta.fields]\r\n self.aggregate_names = None\r\n\r\n self.query.select = []\r\n if self.extra_names is not None:\r\n self.query.set_extra_mask(self.extra_names)\r\n self.query.add_fields(self.field_names, True)\r\n if self.aggregate_names is not None:\r\n self.query.set_aggregate_mask(self.aggregate_names)", "title": "" }, { "docid": "19d6dfa72b6d737429d27175753e6844", "score": "0.5577367", "text": "def get_query():\n return CiscoC2900Query", "title": "" }, { "docid": "cbd72a53fbf720e04d32b4a9f3b93daf", "score": "0.55605036", "text": "def make_query(self, *args, **kwargs):\n query = Q() # initial is an empty query\n query_dict = self._build_query_dict(self.cleaned_data)\n if 'negate' in self.cleaned_data and self.cleaned_data['negate']:\n query = query & ~Q(**query_dict)\n else:\n query = query & Q(**query_dict)\n return query", "title": "" }, { "docid": "32db06747ab9fb818519933590c5c58c", "score": "0.55571645", "text": "def getQuery(self,ibs_query,src,action,dic_args):\n\tself.callOnAll(\"getQuery\",[ibs_query,src,action],dic_args)\n\treturn ibs_query", "title": "" }, { "docid": "a8fe1a6eeb87ddf8874bec334993d457", "score": "0.5520603", "text": "def get_bin_query(self):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.get_bin_query_template\n return self._get_provider_session('bin_query_session').get_bin_query()", "title": "" }, { "docid": "c4da4686611ce5860d74c43affe167ff", "score": "0.5512854", "text": "def retrieve(self, query):\n\t\tpass", "title": "" }, { "docid": "f99da69e22f3f2969267f0eb00b5f37f", "score": "0.55079424", "text": "def _get_delete_query(self):\n foreign = self.get_attribute(self.__foreign_key)\n\n query = self.new_query().where(self.__foreign_key, 
foreign)\n\n return query.where(self.__other_key, self.get_attribute(self.__other_key))", "title": "" }, { "docid": "f5d1631575c4940346195fddb7d22383", "score": "0.55043894", "text": "def do_attr_query(cls, key=(), value=(), number=(),\r\n subkey=(), ignore_hidden=True, sort_by_keys=False,\r\n glob=False, count=False, querybase=None, return_query=False,\r\n entity=None):\r\n\r\n clusto.flush()\r\n if querybase:\r\n query = querybase\r\n else:\r\n query = Attribute.query()\r\n\r\n ### This is bunk, gotta fix it\r\n if isinstance(cls, Driver):\r\n query = query.filter(and_(Attribute.entity_id==Entity.entity_id,\r\n Entity.driver == cls._driver_name,\r\n Entity.type == cls._clusto_type))\r\n\r\n if entity:\r\n query = query.filter_by(entity_id=entity.entity_id)\r\n\r\n if key is not ():\r\n if glob:\r\n query = query.filter(Attribute.key.like(key.replace('*', '%')))\r\n else:\r\n query = query.filter_by(key=key)\r\n\r\n if subkey is not ():\r\n if glob and subkey:\r\n query = query.filter(Attribute.subkey.like(subkey.replace('*', '%')))\r\n else:\r\n query = query.filter_by(subkey=subkey)\r\n\r\n if value is not ():\r\n typename = Attribute.get_type(value)\r\n\r\n if typename == 'relation':\r\n if isinstance(value, Driver):\r\n value = value.entity.entity_id\r\n query = query.filter_by(relation_id=value)\r\n\r\n else:\r\n query = query.filter_by(**{typename+'_value':value})\r\n\r\n if number is not ():\r\n if isinstance(number, bool) or number is None:\r\n if number == True:\r\n query = query.filter(Attribute.number != None)\r\n else:\r\n query = query.filter(Attribute.number == None)\r\n elif isinstance(number, (int, long)):\r\n query = query.filter_by(number=number)\r\n\r\n else:\r\n raise TypeError(\"number must be either a boolean or an integer.\")\r\n\r\n if ignore_hidden and ((key and not key.startswith('_')) or key is ()):\r\n query = query.filter(not_(Attribute.key.like('\\\\_%', escape='\\\\')))\r\n\r\n if sort_by_keys:\r\n query = query.order_by(Attribute.key)\r\n\r\n if count:\r\n return query.count()\r\n\r\n if return_query:\r\n return query\r\n\r\n return query.all()", "title": "" }, { "docid": "3b241303036132d2de554c1e8b6f73a2", "score": "0.55037624", "text": "def get_assessment_query(self):\n raise errors.Unimplemented()", "title": "" }, { "docid": "f08f423f9102dfa8794e03bb0b9963d2", "score": "0.5493706", "text": "def select(self, query, fields, attributes):\r\n if attributes.get('for_update', False) and not 'cache' in attributes:\r\n self.execute('BEGIN IMMEDIATE TRANSACTION;')\r\n return super(SQLiteAdapter, self).select(query, fields, attributes)", "title": "" }, { "docid": "dbc40afabc71ac34532aabcfb0c71e14", "score": "0.54823154", "text": "def select(self, query, fields, attributes):\n if attributes.get('for_update', False) and not 'cache' in attributes:\n self.execute('BEGIN IMMEDIATE TRANSACTION;')\n return super(SQLiteAdapter, self).select(query, fields, attributes)", "title": "" }, { "docid": "86b7b204576fea8e61fa510e6c9d3d37", "score": "0.54804856", "text": "def build_query(self, query_params):\n return", "title": "" }, { "docid": "0365a4d98db9c50db5667d6bfc960bca", "score": "0.5456984", "text": "def _setup_query(self):\r\n self.values = []\r\n self.related_ids = None\r\n if not hasattr(self, 'related_updates'):\r\n self.related_updates = {}", "title": "" }, { "docid": "c12840b7f6a8ef5065d0888c4c3290f0", "score": "0.54515636", "text": "def get_query(self):\n return {\"_id\": self.operation[\"_id\"]}", "title": "" }, { "docid": "ca01a46508314921f085d0b576dc09f7", 
"score": "0.54368305", "text": "def get_query_set(self):\r\n return self.model.QuerySet(self.model)", "title": "" }, { "docid": "893dbe3a6ef33e0204dcce65f6378c06", "score": "0.5434883", "text": "def query(self, query: _Q) -> _R:\n return NotImplemented", "title": "" }, { "docid": "9cb32f1a5985f216f4dd462008a6e3b8", "score": "0.54307604", "text": "def new_subquery(self):\n return Query()", "title": "" }, { "docid": "eba24bd3a1740fed36a80f6aebdffc07", "score": "0.5424503", "text": "def execute_query(self, query):\r\n pass", "title": "" }, { "docid": "36f18eab68af72c63d02748e7e6ca45a", "score": "0.54172903", "text": "def query(self, query_statement, args, context=None):\n pass", "title": "" }, { "docid": "517348536d819812d1a49a0eba3c0bfe", "score": "0.5411079", "text": "def as_sql(self):\r\n from django.db.models.base import Model\r\n\r\n self.pre_sql_setup()\r\n if not self.query.values:\r\n return '', ()\r\n table = self.query.tables[0]\r\n qn = self.quote_name_unless_alias\r\n result = ['UPDATE %s' % qn(table)]\r\n result.append('SET')\r\n values, update_params = [], []\r\n for field, model, val in self.query.values:\r\n if hasattr(val, 'prepare_database_save'):\r\n val = val.prepare_database_save(field)\r\n else:\r\n val = field.get_db_prep_save(val, connection=self.connection)\r\n\r\n # Getting the placeholder for the field.\r\n if hasattr(field, 'get_placeholder'):\r\n placeholder = field.get_placeholder(val, self.connection)\r\n else:\r\n placeholder = '%s'\r\n\r\n if hasattr(val, 'evaluate'):\r\n val = SQLEvaluator(val, self.query, allow_joins=False)\r\n name = field.column\r\n if hasattr(val, 'as_sql'):\r\n sql, params = val.as_sql(qn, self.connection)\r\n values.append('%s = %s' % (qn(name), sql))\r\n update_params.extend(params)\r\n elif val is not None:\r\n values.append('%s = %s' % (qn(name), placeholder))\r\n update_params.append(val)\r\n else:\r\n values.append('%s = NULL' % qn(name))\r\n if not values:\r\n return '', ()\r\n result.append(', '.join(values))\r\n where, params = self.query.where.as_sql(qn=qn, connection=self.connection)\r\n if where:\r\n result.append('WHERE %s' % where)\r\n return ' '.join(result), tuple(update_params + params)", "title": "" }, { "docid": "650742fa114de8bc3a9221b40be2793b", "score": "0.54013205", "text": "def get_sql(self):\n self._create_sql_blocks()\n return self._get_sql(data=self.store, sqldb=self.source)", "title": "" }, { "docid": "950f71bfcc2335e1a825815979bb96dc", "score": "0.54004455", "text": "def to_storage_query(self):\n from .query import ForceFieldQuery\n\n return ForceFieldQuery.from_data_object(self)", "title": "" }, { "docid": "ee74b25a3dc372d06001295b2cdfcfcb", "score": "0.5393471", "text": "def query(self):\n\n return QueryList(self)", "title": "" }, { "docid": "2bbde091b23d3e62d23a8dc1fe124e38", "score": "0.5391204", "text": "def report(self):\n return Query(self)", "title": "" }, { "docid": "3dd33f64cb07ada07316c5465a5caaa7", "score": "0.53829485", "text": "def query(cls, q=()):\n return Query(cls, q)", "title": "" }, { "docid": "4e47f66a738a13d5f09ad4c1f560e37d", "score": "0.53790164", "text": "def base_query():\n categories = db_session.query(Category).all()\n musics = db_session.query(Music).all()\n return categories, musics", "title": "" }, { "docid": "307323391f19c93851272fd7c37474aa", "score": "0.5372562", "text": "def get_attributes_query(self, acc, con, obj, attrs):\n # Catch bad query\n if attrsStartWith(attrs) == \"BAD\":\n return \"BAD\"\n\n # Object Scope\n if obj != \"\" and obj is not None:\n Ouri = \"'/\" + 
acc + \"/\" + con + \"/\" + obj + \"'\"\n Curi = \"'/\" + acc + \"/\" + con + \"'\"\n Auri = \"'/\" + acc + \"'\"\n domain = attrsStartWith(attrs)\n if domain == 'object':\n uri = Ouri\n elif domain == 'container':\n uri = Curi\n else:\n uri = Auri\n return \"\"\"SELECT %s,%s_uri\n FROM Metadata\n WHERE %s_uri=%s\n \"\"\" % (attrs, domain, domain, domain, uri)\n\n # Container Scope\n elif con != \"\" and con is not None:\n uri = \"'/\" + acc + \"/\" + con + \"'\"\n Auri = \"'/\" + acc + \"'\"\n if attrsStartWith(attrs) == 'object':\n return (\"SELECT %s,object_uri \"\n \"FROM Metadata \"\n \"WHERE object_container_name=%s\"\n ) % (attrs, \"'\" + con + \"'\")\n\n elif attrsStartWith(attrs) == 'container':\n return (\"SELECT %s,container_uri \"\n \"FROM Metadata \"\n \"WHERE container_uri=%s\"\n ) % (attrs, uri)\n\n elif attrsStartWith(attrs) == 'account':\n return (\"SELECT %s,account_uri \"\n \"FROM Metadata \"\n \"WHERE account_uri=%s\"\n ) % (attrs, Auri)\n\n # Account scope\n elif acc != \"\" and acc is not None:\n uri = \"'/\" + acc + \"'\"\n if attrsStartWith(attrs) == 'object':\n return (\"SELECT %s,object_uri \"\n \"FROM Metadata \"\n \"WHERE object_account_name='%s'\"\n ) % (attrs, acc)\n\n elif attrsStartWith(attrs) == 'container':\n return (\"SELECT %s,container_uri \"\n \"FROM Metadata \"\n \"WHERE container_account_name='%s'\"\n ) % (attrs, acc)\n\n elif attrsStartWith(attrs) == 'account':\n return (\"SELECT %s,account_uri \"\n \"FROM Metadata \"\n \"WHERE account_uri=%s\"\n ) % (attrs, uri)", "title": "" }, { "docid": "9f524a926df281c069399bdacac0dd2e", "score": "0.53652805", "text": "def query_db(self, query_obj: dict) -> dict:\n return next(self.connect_db().find(query_obj))", "title": "" }, { "docid": "319b37a551781476a78274d1e1ad230c", "score": "0.5360408", "text": "def sql():", "title": "" }, { "docid": "860bd5625e175750dc9101688f245765", "score": "0.5345052", "text": "def query_db(self, query, args=(), one=False):\n # Parse question marks\n parsed_query = replace_questionmarks(query, args)\n # Run GQL Query\n rs = db.GqlQuery(parsed_query)\n return rs", "title": "" }, { "docid": "d44782e342646545b19e7075dfeb524e", "score": "0.5341343", "text": "def query_data(self):\r\n\r\n if not hasattr(self, '_query_data_cache'):\r\n data = self.safe_cleaned_data\r\n\r\n if self.quick_rules:\r\n data, query = quick.parse_quickadd(\r\n data.get('query'),\r\n self.quick_rules)\r\n query = ' '.join(query)\r\n\r\n # Data in form fields overrides any quick specifications\r\n for k, v in self.safe_cleaned_data.items():\r\n if v is not None:\r\n data[k] = v\r\n else:\r\n query = data.get('query')\r\n\r\n self._query_data_cache = query, data\r\n return self._query_data_cache", "title": "" }, { "docid": "faf974a1e0c913ee7211998c0de31231", "score": "0.5325988", "text": "def mount_query(self):\n\n where_statements = []\n for key, value in self.experiment_params.iteritems():\n # get params from yaml \n exp_name = key\n date_start = value['dates'][0] if value['dates'][0] else self.yesterday_date\n date_end = value['dates'][1] if value['dates'][1] else self.yesterday_date\n\n\n where_statements.append(\"(theday >= '{}' and theday <= '{}' \"\\\n \"and experimenttypename = '{}')\".format(date_start,\n date_end,\n exp_name))\n\n where = \" or \\n\".join(where_statements)\n\n #return '{} where {}'.format(self.base_query, where)\n\treturn self.base_query.format(where)", "title": "" }, { "docid": "17d628801d237a1ab94310a8050973cc", "score": "0.53220284", "text": "def query(self):\n return 
self.details[KEY_QUERY]", "title": "" }, { "docid": "cff1ca1fd03292a6206e03822bfbb801", "score": "0.5321142", "text": "def get_query_set(self):\n\t\tif router.is_testing(self.db):\n\t\t\treturn super(SalesforceManager, self).get_query_set()\n\t\telse:\n\t\t\tfrom salesforce.backend import query, compiler\n\t\t\tq = query.SalesforceQuery(self.model, where=compiler.SalesforceWhereNode)\n\t\t\treturn query.SalesforceQuerySet(self.model, query=q, using=self.db)", "title": "" }, { "docid": "bba3075cc1f5eef19600a83384b822a6", "score": "0.5318845", "text": "def find(self, query):\n pass", "title": "" }, { "docid": "b18577a655914eb8ef9a9d6524b2018c", "score": "0.53027356", "text": "def get_base_query(self):\n return Search(using=self.client, index=self.index_name)", "title": "" }, { "docid": "1117bf734139cd201034978d32cfc6cf", "score": "0.52958304", "text": "def generate_q_expression(self, query):", "title": "" }, { "docid": "d53b8ca242973d98477a43721b8292c8", "score": "0.5286453", "text": "def _set_keys_for_save_query(self, query):\n query.where(self.__foreign_key, self.get_attribute(self.__foreign_key))\n\n return query.where(self.__other_key, self.get_attribute(self.__other_key))", "title": "" }, { "docid": "c0616413a7eec2e654786778e43c6ccd", "score": "0.5275257", "text": "def _where(self, query, **kwargs):\n query = self._auto_where(query, **kwargs)\n return query", "title": "" }, { "docid": "9ee76a2797a4b753d58db1522a7266e9", "score": "0.5262284", "text": "def get_query_specs(self):\n save_changed_rows = {\n cdr_consts.QUERY:\n SANDBOX_QUERY.render(project=self.project_id,\n dataset=self.dataset_id,\n sandbox_dataset=self.sandbox_dataset_id,\n intermediary_table=SAVE_TABLE_NAME),\n }\n\n clean_numeric_ppi_query = {\n cdr_consts.QUERY:\n CLEAN_NUMERIC_PPI_QUERY.render(project=self.project_id,\n dataset=self.dataset_id),\n cdr_consts.DESTINATION_TABLE:\n 'observation',\n cdr_consts.DESTINATION_DATASET:\n self.dataset_id,\n cdr_consts.DISPOSITION:\n WRITE_TRUNCATE\n }\n\n return [save_changed_rows, clean_numeric_ppi_query]", "title": "" }, { "docid": "7ba89b316e479883fcd2e9b717919536", "score": "0.5257515", "text": "def query_params(self):\n\n return self.GWsky_config.update({\"catalog\" : self.entry_catalog.get(),\n \"column_1\" : self.entry_column_1.get(),\n \"column_2\" : self.entry_column_2.get(),\n \"filter_1\" : self.entry_column_filter_1.get()})\n #\"filter_2\" : self.entry_column_filter_2.get()})", "title": "" }, { "docid": "4e449e8456e6029c279013989b983e7d", "score": "0.52554697", "text": "def get_query(self, question):\n\n question = question_sanitize(question)\n for returnModel in self.get_queries(question):\n return returnModel\n return None, None, None", "title": "" }, { "docid": "2eb927240bf493372551386612f9ddab", "score": "0.5253605", "text": "def get_raw_sql(self):\n return self._raw_sql", "title": "" }, { "docid": "fc83b9523473a137f0396d552eb67d0c", "score": "0.5253298", "text": "def query(self, query):\n query_list = copy.copy(self._query)\n query_list.append(query)\n return self._clone(query=query_list)", "title": "" }, { "docid": "7101d5f50a0be9da187602c3c728c72f", "score": "0.5250709", "text": "def copy(self):\n copyQC = QueryConditions()\n copyQC.data = dict(self.data)\n return copyQC", "title": "" }, { "docid": "ddf2a1dab84a5352f66804fe483a66ae", "score": "0.52458775", "text": "def query_model(session: Session, sa_class: DeclarativeMeta, only_pk=False) -> Query:\n opts = (noload('*'),)\n if only_pk:\n pk = get_pk(sa_class)\n opts += tuple(\n defer(prop.key)\n for prop in 
class_mapper(sa_class).iterate_properties\n if isinstance(prop, ColumnProperty)\n if prop.key != pk)\n return session.query(sa_class).options(*opts)", "title": "" }, { "docid": "41c6863a4bdfc5eb7a719171bcdcf7d8", "score": "0.5244448", "text": "def db_query(self, query):\n # print(self.db_path)\n\n conn = sqlite3.connect(self.db_path)\n\n conn.execute(query)\n\n conn.commit()\n conn.close()", "title": "" }, { "docid": "fd5541688b5e22a55b25518da54be20e", "score": "0.52416366", "text": "def select(self, query, fields, attributes):\n sql = self._select(query, fields, attributes)\n cache = attributes.get('cache', None)\n if cache and attributes.get('cacheable',False):\n del attributes['cache']\n (cache_model, time_expire) = cache\n key = self.uri + '/' + sql\n if len(key)>200: key = hashlib_md5(key).hexdigest()\n args = (sql,fields,attributes)\n return cache_model(\n key,\n lambda self=self,args=args:self._select_aux(*args),\n time_expire)\n else:\n return self._select_aux(sql,fields,attributes)", "title": "" } ]
e86259982dceb897147a0f82302c3f73
Called when a ticket is created.
[ { "docid": "8c67b05089d8ecae10759092b41e8665", "score": "0.81197613", "text": "def ticket_created(self, ticket):\r\n self.watch_complete(ticket)", "title": "" } ]
[ { "docid": "91f7bc790a62ecdcd4fcbc04a83db5af", "score": "0.7272276", "text": "def create_ticket(self, ticket):\n\n if ticket.user:\n person = self.require_person(ticket.user.email, user=ticket.user)\n elif ticket.email:\n person = self.require_person(ticket.email)\n else:\n raise ValueError(\n \"Either user or email need to be specified on the DeskProTicket instance\"\n )\n\n if not ticket.deskpro_id:\n cc = []\n\n for _cc in ticket.cc_set.all():\n cc.append(_cc.email)\n\n ticket_response = self.create(\n \"tickets\",\n {\n \"subject\": ticket.subject,\n \"person\": {\"id\": person[\"id\"]},\n \"status\": \"awaiting_agent\",\n \"cc\": cc,\n },\n )\n\n ticket.deskpro_ref = ticket_response[\"ref\"]\n ticket.deskpro_id = ticket_response[\"id\"]\n\n else:\n self.reopen_ticket(ticket)\n\n self.create(\n f\"tickets/{ticket.deskpro_id}/messages\",\n {\n \"message\": ticket.body.replace(\"\\n\", \"<br />\\n\"),\n \"person\": person[\"id\"],\n \"format\": \"html\",\n },\n )", "title": "" }, { "docid": "eeb2e5fa65290c8dc600f0612a1b2f46", "score": "0.721396", "text": "def newTicket(self):\n ticket = self.getTicket()\n self.currentTicket = ticket\n ticket.writeTicket()\n #enable the print button since a ticket has been submitted.\n self.prnt.setEnabled(True)\n if config.platform != 'pi':\n self.updateTicketList()\n ticket.displayTicket()\n else:\n self.displayTicket()", "title": "" }, { "docid": "68843b59fb1993c1902108fb73ac03fc", "score": "0.71423763", "text": "def add_ticket():\n ticket = Ticket(game='Tom v. Travis',\n section='12',\n game_day=datetime.datetime.now() - timedelta(days=15),\n row='32',\n seat_no='3')\n db.session.add(ticket)\n db.session.commit()\n return 'Ticket Added successfully!!!'", "title": "" }, { "docid": "4c8b71dd2ee8eed90b4691ecf6bbdeb0", "score": "0.71180385", "text": "def create_ticket(request, ticket_type):\n\n if ticket_type == 'bug':\n header = 'Log a new bug'\n elif ticket_type == 'feature':\n header = 'Request a new feature'\n\n if request.method == 'POST':\n create_ticket_form = CreateTicketForm(request.POST, label_suffix='')\n\n if create_ticket_form.is_valid():\n new_ticket = create_ticket_form.save(commit=False)\n new_ticket.ticket_type = ticket_type\n new_ticket.userID = request.user\n new_ticket.lastUpdatedBy = request.user.username\n new_ticket.lastUpdatedDateTime = timezone.now()\n new_ticket.save()\n messages.success(request, 'Your ' + ticket_type + ' has been logged successfully!')\n return redirect(reverse('tracker', args=['all', 'dateTimeCreated', 1]))\n else:\n create_ticket_form = CreateTicketForm(label_suffix='')\n\n return render(request, 'create-ticket.html', {'create_ticket_form': create_ticket_form, 'header': header})", "title": "" }, { "docid": "fde0cfecb3425247ffa273d81c967ceb", "score": "0.7113387", "text": "def create(self, alarm_id, **kwargs):\n if CONF.request_tracker.reopen_rejected:\n l = [d for d in self.search(status=[\"rejected\"]) \n if d[\"CF.{%s}\" % self.custom_field] == alarm_id]\n if l:\n logger.debug((\"Found a ticket '%s' in rejected status with \"\n \"the same alarm ID %s associated.\"\n % (l[0][\"id\"], alarm_id)))\n self.set_status([l[0][\"id\"]], \"open\")\n return \n\n logger.debug(\"Creating ticket for alarm %s\" % alarm_id)\n kwargs.update({\"alarm_id\": alarm_id})\n try:\n payload = {\n \"content\": {\n \"Queue\" : CONF.request_tracker.queue,\n \"Subject\": Template(CONF.request_tracker.new_subject).substitute(kwargs),\n \"Text\" : Template(CONF.request_tracker.new_body).substitute(kwargs),\n }\n }\n except KeyError, 
e:\n raise delato.exception.MissingTemplateArgument(str(e))\n\n payload[\"content\"][\"CF-%s\" % self.custom_field] = alarm_id\n logger.debug(\"Ticket content: %s\" % payload[\"content\"])\n \n if not CONF.request_tracker.noop:\n try:\n response = self.conn.post(path='ticket/new', payload=payload,)\n logger.debug(\"Ticket parsed: %s\" % response.parsed)\n logger.debug(\"Ticket status: %s\" % response.status)\n if response.status_int != 200:\n raise delato.exception.CreateTicketException(response.status)\n ticket_id = response.parsed[0][0][1]\n ticket_no = ticket_id.split(\"/\")[1]\n logger.info(\"Ticket %s (alarm %s) has been successfully created\" \n % (ticket_no, alarm_id))\n self.cache_data.append(self.get_ticket(ticket_id))\n logger.debug(\"CACHE updated with the recently created ticket %s\" \n % ticket_no)\n except RTResourceError as e:\n logger.error(e.response.status_int)\n logger.error(e.response.status)\n logger.error(e.response.parsed)", "title": "" }, { "docid": "53c5d0e07a9643ce781f746ecb94cb07", "score": "0.68974173", "text": "def create(self, req, summary, description, attributes = {}, notify=False):\n t = model.Ticket(self.env)\n t['status'] = 'new'\n t['summary'] = summary\n t['description'] = description\n t['reporter'] = req.authname or 'anonymous'\n for k, v in attributes.iteritems():\n t[k] = v\n t.insert()\n\n if notify:\n try:\n tn = TicketNotifyEmail(self.env)\n tn.notify(t, newticket=True)\n except Exception, e:\n self.log.exception(\"Failure sending notification on creation \"\n \"of ticket #%s: %s\" % (t.id, e))\n\t\t\n return t.id", "title": "" }, { "docid": "bc10b33404d5fb911eed722c95050db0", "score": "0.6830202", "text": "def create_ticket(self, context, ticket_entity):\n client = util.get_freshdesk_client(context[\"headers\"])\n ticket_schema = FreshdeskTicket(**ticket_entity)\n ticket = client.tickets.create_ticket(ticket_schema.subject,\n email=ticket_schema.email,\n type=ticket_schema.type,\n description=ticket_schema.description,\n priority=ticket_schema.priority,\n cc_emails=[\n ticket_schema.cc_email]\n )\n\n # response from sdk call is given in the format of object\n keys = (ticket.__dict__)[\"_keys\"]\n response = {}\n for key in keys:\n response[key] = (ticket.__dict__)[key]\n return response", "title": "" }, { "docid": "aafcdc3e471c44cacfd8e62d80b5e645", "score": "0.68142825", "text": "def new_ticket(request):\n if request.method == 'GET':\n template_name = 'ticket_create.html'\n ticket_form = TicketForm()\n # print(\"My ticket form is {}\".format(ticket_form))\n return render(request, template_name, {'ticket_form': ticket_form})\n\n elif request.method == 'POST':\n form_data = request.POST\n error_message = None\n \n if error_message is not None:\n ticket_form = TicketForm(request.POST, request.FILES)\n # print(\"My ticket form is {}\".format(ticket_form))\n template_name = 'ticket_create.html'\n return render(request, template_name, { 'ticket_form': ticket_form, 'error_message': error_message })\n \n t = Ticket(\n # seller = request.user,\n priority = form_data['priority'],\n subject = form_data['subject'],\n description = form_data['description']\n \n # category = Category.objects.get(pk=form_data['category']),\n # image_path = image_path,\n # local_delivery = delivery,\n # city = city,\n # is_active = 1\n )\n \n t.save()\n template_name = 'ticket_details.html'\n return render(request, template_name, { 'ticket': t })", "title": "" }, { "docid": "656870303932e4201276d7f95d646287", "score": "0.6767664", "text": "def create_ticket(self, t_type, 
props=None):\n if props is None:\n props = {}\n self._ticket_counter += 1\n ticket = AgiloTicketModelManager(self.env).create(t_type=t_type, save=False)\n ticket[Key.SUMMARY] = u'%s n.%s' % (t_type.title(), self._ticket_counter)\n ticket[Key.DESCRIPTION] = u'Description for ' + t_type\n ticket[Key.STATUS] = Status.NEW\n for field_name, value in props.items():\n assert ticket.is_writeable_field(field_name), field_name\n value = self._replace_object_by_name(field_name, value)\n ticket[field_name] = value\n AgiloTicketModelManager(self.env).save(ticket)\n\n self.objects.append(ticket)\n return ticket", "title": "" }, { "docid": "cc94cc6457c0df657ee0a47fc342c5d6", "score": "0.67302436", "text": "def create_ticket(template,\n context_factory=None,\n queue=\"Test\",\n ticket_id_key=\"ticket_id\"):\n\n @wraps(create_ticket)\n def _create_ticket(obj, eng):\n user = User.query.get(obj.id_user)\n if not user:\n obj.log.error(\n \"No user found for object {0}, skipping ticket creation\".format(\n obj.id))\n return\n context = {}\n if context_factory:\n context = context_factory(user, obj)\n body = render_template(\n template,\n **context\n ).strip()\n\n submit_rt_ticket(obj,\n queue,\n context.get('subject'),\n body,\n user.email,\n ticket_id_key)\n\n return _create_ticket", "title": "" }, { "docid": "de8075fba5f7d0042fd86013193ded75", "score": "0.67213106", "text": "def create_ticket(request):\n tkt = {\"created\": timezone.now(), \"queue\": None}\n fields = [\"title\",\"submitter_email\",\"description\"]\n for field in fields:\n if request.POST.get(field):\n tkt[field] = request.POST.get(field)\n else: \n return JsonResponse({'error': f\"Field {field} is missing\"})\n opt_fields = ['priority','due_date']\n for field in opt_fields:\n if request.POST.get(field):\n tkt[field] = request.POST.get(field)\n \n if request.POST.get('queue'):\n q = request.POST.get('queue')\n if q.isnumeric():\n tkt[\"queue\"] = TicketQueue.objects.filter(pk=int(q)).first()\n elif q.lower() == \"vendor\":\n #verify user has access to such queue\n tkt[\"queue\"] = get_vendor_queue(request.user)\n else:\n tkt[\"queue\"] = TicketQueue.objects.filter(title__iexact=q).first()\n\n if not tkt[\"queue\"]:\n return JsonResponse({'error': f\"Queue name or queue id required and valid\"})\n \n try:\n ticket = Ticket(**tkt)\n ticket.save()\n return JsonResponse({'success': f\"Created Ticket\",\"pk\": ticket.pk})\n except Exception as e:\n logger.info(f\"Failed to create ticket using async request error {e}\")\n return JsonResponse({'error': f\"Ticket creation failed see logs for details.\"})", "title": "" }, { "docid": "56ac59b8f42dd9073c37209c0a79707e", "score": "0.66960883", "text": "def _insert_ticket(self, **kw):\n return insert_ticket(self.env, **kw)", "title": "" }, { "docid": "33a8b3d944a512b87dab59dcaf54de7c", "score": "0.6653604", "text": "def test_create_zendesk_ticket(self):\n self._mock_zendesk_api()\n\n name = 'Test user'\n email = '[email protected]'\n subject = 'Test Ticket'\n body = 'I want a refund!'\n tags = ['auto_refund']\n ticket_created = self.call_create_zendesk_ticket(name, email, subject, body, tags)\n assert ticket_created\n last_request = httpretty.last_request()\n\n # Verify the headers\n expected = {\n 'content-type': JSON,\n 'Authorization': 'Basic {}'.format(base64.b64encode(\n f'{ZENDESK_USER}/token:{ZENDESK_API_KEY}'.encode('utf8')).decode('utf8')\n )\n }\n self.assertDictContainsSubset(expected, last_request.headers)\n\n # Verify the content\n expected = {\n 'ticket': {\n 'requester': {\n 'name': name,\n 
'email': email\n },\n 'subject': subject,\n 'comment': {'body': body},\n 'tags': ['LMS'] + tags\n }\n }\n self.assertDictEqual(json.loads(last_request.body.decode('utf8')), expected)", "title": "" }, { "docid": "6d826be4287fe7bc1dcb7b9d0a9f2013", "score": "0.6568732", "text": "def create_ticket():\n # Get all the parametes\n title = str(request.json.get(\"title\"))\n category = str(request.json.get(\"category\"))\n\n # Get the id_user_hash from the jwt_token\n id_user_hash = get_jwt_identity()\n\n # Define template message\n resp_msg = {\"id_ticket\": \"\"}\n\n if (title and category):\n # Get the id_user\n id_user = db.session.query(Users.id_user).filter(\n Users.id_user_hash == id_user_hash\n ).first()\n\n if id_user:\n # Generate uuid for the id_ticket_hash\n id_ticket_hash = str(uuid4())\n timestamp = datetime.utcnow().replace(microsecond=0)\n\n # Catch the MESSAGE API call status error\n try:\n # Create new channel\n payload = {\n \"id_ticket_hash\": id_ticket_hash,\n \"title\": title\n }\n resp = requests.post(\n current_app.config[\"MESSAGE_API\"] + \"create_channel\",\n json=payload)\n # Check the return status\n resp.raise_for_status()\n id_channel = resp.json().get(\"id_channel\")\n\n if id_channel:\n # Add user to the new channel\n payload = {\n \"id_channel\": id_channel,\n \"id_user_hash\": id_user_hash\n }\n resp = requests.post(\n current_app.config[\"MESSAGE_API\"] + \"join_channel\",\n json=payload)\n # Check return status\n resp.raise_for_status()\n id_member = resp.json().get(\"id_member\")\n\n if id_member:\n # Create ticket record entry\n ticket = TicketRecords(\n id_ticket_hash=id_ticket_hash,\n id_creator=id_user,\n id_channel=id_channel,\n title=title,\n category=category,\n create_timestamp=timestamp,\n last_activity_timestamp=timestamp\n )\n # if save_status:\n db.session.add(ticket)\n db.session.commit()\n resp_msg[\"id_ticket\"] = id_ticket_hash\n return jsonify(resp_msg), 201\n except Exception as e:\n current_app.logger.info(\"Unable to create ticket: \" + str(e))\n else:\n return jsonify(resp_msg), 401\n else:\n return jsonify(resp_msg), 400\n\n return jsonify(resp_msg), 500", "title": "" }, { "docid": "9dbd029692327c8594fd92629b011562", "score": "0.65082705", "text": "def getTicket(self):\n date = self.date.dateTime().toPyDateTime().strftime('%m-%d-%Y %H:%M')\n customerName = str(self.newCustomer.text()).strip()\n if len(customerName) == 0:\n if self.customers.currentIndex() == 0:\n name = 'noname'\n else:\n name = self.customerList[self.customers.currentIndex()-1]['name']\n else:\n name = customerName\n\n ticketNum = self.Ticket.getTicketNum(self.date.dateTime().toPyDateTime())\n return self.Ticket(customerName,date,ticketNum,self.paidState.currentText(),self.comment.text(),\n self.smallbagsQ.value(),self.smallbagsP.value(),self.bigbagsQ.value(),self.bigbagsP.value(),\n self.smallblockQ.value(),self.smallblockP.value(),self.largeblockQ.value(),\n self.largeblockP.value(),self.vendorQ.value(),self.vendorP.value(),self.storageQ.value(),\n self.storageP.value(),self.freightQ.value(),self.freightP.value(),self.otherQ.value(),\n self.otherP.value(),self.total.value(),str(self.checkNum.text()))", "title": "" }, { "docid": "ce6c7e36fefa39142af90b1706173454", "score": "0.64838517", "text": "def createTicketWithGivenData(ticket_data):\n try:\n ticketDataObj = Ticket(\n emp=Employee.objects.get(emp_id=EMP_ID['emp_id']),\n type=ticket_data.ticket_type,\n to=ticket_data.ticket_to,\n subject=ticket_data.ticket_subject,\n description=ticket_data.ticket_description,\n 
priority=ticket_data.ticket_priority\n )\n ticketDataObj.save()\n sendMailToUser(ticketDataObj.emp.email_id, ticket_data)\n response = \"Ticket created successfully.\"\n except Exception as e:\n print(\"***ERROR in creating Ticket\", e)\n response = \"Something went wrong please try again later.\"\n return response", "title": "" }, { "docid": "b9275e828b235eacead66de2e5f39fe4", "score": "0.648182", "text": "def cli(env, title, subject_id, body, hardware_identifier, virtual_identifier, priority):\n ticket_mgr = SoftLayer.TicketManager(env.client)\n\n if body is None:\n body = click.edit('\\n\\n' + ticket.TEMPLATE_MSG)\n created_ticket = ticket_mgr.create_ticket(\n title=title,\n body=body,\n subject=subject_id,\n priority=priority)\n\n if hardware_identifier:\n hardware_mgr = SoftLayer.HardwareManager(env.client)\n hardware_id = helpers.resolve_id(hardware_mgr.resolve_ids, hardware_identifier, 'hardware')\n ticket_mgr.attach_hardware(created_ticket['id'], hardware_id)\n\n if virtual_identifier:\n vs_mgr = SoftLayer.VSManager(env.client)\n vs_id = helpers.resolve_id(vs_mgr.resolve_ids, virtual_identifier, 'VS')\n ticket_mgr.attach_virtual_server(created_ticket['id'], vs_id)\n\n env.fout(ticket.get_ticket_results(ticket_mgr, False, created_ticket['id']))", "title": "" }, { "docid": "61bf87e18eee3ea2a007a3cfdc06e607", "score": "0.62572205", "text": "def register_plane_ticket(self):\n\n origin = self.line_edit_origem.text()\n destination = self.line_edit_destination.text()\n departure_date = self.date_edit_departure.date().getDate()\n arrival_date = self.date_edit_arrival.date().getDate()\n number_of_rooms = int(self.combo_number_of_rooms.currentText())\n only_departure = self.check_only_departure.isChecked()\n guest_ages = self.table_guests.get_column(0)\n\n guest_ages = [int(i) for i in guest_ages]\n \n handler = WebserviceHandler()\n handler.register_plane_ticket(origin, \n destination, \n departure_date, \n arrival_date, \n number_of_rooms, \n only_departure,\n guest_ages)\n\n QMessageBox.about(self, \"Success\", \"plane ticket registered.\")\n\n self.line_edit_origem.clear()\n self.line_edit_destination.clear()\n\n self.clear_layout(self.vlayout_guests)\n data = OrderedDict([('Guest age', [])])\n self.table_guests = MyTable(data, 0, 1)\n self.vlayout_guests.addWidget(self.table_guests)\n\n self.date_edit_arrival.setDateTime(QtCore.QDateTime(QtCore.QDate(2015, 1, 1), QtCore.QTime(0, 0, 0)))\n\n self.date_edit_departure.setDateTime(QtCore.QDateTime(QtCore.QDate(2015, 1, 1), QtCore.QTime(0, 0, 0)))\n\n self.combo_number_of_rooms.setCurrentIndex(0)\n self.combo_age.setCurrentIndex(0)\n\n self.check_only_departure.setChecked(False)", "title": "" }, { "docid": "546ef93409ad4f88d210084ca2335ed7", "score": "0.6243731", "text": "def call_create_zendesk_ticket(self, name='Test user', email='[email protected]', subject='Test Ticket',\n body='I want a refund!', tags=None):\n tags = tags or ['auto_refund']\n return create_zendesk_ticket(name, email, subject, body, tags)", "title": "" }, { "docid": "2ce913f1d8e530d8f1f7a9add18f8954", "score": "0.62391675", "text": "def make_ticket(issue_tracker):\n # ensure that we have a connection\n issue_tracker.connect()\n return issue_tracker.create_issue(project=issue_tracker.default_project(),\n issue_type=zazu.util.pick(issue_tracker.issue_types(), 'Pick type'),\n summary=zazu.util.prompt('Enter a title'),\n description=zazu.util.prompt('Enter a description'),\n component=zazu.util.pick(issue_tracker.issue_components(), 'Pick component'))", "title": "" }, { 
"docid": "23facd969bed57ddc86642c4153fe790", "score": "0.62376416", "text": "def create(self, validated_data):\n queues = HelpdeskUser(self.context['request'].user).get_queues()\n queue_choices = [(q.id, q.title) for q in queues]\n data = validated_data.copy()\n data['body'] = data['description']\n # TicketForm needs id for ForeignKey (not the instance themselves)\n data['queue'] = data['queue'].id\n if data.get('assigned_to'):\n data['assigned_to'] = data['assigned_to'].id\n if data.get('merged_to'):\n data['merged_to'] = data['merged_to'].id\n\n files = {'attachment': data.pop('attachment', None)}\n\n ticket_form = TicketForm(\n data=data, files=files, queue_choices=queue_choices)\n if ticket_form.is_valid():\n ticket = ticket_form.save(user=self.context['request'].user)\n ticket.set_custom_field_values()\n return ticket\n\n raise ValidationError(ticket_form.errors)", "title": "" }, { "docid": "16a23d357f24329cd305892b3dcd249e", "score": "0.6228205", "text": "def tickettype_create(client_id, tickettype_info, event_id=None, eventpart_id=None):\n # Set defaults for missing values\n name = tickettype_info.get('name')\n description = tickettype_info.get('description')\n price = tickettype_info.get('price') or defaults.TICKETTYPE_PRICE\n currency = tickettype_info.get('currency') or defaults.TICKETTYPE_CURRENCY\n units = tickettype_info.get('units') or defaults.TICKETTYPE_AMOUNT\n min_order = tickettype_info.get('minimum_order') or defaults.TICKETTYPE_MIN_ORDER\n max_order = tickettype_info.get('maximum_order') or defaults.TICKETTYPE_MAX_ORDER\n \n sales_start = tickettype_info.get('sales_start')\n sales_end = tickettype_info.get('sales_start')\n \n if event_id is None and eventpart_id is None:\n raise ex.TickeeError(\"eventpart_id or event_id required.\")\n \n # price must not be less than zero\n if price < 0:\n raise ex.InvalidPriceError(\"The price must be 0 or more.\")\n # units must not be less than zero\n if units < 0:\n raise ex.InvalidAmountError(\"The quantity of tickets must be 0 or more.\")\n # minimum order must not be less than zero\n if min_order and min_order < 0:\n raise ex.InvalidAmountError(\"The minimum order limit must be 1 or more.\")\n # maximum order must not be less than minimum order\n if max_order and max_order < min_order:\n raise ex.InvalidAmountError(\"The maximum order limit must be equal or more than the minimum limit.\")\n \n \n # decide on sales start/end times\n if event_id:\n if client_id is not None:\n require_event_owner(client_id, event_id)\n event = lookup_event_by_id(event_id)\n default_sale_start = event.get_start_date()\n default_sale_end = event.get_end_date()\n elif eventpart_id:\n if client_id is not None:\n require_eventpart_owner(client_id, eventpart_id)\n eventpart = lookup_eventpart_by_id(eventpart_id)\n default_sale_start = eventpart.starts_on\n default_sale_end = eventpart.ends_on\n else:\n raise ex.EventPartNotFoundError('Tickettype needs to be connect to either an event or eventpart.')\n \n if sales_start is None:\n sales_start = default_sale_start\n else:\n sales_start = datetime.datetime.utcfromtimestamp(int(sales_start))\n \n if sales_end is None:\n sales_end = default_sale_end\n else:\n sales_end = datetime.datetime.utcfromtimestamp(int(sales_end))\n \n # create ticket type\n tickettype = create_tickettype(price, units, currency, name, \n None, min_order, max_order, \n sales_start, sales_end)\n \n # set description\n if description is not None:\n tickettype.set_description(description.get('text'), description.get('language'))\n \n # 
link new tickettype to eventpart / event\n if event_id:\n link_tickettype_to_event(tickettype, event)\n\n elif eventpart_id:\n link_tickettype_to_eventpart(tickettype, eventpart)\n\n return tickettype_to_dict2(tickettype)", "title": "" }, { "docid": "8bc19e5489c70e266921b5f703651511", "score": "0.61528504", "text": "def create_ticket(self, title=None, body=None, subject=None, priority=None):\n current_user = self.account.getCurrentUser()\n new_ticket = {\n 'subjectId': subject,\n 'assignedUserId': current_user['id'],\n 'title': title,\n }\n if priority is not None:\n new_ticket['priority'] = int(priority)\n\n created_ticket = self.ticket.createStandardTicket(new_ticket, body)\n return created_ticket", "title": "" }, { "docid": "cc94c8aeea66efa83fab714ad2b843fa", "score": "0.61395323", "text": "def test_set_ticket(self):\n testflow.step(\"Set ticket test\")\n assert ll_vms.startVm(\n positive=True, vm=self.vm_name\n ), \"Failed to start vm\"\n testflow.step(\"Ticket running vm %s\", self.vm_name)\n assert ll_vms.ticketVm(\n True, self.vm_name, config.ticket_expire_time\n ), \"Failed to set ticket to VM\"", "title": "" }, { "docid": "88740bcae769e72784e4d4049101bace", "score": "0.61339134", "text": "def create_zendesk_ticket(\n requester_name,\n requester_email,\n subject,\n body,\n group=None,\n custom_fields=None,\n uploads=None,\n tags=None,\n additional_info=None\n):\n if tags:\n # Remove duplicates from tags list\n tags = list(set(tags))\n\n data = {\n 'ticket': {\n 'requester': {\n 'name': requester_name,\n 'email': requester_email\n },\n 'subject': subject,\n 'comment': {\n 'body': body,\n 'uploads': uploads\n },\n 'custom_fields': custom_fields,\n 'tags': tags\n }\n }\n\n # Encode the data to create a JSON payload\n payload = json.dumps(data)\n\n if not (settings.ZENDESK_URL and settings.ZENDESK_OAUTH_ACCESS_TOKEN):\n log.error(_std_error_message(\"zendesk not configured\", payload))\n return status.HTTP_503_SERVICE_UNAVAILABLE\n\n if group:\n if group in settings.ZENDESK_GROUP_ID_MAPPING:\n group_id = settings.ZENDESK_GROUP_ID_MAPPING[group]\n data['ticket']['group_id'] = group_id\n else:\n msg = f\"Group ID not found for group {group}. Please update ZENDESK_GROUP_ID_MAPPING\"\n log.error(_std_error_message(msg, payload))\n return status.HTTP_400_BAD_REQUEST\n\n # Set the request parameters\n url = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')\n\n try:\n response = requests.post(url, data=payload, headers=_get_request_headers())\n\n # Check for HTTP codes other than 201 (Created)\n if response.status_code == status.HTTP_201_CREATED:\n log.debug(f'Successfully created ticket for {requester_email}')\n else:\n log.error(\n _std_error_message(\n f'Unexpected response: {response.status_code} - {response.content}',\n payload\n )\n )\n if additional_info:\n try:\n ticket = response.json()['ticket']\n except (ValueError, KeyError):\n log.error(\n _std_error_message(\n \"Got an unexpected response from zendesk api. Can't\"\n \" get the ticket number to add extra info. 
{}\".format(additional_info),\n response.content\n )\n )\n return status.HTTP_400_BAD_REQUEST\n return post_additional_info_as_comment(ticket['id'], additional_info)\n\n return response.status_code\n except Exception: # pylint: disable=broad-except\n log.exception(_std_error_message('Internal server error', payload))\n return status.HTTP_500_INTERNAL_SERVER_ERROR", "title": "" }, { "docid": "7694ac25b15c5f180b2486c47619db1d", "score": "0.60976124", "text": "def createEvent(self):\n logger.debug(\"CreateEvent in Consituent\")\n pass", "title": "" }, { "docid": "87265fbcceafe97cdb1db55b512a5381", "score": "0.60873175", "text": "def create(self, values):\n METHOD = self.env['anytracker.method']\n IMPORTANCE = self.env['anytracker.importance']\n if not values.get('importance_id'):\n if values.get('parent_id'):\n method = self.browse(values.get('parent_id')).method_id\n else:\n method = METHOD.browse(values['method_id'])\n importances = IMPORTANCE.search([\n ('method_id', '=', method.id),\n ('default', '=', True)])\n if len(importances) > 1:\n raise except_orm(\n _('Anytracker Configuration Error'),\n _(\"Two importances are configured as the default one \"\n \"in the '{}' method\".format(method.name)))\n if len(importances) == 1:\n values['importance_id'] = importances[0].id\n return super(Ticket, self).create(values)", "title": "" }, { "docid": "5d67ebe4a44fb9ba6d08f834563b5669", "score": "0.6051381", "text": "def __init__(self, queue, message, ticket, deadline, creation_time=None, redirect_key=None):\n \n self.queue = queue\n self.message = message\n self.ticket = ticket\n self._set_deadline(deadline, creation_time)\n self.redirect_key = redirect_key", "title": "" }, { "docid": "d36f67083d47b417e3ec51f5bddd8570", "score": "0.60497504", "text": "def _mkTicket(self, prefix, data, _timeout=None):\n timeout = _timeout or self.lifespan\n ticket = self._generate(prefix)\n self._tickets[ticket] = data\n dc = self.reactor.callLater(timeout, self.expireTicket, ticket)\n self._delays[ticket] = (dc, timeout)\n return defer.succeed(ticket)", "title": "" }, { "docid": "fc667264152ba761e46d2ebd8b167ebc", "score": "0.60252756", "text": "def _handle_ticket_form(request, form):\n if not form.is_valid():\n messages.error(\n request, \"The form is invalid, ensure all required fields are provided.\"\n )\n return None\n\n requestor = form.cleaned_data[\"email\"]\n requestor_meta = \" \".join(\n [\n form.cleaned_data[\"first_name\"],\n form.cleaned_data[\"last_name\"],\n requestor,\n ]\n )\n header = \"\\n\".join(\n [\n f\"[{key}] {value}\"\n for key, value in [\n (\"Opened by\", request.user),\n (\"Category\", form.cleaned_data[\"category\"]),\n (\"Project\", form.cleaned_data[\"project_id\"]),\n (\"Site\", form.cleaned_data[\"site\"]),\n (\"Lease ID\", form.cleaned_data[\"lease_id\"]),\n (\"Instance ID\", form.cleaned_data[\"instance_id\"]),\n (\"Resource\", \"Chameleon\"),\n ]\n ]\n )\n\n ticket_body = f\"\"\"{header}\n\n {form.cleaned_data[\"problem_description\"]}\n\n ---\n {requestor_meta}\n \"\"\"\n\n rt = rtUtil.DjangoRt()\n\n ticket = rtModels.Ticket(\n subject=form.cleaned_data[\"subject\"],\n problem_description=ticket_body,\n requestor=requestor,\n cc=form.cleaned_data.get(\"cc\", []),\n )\n\n ticket_id = rt.createTicket(ticket)\n\n if ticket_id < 0:\n logger.error(f\"Error creating ticket for {requestor}\")\n messages.error(\n request, (\"There was an error creating your ticket. 
Please try again.\")\n )\n return None\n\n logger.info(f\"Created ticket #{ticket_id} for {requestor}\")\n\n if \"attachment\" in request.FILES:\n attachment = request.FILES[\"attachment\"]\n mime_type, encoding = mimetypes.guess_type(attachment.name)\n files = [(attachment.name, attachment, mime_type)]\n success = rt.replyToTicket(ticket_id, files=files)\n if not success:\n logger.error(f\"Error adding attachment to #{ticket_id}\")\n\n add_openstack_data.apply_async(\n kwargs={\n \"username\": request.user.username,\n \"ticket_id\": ticket_id,\n },\n )\n\n messages.success(\n request,\n (\n f\"Ticket #{ticket_id} has been successfully created. \"\n \"We will respond to your request as soon as possible.\"\n ),\n )\n\n return ticket_id", "title": "" }, { "docid": "388b50c2a46115b7272eea3a61087328", "score": "0.6005961", "text": "def create(self, context):\n pass", "title": "" }, { "docid": "5f52b757fb2f30f9b11ea22ffcab9b14", "score": "0.5973704", "text": "def create(self):\n pass", "title": "" }, { "docid": "d3ec987901a0081ef0e59ab9f101cae8", "score": "0.59528667", "text": "def create(self):\n \n pass", "title": "" }, { "docid": "357c58a490d8714497468fa55711ee3e", "score": "0.5912921", "text": "def _created(self):\n self.oid = self.transport.add(self.parent.oid)\n pass", "title": "" }, { "docid": "ef7dd71f85a30767dc11f2ed7b24865e", "score": "0.58648646", "text": "def create_job_ticket(job: Job, db_session: SessionLocal):\n plugin = plugin_service.get_active(db_session=db_session, plugin_type=\"ticket\")\n\n title = job.job_code\n if job.visibility == Visibility.restricted:\n title = job.job_type.name\n\n job_type_plugin_metadata = job_type_service.get_by_name(\n db_session=db_session, name=job.job_type.name\n ).get_meta(plugin.slug)\n\n ticket = plugin.instance.create(\n job.id,\n title,\n job.job_type.name,\n job.job_priority.name,\n job.commander.code,\n job.reporter.code,\n job_type_plugin_metadata,\n )\n ticket.update({\"resource_type\": plugin.slug})\n\n event_service.log(\n db_session=db_session,\n source=plugin.job_code,\n description=\"External ticket created\",\n job_id=job.id,\n )\n\n return ticket", "title": "" }, { "docid": "9856984b2a3aeed61064d5250fd98a68", "score": "0.5810864", "text": "def __init__(self, uuid):\n try:\n db = rdb[cdb].split(':')\n results = r.db(db[0]).table(db[1]).get(uuid).run(g.rdb_conn)\n except RqlRuntimeError:\n print(u\"TICKETSMODEL: InitTicket: Critical Failure: Saving Throw Failed! 
while looking up UUID: {}\".format(uuid))\n\n if results is None:\n raise NoSuchUUIDExists\n\n self.id = results['id']\n self.updated_at = results['meta']['updated_at']\n\n\n # These fields may potentially be missing.\n try:\n self.source = results['meta']['source']\n except KeyError:\n self.source = \"Unknown Source\"\n\n try:\n self.name = results['meta']['name']\n except KeyError:\n self.name = \"No Name\"\n\n try:\n self.email = results['meta']['email']\n except KeyError:\n self.email = \"No Email\"\n\n try:\n self.phone = results['meta']['phone']\n except KeyError:\n self.phone = \"No Phone Number\"\n\n try:\n self.archived = results['meta']['archived']\n except KeyError:\n self.archived = False\n\n try:\n self.message = results['message']\n except KeyError:\n self.message = \"Empty Message\"\n\n try:\n self.results = results\n except KeyError:\n self.results = {}\n\n #print(\n # u\"TICKETSMODEL: Ticket_ID: {} Source: {} Name: {} Phone: {} Email: {} Archived: {}\".format(\n # self.id, self.source, self.name, self.phone, self.email, self.archived ))\n print(\n u\"TICKETSMODEL: Ticket_ID: {} Source: {} Archived: {}\".format(\n self.id, self.source, self.archived ))", "title": "" }, { "docid": "1088965004b5fe275dfcc1b7f4c118ce", "score": "0.5807754", "text": "def test_1_ticket_can_be_created_by_booster(self):\n print('aaaaaa1')\n booster_ticket_action = models.BoosterTicketAction(user=self.booster)\n min_mmr = 1000\n max_mmr = 2500\n day_used = 10\n price = 2000\n error, ticket_data = booster_ticket_action.create_ticket(min_mmr, max_mmr, day_used, price)\n\n self.assertEquals(min_mmr, ticket_data.get('min_mmr'))\n self.assertEquals(max_mmr, ticket_data.get('max_mmr'))\n self.assertEquals(day_used, ticket_data.get('day_used'))\n self.assertEquals(price, ticket_data.get('price'))\n self.assertEquals(self.booster.id, ticket_data.get('booster').get('id'))\n self.assertEquals([], ticket_data.get('clients'))\n self.assertEquals(1, ticket_data.get('status'))", "title": "" }, { "docid": "93581918e223cbaccdb438bb729d97d5", "score": "0.57930106", "text": "def viewTicket(self):\n ticketFile = self.ticketList[self.tickets.currentRow()]['fullpath']\n ticket = self.Ticket.getTicket(ticketFile)\n ticketDetails = ticket.getDetails()\n i = self.customers.findText(ticketDetails['customerName'])\n if i == -1:\n self.newCustomer.setText(ticketDetails['customerName'])\n self.customers.setCurrentIndex(0)\n else:\n self.customers.setCurrentIndex(i)\n self.newCustomer.clear()\n\n date = datetime.strptime( ticketDetails['date'], '%m-%d-%Y %H:%M')\n self.date.setDateTime(QDateTime(date.year, date.month, date.day, date.hour, date.minute))\n self.paidState.setCurrentIndex(self.paidState.findText(ticketDetails['paidState']))\n self.checkNum.setText(ticketDetails['checkNum'])\n self.comment.setText(ticketDetails['comment'])\n self.smallbagsQ.setValue(ticketDetails['smallbagsQ'])\n self.smallbagsP.setValue(ticketDetails['smallbagsP'])\n self.bigbagsQ.setValue(ticketDetails['bigbagsQ'])\n self.bigbagsP.setValue(ticketDetails['bigbagsP'])\n self.smallblockQ.setValue(ticketDetails['smallblockQ'])\n self.smallblockP.setValue(ticketDetails['smallblockP'])\n self.largeblockQ.setValue(ticketDetails['largeblockQ'])\n self.largeblockP.setValue(ticketDetails['largeblockP'])\n self.vendorQ.setValue(ticketDetails['vendorQ'])\n self.vendorP.setValue(ticketDetails['vendorP'])\n self.storageQ.setValue(ticketDetails['storageQ'])\n self.storageP.setValue(ticketDetails['storageP'])\n self.freightQ.setValue(ticketDetails['freightQ'])\n 
self.freightP.setValue(ticketDetails['freightP'])\n self.otherQ.setValue(ticketDetails['otherQ'])\n self.otherP.setValue(ticketDetails['otherP'])\n self.total.setValue(ticketDetails['total'])", "title": "" }, { "docid": "857d295d428c3f81f6855a8c213483d6", "score": "0.5788134", "text": "def new_time_entry(self,context,payload):\n timeentry_obj=FreshbooksTimeentry(\n name=payload[\"name\"],\n object_id=payload[\"object_id\"],\n account_id=payload[\"account_id\"],\n business_id=payload[\"business_id\"]\n )\n return timeentry_obj.__dict__", "title": "" }, { "docid": "68fc05aa955b6430e53da89f5f4e0520", "score": "0.5787397", "text": "def Ticket():\n from agile_analytics.models import AgileTicket\n\n def _Ticket(**kwargs):\n flow_logs = kwargs.pop('flow_logs')\n key = kwargs.pop('key')\n t = AgileTicket(key=key, ttype=\"Story\")\n for key, value in kwargs.items():\n setattr(t, key, value)\n for fl in flow_logs:\n t.flow_log.append(fl)\n return t\n return _Ticket", "title": "" }, { "docid": "f6b63f976b9d82944d7a83ce205dfd45", "score": "0.5756706", "text": "def createUserAuthTicket(self,userAuthInfo, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/authtickets/?responseFields={responseFields}\", \"POST\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(userAuthInfo).execute();\r\n\t\treturn self.client.result();", "title": "" }, { "docid": "7a16f523f2eed796a78f43e2f26ed245", "score": "0.574678", "text": "def create(self):\n return", "title": "" }, { "docid": "fb85fc19e483d35abbbefe67ecb3f42c", "score": "0.57318336", "text": "def ticket_queue(subject, body, user):\n\n DeskProTicket.objects.create(\n subject=f\"{settings.EMAIL_SUBJECT_PREFIX}{subject}\",\n body=body,\n user=user,\n )", "title": "" }, { "docid": "aa10eac30299451ffd030b9744fa240a", "score": "0.5713851", "text": "async def ticket_creation_message(self, ctx, *, message):\n await self.config.guild(ctx.guild).openmessage.set(message)\n if message == \"{default}\":\n await ctx.send(\"Ticket creation message restored to default.\")\n else:\n await ctx.send(\"Ticket creation message successfully set.\")", "title": "" }, { "docid": "79d97b0adb49646d0a4dd134c1656f47", "score": "0.57035893", "text": "def add_ticket(self, ticket_id: str, title: str, body: str):\n if self.vectorizer is None:\n logger.info(\"[SIMILAR] Vectorizer is not in cache, loading ...\")\n _, self.vectorizer = load_vectorizer(STEMMER)\n\n vectors = self.vectorizer(title, body)\n self.add_ticket_vectors(ticket_id, vectors, title, body)", "title": "" }, { "docid": "eca1c31f56ab5ab2aca99f0415d45945", "score": "0.5675692", "text": "def new_contact_created(sender, **kwargs):\n created = kwargs['created']\n if created:\n contact = kwargs['instance']\n connection = contact.default_connection\n if not connection:\n logging.debug('no connection found for recipient {0}, unable '\n 'to send'.format(contact))\n return\n text = _(\"You have been registered for TrialConnect.\")\n try:\n queue_message_to_contact(contact, text)\n logging.debug(\"Sending wecome message '%s' to '%s'.\" % (text, contact))\n except Exception, e:\n logging.exception(e)", "title": "" }, { "docid": "d1afd5bec31fa786a16f144fbbf88cd6", "score": "0.5674008", "text": "def handle_entity_create( self, sender, entity ):\n assert object_thread( self )\n self.logger.debug( 'received entity create signal' )\n # @todo : decide what to do when a new entity has been created,\n # probably do nothing\n return", 
"title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "2feebaa50da2470d77f0a698f7ba2eeb", "score": "0.56717867", "text": "def created(self, created):\n\n self._created = created", "title": "" }, { "docid": "1a2bcc9e402b52352dc590ec16496edf", "score": "0.5630126", "text": "def test_4_ticket_taken_by_user(self):\n print('aaaaaa4')\n client_ticket_action = models.ClientTicketAction(user=self.user, ticket=self.ticket)\n error, message = client_ticket_action.take_ticket()\n\n self.assertIsNone(error)", "title": "" }, { "docid": "dfcd0b1a8dcf822c289b66d8bbd26570", "score": "0.5606953", "text": "def test_2_ticket_create_error(self):\n print('aaaaaa2')\n booster_ticket_action = models.BoosterTicketAction(user=self.user)\n min_mmr = 1000\n max_mmr = 4000\n day_used = 5\n price = 10000\n error, ticket = booster_ticket_action.create_ticket(min_mmr, max_mmr, day_used, price)\n\n self.assertIsNotNone(error)", "title": "" }, { "docid": "26999af0a45fb0b4a0303a997c136deb", "score": "0.56031555", "text": "async def ticket(self, ctx):\n guild = ctx.guild\n support = guild.get_role(new_mention_id)\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(read_messages=False),\n guild.me: discord.PermissionOverwrite(read_messages=True,send_messages=True),\n ctx.author: 
discord.PermissionOverwrite(read_messages=True,send_messages=True),\n support : discord.PermissionOverwrite(read_messages=True,send_messages=True),\n }\n category = guild.get_channel(tickets_category)\n channel = await guild.create_text_channel(f\"{ctx.author.name} Ticket\",overwrites=overwrites,category=category)\n await channel.send(f\"{support.mention}, <@{ctx.author.id}>\")\n await channel.send(embed=discord.Embed(title ='Ticket Channel',description=f\"Please let us know your issue so we can help you\\n<@{ctx.author.id}>, <@&{support.id}>\",color=embed_color))\n await ctx.send(embed=discord.Embed(title = 'Ticket Created',description=f\"Your ticket can be found in <#{channel.id}>\",color=embed_color))", "title": "" }, { "docid": "f52da10573e7320cba19bba18bd1cda5", "score": "0.55961174", "text": "def createTicket(statement, ticket_data):\n response = ''\n\n ticket_type = ticket_data.ticket_type\n if ticket_type is None:\n if statement.upper() in TICKET_TYPE:\n ticket_data.ticket_type = statement\n else:\n return \"Please select a valid type from the given options or \" \\\n \"type 'cancel' to exit operation.\"\n response = \"\"\"\n Ticket needs to be sent to: 1) IT support, 2) HR support, \n 3) Facility support, 4) App support, 5) Finance support or 'cancel' \n to exit the operation.\n \"\"\"\n return response\n\n ticket_to = ticket_data.ticket_to\n if ticket_to is None:\n if statement.upper() in TICKET_TO:\n ticket_data.ticket_to = statement\n else:\n return \"Please select a valid option from the given options or \" \\\n \"type 'cancel' to exit operation.\"\n response = \"Please provide subject of the Ticket.\"\n return response\n\n ticket_subject = ticket_data.ticket_subject\n if ticket_subject is None:\n ticket_data.ticket_subject = statement\n response = \"Enter Description of the Ticket\"\n return response\n\n ticket_description = ticket_data.ticket_description\n if ticket_description is None:\n ticket_data.ticket_description = statement\n response = \"Please select priority of the Ticket 1) Low, 2) Normal, \" \\\n \"3) Medium, 4) High, 5) Very High or 'cancel' \" \\\n \"to exit the operation.\"\n return response\n\n ticket_priority = ticket_data.ticket_priority\n if ticket_priority is None:\n if statement.upper() in TICKET_PRIORITY:\n ticket_data.ticket_priority = statement\n\n response = createTicketWithGivenData(ticket_data)\n ticket_data.clear()\n else:\n return \"Please select a valid option from the given options or \" \\\n \"type 'cancel' to exit operation.\"\n return response\n\n if not response:\n response = statement\n return response", "title": "" }, { "docid": "4e40c58ae67adaa1f1417bc534ec1a17", "score": "0.5594471", "text": "def perform_create(self, serializer):\n logger.info('Message.perform_create')\n\n # Add the author and the publication date\n serializer.save(author=self.request.user, publication_date=datetime.datetime.now())\n logger.info('New message saved')\n # Notify tornado\n r = requests.post('http://localhost:1234/message_update', data={'event': 'create'})\n logger.info('Tornado notified: %s' % r.status_code)", "title": "" }, { "docid": "5cdffcdcf752545da8eafe8d83b7daf1", "score": "0.5593366", "text": "def create_badge(self, order, item, ticket_option):\n\n ticket_option = TicketOption.objects.get(sku=item.sku)\n ticket = ticket_option.ticket\n\n badge = self.model()\n badge.order = order\n badge.order_item = item\n badge.ticket = ticket_option\n badge.first_name = order.billing_detail_first_name\n badge.last_name = order.billing_detail_last_name\n 
badge.option = ticket_option.title\n\n if ticket.title.endswith(' Pass'):\n badge.type = ticket.title[:-5]\n else:\n badge.type = ticket.title\n\n badge.save()\n\n return badge", "title": "" }, { "docid": "68deadb5d64fbc50624dbbe06cc626d6", "score": "0.5582979", "text": "def create_issue(self, issue, connector_instance):", "title": "" }, { "docid": "e1bbc4b33b7dbc15d419b2ded2175230", "score": "0.558019", "text": "def on_create(sender=None, request=None, **kw):\n master = get_master(sender)\n if master is None:\n return\n log_change(ChangeTypes.create, request, master,\n sender, dd.obj2str(sender, True))", "title": "" }, { "docid": "585320b4f0cb7ba2a944b6f33dccf4d6", "score": "0.5548451", "text": "def handle_create(self):\n queue_name = self.physical_resource_name()\n queue = self.client().queue(queue_name, auto_create=False)\n metadata = self.properties.get('metadata')\n if metadata:\n queue.metadata(new_meta=metadata)\n self.resource_id_set(queue_name)", "title": "" }, { "docid": "9f9021d8558002fca5de4b816820d763", "score": "0.5548248", "text": "async def when_created(*args):\n\tpass", "title": "" }, { "docid": "fb77559f936b89125067ebeff93674f6", "score": "0.5535027", "text": "def create(cls, lottery_id, user_id, numbers):\n ticket_id = database_cursor(\n sql='''INSERT INTO tickets VALUES (%s, %s, %s, %s);''',\n variable=(None, lottery_id, user_id, TODAY),\n cursor_type='last_id')\n\n for number in numbers:\n database_cursor(\n sql='''INSERT INTO ticket_numbers VALUES (%s, %s, %s);''',\n variable=(None, ticket_id, number))\n\n return cls(ticket_id, lottery_id, user_id, TODAY, numbers)", "title": "" }, { "docid": "e014c0e16caa69770ee283b29613d2c8", "score": "0.55110085", "text": "def do_new_appointment(self, args):\n if self.logged_in:\n self.tablet.new_appointment({\n 'name' : args.name,\n 'timestamp' : args.timestamp,\n 'price' : args.price,\n 'account' : self.account_address,\n 'key' : self.account_key \n })\n else:\n print('Please sign in first!')", "title": "" }, { "docid": "6acbd9b3efa9db9d3850fd5d725d135d", "score": "0.55074936", "text": "def do_create(self, args):\n if not args.description:\n # TODO(scott): figure out message templating\n args.description = cli.get_text_from_editor(\"# fill this in\")\n if not args.notes:\n args.notes = \"\"\n event = self.manager.create_event(args)\n self._show_event(event)", "title": "" }, { "docid": "4a920745f63559c6fa529251153b09a4", "score": "0.550075", "text": "def ticket(ctx, ticket):\n issue_id = make_issue_descriptor(ctx.obj.repo.active_branch.name).id if not ticket else ticket\n verify_ticket_exists(ctx.obj.issue_tracker(), issue_id)\n url = ctx.obj.issue_tracker().browse_url(issue_id)\n click.echo('Opening \"{}\"'.format(url))\n webbrowser.open_new(url)", "title": "" }, { "docid": "251db8fce822e9d8e839908fb12c47d4", "score": "0.54927963", "text": "def process_IN_CREATE(self, event): # pylint: disable=C0103\n log.debug('received an event for CREATE')\n self._put_item_to_queue(event.pathname)", "title": "" }, { "docid": "a3acaebee32c82b6210362cf7be271a6", "score": "0.54869044", "text": "def create(self, *args, **kwargs): # pylint: disable=arguments-differ\n pass", "title": "" }, { "docid": "90c7333b76f5a81015db52b8ec80fc91", "score": "0.548339", "text": "def __init__(self, postURL=\"\",tenant=\"\", username=\"\", password=\"\", role=\"\"):\n PARAMS = {'tenant':tenant, 'username':username,'password':password, 'role':role}\n if not postURL == \"\":\n self.postReq = requests.post(url=postURL, data = PARAMS)\n else:\n self.postReq = \"\"\n 
self.__tickets = Ticket_Library()\n self.__ticketNumber = 0", "title": "" }, { "docid": "2bc0b1d9fa9ce0c44205b06072db47cc", "score": "0.5480287", "text": "def create(self):\n raise NotImplementedError", "title": "" }, { "docid": "8b47928e4a0eee1dd0433946260b5f7b", "score": "0.547738", "text": "def ticket_deleted(self, ticket):\r\n pass", "title": "" }, { "docid": "4577844ff9b0db2ad23ae53465732f9f", "score": "0.5466633", "text": "def before_create(self, data: dict) -> None:", "title": "" }, { "docid": "ee3f9791614c63ce5512cf3d6ecc10e7", "score": "0.5464601", "text": "def ticket_changed(self, ticket, comment, author, old_values):\r\n self.watch_complete(ticket, old_values)", "title": "" }, { "docid": "50cebd87907a006b6fd937f74f0c263d", "score": "0.5457233", "text": "def add_tickets(create_user, add_flights):\n\n user = create_user(USER)\n tickets = [{\n \"ticket_ref\": \"LOS29203SLC\",\n \"paid\": False,\n \"flight\": add_flights[0],\n \"type\": \"ECO\",\n \"seat_number\": \"E001\",\n \"made_by\": user,\n }, {\n \"ticket_ref\": \"LOS24933SLC\",\n \"paid\": False,\n \"flight\": add_flights[1],\n \"type\": \"ECO\",\n \"seat_number\": \"E001\",\n \"made_by\": user\n }]\n\n return [Ticket.objects.create(**ticket) for ticket in tickets]", "title": "" }, { "docid": "2574998af34f858a4d00dfb06dfe8ff5", "score": "0.54562265", "text": "def create(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "2574998af34f858a4d00dfb06dfe8ff5", "score": "0.54562265", "text": "def create(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "699a717e709b8fa6c12e80438a5ed57a", "score": "0.54536855", "text": "def _create(self, data):\n pass", "title": "" }, { "docid": "38b03b2d0e41b07cc6063d022f70dfc4", "score": "0.54212576", "text": "def submit_rt_ticket(obj, queue, subject, body, requestors, ticket_id_key):\n rt_instance = get_instance() if current_app.config.get(\n \"PRODUCTION_MODE\") else None\n if not rt_instance:\n obj.log.error(\"No RT instance available. 
Skipping!\")\n obj.log.info(\n \"Was going to submit: {subject}\\n\\n{body}\\n\\n\"\n \"To: {requestors} Queue: {queue}\".format(\n queue=queue,\n subject=subject,\n requestors=requestors,\n body=body\n )\n )\n return\n\n # Trick to prepare ticket body\n body = \"\\n \".join([line.strip() for line in body.split(\"\\n\")])\n rt_queue = current_app.config.get(\"BIBCATALOG_QUEUES\") or queue\n\n payload = dict(\n Queue=rt_queue,\n Subject=subject,\n Text=body,\n )\n recid = obj.extra_data.get(\"recid\") or obj.data.get(\"control_number\") \\\n or obj.data.get(\"recid\")\n if recid:\n payload['CF_RecordID'] = recid\n\n # Check if requests is set and also ignore admin due to RT mail loop\n if requestors and \"[email protected]\" not in requestors:\n payload['requestors'] = requestors\n\n ticket_id = rt_instance.create_ticket(**payload)\n\n obj.extra_data[ticket_id_key] = ticket_id\n obj.log.info(\"Ticket {0} created:\\n{1}\".format(\n ticket_id,\n body.encode(\"utf-8\", \"ignore\")\n ))\n return True", "title": "" }, { "docid": "6e062aee12fa7aef587de26dd208ea2f", "score": "0.54032797", "text": "def create(self):\n NotImplementedError", "title": "" }, { "docid": "0edda30c41d856af4cb235edae23deae", "score": "0.54002213", "text": "def OnCreate(self, form):\n pass", "title": "" }, { "docid": "66c46cf0bd00a313e8cefd5f0d453679", "score": "0.538187", "text": "def create(self, name):\n pass", "title": "" }, { "docid": "c358e0f6dab7e813c6f0f394dacb9aaa", "score": "0.5366018", "text": "def bug_create(\n self, release_num, application, environment,\n status, description, cc_mail=''\n ):\n\n self.output.log('Creating new bug via bugzilla REST API...', True)\n url = '{0}/rest/bug?token={1}'.format(self.host, self.token)\n data = self._get_json_create(\n release_num, application,\n environment, status, description, cc_mail\n )\n\n self.output.log(data)\n\n req = requests.post(url, data=json.dumps(data), headers=HEADERS)\n try:\n new_bug_id = req.json()['id']\n except KeyError:\n print('\\nERROR: {0}!\\n'.format(req.text))\n exit(1)\n\n self.output.log('\\nNew bug ID: {0}\\nDONE!\\n\\n'.format(new_bug_id))\n return new_bug_id", "title": "" }, { "docid": "57db7c3a00065f12b5c57db5b095bca3", "score": "0.53633904", "text": "def test_fire_on_new_thread(self, fire):\n u = UserFactory()\n f = ForumFactory()\n self.client.login(username=u.username, password=\"testpass\")\n post(\n self.client,\n \"forums.new_thread\",\n {\"title\": \"a title\", \"content\": \"a post\"},\n args=[f.slug],\n )\n # NewThreadEvent.fire() is called.\n assert fire.called", "title": "" }, { "docid": "57df844aaf433af448e53998415242d8", "score": "0.5361304", "text": "def mkServiceTicket(self, username, service):\n def doit(_):\n return self._mkTicket('ST-', {\n 'username': username,\n 'service': service,\n })\n return self._validService(service).addCallback(doit)", "title": "" }, { "docid": "4a09b49c308a05922f372a3164bb3ff8", "score": "0.5353129", "text": "def on_attachment_created(sender, **kwargs):\n if not isinstance(sender, LogEntry):\n is_new = kwargs.get('created', False)\n attachment = kwargs.get('instance')\n\n if attachment and is_new:\n crocdoc_service = CrocdocAttachmentService(attachment=attachment)\n crocdoc_service.process()\n\n todo = attachment.todo\n todostatus_service = ToDoStatusService(todo_item=todo)\n todostatus_service.process()\n\n # increment the attachment count\n todo.num_attachments_plus()\n\n verb = u'{name} uploaded an attachment: \"{filename}\" on the checklist item {todo} for 
{project}'.format(name=attachment.uploaded_by.get_full_name(), filename=attachment.filename, todo=attachment.todo, project=attachment.project).encode('utf-8')\n action.send(attachment.uploaded_by,\n verb=verb,\n action_object=attachment,\n target=attachment.todo,\n content=verb,\n attachment=attachment.filename,\n todo=attachment.todo.name,\n status=attachment.todo.display_status,\n event='todo.attachment.created')", "title": "" }, { "docid": "96ed4a5ca30c2c3d02d89bd36ed59fa7", "score": "0.5350341", "text": "def do_ticket(self):\n form = TicketForm()\n if form.validate_on_submit():\n flash('Message Sent.', 'success')\n return redirect(request.args.get('next') or url_for('TicketsView:index'))\n return render_template('tickets/ticketform-{}.html'.format(form.source.data), form=form)", "title": "" }, { "docid": "faaf77fc48fff8eac762c6381506f892", "score": "0.5337949", "text": "def run_new(self, owner=None):\n template = self.resolve_template()\n\n if not template:\n if self.message_file:\n if self.message_file == \"-\":\n template = sys.stdin.read()\n else:\n with open(self.message_file) as fp:\n template = fp.read()\n else:\n template = DEFAULT_TEMPLATE\n\n # Parse the template ahead of time, allowing us to insert the Owner/To.\n ep = email.parser.Parser()\n em = ep.parsestr(template)\n body = em.get_payload()\n headers = OrderedDict(em.items())\n\n # The owner specified on the command line always prevails.\n if owner:\n headers[\"To\"] = owner\n\n # If all else fail, assign it to yourself.\n if not headers[\"To\"]:\n headers[\"To\"] = self.username\n\n self.login()\n\n valid = False\n while not valid:\n # Get the properties at each iteration, in case an admin updated\n # the list in the mean time.\n options = self.get_property_options()\n\n # Assume the user will produce a valid ticket\n valid = True\n\n # Load the current values in a temp file for editing\n (fd, filename) = tempfile.mkstemp(suffix=\".cm.ticket\")\n fp = os.fdopen(fd, \"w\")\n\n fp.write(self._format_headers(headers))\n fp.write(\"\\n\\n\")\n fp.write(body)\n\n fp.close()\n\n # When reading the message from stdin, we can't edit, skip editor.\n if not self.message_file:\n self.editor(filename)\n\n # Use the email parser to get the headers.\n ep = email.parser.Parser()\n with open(filename, \"r\") as fp:\n em = ep.parse(fp)\n\n os.unlink(filename)\n\n body = em.get_payload()\n headers = OrderedDict(em.items())\n\n errors = []\n fuzzy_match_fields = (\"Milestone\", \"Component\", \"Type\", \"Version\",\n \"Priority\")\n\n # Ensures all the required fields are filled-in\n for key in self.required_fields:\n if key in fuzzy_match_fields:\n continue\n if not headers.get(key) or \"**ERROR**\" in headers[key]:\n errors.append(\"Invalid '{}': cannot be blank\".format(key))\n\n # Some fields are tolerant to incomplete values, this is where we\n # try to complete them.\n for key in fuzzy_match_fields:\n lkey = key.lower()\n if lkey not in options:\n continue\n\n valid_options = options[lkey]\n\n # The specified value is not available in the multi-choice.\n if key in headers and headers[key] not in valid_options:\n m = text.fuzzy_find(headers[key], valid_options)\n if m:\n # We found a close match, update the value with it.\n headers[key] = m\n else:\n # We didn't find a close match. 
If the user entered\n # something explicitly or if this field is required,\n # this is an error, else just wipe the value and move\n # on.\n if headers[key] or key in self.required_fields:\n joined_options = \", \".join(valid_options)\n errors.append(u\"Invalid '{}': expected: {}\"\n .format(key, joined_options))\n else:\n headers[key] = \"\"\n\n if errors:\n valid = False\n print(\"\\nFound the following errors:\")\n for error in errors:\n print(u\" - {}\".format(error))\n\n try:\n if not self.message_file:\n self.input(\"\\n-- Hit Enter to return to editor, \"\n \"^C to abort --\\n\")\n except KeyboardInterrupt:\n raise exceptions.FatalError(\"ticket creation interrupted\")\n\n # There is no editor loop when reading message from stdin, just\n # print the errors and exit.\n if self.message_file:\n break\n\n # Since the body is expected to be using CRLF line termination, we\n # replace newlines by CRLF if no CRLF is found.\n if \"\\r\\n\" not in body:\n body = body.replace(\"\\n\", \"\\r\\n\")\n\n fields_data = {\n \"field_summary\": headers.get(\"Subject\", \"\"),\n \"field_type\": headers.get(\"Type\", \"\"),\n \"field_version\": headers.get(\"Version\", \"\"),\n \"field_description\": body,\n \"field_milestone\": headers.get(\"Milestone\", \"\"),\n \"field_component\": headers.get(\"Component\", \"\"),\n \"field_owner\": headers.get(\"To\", \"\"),\n \"field_keywords\": headers.get(\"Keywords\", \"\"),\n \"field_cc\": headers.get(\"Cc\", \"\"),\n \"field_attachment\": \"\",\n }\n\n # Assume anything outside of the original headers it to be included as\n # fields.\n for key, value in headers.items():\n field_name = \"field_\" + key.lower()\n if field_name not in fields_data:\n fields_data[field_name] = value\n\n r = self.post(\"/newticket\", fields_data)\n\n if r.status_code != 200:\n message = text.extract_message(r.text)\n if not message:\n message = \"unable to create new ticket\"\n raise exceptions.RequestException(message)\n\n try:\n ticket_id = int(r.url.split(\"/\")[-1])\n except:\n raise exceptions.RequestException(\"returned ticket_id is invalid.\")\n\n self.open_in_browser_on_request(ticket_id)\n\n return [\"ticket #{} created\".format(ticket_id)]", "title": "" }, { "docid": "bdc213d9a3b6849f6cb4412a14b5a1a5", "score": "0.5334145", "text": "def issue_create(\n jira_assignee\n , auth_name = 'USER' \n , auth_passwd = 'PASSWORD'\n , jira_project = 'TIGER'\n , jira_label = 'test' # multiple with ,\n , jira_title = 'test : 테스트'\n , jira_issuetype = 'Task'\n , jira_description = 'test : 테스트'\n , jira_component = 'CMU'\n , jira_priority = 'P2'\n , jira_reporter = ''\n , jira_watcher = '' # multiple with ,\n , jira_attachment = '' # multiple with ,\n , jira_remaindate = 5\n):\n import jira.client\n\n if not jira_assignee:\n print(\"ERROR : you should set assignee\")\n quit()\n if not jira_reporter:\n jira_reporter = auth_name\n if not jira_project:\n jira_project = 'TIGER'\n if not jira_issuetype:\n jira_issuetype = 'Task'\n if not jira_component:\n jira_component = 'CMU'\n if not jira_priority:\n jira_priority = 'P2'\n if jira_remaindate < 1:\n jira_remaindate = 1\n jira_duedate = str(datetime.datetime.now() + datetime.timedelta(days = jira_remaindate)).split()[0]\n \n print(\"Make Issue Ticket\")\n print(\"\\tauth_name : {}\" , auth_name)\n print(\"\\tauth_passwd : {}\" , auth_passwd)\n print(\"\\tjira_project : {}\" , jira_project)\n print(\"\\tjira_label : {}\" , jira_label)\n print(\"\\tjira_title : {}\" , jira_title)\n print(\"\\tjira_issuetype : {}\" , jira_issuetype)\n 
print(\"\\tjira_description : {}\" , jira_description)\n print(\"\\tjira_component : {}\" , jira_component)\n print(\"\\tjira_priority : {}\" , jira_priority)\n print(\"\\tjira_reporter : {}\" , jira_reporter)\n print(\"\\tjira_assignee : {}\" , jira_assignee)\n print(\"\\tjira_watcher : {}\" , jira_watcher)\n print(\"\\tjira_attachment : {}\" , jira_attachment)\n print(\"\\tjira_remaindate : {}\" , jira_remaindate)\n print(\"\\t\\tjira_duedate : {}\" , jira_duedate)\n\n return \n\n options = {'server': 'http://vlm.lge.com/issue'}\n jira = jira.client.JIRA(options, basic_auth = (auth_name, auth_passwd)) # Noted!! you need change your username and password\n\n # label , title , issuetype , component , watcher , attachment , duedate\n # auth_user , auth_password\n # project , assignee , desc\n jira_task_dict = {\n 'project' : { 'key': jira_project },\n 'issuetype' : { 'name' : jira_issuetype },\n 'summary' : jira_title,\n 'description' : jira_description,\n 'duedate': jira_duedate,\n 'priority': { 'name' : jira_priority},\n 'components': [{\"name\": jira_component}],\n 'reporter': {'name': jira_reporter},\n 'assignee': {'name': jira_assignee}\n }\n\n issue = jira.create_issue(fields=jira_task_dict)\n print('Jira issue id: ', issue)\n # print(\"{} to the power {} equals {}\".format(args.x, args.y, answer)\n\n labels = jira_label.split(',')\n for label in labels:\n issue.fields.labels.append(label)\n issue.update(fields={\"labels\": issue.fields.labels})\n watchers = jira_watcher.split(',')\n for watcher in watchers:\n jira.add_watcher(issue, watcher)\n #add attachments file\n attachs = jira_attachment.split(',')\n for attach in attachs:\n jira.add_attachment(issue=issue, attachment = attach)", "title": "" }, { "docid": "0b1808e3db4353103c84993c9c2544d2", "score": "0.5317905", "text": "def test_init_ticket_2 (self):\n product1 = my_solution.Product(2, 2)\n product2 = my_solution.Product(5, 1)\n ticket = my_solution.Ticket([product1, product2])\n\n self.assertTrue(hasattr(ticket, \"products\"))\n self.assertIs(len(ticket.products), 2)\n self.assertIs(ticket.products[0].amount, 2)\n self.assertIs(ticket.products[1].price, 5)", "title": "" } ]
a4e6545ac2bd52a24944ef49a2ee388a
Test case for patch_hyperflex_software_version_policy
[ { "docid": "ab51cc834c4d97dcc025b138e2442456", "score": "0.9544049", "text": "def test_patch_hyperflex_software_version_policy(self):\n pass", "title": "" } ]
[ { "docid": "4d5e955de88f71dff747e6600e7521ce", "score": "0.91287255", "text": "def test_update_hyperflex_software_version_policy(self):\n pass", "title": "" }, { "docid": "4c50caac1f2c8f81955c4ae6602bfcf9", "score": "0.8505383", "text": "def test_create_hyperflex_software_version_policy(self):\n pass", "title": "" }, { "docid": "5a7f3009a51464068546ede10a6635a1", "score": "0.7917746", "text": "def test_patch_hyperflex_server_firmware_version(self):\n pass", "title": "" }, { "docid": "627e4005a364c9ff4564f193045e3e32", "score": "0.7743433", "text": "def test_delete_hyperflex_software_version_policy(self):\n pass", "title": "" }, { "docid": "039f976248c5d208bf66deb3b0be5d3d", "score": "0.7643674", "text": "def test_get_hyperflex_software_version_policy_list(self):\n pass", "title": "" }, { "docid": "25d9e04ac6008e1c16203e2fe923ed9e", "score": "0.74859905", "text": "def test_patch_hyperflex_hxdp_version(self):\n pass", "title": "" }, { "docid": "e82e448846e3647d66d3fe8f81801588", "score": "0.74026227", "text": "def test_patch_hyperflex_auto_support_policy(self):\n pass", "title": "" }, { "docid": "57084439114f77aa22389c87106dba78", "score": "0.72823536", "text": "def test_update_hyperflex_server_firmware_version(self):\n pass", "title": "" }, { "docid": "b297ec42cf29ef5982814111da5d4609", "score": "0.7243158", "text": "def test_update_hyperflex_auto_support_policy(self):\n pass", "title": "" }, { "docid": "0c45d8416b6fae5eaef2f0fa5d9c356e", "score": "0.7240627", "text": "def test_get_hyperflex_software_version_policy_by_moid(self):\n pass", "title": "" }, { "docid": "a0196c69aaf9442caf41e0685285d42d", "score": "0.71915513", "text": "def test_update_hyperflex_hxdp_version(self):\n pass", "title": "" }, { "docid": "9a0b80f9ac3ed886d1e53f2bb102b98a", "score": "0.7168357", "text": "def test_patch_hyperflex_sys_config_policy(self):\n pass", "title": "" }, { "docid": "d4a6e43fae60dea66984c199b2b03fe1", "score": "0.6953103", "text": "def test_update_hyperflex_sys_config_policy(self):\n pass", "title": "" }, { "docid": "84fdaf76a49aa07faa223033e0204ccc", "score": "0.68808866", "text": "def test_changeVersionsWithPrerelease(self):\n self._testVersionChanging(9, 2, 7, 38)", "title": "" }, { "docid": "96da65ed50c1944fb9928799a2e04912", "score": "0.6871456", "text": "def test_patch_hyperflex_vcenter_config_policy(self):\n pass", "title": "" }, { "docid": "1e41a4e1aa29667a573b04645e69b7e5", "score": "0.677585", "text": "def test_patch_hyperflex_ucsm_config_policy(self):\n pass", "title": "" }, { "docid": "b3f0f4a769dcae48f3d5315ad883d08c", "score": "0.6653474", "text": "def test_patch_hyperflex_node_config_policy(self):\n pass", "title": "" }, { "docid": "3fbbb71123957629a8bc963a960568c3", "score": "0.6644668", "text": "def test_create_hyperflex_server_firmware_version(self):\n pass", "title": "" }, { "docid": "5d8a30d4930c5a97cab54c0235c61e1c", "score": "0.66433513", "text": "def test_patch_bios_policy(self):\n pass", "title": "" }, { "docid": "224cee540fadcb3c0f9eb82cf7d792bd", "score": "0.65358466", "text": "def test_patch(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[2] = int(new_version_parts[2]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "title": "" }, { "docid": "bdbdae3db173fca72712363e3658cdee", "score": "0.6483984", "text": "def test_create_hyperflex_hxdp_version(self):\n pass", "title": "" }, { "docid": "725005c93a716a8e79af2f6005b7b72c", "score": "0.64405406", "text": "def test_firmware_version(self):\n 
self._verify_firmware_version()", "title": "" }, { "docid": "ee76cbcd141fb57456f51f62e2d4657a", "score": "0.6403246", "text": "def test_patch_hyperflex_capability_info(self):\n pass", "title": "" }, { "docid": "e3ced363d60770f793b4e073ce1d911a", "score": "0.63994974", "text": "def test_update_hyperflex_vcenter_config_policy(self):\n pass", "title": "" }, { "docid": "54bce1352c475bd3de3c20547449fad4", "score": "0.6391098", "text": "def test_update_hyperflex_ucsm_config_policy(self):\n pass", "title": "" }, { "docid": "703f91ce84072b768dc153f5a958bc14", "score": "0.63904756", "text": "def test_patch_hyperflex_proxy_setting_policy(self):\n pass", "title": "" }, { "docid": "e277497e914862c5ee2ad7a2d559e248", "score": "0.6331508", "text": "def test_create_hyperflex_auto_support_policy(self):\n pass", "title": "" }, { "docid": "c9bdc98109b0f93791517c303e5d608d", "score": "0.63193685", "text": "def test_patch_hyperflex_ext_fc_storage_policy(self):\n pass", "title": "" }, { "docid": "6cc060d1131588ef3a208722c34a0436", "score": "0.6296983", "text": "def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "title": "" }, { "docid": "0c3a8702d1c2fb170613c72429eaa781", "score": "0.6295138", "text": "def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))", "title": "" }, { "docid": "0c18d49d7143b06c429176d55d4c4ff6", "score": "0.62866586", "text": "def test_update_hyperflex_node_config_policy(self):\n pass", "title": "" }, { "docid": "127c397cc7e2ae22b134a4a78b68c6d0", "score": "0.6245431", "text": "def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "title": "" }, { "docid": "ae51ab6cfbd4502e0728e5716b90d4eb", "score": "0.6226228", "text": "def test_version(self):\n pass", "title": "" }, { "docid": "beee50198e7c104bc7e62704d5fc656c", "score": "0.6212926", "text": "def test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is False", "title": "" }, { "docid": "530eb6aa532962fef67c71f232a27cd9", "score": "0.61921763", "text": "def test_get_version(self):\n pass", "title": "" }, { "docid": "8d3518f5baf494486973e24ceeb0de66", "score": "0.6180285", "text": "def test_update_hyperflex_proxy_setting_policy(self):\n pass", "title": "" }, { "docid": "d083267138ebd6306d475c849fabc87b", "score": "0.61507654", "text": "def test_patch_hyperflex_local_credential_policy(self):\n pass", "title": "" }, { "docid": "e8a1a44194625a5b746730de440048d3", "score": "0.6149726", "text": "def test_update_hyperflex_capability_info(self):\n pass", "title": "" }, { "docid": "bc04f2231752c472b0df22ca1664c07e", "score": "0.6101326", "text": "def test_update_software_component_for_system_module(self):\n pass", "title": "" }, { "docid": "d6eefc54ce3cb2f5f47a10971ebfe9ab", "score": "0.60952234", "text": "def test_delete_hyperflex_server_firmware_version(self):\n pass", "title": "" }, { "docid": 
"91c84ed2b05c6f95a425289cf4fa6764", "score": "0.6092875", "text": "def test_patch_hyperflex_cluster_network_policy(self):\n pass", "title": "" }, { "docid": "35a5e59a3852a2a905ed026024732a9c", "score": "0.6063085", "text": "def py_versiontest(c):\n pass", "title": "" }, { "docid": "b54e813e9ea0f924851fb75394c30b86", "score": "0.60537696", "text": "def test_case03(self):\n version1 = versions.get_version_power(\"1.1.1\")\n version2 = versions.get_version_power(\"0.2.1\")\n self.assertGreater(version1, version2)", "title": "" }, { "docid": "bc487f854d40b676dff4d2e906b0c6a1", "score": "0.60471976", "text": "def test_update_hyperflex_ext_fc_storage_policy(self):\n pass", "title": "" }, { "docid": "454e2cdf9dd84ccba08b2885975e7cdd", "score": "0.6043588", "text": "def test_update_bios_policy(self):\n pass", "title": "" }, { "docid": "0d45ccfead0fb4cbbb5ed07c13bf0ce0", "score": "0.6035498", "text": "def test_patch_hyperflex_ext_iscsi_storage_policy(self):\n pass", "title": "" }, { "docid": "c0623c4e68bca07a2c07fa9e23a70c04", "score": "0.6030355", "text": "def test_update_software_components_for_system_module(self):\n pass", "title": "" }, { "docid": "79a60bb0209ead4311d87aae622bed0d", "score": "0.60052466", "text": "def test_get_hyperflex_server_firmware_version_list(self):\n pass", "title": "" }, { "docid": "1b58c989d6d71365246da190734c717e", "score": "0.59660256", "text": "def test_delete_hyperflex_hxdp_version(self):\n pass", "title": "" }, { "docid": "f45647382a0de3359f76827e8e5379d8", "score": "0.5954659", "text": "def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")", "title": "" }, { "docid": "c9ad70942c83a7787d4e7e15c1e900bb", "score": "0.5951805", "text": "def test_create_hyperflex_sys_config_policy(self):\n pass", "title": "" }, { "docid": "ed13c3a6faea74f5b2565fedb8ae37de", "score": "0.5944472", "text": "def test_2x_only_python_version_deploy():\n pass", "title": "" }, { "docid": "1239c57491be8bf5150c9fefaad46985", "score": "0.5910565", "text": "def test_init_unsupported_version(self, monkeypatch, runway_config, runway_context):\n monkeypatch.setattr(MODULE + \".__version__\", \"1.3\")\n with pytest.raises(SystemExit) as excinfo:\n assert not Runway(runway_config, runway_context)\n assert excinfo.value.code == 1", "title": "" }, { "docid": "fb6b85825dbc39b2b9f57048787d69c2", "score": "0.5906543", "text": "def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())", "title": "" }, { "docid": "4b1ed8a71b779cd90f5ac0eb049b934d", "score": "0.58987325", "text": "def test_changeVersions(self):\n self._testVersionChanging(8, 2, 3)", "title": "" }, { "docid": "671d36149742a6f54163bc2aae4af448", "score": "0.58915055", "text": "def test_get_cons3rt_version(self):\n pass", "title": "" }, { "docid": "9a31db69789af5a7a4618d434a36cc97", "score": "0.58678615", "text": "def test_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0", "title": "" }, { "docid": "31f38d1e8a6801fcfac96d67d8d88c71", "score": "0.5859708", "text": "def 
test_patch_hyperflex_feature_limit_external(self):\n pass", "title": "" }, { "docid": "bc64ac0d4854a9e9f210db92234ff99c", "score": "0.5846065", "text": "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "title": "" }, { "docid": "69b8237ead36cbed040677faa23d8eaf", "score": "0.5841981", "text": "def test_get_hyperflex_hxdp_version_list(self):\n pass", "title": "" }, { "docid": "27289fd5bcde84fdf69b7e8ec95f6d32", "score": "0.5838193", "text": "def _testVersionChanging(self, major, minor, micro, prerelease=None):\n versionUpdates = []\n def myVersionChanger(sourceTree, versionTemplate):\n versionUpdates.append((sourceTree, versionTemplate))\n versionChanger = ChangeVersionsScript()\n versionChanger.changeAllProjectVersions = myVersionChanger\n version = \"%d.%d.%d\" % (major, minor, micro)\n if prerelease is not None:\n version += \"pre%d\" % (prerelease,)\n versionChanger.main([version])\n self.assertEquals(len(versionUpdates), 1)\n self.assertEquals(versionUpdates[0][0], FilePath(\".\"))\n self.assertEquals(versionUpdates[0][1].major, major)\n self.assertEquals(versionUpdates[0][1].minor, minor)\n self.assertEquals(versionUpdates[0][1].micro, micro)\n self.assertEquals(versionUpdates[0][1].prerelease, prerelease)", "title": "" }, { "docid": "fd8bb97772d0e9d4172a4fce661bc209", "score": "0.5828938", "text": "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "title": "" }, { "docid": "8d01f79a61fe0056082c2e60cb1f82ef", "score": "0.58108085", "text": "def test_patch_cluster_policy(self):\n pass", "title": "" }, { "docid": "66ecf870729ed58befb82e7674cd1c6e", "score": "0.5802227", "text": "def version(monkeypatch):\n monkeypatch.setattr(homeassistant.const, \"__version__\", \"0.5.0\")", "title": "" }, { "docid": "2850f06c80a811a2eb8a969e8d2f6a04", "score": "0.580017", "text": "def test_update_hyperflex_local_credential_policy(self):\n pass", "title": "" }, { "docid": "8ef153f5613daedd4e6d3ac1505b1026", "score": "0.5798659", "text": "def test_3x_only_python_versions_deploy():\n pass", "title": "" }, { "docid": "1ad3464e995a0086e9c6ad30f6400f31", "score": "0.57933855", "text": "def test_fw_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'FW version' in result.output\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0", "title": "" }, { "docid": "445a95789ca59ca6a0bcc4390545b641", "score": "0.5780852", "text": "def test_update_hyperflex_cluster_network_policy(self):\n pass", "title": "" }, { "docid": "c35856473f35f0d63beb6117394da66b", "score": "0.5773968", "text": "def test_delete_hyperflex_auto_support_policy(self):\n pass", "title": "" }, { "docid": "d859acb1e66ce437f3751f04efecf475", "score": "0.5768167", "text": "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "title": "" }, { "docid": "6c6d7dfa7defb85ae2a84d604ec199f4", "score": "0.57596993", "text": "def test_update_software_configuration_for_system_module(self):\n pass", "title": "" }, { "docid": "a0b1b79534bd183aaa1eedbd307596f3", "score": "0.5742208", "text": "def test_patch_hyperflex_server_model(self):\n pass", "title": "" }, { "docid": 
"f4ba0cab41b22da41a27b7aa0b57db12", "score": "0.57403684", "text": "def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)", "title": "" }, { "docid": "9137fe7f19a50f654f9e14685c360390", "score": "0.5734119", "text": "def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions", "title": "" }, { "docid": "e749c9facf90c74c160a60cb24d9e71b", "score": "0.5728864", "text": "def test_update_hyperflex_ext_iscsi_storage_policy(self):\n pass", "title": "" }, { "docid": "59dd05f61aaa271b7ed3629d3ddce73e", "score": "0.5704703", "text": "def test_patch_cluster_policy_binding(self):\n pass", "title": "" }, { "docid": "4d4d8a264094dca1c3ee35fa487a4fef", "score": "0.5697776", "text": "def test_change_provisioned_throughput_usual_case():", "title": "" }, { "docid": "3c79af4acb542320bee1f33b69fad1ea", "score": "0.56970745", "text": "def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)", "title": "" }, { "docid": "e8d1cbfe079cf3910bf7fdc21ab0e14e", "score": "0.5684907", "text": "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "title": "" }, { "docid": "8e1f3e3963680427b4588341845a2464", "score": "0.56839454", "text": "def test_update_software_asset_impact_level(self):\n pass", "title": "" }, { "docid": "946e73679a0e1bfb4d400d747b7ade94", "score": "0.568172", "text": "def checkVersion(self, clientName, edamVersionMajor, edamVersionMinor):\r\n pass", "title": "" }, { "docid": "7f8b8da74c60aee928adfbfd905b1ddf", "score": "0.566981", "text": "def test_ifVersionIsCorrect():\n \n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"version\" in testConfig.config:\n print \"Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfVersionIsExact, testConfig.config\n \n if \"minimum_version\" in testConfig.config:\n print \"Minimum Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfMinimumVersionIsMet, testConfig.config", "title": "" }, { "docid": "e3e712dba8f1d0af87739d3eb4de4684", "score": "0.5662895", "text": "async def test_stable_version_beta_week(HaVersion):\n with patch(\n \"pyhaversion.container.HaVersionContainer.data\",\n fixture(\"container/beta_week\"),\n ):\n async with aiohttp.ClientSession() as session:\n haversion = HaVersion(\n session=session,\n source=HaVersionSource.CONTAINER,\n )\n await haversion.get_version()\n assert haversion.version == STABLE_VERSION_BETA_WEEK", "title": "" }, { "docid": "9827af9cddf415e9f2aff08cb496e16f", "score": "0.5662833", "text": "def test_higher_version_always_preferred(self):\n try:\n self.prepare()\n self.assertEquals((1, 2, 4), compute_version(\n get_git_describe(repository_directory=self.repo, fix_environment=True, accepted_tag_pattern='repo-*')\n ))\n finally:\n rmtree(self.repo)\n os.chdir(self.oldcwd)", "title": "" }, { "docid": "fd4546050640574d1ca204f4646d6f22", "score": "0.5660354", "text": "def test_patch_hyperflex_feature_limit_internal(self):\n pass", "title": "" }, { "docid": "75e2bb360e44296eb4128b54fbfef739", "score": "0.563925", "text": "def test_update_hyperflex_feature_limit_external(self):\n pass", "title": "" }, { "docid": 
"e2c206e04f1d1dff820bd1a0d77a8036", "score": "0.5617935", "text": "def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion", "title": "" }, { "docid": "8af817117586c7936404175173f87a09", "score": "0.5610465", "text": "def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))", "title": "" }, { "docid": "8448b17b18b458c4f66e7449955ff72f", "score": "0.5601746", "text": "def test_get_short_version(self):\n pass", "title": "" }, { "docid": "07c0e3cb3b813b9f67adf58832f6bceb", "score": "0.5597425", "text": "def test_version():\n mock_version = MagicMock(return_value=\"0.1.5\")\n with patch.dict(mac_brew.__salt__, {\"pkg_resource.version\": mock_version}):\n assert mac_brew.version(\"foo\") == \"0.1.5\"", "title": "" }, { "docid": "0496630497f5d4501274dd2a36df2421", "score": "0.5583406", "text": "def test_version():\n assert pywren.__version__ is not None", "title": "" }, { "docid": "5c9425d3aeafd65f53ebe335375d59b2", "score": "0.5579021", "text": "def test_semantic_version():\n semantic_version.Version(settings.VERSION)", "title": "" }, { "docid": "4aefc1b3606e81643d56faba7805bfa0", "score": "0.5578216", "text": "def test_update_software_asset(self):\n pass", "title": "" }, { "docid": "a5587d11db939a245309569f775cdd98", "score": "0.5576831", "text": "def _get_version(self):", "title": "" }, { "docid": "7eb9d53b80c8a533ded248cb005282a5", "score": "0.5574269", "text": "def version_number() -> int:\n return 0", "title": "" }, { "docid": "9682a50f3ee6be40e318f174bc62eb2b", "score": "0.55666", "text": "def test_patch_hyperflex_app_catalog(self):\n pass", "title": "" }, { "docid": "4a713b89e83ace747212dcfb9641c81f", "score": "0.5558243", "text": "def test_version_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('version remove 1.0')\n rv, output = self._execute('version list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "title": "" }, { "docid": "dacb97f0c2e986ba5209910f5a736573", "score": "0.55569595", "text": "def test_init_undetermined_version(\n self, caplog, monkeypatch, runway_config, runway_context\n ):\n monkeypatch.setattr(MODULE + \".__version__\", \"0.1.0-dev1\")\n caplog.set_level(logging.WARNING, logger=MODULE)\n assert Runway(runway_config, runway_context)\n assert \"shallow clone of the repo\" in \"\\n\".join(caplog.messages)", "title": "" }, { "docid": "9c98724fe110f30875ef2d24bc17ff9c", "score": "0.55518323", "text": "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 
1), ver)", "title": "" }, { "docid": "2178ba1f6a1b4778cf1e94f9f96b5792", "score": "0.55440134", "text": "def testGetVersion(self):\n helper = pylint.PylintHelper()\n\n helper._GetVersion()", "title": "" } ]
f5b658fb54dc3fd91de64cbe1c75e84c
Handle the "change password" task both form display and validation.
[ { "docid": "667729a74ce5f7559dc6c6426bbaf866", "score": "0.0", "text": "def edit_my_shop(self, request, extra_context=None):\n defaults = {\n 'extra_context': {**self.each_context(request), **(extra_context or {})},\n }\n request.current_app = self.name\n return MyshopEditView.as_view(**defaults)(request)", "title": "" } ]
[ { "docid": "a59271f0af92a9a76696c145a4dd8436", "score": "0.77990556", "text": "def _change_password(request):\n context = {\n 'change_password_form': PasswordChangeForm(request.user, request.POST),\n 'subscribe_form': SubscribeForm(request.user)\n }\n if context['change_password_form'].is_valid():\n context['change_password_form'].save()\n messages.info(request, 'Password was updated.')\n return HttpResponseRedirect(reverse('settings'))\n return render_to_response(\"service/settings.html\", context, context_instance=RequestContext(request))", "title": "" }, { "docid": "75930a7ed0f462b434cfca602155b433", "score": "0.7465032", "text": "def password_change(request, extra_context=None):\r\n from django.contrib.admin.forms import AdminPasswordChangeForm\r\n from django.contrib.auth.views import password_change\r\n url = reverse('regression:password_change_done')\r\n defaults = {\r\n 'password_change_form': AdminPasswordChangeForm,\r\n 'post_change_redirect': url,\r\n 'extra_context': dict(**(extra_context or {})),\r\n }\r\n defaults['template_name'] = 'regression/change_password.html'\r\n return password_change(request, **defaults)", "title": "" }, { "docid": "44fe7521a01a9f376d7a218c1829f846", "score": "0.7363345", "text": "def change_password():\n user_manager = current_app.user_manager\n db_adapter = user_manager.db_adapter\n\n # Initialize form\n form = user_manager.change_password_form(request.form)\n form.next.data = request.args.get('next', _endpoint_url(user_manager.after_change_password_endpoint)) # Place ?next query param in next form field\n\n # Process valid POST\n if request.method=='POST' and form.validate():\n # Hash password\n hashed_password = user_manager.hash_password(form.new_password.data)\n\n # Change password\n user_manager.update_password(current_user, hashed_password)\n\n # Send 'password_changed' email\n if user_manager.enable_email and user_manager.send_password_changed_email:\n emails.send_password_changed_email(current_user)\n\n # Send password_changed signal\n signals.user_changed_password.send(current_app._get_current_object(), user=current_user)\n\n # Prepare one-time system message\n flash(_('Your password has been changed successfully.'), 'success')\n\n # Redirect to 'next' URL\n return redirect(form.next.data)\n\n # Process GET or invalid POST\n return render_template(user_manager.change_password_template, form=form)", "title": "" }, { "docid": "e39df8218c9e486b2b9f750c0823b561", "score": "0.7327036", "text": "def password_change(request, template_name='registration/password_form.html',\n pass_form=PasswordChangeForm, success_url=None, \n extra_context=None):\n user = request.user\n\n form = pass_form(user=user)\n\n if request.method == \"POST\":\n form = pass_form(user=user, data=request.POST)\n if form.is_valid():\n form.save()\n\n # Send a signal that the password has changed \n # TODO: implement signals\n #userena_signals.password_complete.send(sender=None,\n # user=user)\n\n if success_url: redirect_to = success_url\n else: redirect_to = reverse('baph_password_change_complete')\n return redirect(redirect_to)\n\n if not extra_context: extra_context = dict()\n extra_context['form'] = form\n return render_to_response(template_name, extra_context,\n context_instance=RequestContext(request))", "title": "" }, { "docid": "4caf81aa5854a2c5f2dc56e4257b1615", "score": "0.731493", "text": "def password_change(self, request):\r\n from django.contrib.admin.forms import AdminPasswordChangeForm\r\n from django.contrib.auth.views import password_change\r\n url = 
reverse('admin:password_change_done', current_app=self.name)\r\n defaults = {\r\n 'current_app': self.name,\r\n 'password_change_form': AdminPasswordChangeForm,\r\n 'post_change_redirect': url,\r\n 'extra_context': self.each_context(),\r\n }\r\n if self.password_change_template is not None:\r\n defaults['template_name'] = self.password_change_template\r\n return password_change(request, **defaults)", "title": "" }, { "docid": "d8ad3e28c37e81624fd16504816bc80e", "score": "0.72664326", "text": "def change_password(idx):\n\n user = find_user_by_id(idx)\n passes = codecooler_view.get_inputs(\"Please provide data\", [\"Old password\", \"New password\"])\n\n if passes[0] == user.password:\n user.password = passes[1]\n codecooler_view.print_result(\"Password changed succesfully!\\n\")\n\n else:\n codecooler_view.print_error_message('Wrong old password provided.')\n\n sleep(1.5)\n codecooler_view.clear_window()", "title": "" }, { "docid": "e7507ad940af4310ea4ec493fe956eef", "score": "0.7196952", "text": "def user_password(request):\n\n changed = False\n\n if request.method == 'POST':\n\n form = PasswordForm(request.POST)\n user = request.user\n \n # Check if the new password is valid and if the old password matches.\n if form.is_valid() and check_password(form.cleaned_data['oldpass'], user.password):\n newpassword = form.cleaned_data['newpass']\n user.set_password(newpassword)\n user.save()\n changed = True\n \n else:\n return render_to_response('postosaurus/user-password.html', {\n 'form': form,\n 'passwordtab' : True,\n 'success' : changed,\n }, context_instance = RequestContext(request))\n\n else:\n form = PasswordForm() # An unbound form\n\n \n return render_to_response('postosaurus/user-password.html', {\n 'form' : form,\n 'passwordtab' : True,\n 'success' : changed,\n }, context_instance = RequestContext(request))", "title": "" }, { "docid": "ddb31b77b3dbc343e954c78cfec425e8", "score": "0.71916234", "text": "def changepwd():\n\n id = session.get(\"user_id\")\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Query database for username\n rows = db.execute(\"SELECT hash FROM users WHERE id = :id\", id=id)\n\n # Check original password\n oldpwd = request.form.get(\"oldpassword\")\n if not check_password_hash(rows[0][\"hash\"], oldpwd):\n return apology(\"original password not correct\", 403)\n\n # Ensure new password was not blank\n newpwd = request.form.get(\"newpassword\")\n if not newpwd:\n return apology(\"password can't be blank\", 403)\n\n # Ensure original and new password not the same\n elif oldpwd == newpwd:\n return apology(\"password can't be the same\", 403)\n\n # Ensure confirmation was not blank\n confirm = request.form.get(\"confirmation\")\n if not confirm:\n return apology(\"confirmation can't be blank\", 403)\n\n # Ensure new password equals to confirmation\n elif newpwd != confirm:\n return apology(\"confirmation and password are not the same\", 403)\n\n # Update hash\n db.execute(\"UPDATE users SET hash=:hash WHERE id=:id\", hash=generate_password_hash(newpwd), id=id)\n\n # Redirect user to home page\n flash(\"Password Change Success!\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"changepwd.html\")", "title": "" }, { "docid": "f3d559597a3a5b7ea6f4e156272425f2", "score": "0.7183655", "text": "def handle(self, *args, **options):\n email = options['user_email'][0]\n organization_id = options['organization_id'][0]\n\n try:\n user = 
User.objects.get(\n email__iexact=email, organization_id=organization_id)\n except User.DoesNotExist:\n raise CommandError(u'User with email \"{}\" and organization {} does '\n 'not exist'.format(email, organization_id))\n\n self.stdout.write(\"Changing password for user '%s'\\n\" % user)\n\n # pylint: disable=invalid-name\n MAX_TRIES = 3\n count = 0\n p1, p2 = 1, 2 # To make them initially mismatch.\n password_validated = False\n while (p1 != p2 or not password_validated) and count < MAX_TRIES:\n p1 = self._get_pass()\n p2 = self._get_pass(\"Password (again): \")\n if p1 != p2:\n self.stdout.write(\"Passwords do not match. Please try again.\\n\")\n count += 1\n # Don't validate passwords that don't match.\n continue\n try:\n validate_password(p2, user)\n except ValidationError as err:\n self.stderr.write('\\n'.join(err.messages))\n count += 1\n else:\n password_validated = True\n\n if count == MAX_TRIES:\n raise CommandError(\"Aborting password change for user '%s' after %s\"\n \" attempts\" % (user, count))\n\n user.set_password(p1)\n user.save()\n\n return \"Password changed successfully for user '%s'\" % user", "title": "" }, { "docid": "7aff803404bd44903830f4f2de0ed85f", "score": "0.70818806", "text": "def _handle_password_change(self, request, response, user):\n # Check that all password fields have been filled out.\n if not all([item in request.POST for item in\n (\"passwd_old\", \"passwd_new\", \"passwd_new_confirm\")]):\n request.session.flash(\n \"All password fields are required for a password change.\")\n return False\n\n # Verify the current password.\n if bcrypt.encrypt(request.POST[\"passwd_old\"]) != user.password:\n request.session.flash(\n \"Current password is incorrect. Please try again.\")\n return False\n\n # Verify that the new password is confirmed.\n if request.POST[\"passwd_new\"] != request.POST[\"passwd_new_confirm\"]:\n request.session.flash(\n (\"Password and confirmation are not the same. \"\n \"Please try again.\"),\n \"error\")\n return False\n\n # Update the password. This doesn't commit the password; the commit\n # will be performed by POST_settings().\n user.password = bcrypt.encrypt(request.POST[\"passwd_new\"])\n return True", "title": "" }, { "docid": "b4fe77d01a31d181d3238fde494bcad0", "score": "0.70729786", "text": "def ChangePassword(self, display, sender, connection):\n\t\t\n\t\tif not get_user(sender) in self.set_privileges and (self.polkit_policy and not is_authorized(\n\t\t\tsender,\n\t\t\tconnection,\n\t\t\tself.polkit_policy,\n\t\t\tTrue # user interaction\n\t\t)):\n\t\t\traise Exception(\"Not authorized\")\n\t\t\n\t\t# Create changepassword dialog\n\t\tusersd_ui._initialize(display, get_user(sender), self.service.get_uids_with_users())\n\t\tchange_password_dialog = usersd_ui.ChangePasswordDialog(self.is_locked())\n\t\t\n\t\t# Connect response\n\t\tchange_password_dialog.connect(\"response\", self.on_change_password_dialog_response, change_password_dialog)\n\t\t\n\t\tchange_password_dialog.show()", "title": "" }, { "docid": "491681760b7f545a244eaaa6f94d6f42", "score": "0.7066144", "text": "def password_changed( request ):\n utilities.add_message( request, 'Password changed!' 
)\n\n return HttpResponseRedirect( reverse( 'home' ) )", "title": "" }, { "docid": "c8d04a5ef3548851e370b4cc5edb0971", "score": "0.70634466", "text": "def password_change_done(request, extra_context=None):\r\n from django.contrib.auth.views import password_change_done\r\n defaults = {\r\n 'extra_context': dict( **(extra_context or {})),\r\n }\r\n defaults['template_name'] = 'regression/change_password_done.html'\r\n return password_change_done(request, **defaults)", "title": "" }, { "docid": "a717905ece6ad2eca5be4662238e85d0", "score": "0.7045373", "text": "def POST_update_password(self, form, jquery, password):\r\n\r\n if form.has_errors(\"curpass\", errors.WRONG_PASSWORD):\r\n return\r\n\r\n if (password and\r\n not (form.has_errors(\"newpass\", errors.BAD_PASSWORD) or\r\n form.has_errors(\"verpass\", errors.BAD_PASSWORD_MATCH))):\r\n change_password(c.user, password)\r\n\r\n if c.user.email:\r\n emailer.password_change_email(c.user)\r\n\r\n form.set_html('.status', _('your password has been updated'))\r\n form.set_inputs(curpass=\"\", newpass=\"\", verpass=\"\")\r\n\r\n # the password has changed, so the user's cookie has been\r\n # invalidated. drop a new cookie.\r\n self.login(c.user)", "title": "" }, { "docid": "d1534f4575ee6f12e00b5dd2aa710f02", "score": "0.7027289", "text": "def test_change_password():\n go(SITE + '/accounts/login/')\n code(200)\n show()\n formvalue(1, 'username', 'test')\n formvalue(1, 'password', '1')\n submit()\n code(200)\n go(SITE + '/accounts/password/change/')\n code(200)\n show()\n formvalue(1, 'old_password', '1')\n formvalue(1, 'new_password1', '2')\n formvalue(1, 'new_password2', '2')\n submit()\n code(200)\n find('Password change successful')\n go(SITE + '/accounts/login/')\n code(200)\n show()\n formvalue(1, 'username', 'test')\n formvalue(1, 'password', '2')\n submit()\n code(200)\n return", "title": "" }, { "docid": "beb197a8818ba8bad0f8f84e315c99cf", "score": "0.702405", "text": "def change_password(request):\n if request.user.check_password(request.POST['password_old']) and request.POST['password_new0'] == request.POST['password_new1']:\n request.user.set_password(request.POST['password_new0'])\n request.user.save()\n return HttpResponse(views.success(request))\n else:\n return HttpResponse(views.error(request, nextPage=\"/profile\", errorType=0, errorMessage=\"Couldn't change your password, please try again!\"+request.user.password))", "title": "" }, { "docid": "a5861f799cc5bfefa9210c887b9a9112", "score": "0.7017709", "text": "def password_change_done(self, request, extra_context=None):\r\n from django.contrib.auth.views import password_change_done\r\n defaults = {\r\n 'current_app': self.name,\r\n 'extra_context': dict(self.each_context(), **(extra_context or {})),\r\n }\r\n if self.password_change_done_template is not None:\r\n defaults['template_name'] = self.password_change_done_template\r\n return password_change_done(request, **defaults)", "title": "" }, { "docid": "9922ff5d61c4b25efe2b837101a9df94", "score": "0.7004314", "text": "def password():\n # User reached the site via POST(as by submiting the form via POST)\n if request.method == \"POST\":\n\n # Get existing hash from the users table\n hashes = db.execute(\"SELECT hash FROM users WHERE username=:name\", name=request.form.get(\"username\"))\n\n hash = hashes[0][\"hash\"]\n\n # Ensure the username was submitted\n if not request.form.get(\"username\"):\n return apology(\"missing username\")\n\n # Ensure the old password was submited\n if not request.form.get(\"password\"):\n return 
apology(\"missing old password\")\n\n # Ensure the new password was submitted\n elif not request.form.get(\"new_password\"):\n return apology(\"missing new password\")\n\n # Ensure the confirm_password password was submited\n elif not request.form.get(\"confirmpassword\"):\n return apology(\"missing password (again)\")\n\n # Ensure the passwords match\n elif request.form.get(\"new_password\") != request.form.get(\"confirmpassword\"):\n return apology(\"passwords dont match\")\n\n # Ensure old passwords match\n elif not check_password_hash(hash, request.form.get(\"password\")):\n return apology(\"Old Password doesn't match\")\n\n # All else satisfied change the password\n db.execute(\"UPDATE users SET hash=:new_hash WHERE username=:name\", new_hash=generate_password_hash(request.form.get(\"new_password\")), name=request.form.get(\"username\"))\n\n flash(\"Password successfully changed!\")\n return redirect(\"/\")\n # User reached the site via GET(as by clicking on a link or via redirect)\n else:\n return render_template(\"password.html\")", "title": "" }, { "docid": "4c06c07be9150c66d166ce375c38dc84", "score": "0.6971013", "text": "def test_changePassword(self):\n target = base_function.BasePage(self.driver)\n BP = booking_functions.Booking(self.driver)\n BP.Navigate_workoutscreen()\n self.driver.back()\n assert self.driver.find_element_by_css_selector('#settingsForm > p:nth-child(14)').text == 'Change Password'\n target.password_field().send_keys('8892279018')\n target.confirm_password().send_keys('8892279018')", "title": "" }, { "docid": "c07345a39ea098db0fed418c6819fbf1", "score": "0.69552803", "text": "async def change_password(self, password):\n raise NotImplementedError()", "title": "" }, { "docid": "d43eac461ffe3dde159e616826f018fd", "score": "0.6938165", "text": "def test_check_success_change_password(self):\n form = ChangePasswordForm(password='update', confirm='update')\n self.assertTrue(form.validate())", "title": "" }, { "docid": "5bdccae55f75613f25cfb8f341b36ecf", "score": "0.69160527", "text": "async def post(self):\n\n user = await self.get_current_user()\n old_password = self.get_body_argument(\"old_password\", strip=False)\n new_password = self.get_body_argument(\"new_password\", strip=False)\n confirmation = self.get_body_argument(\"new_password_confirmation\", strip=False)\n\n correct_password_provided = self.authenticator.get_user(\n user.name\n ).is_valid_password(old_password)\n\n new_password_matches_confirmation = new_password == confirmation\n\n if not correct_password_provided:\n alert = \"alert-danger\"\n message = \"Your current password was incorrect. Please try again.\"\n elif not new_password_matches_confirmation:\n alert = \"alert-danger\"\n message = (\n \"Your new password didn't match the confirmation. 
Please try again.\"\n )\n else:\n success = self.authenticator.change_password(user.name, new_password)\n if success:\n alert = \"alert-success\"\n message = \"Your password has been changed successfully!\"\n else:\n alert = \"alert-danger\"\n minimum_password_length = self.authenticator.minimum_password_length\n # Error if minimum password length is > 0.\n if minimum_password_length > 0:\n message = (\n \"Something went wrong!\\n\"\n \"Be sure your new password has at least\"\n f\" {minimum_password_length} characters \"\n \"and is not too common.\"\n )\n # Error if minimum password length is 0.\n else:\n message = (\n \"Something went wrong!\\n\"\n \"Be sure your new password is not too common.\"\n )\n\n html = await self.render_template(\n \"change-password.html\",\n user_name=user.name,\n result_message=message,\n alert=alert,\n )\n self.finish(html)", "title": "" }, { "docid": "e770ed78800ef95f0c9e430d960dfab9", "score": "0.6872853", "text": "def settings(request):\n user = request.user\n\n if request.method == \"POST\":\n form = ChangePasswordForm(user, request.POST)\n if form.is_valid():\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n update_session_auth_hash(request, user)\n flash(request, \"Password changed!\")\n return redirect(\".\")\n else:\n form = ChangePasswordForm(user)\n\n return render(request, \"users/settings.html\", {\n \"form\": form,\n })", "title": "" }, { "docid": "bed981d736c83853e4c45bfd0a0df800", "score": "0.6870121", "text": "def changepassword():\n\n if request.method == \"POST\":\n\n # Get the passwords\n current_password = request.form.get(\"current-password\")\n new_password = request.form.get(\"new-password\")\n new_password_check = request.form.get(\"new-password-check\")\n\n # check if user has provided the password\n if not request.form.get(\"new-password\"):\n return apology(\"must provide a new password\", 403)\n\n # check if new passwords match\n if new_password != new_password_check:\n return apology(\"New passwords don't match\", 403)\n\n # find the user in the database\n user = User.query.filter_by(id=session[\"user_id\"]).first()\n check_pw = user.check_password(request.form.get(\"current-password\"))\n\n # if the current password provided is correct\n if check_pw == True:\n\n # encrypt new pw\n user.hashp = generate_password_hash(request.form.get(\"new-password\"))\n\n # add to database\n db.session.add(user)\n db.session.commit()\n\n return redirect(\"/\")\n\n # if the user is looking for the form\n else:\n return render_template(\"changepassword.html\")", "title": "" }, { "docid": "8025fdf85cabd1b138d24d3b0ddb25cc", "score": "0.6822743", "text": "def change_password(request):\n requesting_user_profile = request.user.userprofile\n if request.method == 'POST':\n change_password_form = PasswordChangeForm(request.POST)\n\n if change_password_form.is_valid():\n\n old_password = change_password_form.cleaned_data['password_old']\n password_1 = change_password_form.cleaned_data['password_1']\n password_2 = change_password_form.cleaned_data['password_2']\n\n if password_1 == password_2:\n if request.user.check_password(old_password):\n user = request.user\n user.set_password(password_1)\n user.save()\n return HttpResponseRedirect(reverse('updateinfo'))\n else:\n error = {'has_error': True,\n 'message': 'The passwords you entered is incorrect',\n 'type': 'incorrect password'}\n return render(request, 'change_password.html', {'form': change_password_form,\n 'user_profile': requesting_user_profile,\n 'error': error})\n else:\n error 
= {'has_error': True,\n 'message': 'The passwords you entered do not match',\n 'type': 'new passwords do not match'}\n return render(request, 'change_password.html', {'form': change_password_form,\n 'user_profile': requesting_user_profile,\n 'error': error})\n else:\n error = {'has_error': True,\n 'message': 'The passwords you entered is invalid',\n 'type': 'invalid password'}\n return render(request, 'change_password.html', {'form': change_password_form,\n 'user_profile': requesting_user_profile,\n 'error': error})\n else:\n change_password_form = PasswordChangeForm()\n error = {'has_error': False}\n return render(request, 'change_password.html', {'form': change_password_form,\n 'user_profile': requesting_user_profile,\n 'error': error})", "title": "" }, { "docid": "8e88924ce08ad2ad673bcae4267699b3", "score": "0.68133974", "text": "def post(self, request):\n try:\n password_form = ChangePasswordForm(user=request.user, data=request.POST)\n if password_form.is_valid():\n password_form.save()\n # necessary step to stay logged into the account with the password changed\n update_session_auth_hash(request, password_form.user)\n messages.success(request, f'Your password was successfully updated!')\n return redirect('profile')\n else:\n messages.error(request, f'An error has occured, please try again.')\n except Exception as ex:\n return HttpResponse('ERROR: ' + str(ex))\n else:\n return JsonResponse({'redirect': '/'})", "title": "" }, { "docid": "353fe10d5f7f6d0e6605e24672818feb", "score": "0.6793547", "text": "def new_password():\n if request.method == 'POST':\n code = request.form['code']\n newPassword = request.form['password']\n\n if dbInterface.change_password(code, newPassword):\n flash('Password successfully changed!')\n return redirect('login')\n\n msg = 'Code is incorrect, try again!'\n return render_template(\"new_password.html\", message = msg)\n\n return render_template(\"new_password.html\")", "title": "" }, { "docid": "04d996545e9994c7ad838230ab8fd0c7", "score": "0.67915595", "text": "def change_the_password(self, username):\n pass_1 = self.enter_new_password_entry_var.get()\n pass_2 = self.retype_password_entry_var.get()\n\n if pass_1 == pass_2:\n self.update_password(username, pass_1)\n self.window_forgot.destroy()\n login.Login_Window()\n\n\n else:\n pass", "title": "" }, { "docid": "7f7e17a88e9eb64a14ff13ae2ef699fe", "score": "0.6781769", "text": "def test_change_password(self):\n pass", "title": "" }, { "docid": "2e6c18b5fb81bb548291471294007a80", "score": "0.6769214", "text": "def change_pass(self):\n\n answer_1 = self.security_answer_1_var.get()\n answer_2 = self.security_answer_2_var.get()\n usr = self.given_username\n\n if answer_1 == '' and answer_2 == '':\n msgb.showerror('Error in Authentication','Please fill the Answers for recovery of your account')\n self.check_for_answer = False\n\n elif answer_1 == '':\n msgb.showerror('Error in Authentication','You have not entered the Answer 1. Please fill it.')\n self.check_for_answer = False\n\n elif answer_2 == '':\n msgb.showerror('Error in Authentication','You have not entered the Answer 2. 
Please fill it.')\n self.check_for_answer = False\n\n else:\n self.check_for_answer = self.check_answers(usr, answer_1, answer_2)\n\n if self.check_for_answer:\n\n self.security_question_1.destroy()\n self.security_question_1_text.destroy()\n self.security_answer_1.destroy()\n self.security_answer_1_entry.destroy()\n self.security_question_2.destroy()\n self.security_question_2_text.destroy()\n self.security_answer_2.destroy()\n self.security_answer_2_entry.destroy()\n self.change_password.destroy()\n self.get_question_button.destroy()\n self.clear_username.destroy()\n self.clear_answers.destroy()\n self.cancel_changes.destroy()\n self.username_entry.configure(state='disabled')\n\n self.enter_changes(usr)\n\n else:\n pass", "title": "" }, { "docid": "b369ebbaa41dc661e02a01f23feaffda", "score": "0.676427", "text": "def test_validate_success_change_password_form(self):\n form = PasswordChangeForm(old_password='old_one', password='new', password2='new')\n self.assertTrue(form.validate())", "title": "" }, { "docid": "1442a3fa97e3af3cfab2caec0cd9456f", "score": "0.67414296", "text": "def change():\n _id = session[\"user_id\"]\n user = db.execute(\"SELECT * FROM users WHERE id=:_id\",_id=_id)\n\n if not request.form.get(\"oldpass\"):\n return apology(\"Missing Old password!\", 403)\n\n # Ensure password was submitted\n if not request.form.get(\"password\"):\n return apology(\"Missing password!\", 403)\n\n # Ensure confirmation password was submitted\n if not request.form.get(\"confirmation_password\"):\n return apology(\"Missing confirmation passoword!\", 403)\n\n if len(user) != 1 or not check_password_hash(user[0][\"hash\"], request.form.get(\"oldpass\")):\n return apology(\"invalid old password\", 403)\n\n # Ensure password and confirmation passwords are the same\n if request.form.get(\"password\") != request.form.get(\"confirmation_password\"):\n return apology(\"your passwords are not matched\",403)\n\n password = request.form.get(\"password\")\n hashed = generate_password_hash(password, method='pbkdf2:sha256', salt_length=8)\n db.execute(\"UPDATE users SET hash =:updatedhash\",updatedhash= hashed)\n return redirect(\"/\")\n # return apology(\"TODO\")", "title": "" }, { "docid": "654eee3a8cba6f944047d25e14fc90e6", "score": "0.67230445", "text": "def get(self, request):\n password_form = ChangePasswordForm(request.user)\n return render(request, 'login/password_change_form.html',context={'password_form':password_form})", "title": "" }, { "docid": "38a13de8dbe8e7af1601bc1ee10d2fc4", "score": "0.6721963", "text": "def change_password_view(request):\r\n if request.method == \"GET\":\r\n\r\n # Get user info\r\n current_user = request.user\r\n formChangePassword = ChangePasswordForm(current_user.username)\r\n\r\n # Set session for the main left side bar active menu\r\n request.session['active_sidebar_menu'] = \"change_password\"\r\n\r\n return render(request, 'myroot/account/change_password.html',\r\n {\r\n 'title': 'Change Password',\r\n 'meta_desc': 'Change your password.',\r\n 'formChangePassword': formChangePassword,\r\n 'active_sidebar_menu': request.session['active_sidebar_menu']\r\n })\r\n\r\n data = dict()\r\n if request.method == 'POST':\r\n new_password1 = request.POST.get('new_password1')\r\n new_password2 = request.POST.get('new_password2')\r\n\r\n # Get user info\r\n current_user = request.user\r\n\r\n is_pass_valid, msg, title = is_password_valid(new_password1, new_password2)\r\n\r\n if not is_pass_valid:\r\n # Return some json response back to user\r\n data = dict_alert_msg('False', 
title, msg, 'error')\r\n\r\n else:\r\n\r\n ''' Begin reCAPTCHA validation '''\r\n recaptcha_response = request.POST.get('g-recaptcha-response')\r\n data = {\r\n 'secret': settings.GRECAP_SECRET_KEY,\r\n 'response': recaptcha_response\r\n }\r\n r = requests.post(settings.GRECAP_VERIFY_URL, data=data)\r\n result = r.json()\r\n ''' End reCAPTCHA validation '''\r\n\r\n if result['success']:\r\n # Check first if email existed in our users data\r\n if User.objects.filter(username=current_user.username):\r\n\r\n # Change the password now\r\n u = User.objects.get(username=current_user.username)\r\n u.set_password(new_password1)\r\n u.save()\r\n\r\n msg = \"\"\"Your new password was successfully changed.\"\"\"\r\n data = dict_alert_msg('True', 'Password Changed',\r\n msg, 'success')\r\n else:\r\n\r\n # The username submitted is not found in our users data\r\n msg = \"\"\"Oops, username not found, please try again.\"\"\"\r\n data = dict_alert_msg('False', 'Username Not Found!',\r\n msg, 'error')\r\n else:\r\n\r\n # Return some json response back to user\r\n msg = \"\"\"Invalid reCAPTCHA, please try again.\"\"\"\r\n data = dict_alert_msg('False', 'Oops, Error', msg, 'error')\r\n\r\n return JsonResponse(data)", "title": "" }, { "docid": "13696fe34ef3ee6747ebf28090c0b50f", "score": "0.67176306", "text": "def change_password(request):\n user = request.user\n id_user = request.user.id\n if request.method == 'PUT':\n put = QueryDict(request.body)\n form = ChangePasswordForm(put)\n if form.is_valid():\n old_password = form.cleaned_data.get('old_password')\n new_password = form.cleaned_data.get('new_password')\n check_pass = request.user.check_password(old_password)\n if check_pass:\n confirm_pass = form.cleaned_data.get('confirm_pass')\n if new_password == confirm_pass:\n user.set_password(new_password)\n new_password = user.password\n UserProfile.update_pass(new_password, id_user)\n update_session_auth_hash(request, user)\n LOGGER.debug(\"Successfully changed password for user %s\", user)\n return HttpResponse(status=200)\n LOGGER.warning(\"New password wasn't confirmed by user %s\", user)\n return HttpResponse(status=400)\n LOGGER.warning(\"New password failed the checking\")\n return HttpResponse(status=400)\n LOGGER.critical(\"Form for changing a password is invalid\")\n return HttpResponse(status=400)\n LOGGER.critical(\"Other methods except PUT aren't allowed for changing a password\")\n return HttpResponse(status=405)", "title": "" }, { "docid": "85562c181bb3a498becbdfa3046848ea", "score": "0.6690789", "text": "def on_password_change(self, option, value):\n self.xmpp.password = value", "title": "" }, { "docid": "309dde4095713e7139257cbed2c22a7f", "score": "0.66811544", "text": "def change_passw(request):\n if request.method=='POST':\n form = ChangePassw(data=request.POST, instance=request.user)\n form.user = request.user\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user)\n return redirect(settings.LOGIN_REDIRECT_URL)\n else:\n form = ChangePassw()\n return render(request, 'accounts/change_passw.html', context={\n 'form': form,\n })", "title": "" }, { "docid": "d9c5cd9eb5cb73e0fe3f0b056be54a8f", "score": "0.66343504", "text": "def change_password(reset_code):\r\n return flask.render_template('password_change_form',\r\n reset_code=reset_code)", "title": "" }, { "docid": "d9c5cd9eb5cb73e0fe3f0b056be54a8f", "score": "0.66343504", "text": "def change_password(reset_code):\r\n return flask.render_template('password_change_form',\r\n reset_code=reset_code)", "title": "" }, { 
"docid": "be9ee89aae8393c593ee975c1684e776", "score": "0.66199714", "text": "def software_set_password(request, slug, old_password, new_password):\n return", "title": "" }, { "docid": "275f377dbfc1f01190a911aa23b0b01a", "score": "0.66074574", "text": "def change_password():\n\n if logged_in:\n list_index = 0\n for user in users:\n if user.user_id == session_user['user_id']:\n user.users = users\n form = ChangePasswordForm(user, request.method)\n break\n list_index += 1\n if form.validate_on_submit():\n users[list_index] = form.user\n return redirect(url_for('profile')), 302\n return render_template('change_password.html', user=session_user, form=form), 200\n return redirect(url_for('login')), 302", "title": "" }, { "docid": "c4491a06f0a4f8b45bd1bef67d22fa2b", "score": "0.6594753", "text": "def change_password():\n\n if request.method == 'POST':\n if 'user' in session:\n existing_user = mongo.db.users.find_one(\n {'username': session['user']})\n else:\n existing_user = mongo.db.users.find_one(\n {'username': request.form.get('username')})\n\n if existing_user:\n username = existing_user['username']\n if request.form['new-password'] != request.form['repeat-password']:\n flash(flash_repeat)\n return render_template('change_password.html')\n\n if check_password_hash(\n existing_user['password'], request.form.get(\n 'current-password')) or check_password_hash(\n existing_user['temp_password'],\n request.form.get('current-password')):\n\n mongo.db.users.update_one(\n {'username': username},\n {'$set': {\n 'password': generate_password_hash(\n request.form.get('new-password')),\n 'temp_password': generate_password_hash(\n get_random_string(14))}})\n\n if not session:\n session['user'] = request.form.get('username')\n\n return redirect(url_for('profile',\n username=session['user']))\n\n else:\n flash(flash_incorrect)\n return redirect(url_for('change_password'))\n else:\n flash(flash_incorrect)\n return redirect(url_for('change_password'))\n return render_template('change_password.html')", "title": "" }, { "docid": "bc8818ed87b01a3510b0c8a9ef66b2ca", "score": "0.6581633", "text": "def password_change(cls, val):\n return cls('password_change', val)", "title": "" }, { "docid": "37b58af9397ee069eefe762607e7a2e5", "score": "0.65728575", "text": "def test_check_invalid_change_password(self):\n form = ChangePasswordForm(password='update', confirm='unknown')\n self.assertFalse(form.validate())", "title": "" }, { "docid": "db0f59f341cc35a942d32f97e85ea0df", "score": "0.6563264", "text": "def password_change(self):\n self.clear()\n new_password = input(\"New login: \").strip().lower()\n change_password = f\"update username set login='{new_password}' where login='{self.old_login_password[1]}'\"\n self.my_user.execute(change_password)\n self.mysql.commit()\n print(\"\\n\\t\\t**>> Your password has been updated <<**\\n\")", "title": "" }, { "docid": "65c175b4e351c32ab14b43bbfa12ca26", "score": "0.65499794", "text": "def change_a_password(username):\r\n old_pass = getpass.getpass(prompt=\"Old password: \")\r\n count = 1\r\n while not database_hotel.check_password(username, old_pass) and count != 3:\r\n print(\"Wrong password! Try again.\")\r\n old_pass = getpass.getpass(prompt=\"Old password: \")\r\n count += 1\r\n if not database_hotel.check_password(username, old_pass):\r\n quit()\r\n new_pass = getpass.getpass(prompt=\"New password: \")\r\n confirm_pass = getpass.getpass(prompt=\"Confirm password: \")\r\n while new_pass != confirm_pass:\r\n print(\"Wrong confirmation password! 
Try again.\")\r\n new_pass = getpass.getpass(prompt=\"New password: \")\r\n confirm_pass = getpass.getpass(prompt=\"Confirm password: \")\r\n database_hotel.change_password(username, new_pass)\r\n print(\"An email with your new password will be send to you\")\r\n user_mail = database_hotel.get_client_email(username)\r\n text = \"\"\"Hello,\\n\r\n You just changed your password!\\n\r\n Your new password is '{}'\\n\r\n Have a nice day! :)\"\"\".format(new_pass)\r\n send_email(user_mail, text)\r\n clear()", "title": "" }, { "docid": "982325ef33850db3673496a876c1db42", "score": "0.65492874", "text": "async def get(self):\n\n user = await self.get_current_user()\n html = await self.render_template(\n \"change-password.html\",\n user_name=user.name,\n )\n self.finish(html)", "title": "" }, { "docid": "7a5328ff84ed13b2f4652b95a1b17aae", "score": "0.6536606", "text": "def enter_changes(self, username):\n\n self.enter_new_password = tk.Label(self.window_forgot, text = 'Enter New Password',font=(\n 'arial', 12, 'bold'), bg='black', fg='#FFB6C1')\n self.enter_new_password.place(x=5, y=210)\n\n self.enter_new_password_entry_var = tk.StringVar()\n self.enter_new_password_entry = tk.Entry(self.window_forgot, textvariable=self.enter_new_password_entry_var,\n width=20, font=('arial', 12), bg='#C0C0C0', show = '*')\n self.enter_new_password_entry.place(x=170, y=210)\n\n self.retype_password = tk.Label(self.window_forgot, text='Retype Password', font=(\n 'arial', 12, 'bold'), bg='black', fg='#FFB6C1')\n self.retype_password.place(x=30, y=240)\n\n self.retype_password_entry_var = tk.StringVar()\n self.retype_password_entry = tk.Entry(self.window_forgot, textvariable=self.retype_password_entry_var,\n width=20, font=('arial', 12), bg='#C0C0C0', show = '*')\n self.retype_password_entry.place(x=170, y=240)\n\n self.change_password = tk.Button(self.window_forgot, text = 'Change Password',relief='groove',width=15,font=(\n 'consolas', 13, 'bold'), bg='#f1f5e0', command=lambda: self.change_the_password(username))\n self.change_password.place(x=130, y=280)\n\n self.clear_password = tk.Button(self.window_forgot, text = 'Clear',relief='groove',width=8,font=(\n 'consolas', 13, 'bold'), bg='#f1f5e0', command=self.password_clear)\n self.clear_password.place(x=300, y=280)", "title": "" }, { "docid": "734bfc7d2c379d3208de9816ef511dfc", "score": "0.6527115", "text": "def _process_password_change_form(self, user):\n form = PasswordChangeForm(user=user, data=self.request.POST)\n if form.is_valid():\n if form.is_valid():\n messages.add_message(\n request=self.request,\n level=messages.SUCCESS,\n message='Ви успішно змінили свій пароль')\n form.save()\n return form", "title": "" }, { "docid": "42d222ab25ce459587eeb9dd379df30d", "score": "0.65205294", "text": "def replace_password(self):\n self.clear_screen()\n print(\"Initiated password replacement.\")\n old_pw = getpass(\"Type old password:\")\n new_password = getpass(\"Type new password:\")\n new_password_ = getpass(\"Repeat new password:\")\n if new_password != new_password_:\n self.clear_screen()\n print(\"New password doens't match.\")\n else:\n try:\n print(\"Replacing password. Might take a second!\")\n self.client.replace_password(old_pw=old_pw, new_pw=new_password)\n input(\"Replaced password successfully. Press enter to continue.\")\n self.clear_screen()\n except (BadKeyException, BadPasswordSelected):\n input(\"Failed to replace password. 
Press enter to continue.\")\n self.clear_screen()", "title": "" }, { "docid": "7af57a974f6ce0efa9ae64f3033f4a63", "score": "0.64930683", "text": "def changeUserPassword(self):\n if self.args.user is not None and self.args.user != \"\":\n currentPw = getpass.getpass(\"Current password for \"+self.args.user+\": \")\n while True:\n newPw = getpass.getpass(\"New password: \")\n confirmPw = getpass.getpass(\"Confirm new password: \")\n\n if newPw != confirmPw:\n print \"Passwords did not match!\\n\";\n\n else:\n if not self.connectToDatabase(self.args.user, currentPw):\n print \"FAILED: could not connect to the database.\\n\";\n\n else:\n cursor = self.dbConnection.cursor()\n cursor.execute(\"SET PASSWORD = PASSWORD(%s)\", (newPw,))\n cursor.close()\n print \"Password changed.\\n\";\n \n break\n\n else:\n print \"ERROR: no user specified.\\n\";", "title": "" }, { "docid": "7a49ab14b370f40a328f3539cbc0692c", "score": "0.64780825", "text": "def change_password(self, change_password):\n \n self._change_password = change_password", "title": "" }, { "docid": "ddca7da750fb26728a426aea85a0ff6a", "score": "0.64522254", "text": "def change_passsub(request):\n if request.method=='POST':\n form = ChangePasswsub(data=request.POST, instance=request.user)\n form.user = request.user\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user)\n return redirect(settings.LOGIN_REDIRECT_URL)\n else:\n form = ChangePasswsub()\n return render(request, 'accounts/change_passsub.html', context={\n 'form': form,\n })", "title": "" }, { "docid": "fe47b655184a4b5a3314bf7e4a86f73d", "score": "0.6442435", "text": "def credentialsChanged(password, REQUEST=None):", "title": "" }, { "docid": "65bc881bb89994726545ab20fc1857b7", "score": "0.64386916", "text": "def change_password(request):\n email = request.POST['email']\n old_password = request.POST['old_password']\n new_password = request.POST['new_password']\n confirm_password = request.POST['confirm_password']\n\n # Validation\n required_fields = [email, old_password, new_password, confirm_password]\n trimmed = [i.strip() for i in required_fields]\n if \"\" in trimmed:\n return render(request, 'reset_password.html', {\n 'email': email,\n 'message': 'Missing Required Fields'})\n\n # Password matching\n if new_password != confirm_password:\n return render(request, 'reset_password.html', {\n 'email': email,\n 'message': 'New Password is different from Confirm Password'})\n\n # Email validation\n if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", email):\n return render(request, 'reset_password.html', {\n 'email': email,\n 'message': 'Invalid Email'})\n\n user = authenticate(username=email, password=old_password)\n\n if user is not None:\n login(request, user)\n userget = User.get(username=email)\n userget.set_password(new_password)\n userget.save()\n logout(request)\n return redirect('news:user_login')\n else:\n return render(request, 'reset_password.html', {\n 'email': email,\n 'message': 'Incorrect Password to Email or Email does not exist'})", "title": "" }, { "docid": "a7cdc570d75f233c480a5e9e56dca7f6", "score": "0.63805526", "text": "def post(self):\n\n if current_user.is_anonymous:\n return abort(HTTPStatus.FORBIDDEN, message=\"User is anonymous\")\n\n args = self.security_password_parser.parse_args()\n\n user_credentials: UserCredentials = UserCredentials.query.get(current_user.id)\n if user_credentials is None or not user_credentials.check_password(args['old_password']):\n return abort(HTTPStatus.FORBIDDEN, message=\"Wrong password\")\n\n 
user_credentials.password = args['new_password']\n\n db.session.commit()\n return \"Password successfully changed\"", "title": "" }, { "docid": "b0f7a10514242e851c4a979eefd5c42a", "score": "0.6337727", "text": "def change_password(self):\n return self._change_password", "title": "" }, { "docid": "bbf9e2e4290fd7519d01eaf81e6d2250", "score": "0.63370436", "text": "def test_validate_invalid_change_password_form(self):\n form = PasswordChangeForm(old_password='old_one', password='new', password2='wrong')\n self.assertFalse(form.validate())", "title": "" }, { "docid": "f48debff9d83eaa35a5eb293f3581a3c", "score": "0.63367325", "text": "def on_change_password_dialog_response(self, dialog, response, parent):\n\t\t\n\t\tif response == Gtk.ResponseType.OK:\n\t\t\t# Verify old password\n\t\t\tif not parent.locked and not self.verify_password(parent.objects.old_password.get_text()):\n\t\t\t\tparent.show_error(_(\"Current password is not correct.\"))\n\t\t\t\treturn False\n\t\t\t\n\t\t\t# Verify new passwords\n\t\t\tif not parent.objects.new_password.get_text() == parent.objects.confirm_new_password.get_text():\n\t\t\t\tparent.show_error(_(\"The new passwords do not match.\"))\n\t\t\t\treturn False\n\t\t\t\n\t\t\t# Check password length\n\t\t\tif not len(parent.objects.new_password.get_text()) >= MIN_PASSWORD_LENGTH:\n\t\t\t\tparent.show_error(_(\"The new password should be of at least %s characters.\") % MIN_PASSWORD_LENGTH)\n\t\t\t\treturn False\n\t\t\t\n\t\t\tparent.hide_error()\n\t\t\t\n\t\t\t# Finally set password\n\t\t\tself.change_password(parent.objects.new_password.get_text())\n\t\t\t\n\t\t# Destroy the window\n\t\tdialog.destroy()", "title": "" }, { "docid": "1ca9589669834968b57ba4106467977c", "score": "0.63367254", "text": "def change_password(self, request, *args, **kwargs):\n user = self.get_object()\n serializer = PasswordSerializer(data=request.data)\n user_serializer = UserSerializer(user)\n\n if serializer.is_valid():\n if not user.check_password(serializer.data.get('old_password')):\n return Response({\"old_password\": [\"Votre ancienne mot de passe ne correspond pas\"]}, status=status.HTTP_400_BAD_REQUEST)\n\n if is_password_valid(serializer.data.get('new_password')):\n user.set_password(serializer.data.get('new_password'))\n site = get_object_or_404(Sit, pk=serializer.data.get('sit'))\n user.sit = site\n user.save()\n return Response(status=status.HTTP_200_OK)\n else:\n return Response({\"details\": _(\"The password must contain at least 1 uppercase letter, 1 special character and a minimum length of 8 characters\")}, status=status.HTTP_400_BAD_REQUEST)\n\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "b8ecb6802fa34d6562a8324ea06e08df", "score": "0.6321174", "text": "def change_password(self, change_password_id, request):\n return self.start().uri('/api/user/change-password') \\\n .url_segment(change_password_id) \\\n .body_handler(JSONBodyHandler(request)) \\\n .post() \\\n .go()", "title": "" }, { "docid": "8aee63362f311e889116685c433ef4b9", "score": "0.63192767", "text": "def change_password(self, password):\n self.password = password\n return self.password", "title": "" }, { "docid": "413aa6fc99a27ea976b3028bb9d1135d", "score": "0.631739", "text": "def changePassword(self, password):\n # TODO : invalidate current cached uncrypted key\n pass", "title": "" }, { "docid": "6ec84f59383fb41ddd3163b81bf1441f", "score": "0.6297994", "text": "def xmpp_web_changepw(request, password=''):\n # JID is supposedly trusted and can 
*just* change it.\n request.user.set_password(password)\n request.user.save()\n if password:\n return XmppResponse(\"Password changed.\")\n else:\n return XmppResponse(\"Password disabled.\")", "title": "" }, { "docid": "e5e5ec2432dc715cb47e093c9bfb7efd", "score": "0.6296734", "text": "def show_acc_passwd():\n # Get current user\n if \"user\" in flask.session:\n curr_user = flask.session['user']\n else:\n # If not logged in, redirect to login.\n return flask.redirect(flask.url_for('show_acclogin'))\n\n # Connect to database\n connection = insta485.model.get_db()\n\n if flask.request.method == 'POST':\n # Get form data\n req = flask.request.form\n old_password = req['password']\n new_password1 = req['new_password1']\n new_password2 = req['new_password2']\n\n # Fetch password for user\n cur = connection.execute(\n \"SELECT username, password \"\n \"FROM users \"\n \"WHERE username = ? \",\n (curr_user,)\n )\n correct_login = cur.fetchall()\n\n # get database password and salt\n db_passwd_hash = correct_login[0]['password']\n db_passwd_salt = db_passwd_hash.split('$')\n db_passwd_salt = db_passwd_salt[1]\n\n # Hash the old password with same salt\n hash_passwod_input = password_db_string(old_password, db_passwd_salt)\n\n # See if hashed+salted is same as db hashed+salted for user\n if hash_passwod_input == db_passwd_hash:\n # Passwords match. Can change password\n\n # New passwords do not match. abort.\n if not new_password1 == new_password2:\n flask.abort(401)\n else:\n # New passwords match. Change password.\n new_password_hash = hash_password(new_password1)\n cur = connection.execute(\n \"UPDATE users \"\n \"SET password = ? \"\n \"WHERE username = ?\",\n (new_password_hash, curr_user)\n )\n\n return flask.redirect(flask.url_for('show_acc_edit'))\n\n # else: Old password does not match. 
abort.\n flask.abort(403)\n\n context = {}\n context[\"logname\"] = curr_user\n return flask.render_template(\"/accounts/password.html\", **context)", "title": "" }, { "docid": "1706f8fc047a87e50c9eb389f855cfd6", "score": "0.6294252", "text": "def change(request, token,\n template_name='uaccounts/change.html',\n changed_template_name='uaccounts/changed.html'):\n try:\n verification = verify_token(token, CHANGE_PASSWORD_EXPIRES)\n except VerificationError:\n return redirect('uaccounts:login')\n\n if not verification.email.verified:\n return redirect('uaccounts:login')\n\n user = verification.email.profile.user\n form = forms.ChangePasswordForm(user)\n\n if request.method == 'POST':\n form = forms.ChangePasswordForm(user, request.POST)\n if form.is_valid():\n form.save()\n verification.delete()\n return render(request, changed_template_name)\n\n return render(request, template_name, {'form': form})", "title": "" }, { "docid": "c7d278c97b750e5226ab8f03a5819d84", "score": "0.62764555", "text": "def password_change_db(app,user):\n pot_password = request.form.get('password')\n # verify if password is adequate.\n #pull second password from profile form\n pot2_password = request.form.get('sec_password')\n\n if len(pot_password) < 6:\n return('Your password is not long enough try something with at least 6 characters.')\n \n elif pot_password != pot2_password:\n return('Your second password does not match your first, please re-enter to verify.')\n \n else:\n user.password=Bcrypt(app).generage_password_hash(pot_password)\n db.session.commit()\n return", "title": "" }, { "docid": "c4f447995136654417dd64dc1b1b022b", "score": "0.62745523", "text": "def setUserPassword(username, password):", "title": "" }, { "docid": "bb3fdb1e0932677d067e1c1a95de807d", "score": "0.62596923", "text": "def change_password(request):\n this_user = request.user\n\n old_password = request.POST.get('old_password')\n new_password = request.POST.get('new_password')\n new_password_confirm = request.POST.get('new_password_confirm')\n\n if not (old_password and new_password and new_password_confirm):\n res_body = {\n \"error\": \"old_password or new_password or new_password_confirm not provided\"\n }\n return JsonResponse(res_body, status=400)\n\n if not new_password == new_password_confirm:\n res_body = {\n \"error\": \"New password confirm does not match\"\n }\n return JsonResponse(res_body, status=400)\n\n if this_user.check_password(old_password):\n this_user.password = make_password(new_password)\n this_user.save()\n update_session_auth_hash(request, this_user)\n res_body = {\n \"success\": \"{}'s password changed successfully\".format(this_user.get_full_name())\n }\n return JsonResponse(res_body, status=201)\n else:\n res_body = {\n \"error\": \"Password provided is incorrect\"\n }\n return JsonResponse(res_body, status=400)", "title": "" }, { "docid": "2baef4a713916bab827b623d1b88d1d7", "score": "0.6258902", "text": "def test_check_invalid_change_password_format(self):\n form = ChangePasswordForm(password='123', confirm='123')\n self.assertFalse(form.validate())", "title": "" }, { "docid": "2452d19218a28bd4c809f417a154db49", "score": "0.62521356", "text": "async def post(self, user_name):\n\n new_password = self.get_body_argument(\"new_password\", strip=False)\n confirmation = self.get_body_argument(\"new_password_confirmation\", strip=False)\n\n new_password_matches_confirmation = new_password == confirmation\n\n if not new_password_matches_confirmation:\n alert = \"alert-danger\"\n message = (\n \"The new password didn't 
match the confirmation. Please try again.\"\n )\n else:\n success = self.authenticator.change_password(user_name, new_password)\n if success:\n alert = \"alert-success\"\n message = f\"The password for {user_name} has been changed successfully\"\n else:\n alert = \"alert-danger\"\n minimum_password_length = self.authenticator.minimum_password_length\n # Error if minimum password length is > 0.\n if minimum_password_length > 0:\n message = (\n \"Something went wrong!\\nBe sure the new password \"\n f\"for {user_name} has at least {minimum_password_length} \"\n \"characters and is not too common.\"\n )\n # Error if minimum password length is 0.\n else:\n message = (\n \"Something went wrong!\\nBe sure the new password \"\n f\"for {user_name} is not too common.\"\n )\n\n html = await self.render_template(\n \"change-password-admin.html\",\n user_name=user_name,\n result_message=message,\n alert=alert,\n )\n self.finish(html)", "title": "" }, { "docid": "dd618339fa7fcdaf08589a754886f03e", "score": "0.6244944", "text": "def edit_admin_password(self):\n self.ids.adminPasswordEditBtn.icon = \"check\"\n self.ids.eyeBtn.disabled = True\n self.ids.adminPasswordLayout.clear_widgets()\n\n password_field = AdminInfoEditField()\n password_field.hint_text = \"password\"\n password_field.text = self.admin_password\n self.ids.adminPasswordLayout.add_widget(password_field)", "title": "" }, { "docid": "971779335b70e1cae0b4976c68495a88", "score": "0.6242513", "text": "def show_admin_password(self):\n self.flag = 1\n self.ids.adminPasswordEditBtn.icon = \"pencil\"\n self.ids.eyeBtn.disabled = False\n self.admin_password = self.ids.adminPasswordLayout.children[0].children[0].text\n\n self.ids.adminPasswordLayout.clear_widgets()\n self.admin_pass_label = AdminInfoLabel()\n self.admin_pass_label.title = \"Password\"\n self.admin_pass_label.text = \"*********\"\n self.ids.adminPasswordLayout.add_widget(self.admin_pass_label)\n\n # Database manipulation here\n try:\n conn = self.connect_database(\"user_main.db\")\n # READ ME: Here we're supposing that admin table has just one entry with id=1\n self.update_database(\"admin\", conn, \"pass\", self.admin_password, \"id\", 1)\n except Error:\n Snackbar(text=\"Error updating admin password\", duration=2).show()\n else:\n Snackbar(text=\"Admin password updated\", duration=2,).show()", "title": "" }, { "docid": "21b38bf732243787051fd44d9773bbcf", "score": "0.6228653", "text": "def patch(self) -> (str,int):\n\t\targs = self.parser.parse_args()\n\t\tcurrent_user = Users.find_by_email(args['email']) #t\n\n\t\tif current_user and Users.verify_hash(args['password_old'].encode(\"utf8\"), current_user.password.encode(\"utf8\")):\n\t\t\tcurrent_user.password = Users.generate_hash(args[\"password_new\"].encode(\"utf8\")).decode(\"utf8\") # decode necessary so that string (and not binary) is stored in the DB\n\t\t\tcurrent_user.commit()\n\t\t\treturn {\"message\": \"Password has been updated\"}, 200\n\t\telse:\n\t\t\treturn {\"message\": \"Incorrect credentials. 
Please Try again\"},403", "title": "" }, { "docid": "13b5c6f5f0862f78ded82a2aa4cb1cac", "score": "0.6227276", "text": "def validate_passcode_change(self, *_):\n if self.cache == self.input_password:\n if not len(self.cache) < 4:\n file = open(self.path, \"w\")\n print(file, \",\", self.path)\n file.write(self.cache)\n self.actual_password = self.cache\n file.close()\n else:\n print(\"failed validate_passcode_change\")", "title": "" }, { "docid": "25bfa229188578193b5722c921582cdb", "score": "0.6222048", "text": "def recover_change_password(request):\n json = request.json_body\n change_password_url_token = request.matchdict['change_password_hash']\n user = User.get_one(request, url_token=change_password_url_token )\n if user is not None:\n user.password=pbkdf2_sha256.hash(json['password'])\n user.url_token = None\n return {\n 'msg': \"Password was change\",\n 'success': True\n }\n return {\n 'msg': \"User not existing can't change password\",\n 'success': False}", "title": "" }, { "docid": "6034fe737513a0ec9ee6d512a1bddca0", "score": "0.6220868", "text": "def change_password():\n # TODO: change to encoded JSON\n data = request.get_json()\n session = Session()\n\n user = http_auth.current_user\n if user is None:\n session.expunge_all()\n session.close()\n return create_response(data, responses.INVALID_INPUT_422, ResponseMessages.AUTH_USERNAME_NOT_PROVIDED,\n User.__name__, 422)\n else:\n user.update_password(data[\"password_new\"], session, user.username)\n user = user.serialize()\n session.expunge_all()\n session.close()\n return create_response(user, responses.SUCCESS_200, ResponseMessages.AUTH_PASSWORD_CHANGED, User.__name__, 200)", "title": "" }, { "docid": "f4a3e25bba4cd316d5dcd7e333a7e9ec", "score": "0.6199565", "text": "def password(client, path, opt):\n if path.startswith('user:'):\n update_user_password(client, path[5:], opt)\n else:\n update_generic_password(client, path, opt)", "title": "" }, { "docid": "21ab8ff8d024aabf3149f6c9e4c4c8f3", "score": "0.61939967", "text": "def change_password(self, request):\n username = self.authdb.authenticate(request[\"token\"], self._config[\"role\"])\n salt, pwhash = hash_pw(request[\"password\"])\n with self.authdb as authdb:\n authdb.execute(\"UPDATE users SET pwhash=?, salt=? 
WHERE username=?\", (pwhash, salt, username))\n LOG.info(\"Password updated: {}\".format(username))\n return \"Password updated\"", "title": "" }, { "docid": "9ebdb3949c79293d7a49e370a0fef4e3", "score": "0.6190806", "text": "def change_password(request):\n user_id = request.POST.get('user_id')\n old_password = request.POST.get('old_password')\n new_password = request.POST.get('new_password')\n user = User.objects.get(id=user_id)\n if user.check_password(old_password):\n user.set_password(new_password)\n user.save()\n data = {'code': '0000', 'msg': '修改密码成功'}\n else:\n data = {'code': '0003', 'msg': '原密码错误'}\n return HttpResponse(json.dumps(data), content_type=\"application/json\")", "title": "" }, { "docid": "8b0559e19a9066bf286d70f7a8225c04", "score": "0.6166555", "text": "def password_change_request_handler(request):\n user = request.user\n if (user.is_staff or user.is_superuser) and request.POST.get('email_from_support_tools'):\n email = request.POST.get('email_from_support_tools')\n else:\n # Prefer logged-in user's email\n email = user.email if user.is_authenticated else request.POST.get('email')\n AUDIT_LOG.info(\"Password reset initiated for email %s.\", email)\n\n if getattr(request, 'limited', False):\n AUDIT_LOG.warning(\"Password reset rate limit exceeded for email %s.\", email)\n return HttpResponse(\n _(\"Your previous request is in progress, please try again in a few moments.\"),\n status=403\n )\n\n if email:\n try:\n request_password_change(email, request.is_secure())\n user = user if not request.POST.get('email_from_support_tools') and user.is_authenticated \\\n else _get_user_from_email(email=email)\n destroy_oauth_tokens(user)\n except errors.UserNotFound:\n AUDIT_LOG.info(\"Invalid password reset attempt\")\n # If enabled, send an email saying that a password reset was attempted, but that there is\n # no user associated with the email\n if configuration_helpers.get_value('ENABLE_PASSWORD_RESET_FAILURE_EMAIL',\n settings.FEATURES['ENABLE_PASSWORD_RESET_FAILURE_EMAIL']):\n site = get_current_site()\n message_context = get_base_template_context(site)\n\n message_context.update({\n 'failed': True,\n 'request': request, # Used by google_analytics_tracking_pixel\n 'email_address': email,\n })\n\n msg = PasswordReset().personalize(\n recipient=Recipient(lms_user_id=0, email_address=email),\n language=settings.LANGUAGE_CODE,\n user_context=message_context,\n )\n ace.send(msg)\n except errors.UserAPIInternalError as err:\n log.exception('Error occurred during password change for user {email}: {error}'\n .format(email=email, error=err))\n return HttpResponse(_(\"Some error occurred during password change. 
Please try again\"), status=500)\n\n return HttpResponse(status=200)\n else:\n return HttpResponseBadRequest(_(\"No email address provided.\"))", "title": "" }, { "docid": "14c968b30ddf42ff4d05f022dc4d7d33", "score": "0.61661875", "text": "def change_password():\n current_user = get_current_user()\n\n data = request.get_json()\n\n if (\"password1\" not in data.keys()) or (\"password2\" not in data.keys()) or (\"old_password\" not in data.keys()):\n return jsonify({\"status\": \"error\", \"detail\": \"Missing data (password1, password2 or old_password)\"}), 400\n\n if not is_password(data[\"password1\"]):\n return (\n jsonify({\"status\": \"error\", \"detail\": \"Password must be at least 8 characters and contain a uppercase letter, a lowercase letter and a number\"}),\n 400,\n )\n\n if data[\"password1\"] != data[\"password2\"]:\n return jsonify({\"status\": \"error\", \"detail\": \"Passwords don't match\"}), 400\n\n if not current_user.check_password(data[\"old_password\"]):\n return jsonify({\"status\": \"error\", \"detail\": \"Old password is incorrect\"}), 400\n\n current_user.set_password(data[\"password1\"])\n current_user.first_login = False\n\n db.session.commit()\n return jsonify({\"status\": \"OK\", \"detail\": \"Password changed successfuly\"}), 200", "title": "" }, { "docid": "e061520aa472fd179c0e9b24e6460b08", "score": "0.61614054", "text": "def test_set_password(self):\n pass", "title": "" }, { "docid": "b12a1ec082578cb24164e6506ecff356", "score": "0.6155073", "text": "def reset_passcode_entry(self, *_):\n self.input_password = \"\"\n self.led_board.flash_all_leds(1)", "title": "" }, { "docid": "6ee3ae4afc8396f281ecdcfa86917671", "score": "0.6142134", "text": "def test_change_password_success(self):\n payload = {\n 'old_password': 'pass',\n 'new_password': 'pass2'\n }\n res = self.client.post(PASSWORD_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.user.refresh_from_db()\n res = self.user.check_password(payload['new_password'])\n self.assertTrue(res)", "title": "" }, { "docid": "7ded59c1713625dc08b3e4064d784e54", "score": "0.6140753", "text": "def show_reset():\r\n\r\n form = ResetPasswordForm()\r\n token = request.args.get(\"key\")\r\n user = User.query.filter_by(password_reset=token).first()\r\n if not user or not token:\r\n return render_template(\"404.html\")\r\n\r\n if form.validate_on_submit():\r\n password = form.password.data\r\n user.password = User.hashed_new_password(password)\r\n user.password_reset = None\r\n db.session.commit()\r\n flash(\"Password reset. 
Please login.\")\r\n return redirect(\"/login\")\r\n\r\n else:\r\n return render_template(\"reset_password.html\", form=form)", "title": "" }, { "docid": "7d1f6e8c50bbe159fd6fe7e2432cad1d", "score": "0.61396396", "text": "async def change_psw(new_password_request: NewPassword, user: User = Depends(get_current_user)):\n\n if not verify_password(new_password_request.old_pwd, user['hashed_password']):\n raise HTTPException(status_code=401, detail=\"Wrong old password\")\n email = user['email']\n try:\n new_hash = get_password_hash(new_password_request.new_pwd)\n with db_session:\n user = db.DB_User.get(email=email)\n user.set(hashed_password=new_hash)\n commit()\n except Exception as e:\n print(e)\n raise HTTPException(\n status_code=503, detail=\"Service unavailable, try again soon\")\n return {\"message\": \"You have changed your password succesfully\"}", "title": "" }, { "docid": "d345b7606c97ce5c4fb1fcde85953d4d", "score": "0.6138195", "text": "def test_hide_change_password(self):\r\n user = User.objects.get(username='super')\r\n user.set_unusable_password()\r\n user.save()\r\n\r\n response = self.client.get('/test_admin/admin/')\r\n self.assertNotContains(response, reverse('admin:password_change'),\r\n msg_prefix='The \"change password\" link should not be displayed if a user does not have a usable password.')", "title": "" }, { "docid": "79301fad4822b0d67ab2b3a1b6039c35", "score": "0.6130966", "text": "def update_password(username):\n\n # Finds username in collection based on session username\n\n username = coll_users.find_one({'username': session['username'\n ]})['username']\n\n # Defines form as PasswordForm from users/form.py\n\n form = PasswordForm()\n\n if form.validate_on_submit():\n\n # Passes username variable to form\n\n form = PasswordForm(username)\n\n # If current password field matches existing password in collection\n\n if check_password_hash(coll_users.find_one({'username': username})['pass'\n ], request.form.get('current_password')):\n\n # User entry in collection is updated with new password\n\n coll_users.update_one({'username': username},\n {'$set': {'pass': generate_password_hash(request.form.get('new_password'\n ))}})\n return redirect(url_for('users.account', username=username))\n else:\n\n flash('Original password is incorrect!')\n return redirect(url_for('users.update_password',\n username=username))\n\n return render_template('change_password.html', username=username,\n form=form, title='Change Password')", "title": "" }, { "docid": "89658a05a1c6ab3ba66c2b3ec1b8f2e8", "score": "0.61306626", "text": "def user_change_password(user_id: int) -> None:\n publish(\n task_name='email_user_change_password',\n message=json.dumps({'user_id': user_id}),\n queue=queue\n )", "title": "" }, { "docid": "16c62dc3da401c389d98a265cf6d4eda", "score": "0.6129812", "text": "def user_change_password(request, pk):\n try:\n user = validations_utils.user_validation(pk) # Validates if user exists or not.\n # validations_utils.user_token_validation(request.auth.user_id, pk) # Validates user's Token authentication.\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n if request.method == 'PUT':\n try:\n request.data['current_password']\n except KeyError:\n return Response(messages.REQUIRED_CURRENT_PASSWORD,\n status=status.HTTP_400_BAD_REQUEST)\n try:\n new_password = request.data['new_password']\n if new_password is None or not re.match(r'[A-Za-z0-9@#$%^&+=]+', new_password):\n return Response(messages.PASSWORD_NECESSITY, 
status=status.HTTP_406_NOT_ACCEPTABLE)\n else:\n pass\n except KeyError:\n return Response(messages.REQUIRED_NEW_PASSWORD, status=status.HTTP_400_BAD_REQUEST)\n data_keys = request.data.keys()\n # Change Password will only require current_password and new_password.\n if 'current_password' in data_keys and 'new_password' in data_keys:\n current_password = request.data['current_password']\n new_password = request.data['new_password']\n try:\n password = utils.change_password(current_password, new_password, user) # Changes password.\n return Response(password, status=status.HTTP_200_OK)\n except ValidationException as e:\n return Response(e.errors, status=e.status)", "title": "" }, { "docid": "0eae011ce82f300c9200518f66247d35", "score": "0.6121552", "text": "def update_password_btn_click(self):\n self.visible_element_click(SettingsPageLocators.UPDATE_PASSWORD_BTN)", "title": "" }, { "docid": "ca3d52ac8f496009019d10e9a36a8cd1", "score": "0.6120685", "text": "def intern_profile():\n form = PasswordForm()\n # Form submitted?\n if form.validate_on_submit():\n # Fetch current user's data\n user_data = User.query.filter_by(id = g.user.id).first()\n # Check if old password was correct\n if check_password_hash(user_data.password, form.password.data):\n # Generate new password\n user_data.password = generate_password_hash(form.newpassword.data)\n # Done, commit to database\n db.session.commit()\n flash('Password changed!')\n return redirect(url_for('intern_profile'))\n return render_template('intern/profile.html', form = form)", "title": "" }, { "docid": "7f3a06d74dda5e9075e6d5ea91ad4b9b", "score": "0.6094665", "text": "async def _perform_mutation(\n self,\n progress_reporter: ProgressReporter,\n context: AppLoggedInUseCaseContext,\n args: ChangePasswordArgs,\n ) -> None:\n async with self._domain_storage_engine.get_unit_of_work() as uow:\n try:\n auth = await uow.auth_repository.load_by_parent(context.user.ref_id)\n auth = auth.change_password(\n current_password=args.current_password,\n new_password=args.new_password,\n new_password_repeat=args.new_password_repeat,\n source=EventSource.CLI,\n modification_time=self._time_provider.get_current_time(),\n )\n auth = await uow.auth_repository.save(auth)\n except IncorrectPasswordError as err:\n raise InvalidChangePasswordCredentialsError(\"Invalid password\") from err", "title": "" }, { "docid": "543c99944abb0b0c855f769cda523135", "score": "0.60830545", "text": "def test_password_change_returns_validation_errors(self):\n u = self.create_user()\n u.username = '1' # trigger error\n user_service = get_service('user.user_service')\n with events.events.disconnect_receivers():\n res = user_service.change_password(u, '0987654')\n self.assertIsInstance(res, Result)", "title": "" }, { "docid": "a69857dc4525f6b32b08f6005acefc02", "score": "0.608104", "text": "def passwdcmd():\n store = Store()\n secrets = store.load_secrets()\n\n newpasswd = getpass.getpass(\"Enter new password:\")\n confirmpasswd = getpass.getpass(\"Confirm new password:\")\n\n if not newpasswd == confirmpasswd:\n raise click.ClickException(\"New passwords did not match. 
Aborting.\")\n\n store.save_secrets(secrets, newpasswd)", "title": "" }, { "docid": "16ef4cad5b3cf3c8b486ad6b9e14ba62", "score": "0.60734004", "text": "def Auth(self):\n secret = hashlib.sha224('thepassword').hexdigest() # Hash the secret password\n attempts = 1\n maxattempts = 3\n \n dlg = wx.TextEntryDialog(self, message=\"Enter password\", caption=\"Auth\", style=wx.OK|wx.PASSWORD)\n dlg.ShowModal()\n password = hashlib.sha224(dlg.GetValue()).hexdigest()\n \n while not password == secret and attempts < maxattempts:\n attempts += 1\n dlg.SetValue(\"\")\n dlg.ShowModal()\n password = hashlib.sha224(dlg.GetValue()).hexdigest()\n \n if attempts > maxattempts-1:\n wx.MessageDialog(self,\n \"Max number of password attempts ({}) has been reached\".format(maxattempts),\n style=wx.OK).ShowModal()\n sys.exit(0)\n \n dlg.Destroy()", "title": "" }, { "docid": "a8e1041e895113112f3ea1709a607f5c", "score": "0.6069156", "text": "def askPlayerPassword(player):\r\n \r\n status = player.status\r\n \r\n if status is GETNEWPASSWORD:\r\n player.transport.write(\"Enter new password: \")\r\n return\r\n elif status is CONFIRMPASSWORD:\r\n player.transport.write(\"Confirm new password: \")", "title": "" }, { "docid": "09a85b9dcab5a241affb540540ab0e55", "score": "0.6068351", "text": "def school_profile():\n form = PasswordForm()\n # Form submitted?\n if form.validate_on_submit():\n # Fetch current user's data\n user_data = User.query.filter_by(id = g.user.id).first()\n # Check if old password was correct\n if check_password_hash(user_data.password, form.password.data):\n # Generate new password\n user_data.password = generate_password_hash(form.newpassword.data)\n # Done, commit to database\n db.session.commit()\n flash('Password changed!')\n return redirect(url_for('school_index'))\n return render_template('school/profile.html', form = form)", "title": "" }, { "docid": "355ed311e23e81873b1235de66023f16", "score": "0.605592", "text": "def click_account_settings_change_password_page_button(self):\n\n self.hover(*AccountSettingsIdentifiers.CHANGE_PASSWORD)\n self.find_element(*AccountSettingsIdentifiers.CHANGE_PASSWORD).click()\n page_title = self.find_element(*AccountSettingsManageProfileInfoIdentifiers.PAGE_TITLE).text\n if page_title == 'Change Password':\n return CanonizerAccountSettingsPage(self.driver)", "title": "" }, { "docid": "97d6385f722769c12023da5d8a1a0b75", "score": "0.60550267", "text": "def UpdatePassword(self, plaintext):\n self.update(self.HashPassword(plaintext))\n self.Save()", "title": "" } ]
2f37efc678815635b5de026c34c64cf8
Create test fixture for source meta
[ { "docid": "7e10778b7fef0a7a07fbe08678b5d731", "score": "0.0", "text": "def source_meta():\n return [SourceName.HGNC.value, SourceName.ENSEMBL.value, SourceName.NCBI.value]", "title": "" } ]
[ { "docid": "1092eee1fd63f4e443c3f1744e987661", "score": "0.6867291", "text": "def pytest_generate_tests(metafunc):\n if \"lf_data\" in metafunc.fixturenames:\n with open(DATA_DIR / \"reference_data.csv\") as f:\n reader = csv.DictReader(f)\n records = list(reader)\n metafunc.parametrize(\"lf_data\", records)", "title": "" }, { "docid": "75d302fac7f2c51907877667f2fcea0e", "score": "0.6857486", "text": "def fixtures():", "title": "" }, { "docid": "f06b75f815d4fb7f85416a24f5b40ebd", "score": "0.6747181", "text": "def pytest_generate_tests(metafunc):\n with open(metafunc.config.getoption('test_conf'), \"r\") as file:\n test_cases = yaml.safe_load(file)\n if test_cases:\n metafunc.parametrize(\"instance\", test_cases)", "title": "" }, { "docid": "f4bd42cc05fe8b122ee74fb709a0ec61", "score": "0.6696803", "text": "def test_create_source(self):\n self.assertEqual(self.source.id, \"bbc-news\")\n self.assertEqual(self.source.name, \"BBC News\")\n self.assertEqual(self.source.image, \"images/bbc.jpg\")", "title": "" }, { "docid": "4fa90bd25bd66cb06b24d76a72c071fc", "score": "0.64444506", "text": "def test_fixture(self, testdir):\n testdir.makeconftest(\"\"\"\n from seaworthy.pytest.fixtures import image_fetch_fixture\n\n fixture = image_fetch_fixture('busybox', name='image')\n \"\"\".format(IMG))\n\n testdir.makepyfile(\"\"\"\n def test_image_fetch(image):\n assert 'busybox:latest' in image.tags\n \"\"\")", "title": "" }, { "docid": "3b5e9e8f0406c52014e3f6238d9a4ea0", "score": "0.6424012", "text": "def _create_test_sources(n=100):\n files = _get_test_models()['files']\n\n for i in range(1, n + 1):\n s = model.Source()\n s.key = unicode(i)\n if i in range(1, 11):\n s.type = u'article'\n s.author = u'Author Mc%d' % i\n s.title = u'Title %d' % i\n s.journal = u'Journal %d' % i\n s.year = int('199%s' % str(i)[-1])\n elif i in range(11, 21):\n s.type = u'book'\n s.author = u'Author Mc%d' % i\n s.title = u'Title %d' % i\n s.journal = u'Publisher %d' % i\n s.year = int('199%s' % str(i)[-1])\n elif i in range(21, 31):\n s.type = u'booklet'\n s.title = u'Title %d' % i\n elif i in range(31, 41):\n s.type = u'conference'\n s.author = u'Author Mc%d' % i\n s.title = u'Title %d' % i\n s.booktitle = u'Book Title %d' % i\n s.year = int('199%s' % str(i)[-1])\n elif i in range(41, 51):\n s.type = u'inbook'\n s.editor = u'Editor Mc%d' % i\n s.title = u'Title %d' % i\n s.chapter = unicode(i)\n s.pages = u'9--36'\n s.publisher = u'Publisher %d' % i\n s.year = int('199%s' % str(i)[-1])\n elif i in range(51, 61):\n s.type = u'incollection'\n s.author = u'Author Mc%d' % i\n s.title = u'Title %d' % i\n s.booktitle = u'Book Title %d' % i\n s.publisher = u'Publisher %d' % i\n s.year = int('199%s' % str(i)[-1])\n elif i in range(61, 71):\n s.type = u'inproceedings'\n s.author = u'Author Mc%d' % i\n s.title = u'Title %d' % i\n s.booktitle = u'Book Title %d' % i\n s.year = int('199%s' % str(i)[-1])\n elif i in range(71, 81):\n s.type = u'manual'\n s.title = u'Title %d' % i\n elif i in range(81, 91):\n s.type = u'mastersthesis'\n s.author = u'Author Mc%d' % i\n s.title = u'Title %d' % i\n s.school = u'The University of %d' % i\n s.year = int('199%s' % str(i)[-1])\n else:\n s.type = u'misc'\n\n if i % 2 == 0:\n s.file_id = files[i - 1].id\n\n if i > 8:\n s.datetime_modified = yesterday_timestamp\n\n Session.add(s)\n Session.commit()", "title": "" }, { "docid": "d3d74d03e279b5fa1ff670d22dd652dc", "score": "0.6403999", "text": "def setup(self):\n self.new_source = Source(\"bloomberg\",\"Donald\",\"Test to see if Source is 
retrieved\",\"TestUrl\",\"The article itself\")", "title": "" }, { "docid": "f19249544899341781552258b93bb114", "score": "0.6339898", "text": "def setUp(self):\n self.new_sources = Source(1234, 'Amazing news', 'Thrilling news', 'https://abcnews.go.com', 'Technology',\n 'UnitedStates', 'English')", "title": "" }, { "docid": "02ef573885e6ac54e0bcc99e777d7fc7", "score": "0.6336975", "text": "def pytest_generate_tests(metafunc):\n if 'content' in metafunc.fixturenames:\n content = getattr(metafunc.function, '_content', None)\n if isinstance(content, list):\n metafunc.parametrize('content', [content])\n else:\n metafunc.parametrize('content', [[]])\n\n if 'buffer_size' in metafunc.fixturenames:\n buffer_size = getattr(metafunc.function, '_buffer_size', None)\n if isinstance(buffer_size, int):\n metafunc.parametrize('buffer_size', [buffer_size])\n else:\n metafunc.parametrize('buffer_size', [50])\n\n if 'delay' in metafunc.fixturenames:\n delay = getattr(metafunc.function, '_delay', None)\n if isinstance(delay, int):\n metafunc.parametrize('delay', [delay])\n else:\n metafunc.parametrize('delay', [100])", "title": "" }, { "docid": "a2d95d1231493f06e57d673b315f6bab", "score": "0.63079125", "text": "def pytest_generate_tests(metafunc):\n if \"machine\" in metafunc.fixturenames:\n metafunc.parametrize(\"machine\", metafunc.config.option.machine)", "title": "" }, { "docid": "c69100754848ae6a9c64bf1a62ca74d2", "score": "0.6306748", "text": "def setUp(self):\r\n self.fixtures = '/test_transforms/fixtures/do_while_to_goto'\r\n self.transformer = None", "title": "" }, { "docid": "18c27a2d2abf4e04e24d2b40b6057aee", "score": "0.6306504", "text": "def setUpTestData(cls):\n TEST_SERIES_FIELDS[\"patient\"] = Patient.objects.create(**TEST_PATIENT_FIELDS)\n TEST_SERIES_FIELDS[\"study\"] = Study.objects.create(**TEST_STUDY_FIELDS)\n TEST_IMAGE_FIELDS[\"series\"] = Series.objects.create(**TEST_SERIES_FIELDS)\n Image.objects.create(**TEST_IMAGE_FIELDS)", "title": "" }, { "docid": "18c27a2d2abf4e04e24d2b40b6057aee", "score": "0.6306504", "text": "def setUpTestData(cls):\n TEST_SERIES_FIELDS[\"patient\"] = Patient.objects.create(**TEST_PATIENT_FIELDS)\n TEST_SERIES_FIELDS[\"study\"] = Study.objects.create(**TEST_STUDY_FIELDS)\n TEST_IMAGE_FIELDS[\"series\"] = Series.objects.create(**TEST_SERIES_FIELDS)\n Image.objects.create(**TEST_IMAGE_FIELDS)", "title": "" }, { "docid": "4eddfcede9677e650f7d3b071197572a", "score": "0.63053465", "text": "def create_source(self, *args, **kwargs):", "title": "" }, { "docid": "1c9b0e38a9e07517188ad61041a74eaa", "score": "0.628374", "text": "def setUp(self):\n self.source = source.Source('bbc-news', 'BBC News', \"images/bbc.jpg\")", "title": "" }, { "docid": "1dec4d8c94f6b91496fffc3addb33af3", "score": "0.6275783", "text": "def setUpTestData(cls):\n cls.post = Post.objects.create(title=\"Foo\", content=\"bar\")", "title": "" }, { "docid": "9781c01ce7e99ce3f359d89ad7560878", "score": "0.6238775", "text": "def gen_fixture(file_name):\n @pytest.fixture(scope='function')\n def _test_fixture(self, *args, **kwargs):\n self.engine.mrt_schedule.init(\n peak_schedule=ONE_MIN_SCHEDULE, night_schedule=ONE_MIN_SCHEDULE, ordinary_schedule=ONE_MIN_SCHEDULE)\n self.engine.mrt_map.init(get_data_file_path(file_name))\n yield\n return _test_fixture", "title": "" }, { "docid": "9b35693479e55edec6559ab4798489ba", "score": "0.62309265", "text": "def setUpTestData(cls):", "title": "" }, { "docid": "a356901e7c7fb1bfe3db478d25deebc7", "score": "0.6224951", "text": "def test_init(self):\n 
self.assertEqual(self.new_source.id,\"bloomberg\")\n self.assertEqual(self.new_source.name,\"Donald\")\n self.assertEqual(self.new_source.description,\"Test to see if Source is retrieved\")\n self.assertEqual(self.new_source.url,\"TestUrl\")\n self.assertEqual(self.new_source.category,\"The article itself\")", "title": "" }, { "docid": "3ca35bab2aae08908c174fbca6c74cc1", "score": "0.62124413", "text": "def setUp(self):\n self.field = \"name\"\n self.existingName = \"Test\"\n self.nonExistingName = \"Blah\"\n self.expectedResult = {self.field:self.existingName}\n source = DummyDataSource([self.expectedResult])\n self.factory = DataSourceFactory(DummyClass, parameters, source, self.field)", "title": "" }, { "docid": "ae23c0fa2b75b3fe8f7981a0abb09745", "score": "0.6205022", "text": "def test_data(self):\n pass", "title": "" }, { "docid": "226785b22178a0303aefcfe3708da049", "score": "0.6195618", "text": "def setUp(self) -> None:\n\n self.tempfile = tempfile.NamedTemporaryFile()\n\n self.our_dataset = {\n \"ac\": [\"com.ac\", \"edu.ac\", \"gov.ac\", \"mil.ac\", \"net.ac\", \"org.ac\"],\n \"academy\": [\"official.academy\"],\n \"ad\": [\"nom.ad\"],\n }\n\n self.tempfile.write(json.dumps(self.our_dataset).encode())\n self.tempfile.seek(0)\n\n self.ps_dataset = PublicSuffixDataset()\n self.ps_dataset.source_file = self.tempfile.name\n\n self.get_content_patch = unittest.mock.patch.object(DatasetBase, \"get_content\")\n self.mock_get_content = self.get_content_patch.start()\n self.mock_get_content.return_value = copy.deepcopy(self.our_dataset)", "title": "" }, { "docid": "0a30228a33b7859c11d0f16739bdaae7", "score": "0.6188502", "text": "def pytest_generate_tests(metafunc):\n if 'resolution' in metafunc.fixturenames:\n metafunc.parametrize('resolution', [256, 512, 1024, 2048])", "title": "" }, { "docid": "db0116a376ac120b3062c8809cb96cf7", "score": "0.6108964", "text": "def pytest_generate_tests(metafunc):\n\n examples_files = list()\n schemas_files = dict()\n if metafunc.config.option.testdir:\n directory = metafunc.config.option.testdir\n if os.path.exists(directory):\n for subdir, namespace in pytest.path_to_namespace.items():\n if directory.startswith(subdir):\n schemas_files = scan_shapes(directory, namespace).values()\n else:\n LOGGER.error(\"%s not found\", directory)\n sys.exit()\n else:\n schemas_files = pytest.shape_to_file.values()\n for subdir, _ in pytest.path_to_namespace.items():\n examples_files.extend(scan_examples(subdir))\n\n if \"schema_file\" in metafunc.fixturenames:\n metafunc.parametrize(\"schema_file\", schemas_files)\n\n if \"shapes_file\" in metafunc.fixturenames:\n metafunc.parametrize(\"shapes_file,test_file,assertion\", examples_files)", "title": "" }, { "docid": "c3e9726d768a87d43ae76aad5ac4bc1e", "score": "0.6094707", "text": "def setUp(self):\n self.filepath = '/users/rahulbirmiwal/Documents/UWMSDS/DATA515/class.db'\n self.fixture = create_dataframe(self.filepath)", "title": "" }, { "docid": "046d477195c5f41ec84fa6d7d99076d6", "score": "0.6090883", "text": "def setUp(self):\n self.field = \"name\"\n self.existingName = \"Test\"\n self.nonExistingName = \"Blah\"\n \n self.expectedValues = {\"arg1\":1, \"arg2\":2, \"arg3\":3}\n self.data = dict(self.expectedValues)\n self.data[self.field] = self.existingName\n \n source = DummyDataSource([self.data])\n self.factory = DataSourceFactory(DummyClass, parameters, source, self.field)\n self.errorFactory = DataSourceFactory(DummyClass, [ErrorParameter()], source, self.field)", "title": "" }, { "docid": 
"453d5b58974a15261581c11713b69978", "score": "0.60846347", "text": "def build_test_dataloader(self, *args, **kwargs):", "title": "" }, { "docid": "89d2e209752adace0132ff3960670a4e", "score": "0.6081761", "text": "def pytest_generate_tests(metafunc):\n\n fastybus_settings = []\n for case in ['14', '30', '57', '118', '300']:\n basedir = os.path.join(os.path.dirname(__file__), 'PowerFlow',\n 'FastYBus', case)\n fastybus_settings.append((case, basedir))\n metafunc.parametrize(('case', 'basedir'), fastybus_settings)", "title": "" }, { "docid": "8d36009a6b8151608fbd0230405f3231", "score": "0.60814625", "text": "def setUpTestData(cls):\n data_gen.run()", "title": "" }, { "docid": "c341f21436ebb48c941cb49f597735de", "score": "0.6078957", "text": "def test_create_source(app_tester, init_database):\n data = create_source(Employee.query.all())\n assert isinstance(data, ColumnDataSource)\n assert \"workers\" in data.column_names\n\n assert isinstance(data.data, dict)\n # assert \"JaneLong\" in data.data[\"workers\"]\n # assert datetime(2020, 7, 15, 13, 30) in data.data[\"timeIn\"]\n # assert len(data.data[\"timeIn\"]) == 2", "title": "" }, { "docid": "cf631bb2884ff65ec4455038de04f326", "score": "0.60769624", "text": "def setUpTestData(cls):\n # data_gen.run()", "title": "" }, { "docid": "b3d65b68a4e698dbe097056fba85b9c3", "score": "0.60667014", "text": "def setUp(self):\n self.db = Database(\":memory:\")\n self.db.create_tables()\n\n # Add a source into the database to add articles to\n self.db.add_source(Source(\"http://test_source.com\"))", "title": "" }, { "docid": "03b6f572166ec664fa38713716404bea", "score": "0.6052302", "text": "def setUpTestData(cls):\n pass", "title": "" }, { "docid": "105638c1d39c88d47314f5dfcb9db34d", "score": "0.6017508", "text": "def test_create_template(self):\n pass", "title": "" }, { "docid": "2729a522bcb8e98392061e63b9d6fbb8", "score": "0.59995586", "text": "def setUp(self):\n # Defining the class arguments\n self.s3_access_key = 'AWS_ACCESS_KEY_ID'\n self.s3_secret_key = 'AWS_SECRET_ACCESS_KEY'\n self.s3_endpoint_url = 'https://s3.eu-central-1.amazonaws.com'\n self.s3_bucket_name_src = 'xetra-int-test-src'\n self.s3_bucket_name_trg = 'xetra-int-test-trg'\n self.meta_key = 'meta_file.csv'\n # Creating the source and target bucket on the mocked s3\n self.s3 = boto3.resource(service_name='s3', endpoint_url=self.s3_endpoint_url)\n self.src_bucket = self.s3.Bucket(self.s3_bucket_name_src)\n self.trg_bucket = self.s3.Bucket(self.s3_bucket_name_trg)\n # Creating S3BucketConnector testing instances\n self.s3_bucket_src = S3BucketConnector(self.s3_access_key,\n self.s3_secret_key,\n self.s3_endpoint_url,\n self.s3_bucket_name_src)\n self.s3_bucket_trg = S3BucketConnector(self.s3_access_key,\n self.s3_secret_key,\n self.s3_endpoint_url,\n self.s3_bucket_name_trg)\n # Creating a list of dates\n self.dates = [(datetime.today().date() - timedelta(days=day))\\\n .strftime(MetaProcessFormat.META_DATE_FORMAT.value) for day in range(8)]\n # Creating source and target configuration\n conf_dict_src = {\n 'src_first_extract_date': self.dates[3],\n 'src_columns': ['ISIN', 'Mnemonic', 'Date', 'Time',\n 'StartPrice', 'EndPrice', 'MinPrice', 'MaxPrice', 'TradedVolume'],\n 'src_col_date': 'Date',\n 'src_col_isin': 'ISIN',\n 'src_col_time': 'Time',\n 'src_col_start_price': 'StartPrice',\n 'src_col_min_price': 'MinPrice',\n 'src_col_max_price': 'MaxPrice',\n 'src_col_traded_vol': 'TradedVolume'\n }\n conf_dict_trg = {\n 'trg_col_isin': 'isin',\n 'trg_col_date': 'date',\n 'trg_col_op_price': 
'opening_price_eur',\n 'trg_col_clos_price': 'closing_price_eur',\n 'trg_col_min_price': 'minimum_price_eur',\n 'trg_col_max_price': 'maximum_price_eur',\n 'trg_col_dail_trad_vol': 'daily_traded_volume',\n 'trg_col_ch_prev_clos': 'change_prev_closing_%',\n 'trg_key': 'report1/xetra_daily_report1_',\n 'trg_key_date_format': '%Y%m%d_%H%M%S',\n 'trg_format': 'parquet'\n }\n self.source_config = XetraSourceConfig(**conf_dict_src)\n self.target_config = XetraTargetConfig(**conf_dict_trg)\n # Creating source files on mocked s3\n columns_src = ['ISIN', 'Mnemonic', 'Date', 'Time', 'StartPrice',\n 'EndPrice', 'MinPrice', 'MaxPrice', 'TradedVolume']\n data = [['AT0000A0E9W5', 'SANT', self.dates[5], '12:00', 20.19, 18.45, 18.20, 20.33, 877],\n ['AT0000A0E9W5', 'SANT', self.dates[4], '15:00', 18.27, 21.19, 18.27, 21.34, 987],\n ['AT0000A0E9W5', 'SANT', self.dates[3], '13:00', 20.21, 18.27, 18.21, 20.42, 633],\n ['AT0000A0E9W5', 'SANT', self.dates[3], '14:00', 18.27, 21.19, 18.27, 21.34, 455],\n ['AT0000A0E9W5', 'SANT', self.dates[2], '07:00', 20.58, 19.27, 18.89, 20.58, 9066],\n ['AT0000A0E9W5', 'SANT', self.dates[2], '08:00', 19.27, 21.14, 19.27, 21.14, 1220],\n ['AT0000A0E9W5', 'SANT', self.dates[1], '07:00', 23.58, 23.58, 23.58, 23.58, 1035],\n ['AT0000A0E9W5', 'SANT', self.dates[1], '08:00', 23.58, 24.22, 23.31, 24.34, 1028],\n ['AT0000A0E9W5', 'SANT', self.dates[1], '09:00', 24.22, 22.21, 22.21, 25.01, 1523]]\n self.df_src = pd.DataFrame(data, columns=columns_src)\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[0:0],\n f'{self.dates[5]}/{self.dates[5]}_BINS_XETR12.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[1:1],\n f'{self.dates[4]}/{self.dates[4]}_BINS_XETR15.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[2:2],\n f'{self.dates[3]}/{self.dates[3]}_BINS_XETR13.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[3:3],\n f'{self.dates[3]}/{self.dates[3]}_BINS_XETR14.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[4:4],\n f'{self.dates[2]}/{self.dates[2]}_BINS_XETR07.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[5:5],\n f'{self.dates[2]}/{self.dates[2]}_BINS_XETR08.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[6:6],\n f'{self.dates[1]}/{self.dates[1]}_BINS_XETR07.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[7:7],\n f'{self.dates[1]}/{self.dates[1]}_BINS_XETR08.csv','csv')\n self.s3_bucket_src.write_df_to_s3(self.df_src.loc[8:8],\n f'{self.dates[1]}/{self.dates[1]}_BINS_XETR09.csv','csv')\n columns_report = ['ISIN', 'Date', 'opening_price_eur', 'closing_price_eur',\n 'minimum_price_eur', 'maximum_price_eur', 'daily_traded_volume', 'change_prev_closing_%']\n data_report = [['AT0000A0E9W5', self.dates[3], 20.21, 18.27, 18.21, 21.34, 1088, 10.62],\n ['AT0000A0E9W5', self.dates[2], 20.58, 19.27, 18.89, 21.14, 10286, 1.83],\n ['AT0000A0E9W5', self.dates[1], 23.58, 24.22, 22.21, 25.01, 3586, 14.58]]\n self.df_report = pd.DataFrame(data_report, columns=columns_report)", "title": "" }, { "docid": "ec98098510ffdb24345c70561109bbe4", "score": "0.5967185", "text": "def test_simple_fixture(self):\n\n self.pge_fixture_test(\n \"simple_fixture_input.json\", \"simple_fixture_expected.json\"\n )", "title": "" }, { "docid": "bd527810a4cf316f548addbb937bfe39", "score": "0.595181", "text": "def pytest_generate_tests(metafunc):\n TRAPI_KP_edges = generate_TRAPI_KP_tests(metafunc)\n generate_TRAPI_ARA_tests(metafunc,TRAPI_KP_edges)", "title": "" }, { "docid": "3a4369f32297468613903d69ea61a304", 
"score": "0.5948683", "text": "def test_template_from_data(self):\n self.dummyscript.create_templates_from_data(\n [DummyClamServer.DummyTemplate()]\n )", "title": "" }, { "docid": "72d1fc4609a62696b918e78c074f9c29", "score": "0.5944972", "text": "def setUpTestData(cls):\n TestDatabase.create()", "title": "" }, { "docid": "e991a7559593e2812177b89fd1cb3a1f", "score": "0.5942099", "text": "def createTest(self):\n tests = shelve.open(self.datafile)\n numberOfTests = self.getNumberOfTests()\n temp = tests['Tests']\n temp.append([numberOfTests, self.testName, self.testContent, self.testType, self.deadline])\n tests['Tests'] = temp\n tests.sync()\n tests.close()", "title": "" }, { "docid": "ba8198ef139bf230dd77871367ec1d43", "score": "0.59291416", "text": "def setUp(self):\n dbtype = 'csv'\n test_config_file = os.path.join(SCRIPT_DIR, TEST_CONFIG_FILE_NAME)\n db_config = read_config(test_config_file, dbtype)\n db_config['directory'] = os.path.dirname(test_config_file)\n self.targets_tbl = TargetsTable.factory(db_config, dbtype, False)", "title": "" }, { "docid": "28b2e1f3fb87f4e0e3f3cd9aeb07e279", "score": "0.5927309", "text": "def populate(self):\n populate_standard_test()", "title": "" }, { "docid": "ca05663cad9b340286329fdb5985a189", "score": "0.5911395", "text": "def test_generate_from_file():\n pass", "title": "" }, { "docid": "8aec49c37c6a71c4bbb9cb3b83e8bf42", "score": "0.59089345", "text": "def fixture_mk(request):\n mk_dir(TEST_DIR)\n setup_mk(request.param)\n yield request.param\n teardown()", "title": "" }, { "docid": "7d864b608b339eb485e76ea9c99d49c3", "score": "0.5902019", "text": "def test_get_metadata(self):\n pass", "title": "" }, { "docid": "0a318520ca736f19b617e75660e4ddc7", "score": "0.58955145", "text": "def pytest_generate_tests(metafunc):\n if EXPORTS_KEY in metafunc.fixturenames:\n metafunc.parametrize(\n EXPORTS_KEY,\n list(\n itertools.chain(\n *[\n itertools.combinations(CLASS_NAMES, k)\n for k in range(1, 1 + len(CLASS_NAMES))\n ]\n )\n ),\n )\n # Tests should hold regardless of parser style.\n metafunc.parametrize(\"parse_style\", PARSERS.keys())", "title": "" }, { "docid": "94b2a9f801d485ca744c845b6ecb6b5d", "score": "0.5890834", "text": "def setUpTestData(cls):\n setup_test_data(cls)", "title": "" }, { "docid": "94b2a9f801d485ca744c845b6ecb6b5d", "score": "0.5890834", "text": "def setUpTestData(cls):\n setup_test_data(cls)", "title": "" }, { "docid": "a09a38c0313296a43afba46a32129ae0", "score": "0.5880418", "text": "def setUp(self):\n fixtures.create_tags()\n fixtures.create_bookmarks()\n fixtures.create_bookmarks_tags()", "title": "" }, { "docid": "7ce6e548f9dcc818f9f1d4de879946a4", "score": "0.58736247", "text": "def generate_fixture(content: str):\n pass # pylint: disable=unnecessary-pass\n\n @pytest.fixture(scope='module')\n def my_fixture():\n return content\n\n return my_fixture", "title": "" }, { "docid": "0ba447745a2a6804f579e5170e040f50", "score": "0.587079", "text": "def test_generation(self):\n pass", "title": "" }, { "docid": "eeb9ef86fd15719d908ef9c74cb66de8", "score": "0.58639896", "text": "def test_generate(self,generatorBaseTestFixture):\n generatorBaseTestFixture.generate()", "title": "" }, { "docid": "3be7c0319fd6d095ff658207c1cc47f4", "score": "0.58626944", "text": "def my_fixture():\n settings.data = {}\n yield", "title": "" }, { "docid": "ee8aa57de8eed99d8062c2278ae6e36f", "score": "0.58605456", "text": "def setUpTestData(cls):\n Country.objects.create(name=\"United Kingdom\")\n #Profile.objects.create(username='testing')", "title": "" }, { 
"docid": "d256c18090298a0a0e74bd28a470daa1", "score": "0.5826071", "text": "def setUp(self):\n\n super(TestCase, self).setUp()\n\n FLAGS(sys.argv) # Required for tests that use flags in open-source.\n tempdir = tempfile.mkdtemp(dir=absltest.get_default_test_tmpdir())\n self.pipeline_root = os.path.join(tempdir, self.pipeline_name)\n tf.io.gfile.makedirs(self.pipeline_root)\n logging.info(\"pipeline_root located at %s\", self.pipeline_root)\n self.metadata_path = os.path.join(self.pipeline_root, \"mlmd.sqlite\")\n logging.info(\"MLMD SQLite instance located at %s\", self.metadata_path)", "title": "" }, { "docid": "83f7dd76e4147e0c831245b881594fcf", "score": "0.5825985", "text": "def setUp(self):\n self._source_scanner = source_scanner.SourceScanner()", "title": "" }, { "docid": "5f1e07af868ab9f6f3fd3ef8df9fc082", "score": "0.58189845", "text": "def setUp(self):\n with open('tests/yaml_call_test.yml', 'r') as myfile:\n self.yamldata=myfile.read()\n datamap=yaml.load(self.yamldata)\n self.r = Recipe(datamap)", "title": "" }, { "docid": "088bb775aafdbab806afff75f35ea230", "score": "0.5786184", "text": "def test_adobe_fixture(self):\n self.sj_water_fixture_test(\"Adobe_input.json\", \"Adobe_expected.json\")", "title": "" }, { "docid": "26309593c826bb34ae0c88f65cf9a134", "score": "0.5778827", "text": "def setup_test_metadata_db(url):\n\n db = HypernetsDBBuilder.create_db_template(url, \"metadata\")\n\n # todo - add test data to test metadata db\n\n db.commit()\n del db", "title": "" }, { "docid": "f3b09b859a23f151932eb8b176511a63", "score": "0.57596767", "text": "def testUsingTestData(self):\n # TODO:\n pass", "title": "" }, { "docid": "f3b09b859a23f151932eb8b176511a63", "score": "0.57596767", "text": "def testUsingTestData(self):\n # TODO:\n pass", "title": "" }, { "docid": "f3b09b859a23f151932eb8b176511a63", "score": "0.57596767", "text": "def testUsingTestData(self):\n # TODO:\n pass", "title": "" }, { "docid": "0a2e3a7520b6d14f46dfbe6c84151644", "score": "0.57574105", "text": "def __init__(self, project_name, project_definition, project_version):\n super(TestSourceHelper, self).__init__(project_name, project_definition)\n self._project_version = project_version\n self._source_directory_path = '{0:s}-{1:s}'.format(\n project_name, project_version)\n self._source_package_filename = '{0:s}-{1:s}.tar.gz'.format(\n project_name, project_version)", "title": "" }, { "docid": "a2d98e95c814000cd9d46435e1b19402", "score": "0.5753273", "text": "def prepareTest(self, test):\r\n pass", "title": "" }, { "docid": "6467087e61990e9c4e24acde51e2883c", "score": "0.5740843", "text": "def test_xilinx_fixture(self):\n self.sj_water_fixture_test(\"Xilinx_input.json\", \"Xilinx_expected.json\")", "title": "" }, { "docid": "39acd27c7cf8f9a46ab7e9713cb8559f", "score": "0.57334787", "text": "def setUp(self):\n self.data = {'url': 'http://www.google.com',\n 'slug': 'a'}", "title": "" }, { "docid": "0ac31974b9584d4e3bd098a710d5616b", "score": "0.57293856", "text": "def setUp(self):\n\n self.b1 = BaseModel()\n self.a1 = Amenity()\n self.c1 = City()\n self.p1 = Place()\n self.r1 = Review()\n self.s1 = State()\n self.u1 = User()\n self.storage = FileStorage()\n if os.path.exists(\"file.json\"):\n pass\n else:\n os.mknod(\"file.json\")", "title": "" }, { "docid": "75681c54b5834ea2c1a22d48a210ce28", "score": "0.57268417", "text": "def create_metadata():\n pass", "title": "" }, { "docid": "ce51433830623e74bfd195c51730ba14", "score": "0.5721293", "text": "def setUpClass(cls):\n call_command('loaddata', test_data_file_path, 
verbosity=0)\n super(DatasetCatalogApiReadTestV1, cls).setUpClass()", "title": "" }, { "docid": "4592b66df4eb9cd312936fb56a017650", "score": "0.5709527", "text": "def fixture_create_run():\n return create_run()", "title": "" }, { "docid": "bd432e67490896e78fab09eff80f4e83", "score": "0.5709249", "text": "def prepareTestCase(self, test):\r\n pass", "title": "" }, { "docid": "6e23029d146ae6c360404c6b23d637ac", "score": "0.57078254", "text": "def setUp(self):\n\n conf_file = os.path.join(os.getcwd(), 'data/config.yaml')\n self.config = metadata_updater.ConfigReader(conf_file)\n self.lds_test_layer = '95322'\n self.pub_id = None", "title": "" }, { "docid": "86c19a64b25fb0406f0a412e6cd6ac4c", "score": "0.56824225", "text": "def setUp(self):\n\n conf_file = os.path.join(os.getcwd(), 'data/config.yaml')\n self.config = metadata_updater.ConfigReader(conf_file)\n self.lds_test_layer = '95322'", "title": "" }, { "docid": "73b64bcab89f7fc3325522c7319c77d8", "score": "0.56813616", "text": "def setUp(self):\n super().setUp()\n self.file_name = \"testrc\"", "title": "" }, { "docid": "84a534d46d6000a7ed7afb245a006796", "score": "0.5678312", "text": "def test_get_templates(self):\n pass", "title": "" }, { "docid": "441bde01fb5ca4b318892f8dd1770d9b", "score": "0.56727505", "text": "def setup():\n\n print >> sys.stderr, \"SETUP: nwispy_webservice tests\"\n\n fixture[\"data file\"] = \\\n \"\"\" \n # data_type\tsite_num\tstart_date\tend_date\tparameters\n dv\t03284000\t2014-01-01\t2014-03-10\t00060\t00065 \n iv\t03375000\t2014-02-12\t2014-02-19\t00010\t00045\t00060 \n \"\"\" \n\n fixture[\"data requests\"] = [\n {\"end date\": \"2014-01-15\", \n \"data type\": \"dv\", \n \"start date\": \"2014-01-01\", \n \"parameters\": [\"00060\"], \n \"site number\": \"03284000\"\n }, \n {\"end date\": \"2014-01-15\", \n \"data type\": \"dv\", \n \"start date\": \"2014-01-01\", \n \"parameters\": [\"00060\", \"00065\"], \n \"site number\": \"03284000\"\n }, \n {\"end date\": \"2014-02-19\", \n \"data type\": \"iv\", \n \"start date\": \"2014-02-12\", \n \"parameters\": [\"00060\", \"00065\", \"00045\"], \n \"site number\": \"03284000\"\n },\n {\"data type\": \"\",\n \"site number\": \"\",\n \"start date\": \"\",\n \"end date\": \"\",\n \"parameters\": \"\", \n }\n ]", "title": "" }, { "docid": "18ddd0565a4fbdf4618c9bc5064ed707", "score": "0.56714", "text": "def pytest_generate_tests(metafunc):\n estimator_classes = get_estimators(\n estimator_types=estimator_types, return_names=False)\n\n if \"estimator_class\" in metafunc.fixturenames:\n metafunc.parametrize(\"estimator_class\", estimator_classes)\n\n # if estimator test, construct all instances for the test\n if \"estimator_instance\" in metafunc.fixturenames:\n estimator_instances = [estimator.create_test_instance()\n for estimator in estimator_classes]\n metafunc.parametrize(\"estimator_instance\", estimator_instances)", "title": "" }, { "docid": "2396f23a35e2452bb60cabddd1e697d9", "score": "0.5668637", "text": "def test_data_source_file():\n test_data_source = util.DataSource(absolute_data_path(\"simplefile\"))\n test_data_source.open()\n data_source_file_handle = test_data_source.source\n data_source_contents = data_source_file_handle.read()\n assert_equals(data_source_contents, \"onionperf\")", "title": "" }, { "docid": "1faeaeebf8493faaeb5fde4ea86bf039", "score": "0.56670856", "text": "def setup_build_tests(self):\n tests = self.test_list\n relative_test_dir = self.test_base_path\n files_to_cpy = []\n header = \"test/argparsing.h\"\n for test in tests:\n test_path 
= join_path(relative_test_dir, test + \".c\")\n files_to_cpy.append(test_path)\n files_to_cpy.append(header)\n self.cache_extra_test_sources(files_to_cpy)", "title": "" }, { "docid": "3f017d79b61b0f19bbd3e875feb242c7", "score": "0.5666451", "text": "def _build_test_samples(self):\n for name, test_sample in self.bio_sample.test_samples.items():\n yield name, self.__class__.test_sample_class(self, test_sample)", "title": "" }, { "docid": "7a269e2203364ed7210c9d39e3af9ef7", "score": "0.56623465", "text": "def test_lamesa_fixture(self):\n self.pge_fixture_test(\n \"lamesa_input.json\", \"lamesa_expected.json\", start_date=date(2018, 1, 1)\n )", "title": "" }, { "docid": "014c828b15e7defb63b8816b369e2751", "score": "0.5638679", "text": "def setUp(self):\n super().setUp()\n self.slo = baker.make_recipe('makeReports.sloInReport',slo__numberOfUses=3)", "title": "" }, { "docid": "92971fb15a16abf3778e4b0f1c944b1b", "score": "0.5633382", "text": "def setUp(self):\n self.cube = set_up_variable_cube(\n 280 * np.ones((3, 3), dtype=np.float32),\n attributes={\n \"mosg__grid_version\": \"1.3.0\",\n \"mosg__model_configuration\": \"uk_det\",\n },\n )\n self.metadata_dict = {\n \"mosg__grid_version\": \"remove\",\n \"source\": \"IMPROVER unit tests\",\n \"mosg__model_configuration\": \"other_model\",\n }", "title": "" }, { "docid": "d19dc939ec49a96478f6fe6a0b3c7b7a", "score": "0.56316876", "text": "def setUp(self):\n super(TestResources, self).setUp()", "title": "" }, { "docid": "62ac6914629d5b88becb3049c90c9b5a", "score": "0.56309825", "text": "def setUpTestData(cls):\n User.objects.create_user(\n 'Georgie', '[email protected]', '12345678'\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test 1\",\n category=\"Food\",\n amount=50,\n converted_amount=10,\n currency=\"ILS\",\n who_for=\"Everyone\",\n who_paid=\"Georgie\"\n )\n Expense.objects.create(\n date=date.today().replace(day=1),\n description=\"Test 2\",\n category=\"Food\",\n amount=100,\n converted_amount=20,\n currency=\"ILS\",\n who_for=\"Everyone\",\n who_paid=\"Georgie\"\n )\n Expense.objects.create(\n date=date.today().replace(day=10),\n description=\"Test 3\",\n category=\"Baby\",\n amount=50,\n converted_amount=50,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Tristan\"\n )", "title": "" }, { "docid": "cd44b32b66a34f0bc0c18667c1cb6a9c", "score": "0.5630874", "text": "def setUp(self):\n self.temp_dir = TemporaryDirectory()\n self.data_dir = os.path.join(self.temp_dir.name, \"data\")\n os.makedirs(self.data_dir)", "title": "" }, { "docid": "85d04f64a5bee8ad22a5b46c844ce18e", "score": "0.5630581", "text": "def setup(self):\n self.meta = pysat.Meta()\n self.testInst = pysat.Instrument('pysat', 'testing', '', 'clean')", "title": "" }, { "docid": "f5d4a3b708a3638a893fa2df9bece2bb", "score": "0.5629217", "text": "def object_test(self):\n test = ImportData()\n yield test", "title": "" }, { "docid": "a3b0ee11be9b81ee7694d8d2f2df20c9", "score": "0.5628132", "text": "def test_fixture(self, testdir):\n testdir.makepyfile(\"\"\"\n from docker.errors import NotFound\n import pytest\n\n def test_container_basics(docker_helper):\n container = docker_helper.containers.create('test', '{}')\n container.start()\n container.reload()\n\n assert container.status == 'running'\n\n docker_helper.containers.remove(container, force=True)\n\n with pytest.raises(NotFound):\n container.reload()\n \"\"\".format(IMG))\n\n result = testdir.runpytest()\n result.assert_outcomes(passed=1)", "title": "" }, { "docid": 
"4b8ccfb62d043e1da8ddd6c25a47ba50", "score": "0.5610573", "text": "def test_provider_gcp_instance_meta(change_dir):\n context = tackle('.', context_file='instance_meta.yaml')\n\n assert len(context['instance_types']) > 1", "title": "" }, { "docid": "88bdb9e85cb6884f0d5ea3e3e81f5e2b", "score": "0.56096524", "text": "def TEST_DataSource():\n df = bk_data_sets.DataSetTitanic.get_combined()\n ind_test = df[df.Survived.isnull()].index\n dt, dv = train_test_split(df[~df.Survived.isnull()], test_size=.25)\n ind_train = dt.index\n ind_val = dv.index\n inds = utd.Indexes(ind_train, ind_val, ind_test)\n\n cat_sex: pd.DataFrame = pd.DataFrame(utd.CatVar(df.Sex, inds, 20).s_out); cat_sex\n cont_age: pd.DataFrame = utd.ContVar(df.Age, inds, 20, [0, 5, 12, 25, 100]).df_out; cont_age[:2]\n data_source = DataSource(inds, cat_sex, cont_age, embedding_size=4, name=\"titanic-age&sex\")\n\n data_source.get_combined_df()\n print(data_source)", "title": "" }, { "docid": "caf3c13a144d2d5c380ed7f3749faa6c", "score": "0.5607334", "text": "def test_data():\n\n pass", "title": "" }, { "docid": "8f0640cb25238d434c4b45f056e4e454", "score": "0.5595462", "text": "def setUp(self):\n\n super(TBRTest, self).setUp()\n\n # Load the salesandcost dataset.\n csv_path = 'matched_markets/csv/'\n csv_dir = os.path.join(\"\", csv_path)\n self.data = salesandcost.example_data_formatted(csv_dir)\n\n # Data frame names for the salesandcost example.\n self.key_response = 'sales'\n self.key_cost = 'cost'\n self.key_group = 'geo.group'\n self.key_period = 'period'\n self.key_geo = 'geo'\n self.key_date = 'date'\n\n # Semantics for groups and periods.\n self.groups = semantics.GroupSemantics()\n self.periods = semantics.PeriodSemantics()", "title": "" }, { "docid": "76dd771914dadbff1552dbcb2e519d69", "score": "0.55941975", "text": "def create_mock_data_source(features):\n\n class MockLayer:\n def __init__(self):\n self.srs = SpatialReference(settings.SRID)\n self.mock_features = features\n\n def __iter__(self):\n yield from self.mock_features\n\n def __len__(self):\n return len(self.mock_features)\n\n class MockDataSource:\n def __init__(self, ds_input):\n pass\n\n def __getitem__(self, index):\n return MockLayer()\n\n return MockDataSource", "title": "" }, { "docid": "d733d3466e234923cc914c3d9dd3b8e7", "score": "0.5593971", "text": "def test_initialize_metadata(self):\n\n self.test_function(self.testInst.meta, self.name)\n self.eval_defaults(self.testInst.meta[self.name])\n\n return", "title": "" }, { "docid": "770398550734f825ac2d6991a868decc", "score": "0.55812156", "text": "def get_sources(self, source_data):\n return 'kiskadee/tests/test_source/test_source.tar.gz'", "title": "" }, { "docid": "ad91495e39c6e6ed6a583118854efefb", "score": "0.5577349", "text": "def setUp(self):\n super().setUp()\n self.slo = baker.make_recipe('makeReports.sloInReport', report=self.rpt)\n self.slo2 = baker.make_recipe('makeReports.sloInReport', report=self.rpt)", "title": "" }, { "docid": "0b86a420ff7ee76ff902479e7ff3c640", "score": "0.55757195", "text": "def test_301_industrial_fixture(self):\n self.pge_fixture_test(\n \"4504960933350245_input.json\",\n \"4504960933350245_expected.json\",\n start_date=date(2017, 1, 1),\n )", "title": "" }, { "docid": "fbf7dea4130558e39f157370be476a9b", "score": "0.5574612", "text": "def setUp(self):\n super(BaseTest, self).setUp()\n # pre setup creations and checks read from config files", "title": "" }, { "docid": "edfb088461dca467b391f8ac74244e63", "score": "0.5568726", "text": "def setUp(self):\n test_data_path = 
Path('data', 'test_data.csv')\n self.test_student_data = load_data_csv(test_data_path)\n self.default_student_data = [ # model default values\n {\n 'id': 0,\n 'question': '',\n 'context': '',\n 'response': '',\n 'views': [],\n 'student_id': 0,\n 'scroll_ups': 0,\n }\n ]\n test_data_2_path = Path('data', 'test_data_2.csv')\n self.default_student_data_2 = load_data_csv(test_data_2_path)\n sample_csv_path = Path('data', 'rereading_data_2019-09-13.csv')\n self.student_data = load_data_csv(sample_csv_path)\n test_data_3_path = Path('data', 'test_data_3.csv')\n self.default_student_data_3 = load_data_csv(test_data_3_path)\n self.feel = \"In one word, how does this text make you feel?\"\n self.about = \"In three words or fewer, what is this text about?\"\n self.encountered = \"Have you encountered this text before?\"\n self.ads = \"This is an ad.\"\n self.short_story = \"This is actually a short story.\"", "title": "" }, { "docid": "23819880255833beb8ebbea5be2631e8", "score": "0.55619824", "text": "def _create_test_data(): # pylint: disable=invalid-name\n bucket_uri = self.CreateBucket()\n contents = b'x' * 10000\n tmpdir = self.CreateTempDir()\n\n local_uris = []\n for filename in ('test.html', 'test.js', 'test.txt'):\n local_uris.append(\n self.CreateTempFile(file_name=filename,\n tmpdir=tmpdir,\n contents=contents))\n\n return (bucket_uri, tmpdir, local_uris)", "title": "" } ]
886e76f5557ff2af61fd624b497e0ebd
Plot the completeness against the metric binned by bins. injgroups contains n arrays of t/F of ways you want to group against any other metric. labels should give the text to go into a legend for the plot
[ { "docid": "a304c3cec3aa3a275ef2186546d4e3fc", "score": "0.7307003", "text": "def plot1DCompletenessGroups(inj,metric,bins,injgroup,s=[0.0,1.0],xlabel='metric',labels=['a','b','c','d']):\n bins=np.array(bins)\n injpcs=passes(inj,s=s)\n #injfps=~injpcs\n \n for i in np.arange(0,len(injgroup),1):\n \n ninj,bb=np.histogram(inj[metric][injgroup[i]],bins=bins)\n npc,bb=np.histogram(inj[metric][injgroup[i] & injpcs], bins=bins)\n C=npc.astype(float)/ninj.astype(float) \n \n plt.step(bb[:-1],C,lw=2.1-i*0.1,label=labels[i],where='post')\n \n if xlabel=='metric':\n plt.xlabel(metric)\n else:\n plt.xlabel(xlabel)\n \n plt.ylabel('Completeness')\n plt.legend(loc='best')", "title": "" } ]
[ { "docid": "4bcacfe659a2f825cb821ddf845ab2a7", "score": "0.64017665", "text": "def plot1DReliabilityGroups(ops,inv,metric,bins,opsgroup,invgroup,s=[0.0,1.0],xlabel='metric',labels=['a','b','c','d']):\n bins=np.array(bins)\n opspcs=passes(ops,s=s)\n opsfps=~opspcs\n invpcs=passes(inv,s=s)\n invfps=~invpcs\n \n for i in np.arange(0,len(opsgroup),1):\n \n nopspcs,bb=np.histogram(ops[metric][opspcs & opsgroup[i]],bins=bins)\n nopsfps,bb=np.histogram(ops[metric][opsfps & opsgroup[i]],bins=bins)\n ninvpcs,bb=np.histogram(inv[metric][invpcs & invgroup[i]],bins=bins)\n ninvfps,bb=np.histogram(inv[metric][invfps & invgroup[i]],bins=bins)\n \n eff=ninvfps.astype(float)/(ninvfps+ninvpcs).astype(float)\n \n #midbins=(bins[:-1]+bins[1:])/2.0 \n \n rel=rvs.arrayReliability(nopsfps.astype(float),nopspcs.astype(float),eff)\n\n plt.step(bb[:-1],rel,'-',lw=2.1-i*0.1,ms=5,label=labels[i],where='post')\n \n\n if xlabel=='metric':\n plt.xlabel(metric)\n else:\n plt.xlabel(xlabel)\n plt.ylabel('Reliability')\n plt.legend(loc='best')", "title": "" }, { "docid": "e72c66c50c78f6905b2150ecd24bc219", "score": "0.56501263", "text": "def plot_gc_bin_bands(facecolor='#B0C4DE'):\n vars = [GC_var(i) for i in ('FastJ_lower', 'FastJ_upper')]\n alphas = [0.3, 0.1]*len(vars[0])\n [plt.axvspan(vars[0][n], vars[1][n], facecolor=facecolor,\n alpha=alphas[n]) for n in range(len(vars[0]))]", "title": "" }, { "docid": "e34b4162ec184798cafa557add044b29", "score": "0.56203747", "text": "def main(argv):\n data = pd.read_csv('fails.csv') # This file is created from script faces.py\n\n fig, ax = plt.subplots()\n\n sns.barplot(x='# subject', y='fails (percent)', data=data)\n ax.set_ylim((0, 16))\n ax.set_yticks([i for i in range(1, 17)])\n ax.set_yticklabels(['{}%'.format(i) for i in range(1, 17)])\n\n ax.set_xlabel('Subjects', fontsize=15)\n ax.set_ylabel('Detection Failures (Percent)', fontsize=15)\n\n m = data['fails (percent)']\n\n q1 = np.percentile(m, 25)\n q2 = np.percentile(m, 50)\n q3 = np.percentile(m, 75)\n\n iqr = q3 - q1\n print('Q1: {}'.format(q1))\n print('Q3: {}'.format(q3))\n print('IQR: {}'.format(iqr))\n print('lower fence: {}'.format(q1 - 1.5 * iqr))\n print('upper fence: {}'.format(q3 + 1.5 * iqr))\n\n x = [-1] + [i for i in range(len(data['# subject'].tolist())+1)]\n y1 = [q1 for _ in range(len(x))]\n y2 = [q2 for _ in range(len(x))]\n y3 = [q3 for _ in range(len(x))]\n\n fence = q3 + 1.5 * iqr\n yf = [fence for _ in range(len(x))]\n\n ax.fill_between(x, y1, y3, color='b', alpha=0.2, zorder=2,\n label='Interquartile Range (Q1: {:.2f}%, Q3: {:.2f}%)'.format(q1, q3))\n ax.plot(x, y2, 'b', zorder=2, label='Median ({:.2f}%)'.format(q2))\n ax.plot(x, yf, 'r', zorder=2, label='Upper Fence ({:.2f}%)'.format(fence))\n\n ax.legend(prop={'size':15})\n\n #lg = ax.legend([l0, l1, l2], labels=[,\n # ,\n # ],\n # loc='top right', borderaxespad=0.1)\n #lg.legendHandles[1].set_color('b')\n #lg.legendHandles[2].set_color('r')\n\n mng = plt.get_current_fig_manager()\n mng.window.state('zoomed')\n\n plt.show()", "title": "" }, { "docid": "dd0958220edda014a646ca1c5c6557b0", "score": "0.55838305", "text": "def decision_plot_mBB(df, show=False, block=False, trafoD_bins = False, bin_number = 15):\n \n nJets = df['nJ'].tolist()[1]\n if trafoD_bins == True:\n bins, arg2, arg3 = trafoD_with_error_mbb(df)\n else:\n bins = np.linspace(35,600,bin_number+1)\n\n for i in range(len(bins)):\n bins[i] = bins[i]/1000\n\n mbb_vals = df['mBB_raw'].tolist()\n for i in range(len(df)):\n if mbb_vals[i]>=500000:\n mbb_vals[i] = 499500\n\n df['mBB_raw'] 
= mbb_vals\n\n bins = np.linspace(0,500,26)\n # Initialise plot stuff\n plt.ion()\n plt.close(\"all\")\n plt.figure(figsize=(8.5,7))\n plot_range = (0, 500)\n plot_data = []\n plot_weights = []\n plot_colors = []\n plt.rc('font', weight='bold')\n plt.rc('xtick.major', size=5, pad=7)\n plt.rc('xtick', labelsize=10)\n\n plt.rcParams[\"font.weight\"] = \"bold\"\n plt.rcParams[\"axes.labelweight\"] = \"bold\"\n plt.rcParams[\"mathtext.default\"] = \"regular\"\n\n decision_value_list = (df['mBB_raw']/1000).tolist()\n post_fit_weight_list = df['post_fit_weight'].tolist()\n sample_list = df['sample'].tolist()\n\n # Get list of hists.\n for t in class_names_grouped[::-1]:\n class_names = class_names_map[t]\n class_decision_vals = []\n plot_weight_vals = []\n for c in class_names:\n for x in xrange(0,len(decision_value_list)):\n if sample_list[x] == c:\n class_decision_vals.append(decision_value_list[x])\n plot_weight_vals.append(post_fit_weight_list[x])\n \n plot_data.append(class_decision_vals)\n plot_weights.append(plot_weight_vals)\n plot_colors.append(colour_map[t])\n\n # Plot.\n plt.hist(plot_data,\n bins=bins,\n weights=plot_weights,\n range=plot_range,\n rwidth=1,\n color=plot_colors,\n label=legend_names[::-1],\n stacked=True,\n edgecolor='None')\n x1, x2, y1, y2 = plt.axis()\n plt.axis((x1, x2, y1, y2 * 1.2))\n axes = plt.gca()\n axes.set_ylim([2,320])\n axes.set_xlim(plot_range)\n\n df_sig = df.loc[df['Class']==1]\n plt.hist(df_sig['mBB_raw']/1000,\n bins=bins,\n weights=(df_sig['post_fit_weight']*5).tolist(),\n range=plot_range,\n rwidth=1,\n histtype = 'step',\n linewidth=2,\n color='#FF0000',\n label=r'VH $\\rightarrow$ Vbb x 5',\n edgecolor='#FF0000')\n \n axes.yaxis.set_ticks_position('both')\n#axes.yaxis.set_tick_params(which='minor', direction='in',length = 4, width = 2)\n#axes.yaxis.set_tick_params(which='major', direction='in',length = 8, width = 2)\n axes.yaxis.set_tick_params(which='both', direction='in')\n axes.xaxis.set_ticks_position('both')\n#axes.xaxis.set_tick_params(which='minor', direction='in',length = 4, width = 2)\n#axes.xaxis.set_tick_params(which='major', direction='in',length = 8, width = 2)\n axes.xaxis.set_tick_params(which='both',direction = 'in')\n axes.xaxis.set_minor_locator(AutoMinorLocator(4))\n axes.yaxis.set_minor_locator(AutoMinorLocator(4))\n\n handles, labels = axes.get_legend_handles_labels()\n #Weird hack thing to get legend entries in correct order\n handles = handles[::-1]\n handles = handles+handles\n handles = handles[1:12]\n plt.legend(loc='upper right', ncol=1, prop={'size': 12},frameon=False,\n handles=handles)\n \n plt.ylabel(\"Events\",fontsize = 16,fontweight='normal')\n axes.yaxis.set_label_coords(-0.07,0.93)\n plt.xlabel(\"$m_{bb}$ [GeV]\",fontsize = 16,fontweight='normal')\n axes.xaxis.set_label_coords(0.89, -0.07)\n\n axes.xaxis.set_label_coords(0.89, -0.07)\n an1 = axes.annotate(\"ATLAS\", xy=(0.05, 0.91), xycoords=axes.transAxes,fontstyle = 'italic',fontsize = 16)\n \n offset_from = OffsetFrom(an1, (0, -1.4))\n an2 = axes.annotate(r'$\\sqrt{s}$' + \" = 13 TeV , 36.1 fb$^{-1}$\", xy=(0.05,0.91), xycoords=axes.transAxes, textcoords=offset_from, fontweight='normal',fontsize = 12)\n \n offset_from = OffsetFrom(an2, (0, -1.4))\n an3 = axes.annotate(\"1 lepton, \"+str(nJets)+\" jets, 2 b-tags\", xy=(0.05,0.91), xycoords=axes.transAxes, textcoords=offset_from,fontstyle = 'italic',fontsize = 12)\n \n offset_from = OffsetFrom(an3, (0, -1.6))\n an4 = axes.annotate(\"p$^V_T \\geq$ 150 GeV\", xy=(0.05,0.91), xycoords=axes.transAxes, 
textcoords=offset_from,fontstyle = 'italic',fontsize = 12)\n\n\n plt.show(block=block)", "title": "" }, { "docid": "35c7402c429f2278ba80ef4221de53fd", "score": "0.55827105", "text": "def plot_img_completeness():\n dir = '/u/jlu/work/gc/dp_msc/2010_11_08/completeness/'\n\n fields = ['C', 'C_NE', 'C_NW', 'C_SE', 'C_SW', 'E', 'N', 'S', 'W',\n 'NE', 'NW', 'SE', 'SW']\n\n colors = ['black', 'brown', 'salmon', 'red', 'orange', \n 'gold', 'greenyellow', 'green', 'cyan', \n 'blue', 'navy', 'purple', 'magenta']\n\n py.clf()\n\n for ff in range(len(fields)):\n field = fields[ff] \n\n compFile = dir + field + '/kp/align_in_out/completeness.dat'\n\n tab = asciidata.open(compFile)\n\n mag = tab[0].tonumpy()\n comp = tab[1].tonumpy()\n\n py.plot(mag, comp, linestyle='-', label=field, color=colors[ff])\n\n py.xlabel('Kp Magnitude')\n py.ylabel('Completeness')\n py.ylim(0, 1.1)\n py.legend(loc='lower left', ncol=2, prop={'size':14})\n py.savefig(dir + 'plots/completeness_by_field.png')\n print 'Saving %s' % (dir + 'plots/completeness_by_field.png')", "title": "" }, { "docid": "95355045c07bccca522f3d2344f7d6d3", "score": "0.55029804", "text": "def _plot_one_fingerprint(self, data, title, ax, xticklabels, xticks=True):\n import seaborn as sns\n\n data = data[data['best_group']]\n data = data.sort_index(level=0)\n\n pal = sns.color_palette(\"Reds\", int(data['n_significant'].max())+1) #'Reds'\n xs = np.linspace(0,1,int(data['n_significant'].max())+1)\n pal = {n:c for n,c in zip(xs, pal)}\n lut = data['n_significant_ratio'].apply(lambda x: xs[np.argmin(np.abs(xs-x))])\n lut = lut.map(pal)\n\n norm = plt.Normalize(0, 1)\n sm = plt.cm.ScalarMappable(cmap=\"Reds\", norm=norm)\n sm.set_array([])\n\n g= sns.barplot(x=data.index.get_level_values(0), y=data['effect_size_50th'], ax=ax,\n yerr=data['effect_size_50th_ci'], palette=lut, dodge=False)\n if xticks:\n g.set(xticklabels=xticklabels)\n else:\n g.set(xticklabels=[])\n ax.set_title(title)\n\n labels = ax.get_xticklabels()\n ax.set_xticklabels(labels, rotation=90)\n ax.tick_params(axis='both', which='major', labelsize=6)\n ax.tick_params(axis='both', which='minor', labelsize=6)\n\n return g, sm", "title": "" }, { "docid": "10dfdafa27fc4be0ed3781b1b36b7f9a", "score": "0.54989314", "text": "def plot_NB_Full_to_Full():", "title": "" }, { "docid": "80647deae63c82bc5b1aa8f626fd4ac4", "score": "0.5477475", "text": "def plotNumberAcc( percentHebb, percentDeca, percentOjas, title ):\n width = 1\n bins = np.array( range(11) )\n\n f, ax = plt.subplots( 1, 1, figsize=(15, 5) )\n ax.bar( bins[:-1] * 4, percentHebb, width=width )\n ax.bar( bins[:-1] * 4 + width, percentDeca, width=width )\n ax.bar( bins[:-1] * 4 + width * 2, percentOjas, width=width )\n ax.set_ylim( [0, 115] )\n\n # axis labels\n ax.set_ylabel( \"Accuracy in %\" )\n ax.set_xlabel( \"image labels\" )\n\n # x ticks\n ax.set_xticks( bins[:-1] * 4 + width )\n ax.set_xticklabels( bins[:-1] )\n\n # numbers above bars\n offsetx = -0.2\n offsety = 1.5\n for i, v in enumerate( percentHebb ):\n plt.text( bins[i] * 4 + offsetx, v + offsety, f\"{v:.2f}%\", rotation=90, fontsize=9 )\n for i, v in enumerate( percentDeca ):\n plt.text( bins[i] * 4 + width + offsetx, v + offsety, f\"{v:.2f}%\", rotation=90, fontsize=9 )\n for i, v in enumerate( percentOjas ):\n plt.text( bins[i] * 4 + width * 2 + offsetx, v + offsety, f\"{v:.2f}%\", rotation=90, fontsize=9 )\n\n plt.legend( [\"Hebbian\", \"Decay\", \"Oja\"] )\n plt.title( title )\n plt.show()", "title": "" }, { "docid": "98e01dff796bef9ff5cb7f83d0ac1a42", "score": 
"0.5475165", "text": "def autolabel(rects, thresh):\n for rect in rects:\n height = rect.get_height()\n if height > thresh:\n color = \"green\"\n else:\n color = \"black\"\n if height != 0:\n ax.text(\n rect.get_x() + rect.get_width() / 2.,\n 1. * height,\n \"%d\" % int(height),\n ha=\"center\",\n va=\"bottom\",\n color=color,\n )", "title": "" }, { "docid": "2140aad168144c4c105d634511614385", "score": "0.54681444", "text": "def __call__(self, labels):\n labels = labels[0]\n labs = np.zeros((labels.shape[0],len(self.bins)))\n prebe = 0\n for i,be in enumerate(self.bins):\n labs[:,[i]] += np.sum(labels[:,prebe:be],axis=1,keepdims=True)\n prebe = be\n with np.errstate(invalid='ignore'):\n dnm = np.nansum(labs,axis=1,keepdims=True)\n labels = labs / dnm\n\n mlab = np.max(labels,axis=1,keepdims=True)\n OHlabels = (labels==mlab).astype(theano.config.floatX)\n OHlabels[np.any(np.isnan(labels), axis=1), :] = np.nan\n\n morethanone = np.sum(OHlabels,axis=1,keepdims=True) > 1\n maxtoolow = mlab < 0#.5\n nanlabind = (morethanone|maxtoolow).flatten()\n OHlabels[nanlabind,:] = np.nan\n return OHlabels.astype(theano.config.floatX)", "title": "" }, { "docid": "1f70797b73de4b1d70d09c93e1b77da9", "score": "0.5452051", "text": "def find_labels(NP, bins=5):\n\n ticker = matplotlib.ticker.MaxNLocator(bins)\n return ticker.tick_values(0, NP)", "title": "" }, { "docid": "33e5ef35054d55dc722d6975a05e6e92", "score": "0.54491186", "text": "def plot_groups(label_groups, args):\n for label, group in label_groups.items():\n plt.plot(group['input'], group[args.metric], label=label, marker='.')\n if args.logx:\n plt.xscale('log')\n if args.logy:\n plt.yscale('log')\n plt.xlabel(args.xlabel)\n plt.ylabel(args.ylabel)\n plt.title(args.title)\n plt.legend()\n plt.show()", "title": "" }, { "docid": "e600fe8b5ff4e39b684ac428777efcd9", "score": "0.5447919", "text": "def credible_1d(self, idx: int, credible_level=(0.6827, 0.9545), nbins=80, ax=None,\n give_max=False, label='', smooth=False, countour=True, give_edge=False):\n if ax is not None:\n fig = None\n else:\n fig, ax = subplots()\n minx = np.amin(self.ftxt[:, idx+2])\n maxx = np.amax(self.ftxt[:, idx+2])\n binw = (maxx - minx) / nbins\n binx = np.linspace(minx + binw/2, maxx - binw/2, nbins)\n biny = np.zeros_like(binx)\n for i in range(self.ftxt.shape[0]):\n pos = int((self.ftxt[i, idx+2] - minx) / binw)\n if pos < nbins:\n biny[pos] += self.ftxt[i, 0]\n else:\n biny[pos-1] += self.ftxt[i, 0]\n cl = np.sort(credible_level)[::-1]\n if smooth:\n countour = False\n by = signal.savgol_filter(biny, 21, 2)\n ax.plot(binx, by/binw, label=label)\n else:\n ax.bar(binx, biny, label=label, width=binw, alpha=0.5)\n if give_max:\n print(binx[np.argmax(biny)])\n sorted_idx = np.argsort(biny)[::-1]\n if countour:\n al = np.linspace(0.2, 0.3, len(cl))\n for ic in range(len(cl)):\n s = 0\n cxl = []\n cyl = []\n for i in sorted_idx:\n s += biny[i]\n cyl.append(biny[i])\n cxl.append(binx[i])\n if s > cl[ic]:\n break\n ax.bar(cxl, cyl, width=binw, alpha=al[ic], color='b')\n if give_edge:\n print(cl[ic], '-->', np.sort(cxl))\n xleft, xright = ax.get_xlim()\n ybottom, ytop = ax.get_ylim()\n ax.set_aspect(abs((xright-xleft)/(ybottom-ytop)))\n return fig, ax", "title": "" }, { "docid": "cd44dff3b7567aa7293ccdecd76d81b1", "score": "0.5426044", "text": "def PMFByBins(cleanData):\n\tcleanData = sort(cleanData, 'editsB')\n\tcolors = [\"#3366FF\",\"#FF6633\",\"#CCFF33\",\"#66FF33\",\"#33FFCC\",\"#33CCFF\",\"#003DF5\",\"#002EB8\",\"#F5B800\",\"#B88A00\"]\n\teditsB = [d['editsB'] for d in 
cleanData]\n#\tfor key in cleanData[1].keys():\n#\t\tlocals()[key] = [d[key] for d in cleanData]\n\tnumBins = 5\n\tmoreThanBins = {}\n\tpylab.clf()\n\tfor i in range (1,numBins+1):\n\t\tlow = len(editsB)*(i-1)/numBins\n\t\thigh = len(editsB)*(i)/numBins-1\n\t\tdataName = \"bin%iof%i\" % (i, numBins)\n\t\tlocals()[dataName] = cleanData[low:high]\n\t\tdeltaNorm = [d['deltaNorm'] for d in locals()[dataName] ]\n\t\tPMF = Pmf.MakePmfFromList(deltaNorm)\n\t\tpylab.plot(PMF.Render()[0][2:-2], PMF.Render()[1][2:-2], '*', color=colors[i])\n\t\tpylab.axvline(scipy.mean(deltaNorm), linewidth=3, label=r'$ \\mu\\ for\\ %s $' %dataName, color=colors[i])\n\tpylab.title(r'binned PMFs for $\\Delta_{edits}$', fontsize=24)\n\tpylab.xlabel(r'$\\Delta_{edits}$', fontsize=16)\n\tpylab.ylabel(r'probability of $\\Delta_{edits}$', fontsize=16)\n\tpylab.legend()\n\tpylab.savefig('PMFByBins.png')", "title": "" }, { "docid": "72437e3e7cf65f634a032e0cd9e33119", "score": "0.53761506", "text": "def grid_bh_count(saveLocation='', grid=[], num_bins=0):\n plt.close('all')\n if grid == []:\n grid = _get_grid()\n plt.figure(1)\n plt.figure(2)\n plt.figure(3)\n legend = []\n for sim_type in grid:\n legend.append(sim_type)\n sim_type_list = subprocess.Popen('ls *short{0}*'.format(sim_type), shell=True, stdout=subprocess.PIPE)\n stdout, stderr = sim_type_list.communicate()\n stdout = stdout.split()\n times = []\n sBH = []\n bBH = []\n totBH = []\n for sim in stdout:\n data = ps.blackHoleOverTime(saveLocation=saveLocation, inf=sim.split('short')[-1], to_return=True)\n times = times + data[0]\n sBH = sBH + data[1]\n bBH = bBH + data[2]\n totBH = totBH + data[5]\n if num_bins == 0:\n avg_sim_len = float(len(times)) / len(stdout)\n num_bins = int(avg_sim_len / 5)\n time_bins = []\n sBH_bins = []\n bBH_bins = []\n totBH_bins = []\n for i in range(num_bins):\n time_bins.append([])\n sBH_bins.append([])\n bBH_bins.append([])\n totBH_bins.append([])\n bin_min = min(times)\n bin_cutoffs = [0] * num_bins\n dtime = float((max(times)) - min(times)) / num_bins\n for i in range(len(bin_cutoffs)):\n bin_cutoffs[i] = bin_min + ((i+1) * dtime)\n for i, time in enumerate(times):\n for j, cutoff in enumerate(bin_cutoffs):\n if time < cutoff:\n time_bins[j].append(times[i])\n sBH_bins[j].append(sBH[i])\n bBH_bins[j].append(bBH[i])\n totBH_bins[j].append(totBH[i])\n break\n for i in range(len(time_bins)):\n time_bins[i] = bin_cutoffs[i] - (float(dtime) / 2)\n sBH_bins[i] = float(sum(sBH_bins[i])) / len(sBH_bins[i])\n bBH_bins[i] = float(sum(bBH_bins[i])) / len(bBH_bins[i])\n totBH_bins[i] = float(sum(totBH_bins[i])) / len(totBH_bins[i])\n plt.figure(1)\n plt.plot(time_bins, sBH_bins)\n plt.xscale('log')\n plt.figure(2)\n plt.plot(time_bins, bBH_bins)\n plt.xscale('log')\n plt.figure(3)\n plt.plot(time_bins, totBH_bins)\n plt.xscale('log')\n plt.figure(1)\n plt.legend(legend, loc=0)\n plt.title('Grid Single Black Hole Count')\n plt.ylabel('N')\n plt.xlabel('Physical Time (Myr)')\n plt.savefig((saveLocation + 'gridSingleBH.png'))\n plt.figure(2)\n plt.legend(legend, loc=0)\n plt.title('Grid Binary Black Hole Count')\n plt.ylabel('N')\n plt.xlabel('Physical Time (Myr)')\n plt.savefig((saveLocation + 'gridBinaryBH.png'))\n plt.figure(3)\n plt.legend(legend, loc=0)\n plt.title('Grid Total Black Hole Count')\n plt.ylabel('N')\n plt.xlabel('Physical Time (Myr)')\n plt.savefig((saveLocation + 'gridTotalBH.png'))\n plt.close('all')", "title": "" }, { "docid": "f51fe0d13618f5c2f399299e4b059fb6", "score": "0.5375529", "text": "def __call__(self, labels):\n 
labels = labels[0]\n labs = np.zeros((labels.shape[0],len(self.bins)))\n prebe = 0\n for i,be in enumerate(self.bins):\n labs[:,[i]] += np.sum(labels[:,prebe:be],axis=1,keepdims=True)\n prebe = be\n with np.errstate(invalid='ignore'):\n labels = labs / np.nansum(labs,axis=1,keepdims=True)\n\n mlab = np.max(labels,axis=1,keepdims=True)\n OHlabels = (labels==mlab).astype(theano.config.floatX)\n\n morethanone = np.sum(OHlabels,axis=1,keepdims=True) > 1\n maxtoolow = mlab < 0#.5\n nanlabind = (morethanone|maxtoolow).flatten()\n OHlabels[nanlabind,:] = np.nan\n\n labels[nanlabind,:] = np.nan\n return labels.astype(theano.config.floatX)", "title": "" }, { "docid": "533d65b85823868bd456bf8fed24d675", "score": "0.53600067", "text": "def binplot(ax, app_effs, colordict=None):\n bins = np.arange(0, 1.1, 0.1, dtype=np.float)\n bins[0] = np.finfo(float).eps\n bins = np.append(np.zeros(1), bins)\n bar_data = {}\n for name in app_effs.columns[1:]:\n data = app_effs[name]\n bar_data[name] = histogram(bins, data)\n bar_data[name] = bar_data[name] / bar_data[name].sum() * 100.0\n\n bin_offsets = 2 * np.array(range(len(bins) - 1))\n\n handles = []\n width = float(1.0) / len(bar_data.items())\n for i, (name, data) in enumerate(list(bar_data.items())):\n pbins = float(i) * width + width / 2.0 + bin_offsets\n if colordict:\n res = ax.bar(pbins,\n height=data,\n width=width,\n color=colordict[name])\n else:\n res = ax.bar(pbins, height=data, width=width)\n handles.append(res.patches[0])\n handles[-1].set_label(name)\n plt.ylabel('Frequency in %')\n plt.xlabel('Efficiency')\n plt.grid(axis='y')\n\n ax.set_ylim([0, 100.0])\n\n ax.set_xticks(bin_offsets + 0.5)\n # Rename the first bin\n locs, labels = plt.xticks()\n labels[0] = \"Did not run\"\n for i, _ in enumerate(labels):\n if i == 0:\n continue\n labels[i] = f\"({round(bins[i],3)}, {round(bins[i+1],3)}]\"\n plt.xticks(locs, labels)\n\n labels = ax.get_xticklabels()\n ax.set_xticklabels(labels, rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n return handles", "title": "" }, { "docid": "8963a8a50dbcbec44323ac158ef1e7a2", "score": "0.53587437", "text": "def lv_stat_barplot(data, splitby='key_labels', colorby='louvain_groups', hatch=True, ylab=\"% in each group\", \n xlab=\"Clusters\", figsize=(10, 3), startangle = 90, ncols=3, stacked=True, plotMethod='Barplot', \n orientation = 'vertical', fontsize=11, bbox_to_anchor=(1.2, 0.34), color=None, save=None):\n \n if splitby in data.obs_keys():\n g1D = data.obs[splitby]\n elif splitby in data.var_names:\n g1D = data[:, splitby].X\n g1D = ['Positive' if x > 0 else 'Negative' for x in g1D]\n g1D = pd.Series(g1D, dtype = 'category')\n g1D = g1D.cat.reorder_categories(['Negative', 'Positive'])\n g1D = g1D.values\n else:\n raise ValueError('\"' + splitby + '\" is invalid!'\n + ' specify valid sample annotation, one of '\n + str(data.obs_keys()) + ' or a gene name '\n + str(data.var_names))\n \n if colorby in data.obs_keys():\n g2D = data.obs[colorby]\n elif colorby in data.var_names:\n g2D = data[:, colorby].X\n g2D = ['Positive' if x > 0 else 'Negative' for x in g2D]\n g2D = pd.Series(g2D, dtype = 'category')\n g2D = g2D.cat.reorder_categories(['Negative', 'Positive'])\n g2D = g2D.values\n else:\n raise ValueError('\"' + colorby + '\" is invalid!'\n + ' specify valid sample annotation, one of '\n + str(data.obs_keys()) + ' or a gene name '\n + str(data.var_names))\n \n df = pd.crosstab(g1D, g2D)\n df_new = df.div(df.sum(axis=1),axis=0)\n \n #print(df_new)\n if plotMethod=='Barplot':\n fig, ax = 
plt.subplots(figsize= figsize)\n if color is None:\n color = [[vega_20_scanpy[x]]*df_new.shape[0] for x in range(df_new.shape[1])]\n color = np.stack(color)\n #print(color)\n if orientation == 'horizontal':\n df_new = -df_new\n df_new.plot.barh(stacked=stacked, color=color, edgecolor=\"black\", ax=ax)\n plt.xticks(np.arange(0, 101, 20)/-100)\n ax.set_ylabel(xlab)\n ax.set_xlabel(ylab)\n ax.set_xticklabels(np.arange(100, -1, -20),rotation=0)\n ax.grid()\n else:\n df_new = -df_new\n df_new.plot.bar(stacked=stacked, color=color, edgecolor=\"black\", ax=ax)\n plt.yticks(np.arange(0, 101, 20)/-100, np.arange(100, -1, -20))\n ax.set_xlabel(xlab)\n ax.set_ylabel(ylab)\n ax.set_xticklabels(df_new.index,rotation=0)\n if len(data.obs[splitby].cat.categories) >= 5:\n plt.xticks(rotation=90)\n ax.grid()\n ax2 = ax.twiny()\n ax2.set_xlim(ax.get_xlim())\n plt.xticks(range(df_new.shape[0]),df.sum(axis=1),rotation=90)\n ax2.grid(False)\n if hatch is True:\n hatch1 = [[hatches[x]]*df_new.shape[0] for x in range(df_new.shape[1])]\n hatch1 = np.hstack(hatch1)\n for i, thisbar in enumerate(ax.patches):\n thisbar.set_hatch(hatch1[i])\n ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n elif plotMethod=='Stackedarea':\n hatch1 = hatches[0:df_new.shape[1]]\n if color is None:\n color = color_20[0:df_new.shape[1]]\n ax = df_new.plot.area(color=color)\n ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n ax.set_xlabel(xlab)\n ax.set_ylabel(ylab)\n ax.grid()\n elif plotMethod=='Panel_Column_Chart':\n sns.set_style(\"whitegrid\")\n ax = df_new.plot.bar(subplots=True, sharey=True, \n figsize=(6, 5), legend=False, \n grid=False, edgecolor='none', \n fontsize=12) \n plt.text(-1, 2, ylab, fontsize=12, rotation=0) #df_new.shape[1]/2\n sns.despine(left=True)\n for ax1 in ax: # set the names beside the axes\n #ax1.lines[0].set_visible(False) # remove ugly dashed line\n ax1.set_title('')\n sername = ax1.get_legend_handles_labels()[1][0]\n ax1.text(7, 0.5, sername, fontsize=12)\n #plt.suptitle(\"My panel chart\", fontsize=18)\n elif plotMethod=='PieChart':\n import math\n nrow = math.ceil(len(df.index)/ncols)\n fig, axes = plt.subplots(nrow,ncols, figsize=figsize)\n for i in range(len(df.index)):\n if nrow==1:\n ax = axes[i % ncols]\n else:\n ax = axes[i // ncols, i % ncols]\n patches, texts, autotexts = ax.pie(df.iloc[i,:] ,startangle = startangle, counterclock=False, colors=color, autopct = lambda pct: func(pct, df.iloc[i,:])) \n #[ _.set_fontsize(3) for _ in texts]\n ax.set_title(df.index[i], fontsize=fontsize) #, loc='left'\n plt.setp(autotexts, size=8, weight=\"bold\")\n plt.figlegend(patches, df.columns, bbox_to_anchor=bbox_to_anchor, loc='right', fontsize=fontsize)\n fig.subplots_adjust(top=0.8,right=0.8) \n \n plt.tight_layout()\n if save is not None:\n plt.savefig('./figures/Barplot_'+save, bbox_inches='tight')\n return df", "title": "" }, { "docid": "5df975439b6271b3443aa9d8afd70a94", "score": "0.53496623", "text": "def plot_classifier_decission_boundary(shots_0, shots_1, shots_2,\n classifier,\n xlabel: str, xunit: str,\n ylabel: str, yunit: str,\n title: str, ax, **kw):\n grid_points = 200\n\n x_min = np.nanmin([shots_0[0], shots_1[0], shots_2[0]])\n x_max = np.nanmax([shots_0[0], shots_1[0], shots_2[0]])\n y_min = np.nanmin([shots_0[1], shots_1[1], shots_2[1]])\n y_max = np.nanmax([shots_0[1], shots_1[1], shots_2[1]])\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, grid_points),\n np.linspace(y_min, y_max, grid_points))\n Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = 
Z.reshape(xx.shape)\n\n plot_cal_points_hexbin(shots_0=shots_0,\n shots_1=shots_1,\n shots_2=shots_2,\n xlabel=xlabel, xunit=xunit,\n ylabel=ylabel, yunit=yunit,\n title=title, ax=ax)\n ax.pcolormesh(xx, yy, Z,\n cmap=c.ListedColormap(['C0', 'C3', 'C2']),\n alpha=.2)", "title": "" }, { "docid": "365a8fea080190371c13f0df44bc24d1", "score": "0.5344525", "text": "def plot_order_histograms(ax, df, prime=False, bins=300, xlim=[-20,20], ylim=[0,1], x_label=None,\n y_label=None, norm_factor=1.0, hist_vline=None):\n\n # Add vertical line to histogram, useful for showing dividers\n if hist_vline != None:\n for xval in hist_vline:\n ax.axvline(xval, color='red', alpha=0.25, linewidth=5)\n\n total_size = sum(df[\"Order\"] == 0)\n\n if prime:\n gdf_size = df[df[\"Prime\"]==True]\n else:\n gdf_size = df[df[\"Prime\"]==False]\n\n # This is the old way of computing occupancy number, which doesn't check the blue ion\n #size_prime_gdf = gdf_size.groupby([\"TrajNum\",\"Time\"]).size().reset_index(name=\"Occupancy\")\n #occ_percent = size_prime_gdf.groupby([\"Occupancy\"]).size() / total_size\n # This way takes that into account (here, and noted below)\n\n occ_percent = gdf_size[gdf_size[\"Order\"] == 0].groupby([\"OccMacrostate\"]).size() / total_size\n print(occ_percent)\n #occ_cut = (occ_percent[occ_percent > 0.04]).to_dict()\n occ_cut = (occ_percent[occ_percent > 0.01]).to_dict()\n\n ordcolors=[\"red\",\"green\",\"blue\",\"purple\",\"orange\",\"pink\",\"aqua\",\"maroon\"]\n for axind, (occid, occval) in enumerate(occ_cut.iteritems()):\n\n target_ax = ax[axind]\n target_ax.text(xlim[1]-9.5, ylim[1]-0.065,\n \"Occ. \"+str(occid)+\": \"+str(np.around(100*occval,1))+\"%\",\n fontsize=16)\n\n #for orderedatom, data in gdf_size[gdf_size[\"Occupancy\"]==occid].groupby(\"Order\"):\n for orderedatom, data in gdf_size[gdf_size[\"OccMacrostate\"]==occid].groupby(\"Order\"):\n histogram, edges = np.histogram(data[\"Z\"], bins=bins, range=[-20,20], normed=True)\n proxy_fill_between(edges[1:], 0, histogram,\n ax=target_ax,\n linewidth=0.5, facecolor=ordcolors[orderedatom], alpha=0.75)\n #histogram, edges = np.histogram(data[\"Z\"], bins=bins, range=[-20,20], normed=False)\n #proxy_fill_between(edges[1:], 0, histogram*norm_factor,\n # ax=target_ax,\n # linewidth=0.5, facecolor=ordcolors[orderedatom], alpha=0.75)\n\n _plot_labelling(target_ax, xlim, ylim, x_label, y_label,\n tickx=[1.0,5.0], ticky=[0.05,0.1])\n\n return ax", "title": "" }, { "docid": "f8f268e79fe5bcb970e081c37d16f7e3", "score": "0.5337026", "text": "def binarize_labels(raw_labels,label_lookups):\n\n labels = []\n # Figure out which group it falls into\n for label in raw_labels:\n if not label == 'normal':\n label = label_lookups[label]\n label_number = label_numbers[label]\n # Make all non-normal packets anomalies\n # Just for now :)\n if label_number != 0: label_number = 1\n labels.append(np.int(label_number))\n if label_number != 1 and label_number != 0:\n print(\"DANG\")\n return np.array(labels)", "title": "" }, { "docid": "6bb107172582bf5e2ac79421db09a97b", "score": "0.5335765", "text": "def CDFByBins(cleanData):\n\tcleanData = sort(cleanData, 'editsB')\n\tcolors = [\"#3366FF\",\"#6633FF\",\"#CC33FF\",\"#FF33CC\",\"#FF3366\",\"#FF6633\",\"#FFCC33\",\"#CCFF33\",\"#66FF33\",\"#33FF66\",\"#33FFCC\",\"#33CCFF\",\"#003DF5\",\"#002EB8\",\"#F5B800\",\"#B88A00\"]\n\teditsB = [d['editsB'] for d in cleanData]\n#\tfor key in cleanData[1].keys():\n#\t\tlocals()[key] = [d[key] for d in cleanData]\n\tnumBins = 5\n\tmoreThanBins = {}\n\tpylab.clf()\n\tfor i in 
range (1,numBins+1):\n\t\tlow = len(editsB)*(i-1)/numBins\n\t\thigh = len(editsB)*(i)/numBins-1\n\t\tdataName = \"bin%iof%i\" % (i, numBins)\n\t\tlocals()[dataName] = cleanData[low:high]\n\t\tCDF = Cdf.MakeCdfFromList([d['deltaNorm'] for d in locals()[dataName]])\n\t\tpylab.plot(CDF.xs, CDF.ps, label=r'$ %i\\ <\\ edits_{beforePosting}\\ <\\ %i $' % (low,high))\n\tpylab.title('CDFs for %i bins' % numBins, fontsize=24)\n\tpylab.xlabel(r'normalized $\\Delta_{edits}$', fontsize=16)\n\tpylab.ylabel('percentile rank', fontsize=16)\n\tpylab.legend(loc=4)\n\tpylab.savefig('CDFByBins.png')", "title": "" }, { "docid": "ff45f3b8200a5f93088ea9d3bd5809e6", "score": "0.53200364", "text": "def createBinPlot(image_analysis: dict, measurement_name: str, color_palette: str, x_label: str,\n num_of_bins: int = 10) -> np.ndarray:\n df_depth = pd.DataFrame({\n measurement_name: image_analysis[measurement_name],\n }\n )\n df_depth['intervals'] = pd.cut(x=df_depth[measurement_name], bins=num_of_bins, right=False)\n\n df_depth = df_depth.sort_values('intervals')\n\n ax_dims = (15, 15)\n fig, ax = pyplot.subplots(figsize=ax_dims)\n pal = sns.color_palette(color_palette)\n return create_count_plot_and_array(ax, df_depth, \"intervals\", pal, x_label)", "title": "" }, { "docid": "a947a0199a1b40ee50848c23e2ad9a44", "score": "0.5311625", "text": "def plot_hist_kde(y, xlabel, title='', groups=('all',),\n bools=None, clrs=('g',)):\n\n if bools is None:\n bools = ([True]*len(y),)\n elif isinstance(bools, dict):\n dict_of_bools = bools.copy()\n bools = []\n for g in groups:\n bools.append(dict_of_bools[g])\n\n\n # set up figure\n fig, ax1 = plt.subplots(figsize=(5.5, 4), dpi=150)\n # ax1 = target_ax\n # fig = ax1.get_figure()\n\n # plot histogram and make axes for density\n _, _, patches = ax1.hist(y, bins=20)\n\n ax2 = ax1.twinx()\n xlims = ax1.get_xlim()\n x = np.linspace(xlims[0], xlims[1], 500)\n\n # plot outlier thresholds and gaussian kdes per group\n outl_ids = []\n outl_patch_cnt = np.zeros(len(patches))\n\n for g, tf, clr in zip(groups, bools, clrs):\n\n # plot vertical lines at outlier thresholds\n outl_bool, zs_thr, sd_rob = detect_outliers(y[tf], report_thresh=False)\n\n vline_kwargs = {'color': clr, 'ls': \":\", 'alpha': 0.67}\n uthresh = y[tf].mean() + zs_thr*sd_rob\n lthresh = y[tf].mean() - zs_thr*sd_rob\n ax1.axvline(uthresh, **vline_kwargs)\n ax1.axvline(lthresh, **vline_kwargs)\n\n # Report outlier subjids\n if np.any(outl_bool) and isinstance(outl_bool, pd.Series):\n new_outl_ids = y[tf].index[outl_bool].tolist()\n print(\"Outlier subjids found beyond z={:4.2f} (grp={}): {}\".format(zs_thr, g, new_outl_ids))\n outl_ids.extend(new_outl_ids)\n\n # Change colour of outlier patches\n # a bit tricky because patches may straddle the outlier line\n # and outliers could be on either side\n\n # find the bin of the outl_subjid and change the colour of that patch\n w0 = patches[0].get_x()\n w = patches[0].get_width()\n for oid in new_outl_ids:\n oval = y[oid]\n obin = int(np.floor((oval - w0)/w))\n\n if obin >= len(patches):\n obin = len(patches) - 1\n elif obin < 0:\n obin = 0\n\n opatch = patches[obin]\n outl_patch_cnt[obin] += 1\n\n # be careful not to avoid outlier patches already drawn\n newy = opatch.get_height() - outl_patch_cnt[obin]\n newpatch = plt.Rectangle((opatch.get_x(), newy), w, 1., ec='None')\n\n # make a maroon patch if outside lines, purple if on the line\n if (opatch.get_x() > uthresh) or (opatch.get_x() + opatch.get_width() < lthresh):\n newpatch.set_facecolor('maroon')\n elif (opatch.get_x() + 
opatch.get_width() > uthresh) or (opatch.get_x() < lthresh):\n newpatch.set_facecolor('purple')\n\n ax1.add_patch(newpatch)\n\n\n # plot smooth kde to view limits\n kde = stats.gaussian_kde(y[tf])\n ax2.plot(x, kde(x), color=clr, ls='-', alpha=0.5)\n ax2.set_xlim(xlims)\n\n # labels, make histogram on top, show, etc\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(\"Count\")\n ax2.set_ylabel(\"Density\")\n ax2.set_ylim(bottom=0.)\n ax1.set_title(title)\n # ax1.set_zorder(ax2.get_zorder()+1)\n # ax1.patch.set_visible(False)\n\n fig.tight_layout()\n fig.canvas.draw()\n\n return fig, outl_ids", "title": "" }, { "docid": "12f69be4bc37df9bc75b1c33716ec449", "score": "0.53036314", "text": "def plot_label_counts(df, name, filename, fun, title,sort=False, cut_ones=False):\n counter = fun(df, name)\n labels, values = zip(*counter.items())\n indices = np.arange(len(labels))\n\n plt.title('Label counts for {}'.format(title))\n plt.xlabel('Label')\n plt.ylabel('Number of labels')\n\n if sort:\n sorted_values_idx = np.argsort(values)[::-1]\n labels = np.asarray(labels)[sorted_values_idx]\n values = np.asarray(values)[sorted_values_idx]\n\n if cut_ones:\n one_idx = np.argwhere(values==1)[0][0]\n labels = labels[0:one_idx+2]\n values = values[0:one_idx+2]\n labels[one_idx+1] = '...'\n indices = np.arange(len(labels))\n\n plt.yticks(range(0,max(values),2))\n plt.bar(indices, values)\n plt.xticks(indices, labels, rotation=90)\n #plt.savefig(\"C:\\\\Users\\\\Maria\\\\Desktop\\\\{}.pdf\".format(filename),bbox_inches='tight')\n plt.show()", "title": "" }, { "docid": "fa089322cc8a16bf2f70c9f8b8a27728", "score": "0.53012353", "text": "def plot_feature_mi(map, top_n=20):\n map = sorted(map, key=lambda tup: tup[1])\n map = map[len(map)-top_n:]\n features = [x[0] for x in map]\n scores = [x[1] for x in map]\n\n fig, ax1 = plt.subplots(figsize=(9, 7))\n fig.subplots_adjust(left=0.115, right=0.88)\n fig.canvas.set_window_title('Eldorado K-8 Fitness Chart')\n pos = np.arange(len(features))\n\n rects = ax1.barh(pos, [scores[k] for k in range(len(scores))],\n align='center',\n height=0.2, color='b',\n tick_label=features)\n\n ax1.set_title(\"Mutual Information\")\n\n ax1.set_xlim([0, scores[len(scores)-1]+0.2])\n ax1.xaxis.set_major_locator(MaxNLocator(11))\n ax1.xaxis.grid(True, linestyle='--', which='major',\n color='grey', alpha=.25)\n\n # set X-axis tick marks at the deciles\n imp = ax1.text(.5, -.07, 'Mutual Information',\n horizontalalignment='center', size='small',\n transform=ax1.transAxes)\n\n\n for rect in rects:\n ax1.text(rect.get_width() + 0.01, rect.get_y() + rect.get_height()/2.,\n '{}'.format(rect.get_width()),\n ha='left', va='center')\n\n plt.show()", "title": "" }, { "docid": "0f57a7076d20ccdc4d338d9368843cbc", "score": "0.5292866", "text": "def plot_order_2d_histograms(ax, df, prime=False, bins=300, \n xlim=[-20,20], ylim=[0,1], x_label=None,\n y_label=None, norm_factor=1.0, hist_vline=None):\n\n # Add vertical line to histogram, useful for showing dividers\n if hist_vline != None:\n for xval in hist_vline:\n ax.axvline(xval, color='red', alpha=0.25, linewidth=5)\n\n gdf_size_lowestorder = df\n total_size = sum(gdf_size_lowestorder[\"Order\"] == 0)\n\n if prime:\n gdf_size = gdf_size_lowestorder[gdf_size_lowestorder[\"Prime\"]==True]\n else:\n gdf_size = gdf_size_lowestorder[gdf_size_lowestorder[\"Prime\"]==False]\n\n size_prime_gdf = gdf_size.groupby([\"TrajNum\",\"Time\"]).size().reset_index(name=\"Occupancy\")\n occ_percent = size_prime_gdf.groupby([\"Occupancy\"]).size() / total_size\n 
print(occ_percent)\n occ_cut = (occ_percent[occ_percent > 0.04]).to_dict()\n\n ordcolors=[\"red\",\"green\",\"blue\",\"purple\",\"orange\",\"pink\",\"aqua\",\"maroon\"]\n for axind, (occid, occval) in enumerate(occ_cut.iteritems()):\n\n target_ax = ax[axind]\n target_ax.text(xlim[1]-0.3, ylim[1]-0.05,\n \"Occ. \"+str(occid)+\": \"+str(np.around(100*occval,1))+\"%\",\n fontsize=16)\n\n for orderedatom, data in gdf_size[gdf_size[\"Occupancy\"]==occid].groupby(\"Order\"):\n histogram, edges = np.histogram(data[\"Z\"], bins=bins, range=[-20,20], normed=True)\n proxy_fill_between(edges[1:], 0, histogram,\n ax=target_ax,\n linewidth=0.5, facecolor=ordcolors[orderedatom], alpha=0.75)\n\n _plot_labelling(target_ax, xlim, ylim, x_label, y_label,\n tickx=[1.0,5.0], ticky=[0.05,0.1])\n\n return ax", "title": "" }, { "docid": "f830987c9a3b7958f9109ee2c1cc0f6c", "score": "0.528966", "text": "def get_labels_from_bins(self):\n contigs = self.large_contigs[~self.disconnected][~self.disconnected_intersected]\n self.labels = np.array([-1 for _ in range(contigs.shape[0])])\n for bin_id, tids in self.bins.items():\n if bin_id != 0:\n truth_array = contigs['tid'].isin(tids)\n self.labels[truth_array] = bin_id", "title": "" }, { "docid": "cfa73eaba2367cbf853820f16ff369ea", "score": "0.52618563", "text": "def plot_composite(inargs, rootgroup, i, data, ax, label, clist, ylabel):\n\n # Check how many nans there are in dataset\n num_nan = np.isnan(data).sum()\n num_tot = data.size\n print('Scale index: %i' % (i))\n print('Number of NaNs: %.2e/%.2e' % (num_nan, num_tot))\n print('Percentage of NaNs: %.2f' % (np.float(num_nan) / num_tot * 100.))\n\n composite_mean = np.nanmean(data, axis=(0,2))\n per25 = np.nanpercentile(data, 25, axis=(0,2))\n per75 = np.nanpercentile(data, 75, axis=(0,2))\n\n ax.plot(rootgroup.variables['time'][:], composite_mean,\n label=label, c=clist[i], zorder=1,\n linewidth=2)\n ax.fill_between(rootgroup.variables['time'][:],\n per25, per75,\n where=per25 < per75,\n linewidth=0, facecolor=clist[i],\n alpha=0.3, zorder=0.5)\n\n if inargs.diurnal_title:\n comp_str = 'Composite ' + get_composite_str(inargs, rootgroup)\n ax.set_title(comp_str)\n if inargs.diurnal_legend:\n ax.legend(loc=2, fontsize=8)\n ax.set_ylabel(ylabel, labelpad=0)\n ax.set_xticks([6, 9, 12, 15, 18, 21, 24])\n ax.set_xticklabels([6, 9, 12, 15, 18, 21, 24])\n ax.set_xlabel('Time [h/UTC]')\n if inargs.diurnal_log:\n ax.set_yscale('log')\n ax.axhline(y=1, c='gray', zorder=0.1)\n ax.set_yticks(np.arange(0.1, 3, 0.1), minor='True')\n ax.set_ylim(inargs.diurnal_ylim[0], inargs.diurnal_ylim[1])\n # Fix from https://github.com/matplotlib/matplotlib/issues/8386/\n from matplotlib.ticker import StrMethodFormatter, NullFormatter\n ax.yaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))\n ax.yaxis.set_minor_formatter(NullFormatter())\n ax.set_yticks([0.5, 1, 2])\n ax.set_yticklabels([0.5, 1, 2])\n elif inargs.plot_type == 'corr_m_N':\n ax.set_ylim(-1, 1)\n ax.axhline(y=0, c='gray', zorder=0.1)\n ax.yaxis.labelpad = -6\n elif inargs.plot_type == 'mean_m':\n pass\n else:\n ax.set_ylim(0.1, 2.5)\n ax.axhline(y=1, c='gray', zorder=0.1)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_position(('outward', 3))\n ax.spines['bottom'].set_position(('outward', 3))\n ax.set_xlim((6, 24))\n plt.subplots_adjust(left=0.2, right=0.95, bottom=0.2, top=0.95)", "title": "" }, { "docid": "68a223f526a64a49ab281032da0a878f", "score": "0.5261152", "text": "def setBinning(dataset_output, 
binning_array):\r\n labels = np.zeros(len(dataset_output), dtype=\"uint8\")\r\n for i in range(len(dataset_output)):\r\n for j in range(len(binning_array) - 1):\r\n if dataset_output[i] >= binning_array[j] and dataset_output[i] <= binning_array[j + 1]:\r\n labels[i] = j\r\n break\r\n \r\n return labels", "title": "" }, { "docid": "c2c5209d22353b1f24fc787d35c318e3", "score": "0.5258834", "text": "def calculate_stats_for_bins(binned_values,labels,sorted_split_values,verbose=False):\n binned_df = pd.DataFrame()\n binned_df[\"value\"] = binned_values\n binned_df[\"label\"] = labels \n stats_df, IV = get_stats_for_categorical(binned_df,\"value\",\"label\")\n if verbose:\n print(stats_df)\n stats_df = stats_df.sort_index()\n lower_bounds, upper_bounds = sorted_split_values[0:-1], sorted_split_values[1:]\n if len(lower_bounds) != len(stats_df) or len(upper_bounds) != len(stats_df):\n print(len(lower_bounds), len(upper_bounds), len(stats_df))\n raise RuntimeError(\"TRY AGAIN: bagging found invalid number of cutting values!\")\n stats_df[\"lower_bound\"] = lower_bounds\n stats_df[\"upper_bound\"] = upper_bounds\n return stats_df, IV", "title": "" }, { "docid": "b4758df4d97675d076271146210443fc", "score": "0.52540624", "text": "def generate_features_labels(bins):\n degree = ut.load_numpy_file(ut.topo_features_path + \"degree.npy\")\n clustering = ut.load_numpy_file(ut.topo_features_path + \"clustering.npy\")\n eigenvector_centrality = ut.load_numpy_file(ut.topo_features_path + \"eigenvector_centrality.npy\")\n betweenness_centrality = ut.load_numpy_file(ut.topo_features_path + \"betweenness_centrality.npy\")\n if not ut.is_directed:\n triangles = ut.load_numpy_file(ut.topo_features_path + \"triangles.npy\")\n\n bin_feature(degree, \"degree\", False, bins)\n bin_feature(clustering, \"clustering\", False, bins)\n bin_feature(eigenvector_centrality, \"eigenvector_centrality\", False, bins)\n bin_feature(betweenness_centrality, \"betweenness_centrality\", False, bins)\n if not ut.is_directed:\n bin_feature(triangles, \"triangles\", False, bins)", "title": "" }, { "docid": "61d8a442b1ac5a8b859948a4c23e5509", "score": "0.52536464", "text": "def plot_countspectralbins(qa_dict,outfile):\n\n arm=qa_dict[\"ARM\"]\n spectrograph=qa_dict[\"SPECTROGRAPH\"]\n expid=qa_dict[\"EXPID\"]\n paname=qa_dict[\"PANAME\"]\n \n bins100=qa_dict[\"VALUE\"][\"NBINS100\"]\n bins250=qa_dict[\"VALUE\"][\"NBINS250\"]\n bins500=qa_dict[\"VALUE\"][\"NBINS500\"]\n\n bins100_amp=qa_dict[\"VALUE\"][\"NBINS100_AMP\"]\n bins250_amp=qa_dict[\"VALUE\"][\"NBINS250_AMP\"]\n bins500_amp=qa_dict[\"VALUE\"][\"NBINS500_AMP\"]\n\n index=np.arange(bins100.shape[0])\n\n fig=plt.figure()\n plt.suptitle(\"Count spectral bins after %s, Camera: %s%s, ExpID: %s\"%(paname,arm,spectrograph,expid))\n\n gs=GridSpec(7,6)\n ax1=fig.add_subplot(gs[1:4,:2])\n ax2=fig.add_subplot(gs[1:4,2:4])\n ax3=fig.add_subplot(gs[1:4,4:])\n ax4=fig.add_subplot(gs[4:,:2])\n ax5=fig.add_subplot(gs[4:,2:4])\n ax6=fig.add_subplot(gs[4:,4:])\n\n hist_med=ax1.bar(index,bins100,color='b',align='center')\n ax1.set_xlabel('Fiber #',fontsize=10)\n ax1.set_ylabel('Counts > 100',fontsize=10)\n ax1.tick_params(axis='x',labelsize=10)\n ax1.tick_params(axis='y',labelsize=10)\n\n hist_med=ax2.bar(index,bins250,color='r',align='center')\n ax2.set_xlabel('Fiber #',fontsize=10)\n ax2.set_ylabel('Counts > 250',fontsize=10)\n ax2.tick_params(axis='x',labelsize=10)\n ax2.tick_params(axis='y',labelsize=10)\n\n hist_med=ax3.bar(index,bins500,color='g',align='center')\n ax3.set_xlabel('Fiber 
#',fontsize=10)\n ax3.set_ylabel('Counts > 500',fontsize=10)\n ax3.tick_params(axis='x',labelsize=10)\n ax3.tick_params(axis='y',labelsize=10)\n\n heatmap1=ax4.pcolor(bins100_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)\n ax4.set_xlabel(\"Bins above 100 counts (per Amp)\",fontsize=10)\n ax4.tick_params(axis='x',labelsize=10,labelbottom='off')\n ax4.tick_params(axis='y',labelsize=10,labelleft='off')\n ax4.annotate(\"Amp 1\\n%.1f\"%bins100_amp[0],\n xy=(0.4,0.4),\n fontsize=10\n )\n ax4.annotate(\"Amp 2\\n%.1f\"%bins100_amp[1],\n xy=(1.4,0.4),\n fontsize=10\n )\n ax4.annotate(\"Amp 3\\n%.1f\"%bins100_amp[2],\n xy=(0.4,1.4),\n fontsize=10\n )\n ax4.annotate(\"Amp 4\\n%.1f\"%bins100_amp[3],\n xy=(1.4,1.4),\n fontsize=10\n )\n heatmap2=ax5.pcolor(bins250_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)\n ax5.set_xlabel(\"Bins above 250 counts (per Amp)\",fontsize=10)\n ax5.tick_params(axis='x',labelsize=10,labelbottom='off')\n ax5.tick_params(axis='y',labelsize=10,labelleft='off')\n ax5.annotate(\"Amp 1\\n%.1f\"%bins250_amp[0],\n xy=(0.4,0.4),\n fontsize=10\n )\n ax5.annotate(\"Amp 2\\n%.1f\"%bins250_amp[1],\n xy=(1.4,0.4),\n fontsize=10\n )\n ax5.annotate(\"Amp 3\\n%.1f\"%bins250_amp[2],\n xy=(0.4,1.4),\n fontsize=10\n )\n ax5.annotate(\"Amp 4\\n%.1f\"%bins250_amp[3],\n xy=(1.4,1.4),\n fontsize=10\n )\n\n heatmap3=ax6.pcolor(bins500_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)\n ax6.set_xlabel(\"Bins above 500 counts (per Amp)\",fontsize=10)\n ax6.tick_params(axis='x',labelsize=10,labelbottom='off')\n ax6.tick_params(axis='y',labelsize=10,labelleft='off')\n ax6.annotate(\"Amp 1\\n%.1f\"%bins500_amp[0],\n xy=(0.4,0.4),\n fontsize=10\n )\n ax6.annotate(\"Amp 2\\n%.1f\"%bins500_amp[1],\n xy=(1.4,0.4),\n fontsize=10\n )\n ax6.annotate(\"Amp 3\\n%.1f\"%bins500_amp[2],\n xy=(0.4,1.4),\n fontsize=10\n )\n ax6.annotate(\"Amp 4\\n%.1f\"%bins500_amp[3],\n xy=(1.4,1.4),\n fontsize=10\n )\n plt.tight_layout()\n fig.savefig(outfile)", "title": "" }, { "docid": "ec2edfe06df43d60775c78d95869f36d", "score": "0.52435076", "text": "def test_creation_range_100__n_bins_5(self):\n # Create data\n x = list(range(100))\n # Calculate binning\n bins = create_binning(x, 5)\n r = bins.label(x, '{simplei}')\n # Expected result\n # Comparison\n self.assertEqual(r[0], '0_20')\n self.assertEqual(r[20], '20_40')\n self.assertEqual(r[40], '40_60')\n self.assertEqual(r[60], '60_80')\n self.assertEqual(r[80], '80_99')\n self.assertEqual(r[99], '80_99')", "title": "" }, { "docid": "4b831338d4ccccfeaa4011d78bec50c7", "score": "0.52398854", "text": "def plot_completeness_for_tdo():\n # Setup plots\n py.close(1)\n py.figure(1, figsize=(12, 6))\n py.clf()\n py.subplots_adjust(left=0.1)\n\n ##########\n # First the plots for completeness vs. 
Kp\n ##########\n magEdges = np.arange(8.5, 16.6, 0.5)\n rmin = 0\n rmax = 30\n\n comp_imag_kp = np.zeros(len(magEdges)-1, dtype=float)\n comp_spec_kp = np.zeros(len(magEdges)-1, dtype=float)\n\n for kk in range(len(magEdges)-1):\n mmin = magEdges[kk]\n mmax = magEdges[kk+1]\n\n comp_imag_kp[kk], comp_spec_kp[kk] = completeness_from_catalog(rmin, rmax,\n mmin, mmax)\n\n mags = np.repeat(magEdges, 2)[1:-1]\n comp_imag_kp = np.repeat(comp_imag_kp, 2)\n comp_spec_kp = np.repeat(comp_spec_kp, 2)\n\n py.subplot(1, 2, 1)\n py.plot(mags, comp_imag_kp, 'b--', label='Imaging', linewidth=2)\n py.plot(mags, comp_spec_kp, 'g-', label='Spectroscopy', linewidth=2)\n py.xlabel('Kp Magnitudes')\n py.ylabel('Completeness')\n py.ylim(0, 1.05)\n py.xlim(9, 15.5)\n py.title(r'%d\" $\\leq$ R $\\leq$ %d\"' % (rmin, rmax))\n py.legend(loc='lower left')\n\n ##########\n # Plots for completeness vs. radius\n ##########\n radiiEdges = np.array([0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 16.0])\n mmin = 14.5\n mmax = 15.5\n\n comp_imag_r = np.zeros(len(radiiEdges)-1, dtype=float)\n comp_spec_r = np.zeros(len(radiiEdges)-1, dtype=float)\n \n for rr in range(len(radiiEdges)-1):\n rmin = radiiEdges[rr]\n rmax = radiiEdges[rr+1]\n\n comp_imag_r[rr], comp_spec_r[rr] = completeness_from_catalog(rmin, rmax,\n mmin, mmax)\n\n radii = np.repeat(radiiEdges, 2)[1:-1]\n comp_imag_r = np.repeat(comp_imag_r, 2)\n comp_spec_r = np.repeat(comp_spec_r, 2)\n \n py.subplot(1, 2, 2)\n py.plot(radii, comp_imag_r, 'b--', label='Imaging', linewidth=2)\n py.plot(radii, comp_spec_r, 'g-', label='Spectroscopy', linewidth=2)\n py.xlabel('Radius (arcsec)')\n py.ylabel('Completeness')\n py.ylim(0, 1.05)\n py.title(r'%.1f $\\leq$ Kp $\\leq$ %.1f' % (mmin, mmax))\n\n py.savefig('%scompleteness_ext_spec_imag_for_tdo.png' % (workDir))\n py.savefig('%scompleteness_ext_spec_imag_for_tdo.eps' % (workDir))", "title": "" }, { "docid": "ebadec502ca43087eab7bd86fb08cdf5", "score": "0.5219858", "text": "def plot_bldg_hist(df, i, value):\n # Parse building info\n building = df.iloc[i]\n building_type = building[('cis', 'building_type')]\n cz = str(building[('cis', 'cz')])\n # Extract rows from the specified building types and climate zones\n group = get_group(df, building_type=building_type, cz=cz)\n # Get values\n building_eui = building[value]\n group_eui = group[value]\n group_eui = group_eui[group_eui.notnull()]\n group_eui_mean = group_eui.mean()\n percentile = stats.percentileofscore(group_eui, building_eui)\n # Define xlabel and title\n if 'fit' in value[1]:\n xlabel = 'Change in annual EUI from 2009-2015<br>(kBtu/ft²/year)'\n xlim = None\n elif 'avg' in value[1]:\n xlabel = 'Average annual EUI from 2009-2015<br>(kBtu/ft²)'\n xlim = [0, group_eui.max()]\n # Plot\n data = go.Data([go.Histogram(x=group_eui,\n marker={'color': 'rgb(52,152,219)'},\n opacity=0.75)])\n # Set layout\n sign = int(building_eui < group_eui_mean) * 2 - 1\n layout = go.Layout(shapes=[_vertline(group_eui_mean, 'rgb(0,0,0)'),\n _vertline(building_eui, list_colors_rgb[5])],\n annotations=[_annot(group_eui_mean, 'group mean', sign),\n _annot(building_eui,\n '{:.1f}%'.format(percentile),\n -sign)],\n xaxis={'title': xlabel,\n 'range': xlim},\n yaxis={'title': 'Counts'},\n margin={'l': 50, 'r': 0, 't': 40, 'b': 60},\n showlegend=False,\n paper_bgcolor='#F3F3F3')\n return {'data': data, 'layout': layout}", "title": "" }, { "docid": "461a32278778df3dd898d7e0d0e136d0", "score": "0.521085", "text": "def calc_completeness_map(self,mbins,logrbins,band,show=True):\n\t\tmag_in = 
getattr(self,'%s_mag_in'%band.lower())\n\t\tre_in = getattr(self,'re_in')\n\t\tlogre_in = log10(re_in)\n\t\tn_input = histogram2d(mag_in,logre_in,bins=[mbins,logrbins])[0]\n\t\tn_detect = histogram2d(mag_in[self.detect==True],logre_in[self.detect==True],bins=[mbins,logrbins])[0]\n\t\tself.compmap = n_detect.astype('float') / maximum(n_input,1.0)\n\t\tself.mbins = mbins\n\t\tself.logrbins = logrbins\n\t\tif show:\n\t\t\tplt.figure()\n\t\t\tplt.imshow(self.compmap.swapaxes(0,1),origin='lower')\n\t\t\tplt.xticks(arange(len(mbins))[::2],mbins[::2])\n\t\t\tplt.yticks(arange(len(logrbins))[::2],logrbins[::2])\n\t\t\tplt.xlabel('input magnitude',size=14)\n\t\t\tplt.ylabel('input logRe', size=14)\n\t\t\tplt.title('Completeness map in %s-band' % band)\n\t\t\tplt.colorbar()", "title": "" }, { "docid": "8f0975e9be6f010a9f94eec91be8644b", "score": "0.5210783", "text": "def footprint_tests(cat,vals,mask=None,bins=100,label=''):\n\n mask=catalog.CatalogMethods.check_mask(cat.coadd,mask)\n\n if vals==[]:\n fig.plot_methods.plot_footprint(cat,mask=mask,label=label,bins=bins)\n\n else:\n for val in vals:\n flag=getattr(cat,val)\n for i in xrange(summary_stats.n_bits_array(cat,val)):\n if np.sum((flag & 2**i) != 0)>100:\n print 'footprint flag',val,i\n fig.plot_methods.plot_footprint(cat,mask=((flag & 2**i) != 0)&mask,label=getattr(config,val+'_name').get(i),bins=bins)\n\n return", "title": "" }, { "docid": "4843eca378364ae0146ad58f0441fb60", "score": "0.52098876", "text": "def _map_bins_to_labels(bins):\n labels = [len(bins)] * 100\n label = 0\n i = 0\n for bin in bins:\n for _ in range(0, bin):\n labels[i] = label\n i += 1\n label += 1\n\n return labels", "title": "" }, { "docid": "c5c5a6a2ecd760210259c444c0a2be39", "score": "0.51854753", "text": "def comparative_histogram():\n dataset = khi2.dataset(pirand_data)\n dataset2 = khi2.dataset(python_data)\n fig, ax = plt.subplots()\n pirand_observed = ax.bar(map(lambda x: x * 0.1 - 0.04, range(10)), dataset, color=\"firebrick\", width=0.04)\n python_observed = ax.bar(map(lambda x: x * 0.1, range(10)), dataset2, color=\"orangered\", width=0.04)\n plt.legend([pirand_observed, python_observed], [\"generateur pirand\", \"generateur python\"])\n plt.xlabel('classe')\n plt.ylabel('occurrence')\n ax.set_xticklabels(('???', '[0.0, 0.1[', '[0.2, 0.3[', '[0.4, 0.5[', '[0.6, 0.7[', '[0.8, 0.9['))\n plt.axis([-0.07, 0.97, 0, 130000])\n plt.savefig(\"report/comparative_histogram.png\", bbox_inches='tight')", "title": "" }, { "docid": "1f8c6955f693229529f95f561f96d656", "score": "0.5180356", "text": "def plot_hist( bins, plotting_options=plotting_options, processes=None, x_range=None, y_range=None, title=None, y_label=None, colors=None, logy=True, x_minor_ticks=True, lumi_amount=\"19.7\", ax=None):\n if ax == None: fig, ax = plt.subplots(figsize=(11, 9))\n if colors == None:\n colors = palettes[\"default\"]\n elif type(colors) == str:\n colors = palettes[colors]\n\n if \"plotting\" in bins:\n if y_label == None:\n y_label = bins[\"plotting\"][\"y_label\"] \n if title == None:\n title = bins[\"plotting\"][\"title\"] \n \n if processes == None:\n processes = plotting_options.process.unique()\n# if \"_\" in title:\n# title = \" \".join(title.split(\"_\"))\n\n minorLocator = AutoMinorLocator()\n\n tot_bins = {}\n sum_yerr = np.zeros( len( bins[ bins.keys()[0] ][3] ) )\n for process in processes:#plotting_options.process.unique():\n for process_decay in plotting_options[ plotting_options.process == process ].process_decay.unique():\n if process_decay in bins.keys():\n sum_yerr 
+= bins[process_decay][3]\n if process not in tot_bins.keys():\n tot_bins[process] = copy.deepcopy(bins[process_decay])\n else: \n tot_bins[process][0] += bins[process_decay][0]\n \n\n#Plotting rects\n rect = []\n sum_bins = np.zeros( len( tot_bins[ tot_bins.keys()[0] ][0] ) )\n last_color = None\n for process in processes:\n#########\n if process in tot_bins.keys() and process in colors.keys():\n########\n bottom = sum_bins\n if int(matplotlib.__version__.split(\".\")[0]) == 1: \n rect.append(ax.bar( tot_bins[process][1][:-1], tot_bins[process][0],\n tot_bins[process][1][1] - tot_bins[process][1][0] , color = colors[process],\n edgecolor = colors[process], bottom=bottom ))\n if int(matplotlib.__version__.split(\".\")[0]) >= 2: \n rect.append(ax.bar( tot_bins[process][2], tot_bins[process][0],\n tot_bins[process][1][1] - tot_bins[process][1][0] , color = colors[process],\n edgecolor = colors[process], bottom=bottom ))\n sum_bins +=tot_bins[process][0]\n last_color = colors[process]\n\n\n #Yerror\n process_ = tot_bins.keys()[0]\n sum_yerr = np.sqrt(sum_yerr)\n for i, yerr in enumerate(sum_yerr): \n ax.fill( [tot_bins[process_][1][i], tot_bins[process_][1][i+1], tot_bins[process_][1][i+1], tot_bins[process_][1][i] ],\\\n [sum_bins[i] - yerr, sum_bins[i] - yerr, sum_bins[i] + yerr, sum_bins[i] + yerr], fill=False, hatch='//', edgecolor='0.45' )\n\n\n #Configurables\n if logy == True: \n if sum_bins.sum() > 0: \n ax.set_yscale(\"log\", nonposy='clip')\n if type(x_range)==tuple: ax.set_xlim(x_range)\n if y_range==None: \n bottom = 1 \n\n bottoms = [100, 10] \n for _bottom in bottoms:\n if _bottom * 5 < sum_bins[~np.isinf(sum_bins)].min():\n continue\n else:\n if bottom < _bottom:\n bottom = _bottom\n\n if logy == True: ax.set_ylim( bottom=bottom, top= sum_bins[~np.isinf(sum_bins)].max()*5.)\n else: ax.set_ylim( bottom=0, top= sum_bins.max()*2.)\n elif type(y_range)==tuple: ax.set_ylim( y_range )\n\n\n ax.xaxis.labelpad = 20\n ax.yaxis.labelpad = 15\n\n\n if y_label != None:\n ax.set_ylabel(y_label, fontsize=22, fontname='Bitstream Vera Sans', )\n if title != None:\n plt.xlabel( title, fontname='Bitstream Vera Sans', fontsize=24)#position=(1., 0.), va='bottom', ha='right',)\n\n #plt.rc('text', usetex=True)\n page_ = [page, \"\"]\n if page == \"13TeV\":\n page_ = [\"13\", \"TeV\"]\n else:\n page_ = [\"8\", \"TeV\"]\n ax.set_title(r\"\\textbf{CMS} Work in Progress \\hspace{8cm} $\"+ lumi_amount +\" fb^{-1}$ $\\sqrt{s}=\"+page_[0]+\" \\mathrm{\"+page_[1]+\"}$\", fontname='Bitstream Vera Sans', fontsize=24)\n\n ####################################\n #Add minor tick marks to the x-axis\n if x_minor_ticks == False:\n loc = matplotlib.ticker.MultipleLocator(base=1) # this locator puts ticks at regular intervals\n ax.xaxis.set_major_locator(loc)\n else:\n ax.xaxis.set_minor_locator(minorLocator)\n \n###################################\n ax.yaxis.set_tick_params(length=10, labelsize=22)\n ax.yaxis.set_tick_params(which='minor',length=5)\n ax.xaxis.set_tick_params(length=10, labelsize=22)\n ax.xaxis.set_tick_params(which='minor',length=5)\n\n ax.yaxis.grid(color='gray', linestyle='dashed')\n \n plt.xticks()\n plt.tight_layout()\n\n\n processes_return = [ process for process in processes if process in tot_bins.keys() ]\n return ax, rect, processes_return", "title": "" }, { "docid": "07c7c0859b0979835980255bce799ad8", "score": "0.5178914", "text": "def plot_predicted_values_per_bin_bar_graph(predicted_labels):\n labels = predicted_labels.keys()\n N = len(labels)\n ind = np.arange(N) # the x locations for 
the groups\n width = 0.35 # the width of the bars\n correct = []\n wrong = []\n for idx in ind:\n label = labels[idx]\n correct.append(len(predicted_labels[label].astype(int)[predicted_labels[label].astype(int)==label]))\n wrong.append(len(predicted_labels[label]) - correct[idx])\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, correct, width, color='g')\n rects2 = ax.bar(ind+width, wrong, width, color='R')\n ax.set_ylabel('Number of observations')\n ax.set_title('Correct and Wrong observations per bin')\n ax.set_xticks(ind+width)\n ax.set_xticklabels( labels )\n ax.legend( (rects1[0], rects2[0]), ('Correct', 'Wrong') )", "title": "" }, { "docid": "6f4bd7a101bab91aa3b31a8cadec7ae6", "score": "0.5177758", "text": "def plot_CI_performance(cluster_labels, legend_labels, *args):\n\n num_metrics = len(metrics)\n num_classifiers = len(args) - 1\n\n ind = np.arange(num_metrics) # the x locations for the groups\n width = 0.7 / num_classifiers # the width of the bars\n\n fig, ax = plt.subplots()\n\n # loop through classifiers\n rects_list = []\n for i in range(num_classifiers):\n results = args[i+1] # skip baseline\n means = [it[0] for it in results]\n errs = [(it[0] - it[1], it[2] - it[0]) for it in results]\n rects = ax.bar(ind + i * width, means, width, label=classifiers[i])\n ax.errorbar(ind + i * width, means, yerr=np.array(errs).T, fmt=\"None\", ecolor='k')\n rects_list.append(rects)\n\n # baseline\n results = args[0]\n for i in range(num_metrics) :\n mean = results[i][0]\n err_low = results[i][1]\n err_high = results[i][2]\n xlim = (ind[i] - 0.8 * width, ind[i] + num_classifiers * width - 0.2 * width)\n plt.plot(xlim, [mean, mean], color='k', linestyle='-', linewidth=2)\n plt.plot(xlim, [err_low, err_low], color='k', linestyle='--', linewidth=2)\n plt.plot(xlim, [err_high, err_high], color='k', linestyle='--', linewidth=2)\n\n ax.set_ylabel('Score')\n ax.set_ylim(0, 1)\n ax.set_xticks(ind + width / num_classifiers)\n ax.set_xticklabels(metrics)\n ax.legend()\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar displaying its height\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%.3f' % height, ha='center', va='bottom')\n\n for rects in rects_list:\n autolabel(rects)\n\n plt.show()", "title": "" }, { "docid": "a21018a001e5d7b8463bf0accf6e5ab6", "score": "0.5170351", "text": "def vis_detections(im, prob, bboxes, labelmap, thresh=0.3, save_filename=None):\n im = im[:, :, (2, 1, 0)]\n plt.cla()\n fig = plt.imshow(im)\n\n for i, box in enumerate(bboxes):\n for j in range(prob.shape[1] - 1):\n if prob[i, j] < thresh:\n continue;\n score = prob[i, j]\n cls = j\n x,y,w,h = box\n \n im_h, im_w = im.shape[0:2]\n left = (x-w/2.)\n right = (x+w/2.)\n top = (y-h/2.)\n bot = (y+h/2.)\n\n left = max(left, 0)\n right = min(right, im_w - 1)\n top = max(top, 0)\n bot = min(bot, im_h - 1)\n\n plt.gca().add_patch(\n plt.Rectangle((left, top),\n right - left,\n bot - top, fill=False,\n edgecolor='g', linewidth=3)\n )\n plt.text(float(left), float(top - 10), '%s: %.3f'%(labelmap[cls], score), color='darkgreen', backgroundcolor='lightgray')\n #plt.title('{} {:.3f}'.format(class_name, score))\n\n if save_filename is None:\n plt.show()\n else:\n plt.axis('off')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n plt.savefig(save_filename, bbox_inches='tight', pad_inches = 0)", "title": "" }, { "docid": "6723da6867021c1ca76e4036fa57396d", "score": "0.516939", "text": "def drawLabels(self):\n def 
GetLabelWidth(label):\n \"\"\"Returns the total width of the length of 'label', using the\n fonts from glut\"\"\"\n assert type(label) is str, \"Incorrect type\"\n\n length = 0\n for c in label:\n length += glutBitmapWidth(GLUT_BITMAP_HELVETICA_18, ord(c))\n\n assert type(length) is int\n assert length >= 0\n\n return length\n def lerp(a, b, t):\n \"\"\"For interpolating between the range [a, b], according to the formula:\n value = (1 - t) * a + t * b, for t in [0, 1]\"\"\"\n assert 0.0 <= t <= 1.0\n value = (1 - t) * a + t * b\n\n assert a <= value <= b, \"Out of range\"\n return value\n\n # Draw the value of the ranges\n a = self.range[0]\n glPushMatrix()\n glTranslatef(0.03, 0.0, 0.0)\n for i in range(self.numBins+1):\n if self.category == 0:\n x = a + i * self.binWidth\n xLabel = '{:.1f}'.format(x)\n else:\n if i < self.numBins:\n for v in self.value:\n if v == self.value[i]:\n xLabel = self.name[i]\n else:\n break\n\n length = GetLabelWidth(xLabel)\n length /= self.size.width\n if i % 2 == 0:\n y = -0.04\n else:\n y = -0.08\n if self.category == 1:\n glPushMatrix()\n self.binWidth = 1.0 / self.numBins\n glTranslatef(self.binWidth / 2.0, 0.0, 0.0)\n glRasterPos2f(i * self.rectWidth - length / 2.0, y)\n for c in xLabel:\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(c))\n if self.category == 1:\n glPopMatrix()\n glPopMatrix()\n\n # Draw the value of the frequencies\n minFreq = 0\n divWidth = 1.0 / self.numDivisions\n for i in range(self.numDivisions+1):\n y = lerp(minFreq, self.maxFrequency, i / self.numDivisions)\n yLabel = '{:.1f}'.format(y)\n length = GetLabelWidth(yLabel)\n length /= self.size.width\n glRasterPos2f(-0.12, i * divWidth)\n for c in yLabel:\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(c))\n\n # Draw the name of the variable\n label = self.axisName\n length = GetLabelWidth(label)\n length /= self.size.width\n glRasterPos2f(0.5 - length, 1.05)\n for c in label:\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, ord(c))\n\n # Draw the y-axis label\n label = 'Frequencies'\n length = len(label)\n fontHeight = 18#glutBitmapHeight(GLUT_BITMAP_HELVETICA_18)\n fontHeight /= self.size.height\n i = 0\n start = 1.0#0.5 + ((fontHeight * length) / 2.0)\n for c in label:\n glRasterPos2f(-0.25, start - i * fontHeight)\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, ord(c))\n i += 1\n \n label = self.unit\n glRasterPos2f(1.06, 0.0)\n for c in label:\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, ord(c))", "title": "" }, { "docid": "42b6687a199fc6df9dc19ec31f7c9152", "score": "0.5165693", "text": "def plot_summary():\n #25-first cell,36-wrong cell,67-probe trials\n import sys\n #sys.path.append('/Volumes/Storage/psilentp/pybin')\n from Salines import salines\n cmap = plb.cm.Paired\n interal = salines.internal.buffCa.lowCl\n external = salines.ext.tets.highCa\n nersts = salines.saline_nersts(external,interal)\n e_cl = nersts['Cl-']\n groups = {\n 'AVA_AVB_l1':[26,27,28,29,30,31,32,33,34,35,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59],\n 'AVB_AVA_l1':[60,61,62,63,64,65,66,68,69,70,71,72,73,74,75],\n 'AVB_AVA_ttx':[96,97,98,99,100,103,104,105,106,107],\n 'AVB_AVA':[60,61,62,63,64,65,66,68,69,70,71,72,73,74,75,96,97,98,99,100,103,104,105,106,107],\n 'ASH_AVB_l1':[88,89,90,92,93,94,95]\n }\n from collections import OrderedDict\n \n #ordered_groups = OrderedDict()\n #for key in ['AVA_AVB_l1','AVB_AVA_l1','AVB_AVA_ttx','ASH_AVB_l1']:\n #ordered_groups.update({key:groups[key]})\n fi = open('conductance_ohmic_fits.txt','r')\n lns = fi.readlines()\n calcdict = 
dict()\n\n for l in lns:\n data = [float(x) for x in l.split()]\n calcdict.update({int(data[0]):{'rpot':data[1],'cond':data[2],'std_err':data[3],'p_val':data[4]}})\n #print calcdict[59]\n fig = plb.figure(figsize = (6.0,11))\n for i,key in enumerate(['AVA_AVB_l1','AVB_AVA_l1','AVB_AVA_ttx','ASH_AVB_l1']):\n group = groups[key]\n xs = [calcdict[cennum]['rpot'] for cennum in group]\n #xs = [np.random.rand()*5+20 for cennum in group]\n ys = [calcdict[cennum]['cond'] for cennum in group]\n yerr = [calcdict[cennum]['std_err'] for cennum in group]\n pval = [calcdict[cennum]['p_val'] for cennum in group]\n alpha_level = 0.05/len(ys)\n sig_rps = list()\n ax = plb.subplot(4,1,i+1)\n n_sig = 0\n n_not_sig = 0\n for x,y,er,pv in zip(xs,ys,yerr,pval):\n if pv > alpha_level:\n c = cmap(0.5)\n n_not_sig += 1\n else:\n sig_rps.append(x)\n c = cmap(0.1)\n n_sig += 1\n plb.errorbar(x,y,yerr=er,marker='o',ls='None',color = c)\n mean_rp = np.mean(sig_rps)\n from scipy import stats\n sterr_rp = stats.sem(sig_rps)\n print(\"%s:%.2f\"%(key,mean_rp)) \n print(\"%s:%.2f\"%(key,sterr_rp))\n print('not sig: ' + str(n_not_sig))\n print('sig: ' + str(n_sig))\n #ax = plb.gca()\n plb.title(key)\n format_iv(ax)\n ax.set_xbound([-130,50])\n ax.set_ybound([-50,210])\n ax.tick_params(top=False,right=False)\n ax.spines['left'].set_position('zero')\n y_start = -40\n y_axis_space = 15\n plb.arrow(mean_rp,y_start,0,-1*y_start-y_axis_space,shape='full',length_includes_head = True,head_length=10,head_width=2,color = cmap(0.1))\n plb.arrow(float(e_cl),y_start,0,-1*y_start-y_axis_space,shape='full',length_includes_head = True,head_length=10,head_width=2,color = cmap(0.3))\n #plb.savefig('/Volumes/Storage/psilentp/Desktop/'+key+'.pdf')\n return calcdict", "title": "" }, { "docid": "067b90bc75580a3f76f62a97c9f3354d", "score": "0.51644987", "text": "def scan_mv_pls_beta_plot_full(df_beta, labels_group, inp):\n if len(labels_group) > 2:\n for item in labels_group:\n df_hold = df_beta.filter(['Component Name', item+'_mean', item+'_ci_lower', item+'_ci_upper', item+'_interval']).copy()\n \n # Sort by biggest beta coefficients, positive and negative\n df_sort = df_hold.copy()\n df_sort[item+'_mean'] = abs(df_sort[item+'_mean'])\n df_sort = df_sort.sort_values(by = [item+'_mean'], ascending = False).copy().reset_index(drop=True)\n df_sort['sorter'] = range(len(df_sort))\n mapper_sorting = dict(zip(df_sort['Component Name'], df_sort['sorter']))\n\n df_hold['sorter'] = df_hold['Component Name'].map(mapper_sorting)\n df_hold = df_hold.copy().sort_values(by = ['sorter'], ascending = False).reset_index(drop=True)\n df_hold = df_hold.copy().filter([item for item in df_hold.columns if item != 'sorter'])\n\n x1 = len(df_hold)\n df_hold = df_hold.loc[len(df_hold)-inp['mv_vip_number']:].copy()\n x2 = len(df_hold)\n\n # Create plot \n fig = plt.figure(figsize = inp['mv_figsize_vip'])\n # Add plots for multiple axes\n ax1=fig.add_subplot(111, label=\"1\", frameon = True)\n ax2=fig.add_subplot(111, label=\"2\", frameon = False)\n ax2.get_xaxis().set_visible(False)\n ax2.yaxis.tick_right()\n #ax2.get_yaxis().set_visible(False) \n \n # Plot data\n xerr = [df_hold[item+'_mean']-df_hold[item+'_ci_lower'], df_hold[item+'_ci_upper']-df_hold[item+'_mean']]\n color = inp['mapper_group_color'][item]\n markersize = 4\n points = ax1.errorbar(x = df_hold[item+'_mean'], y = df_hold.index, xerr=xerr, linewidth = 0.8, fmt = 'o', markersize = markersize, elinewidth=1, capsize=markersize, label = item)\n if 0 in df_hold[item+'_interval']:\n ax1.axvline(0, linewidth = 
0.8, color = 'red')\n\n # Set grid\n ax1.set_axisbelow(True)\n ax1.yaxis.grid() \n\n # Scale axis\n ax1.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n ax2.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n\n plt.locator_params(axis='x', nbins=5)\n ax1.yaxis.set_ticks(df_hold.index)\n ax2.yaxis.set_ticks(df_hold.index)\n ax1.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n ax2.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n ax1.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n ax2.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n\n plt.draw()\n\n # Set standard label\n labels1 = [label for label in df_hold['Component Name']]\n ax1.set_xticklabels(ax1.get_xticklabels(), fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_yticklabels(labels1, fontsize = inp['mv_labelsize'], va = 'center', ha = 'right', fontweight = 'bold')\n \n # Set full label\n if inp['mv_label_vip'] == True:\n labels2 = [inp['mapper_name'][label] for label in df_hold['Component Name']]\n ax2.set_yticklabels(labels2, fontsize = inp['mv_labelsize'], va = 'center', ha = 'left', fontweight = 'bold')\n\n # Set label\n ax1.set_xlabel(r'Beta coefficients, $\\beta$', fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_ylabel(None)\n\n # Add non displayed data information\n legendproperties = {'size': inp['mv_labelsize'], 'weight': 'bold'}\n list_bbox = []\n\n handles_class = mlines.Line2D([], [], marker=None, markersize=0, label=f'{item}') \n leg1 = ax1.legend(handles = [handles_class], handlelength = False, bbox_to_anchor=(0, 1), loc=\"lower left\", frameon = False, prop = legendproperties)\n list_bbox.append(leg1)\n\n if x1-x2 > 0:\n handles_annotation = mlines.Line2D([], [], marker=None,markersize=0, label=f'Beta Top {inp[\"mv_vip_number\"]}')\n leg2 = ax2.legend(handles = [handles_annotation], handlelength = False, bbox_to_anchor=(1, 1), loc=\"lower right\", frameon = False, prop = legendproperties)\n list_bbox.append(leg2)\n\n # Name append \n if inp['mv_scaling'] == True:\n app_scale = f'_{inp[\"mv_scaling_method\"]}'\n else:\n app_scale = ''\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_{item}_SN{inp[\"pre_signal_noise\"]}{app_scale}.png'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', dpi = 800)\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_{item}_SN{inp[\"pre_signal_noise\"]}{app_scale}.svg'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', format = 'svg')\n plt.close(fig)\n else:\n df_hold = df_beta.filter(['Component Name', 'mean', 'ci_lower', 'ci_upper', 'interval']).copy()\n \n # Sort by biggest beta coefficients, positive and negative\n df_sort = df_hold.copy()\n df_sort['mean'] = abs(df_sort['mean'])\n df_sort = df_sort.sort_values(by = ['mean'], ascending = False).copy().reset_index(drop=True)\n df_sort['sorter'] = range(len(df_sort))\n mapper_sorting = dict(zip(df_sort['Component Name'], df_sort['sorter']))\n\n df_hold['sorter'] = df_hold['Component Name'].map(mapper_sorting)\n df_hold = df_hold.copy().sort_values(by = ['sorter'], ascending = False).reset_index(drop=True)\n df_hold = df_hold.copy().filter([item for item in df_hold.columns if item != 'sorter'])\n\n x1 = len(df_hold)\n df_hold = df_hold.loc[len(df_hold)-inp['mv_vip_number']:].copy()\n x2 = len(df_hold)\n\n # Create plot \n fig = plt.figure(figsize = inp['mv_figsize_vip'])\n # Add plots for multiple axes\n ax1=fig.add_subplot(111, label=\"1\", frameon = True)\n ax2=fig.add_subplot(111, label=\"2\", frameon = 
False)\n ax2.get_xaxis().set_visible(False)\n ax2.yaxis.tick_right()\n #ax2.get_yaxis().set_visible(False) \n \n # Plot data\n xerr = [df_hold['mean']-df_hold['ci_lower'], df_hold['ci_upper']-df_hold['mean']]\n markersize = 4\n points = ax1.errorbar(x = df_hold['mean'], y = df_hold.index, xerr=xerr, linewidth = 0.8, fmt = 'o', markersize = markersize, elinewidth=1, capsize=markersize)\n if 0 in df_hold['interval']:\n ax1.axvline(0, linewidth = 0.8, color = 'red')\n\n # Set grid\n ax1.set_axisbelow(True)\n ax1.yaxis.grid() \n\n # Scale axis\n ax1.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n ax2.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n\n plt.locator_params(axis='x', nbins=5)\n ax1.yaxis.set_ticks(df_hold.index)\n ax2.yaxis.set_ticks(df_hold.index)\n ax1.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n ax2.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n ax1.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n ax2.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n\n plt.draw()\n\n # Set standard label\n labels1 = [label for label in df_hold['Component Name']]\n ax1.set_xticklabels(ax1.get_xticklabels(), fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_yticklabels(labels1, fontsize = inp['mv_labelsize'], va = 'center', ha = 'right', fontweight = 'bold')\n \n # Set full label\n if inp['mv_label_vip'] == True:\n labels2 = [inp['mapper_name'][label] for label in df_hold['Component Name']]\n ax2.set_yticklabels(labels2, fontsize = inp['mv_labelsize'], va = 'center', ha = 'left', fontweight = 'bold')\n\n # Set label\n ax1.set_xlabel(r'Beta coefficients, $\\beta$', fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_ylabel(None)\n\n # Add non displayed data information\n legendproperties = {'size': inp['mv_labelsize'], 'weight': 'bold'}\n list_bbox = []\n\n if x1-x2 > 0:\n handles_annotation = mlines.Line2D([], [], marker=None,markersize=0, label=f'Beta Top {inp[\"mv_vip_number\"]}')\n leg2 = ax2.legend(handles = [handles_annotation], handlelength = False, bbox_to_anchor=(1, 1), loc=\"lower right\", frameon = False, prop = legendproperties)\n list_bbox.append(leg2)\n\n # Name append \n if inp['mv_scaling'] == True:\n app_scale = f'_{inp[\"mv_scaling_method\"]}'\n else:\n app_scale = ''\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_SN{inp[\"pre_signal_noise\"]}{app_scale}.png'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', dpi = 800)\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_SN{inp[\"pre_signal_noise\"]}{app_scale}.svg'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', format = 'svg')\n plt.close(fig)\n return", "title": "" }, { "docid": "27f16fc6563936115b498bf9a68ee6da", "score": "0.51623356", "text": "def plot_quantiles(bin_centers,perc_all,ax,**kwargs):\n\n delta_bins = bin_centers[1] - bin_centers[0]\n\n ax.errorbar(bin_centers,perc_all['median'],yerr=[perc_all['median']-perc_all['10th'],perc_all['90th']-perc_all['median']],color=kwargs.get('color','black'),linestyle='',marker='o',label=kwargs.get('label',None))\n\n if (kwargs.get('vlines',False)):\n for pos in np.arange(bin_centers[1]-delta_bins/2.,bin_centers[-1]+delta_bins/2.,delta_bins):\n ax2.axvline(pos, color='k', linestyle=':')\n ax2.set_xlim(bin_centers[0],bin_centers[-1])\n\n for i in np.arange(bin_centers.size):\n rect = 
matplotlib.patches.Rectangle((bin_centers[i]-0.375*delta_bins,perc_all['25th'][i]),width=0.75*delta_bins,height=perc_all['75th'][i]-perc_all['25th'][i],color=kwargs.get('color','black'),alpha=kwargs.get('alpha',0.5),linewidth=1.)\n ax.add_patch(rect)", "title": "" }, { "docid": "9d81406102afa815038a982d01a15aa4", "score": "0.5157904", "text": "def get_hist_label(j, i):\n return \"bdtg\" + \"j\" + str(j) + \"i\" + str(i)", "title": "" }, { "docid": "4e2633da9ad3029b24de910f56a5a51c", "score": "0.5152091", "text": "def make_indicator_graph():\n buffet_indicator = buffet_indicator_calculation()\n fig = plt.figure()\n plt.bar(buffet_indicator, height=100)\n plt.xlabel('BuffetIndicator')\n fig.savefig('pics/buffett_indicator.png', dpi=100)", "title": "" }, { "docid": "42c1c83bf1442cdb4d3565c5853ca660", "score": "0.5151531", "text": "def test_creation_range_100__nbins_2(self):\n # Create data\n x = list(range(100))\n # Calculate binning\n bins = create_binning(x, 2)\n r = bins.label(x, '{simplei}')\n # Expected result\n self.assertEqual(r[0], '0_50')\n self.assertEqual(r[-1], '50_99')", "title": "" }, { "docid": "1bfc8edfadc680ec451ef6dada88ed85", "score": "0.51463217", "text": "def binning(loop_count, bin_width=500, wanted_bin=9):\n # 1. create the array of bins - bin_arr\n # maybe use for that the hint below!\n# global bin_width, bin_arr, bin_arr2, cube, baddata, Tmax, legend_content\n \n# print \"What file do you want to work with? (e.g. NGC1333_NH3_11_DR1.fits)\"\n# file_name = raw_input()\n file_name = files[loop_count]\n cube = SpectralCube.read(file_name)\n cube = cube.with_spectral_unit(u.km/u.s,velocity_convention='radio')\n Tmax = cube.apply_numpy_function(np.nanmax,axis=0) # array of the maximum values in the spectra of each pixel\n baddata = nd.morphology.binary_dilation(np.isnan(Tmax),np.ones((25,25)))\n Tmax[baddata]=0.0\n Tmax[np.isfinite(Tmax)]\n \n# print \"How many pixels would you like as a bin width in brightness?\"\n# bin_width = input()\n bin_arr = np.sort(Tmax[np.isfinite(Tmax)])\n bin_arr2 = bin_arr[:: - bin_width] # this creates an array of the bin margins, in which every bin has a width of \"bin_width\"\n# print \"The margins of the bins are at:\", bin_arr2\n\n# 2. 
use the array of bins for labeling all the pixels in which of the bins they belong\n# and also doing a plot of the pixel labels\n \n np.digitize(Tmax,bin_arr2)\n# plt.title('plot1')\n# plt.figure(figsize=(5,10))\n# plt.imshow(np.digitize(Tmax,bin_arr2))\n# plt.clf()\n\n\n# print \"What bin value would you like to average the spectrum from?\"\n# wanted_bin = input()\n bins = np.digitize(Tmax,bin_arr2)\n# bins = np.digitize(Tmax,binning()[1])\n y, x = np.where(bins==wanted_bin)\n\n return Tmax, np.digitize(Tmax,bin_arr2), bin_arr2, cube, file_name, bin_width, wanted_bin, y, x", "title": "" }, { "docid": "4c0fbb875580ad58525189a15b0fcba5", "score": "0.5145033", "text": "def plotArrayJones(j, freq, filebase, title, pix_per_deg=1, j_1D=None, gridded=False):\n plt.rcParams['savefig.dpi'] = 300\n\n for i in [0, 1]:\n for ii in [0, 1]:\n if j_1D is not None: # show cut\n plt.subplot(121)\n plt.plot(np.arange(len(j_1D[i, ii])) * 1.0 / pix_per_deg, np.abs(j_1D[i, ii]))\n plt.title('1-D cut')\n plt.xlabel('ZA (degs)')\n plt.ylabel('magnitude')\n plt.subplot(122)\n\n if gridded:\n plt.imshow(np.abs(j[i, ii]), interpolation='none', extent=[0, 90, 360, 0])\n plt.xticks(np.arange(0, 91, 30))\n plt.yticks(np.arange(360, -1, -30))\n else:\n plt.imshow(np.abs(j[i, ii]), interpolation='none')\n plt.suptitle('MWA %s MHz J%s%s voltage mag, %s' % (freq / 1.e6, i, ii, title))\n plt.colorbar(label='magnitude')\n # plt.gca().invert_yaxis()\n plt.savefig('MWA_J%s%s_voltage_mag_%sMHz_%s.png' % (i, ii, freq / 1.e6, filebase))\n plt.clf()\n\n if j_1D is not None: # show cut\n plt.subplot(121)\n plt.plot(np.arange(len(j_1D[i, ii])) * 1.0 / pix_per_deg, np.angle(j_1D[i, ii]) * 180 / math.pi)\n plt.title('1-D cut')\n plt.xlabel('ZA (deg)')\n plt.ylabel('phase (deg)')\n plt.subplot(122)\n\n if gridded:\n plt.imshow(np.angle(j[i, ii]) * 180 / math.pi, interpolation='none', extent=[0, 90, 360, 0])\n plt.xticks(np.arange(0, 91, 30))\n plt.yticks(np.arange(360, -1, -30))\n else:\n plt.imshow(np.angle(j[i, ii]) * 180 / math.pi, interpolation='none')\n plt.suptitle('MWA %s MHz J%s%s voltage phase, %s' % (freq / 1e6, i, ii, title))\n plt.colorbar(label='phase (deg)')\n # plt.gca().invert_yaxis()\n plt.savefig('MWA_J%s%s_voltage_phase_%sMHz_%s.png' % (i, ii, freq / 1.e6, filebase))\n plt.clf()", "title": "" }, { "docid": "3eafddfdf8275426fbd858da81e39f3d", "score": "0.51447386", "text": "def plot_bldg_hist(df, info, value, histrange=None,\n figsize=(6, 5), xlabel=None):\n # Parse building info\n building, full_addr, building_type, cz = _parse_building_info(df, info)\n # Extract rows from the specified building types and climate zones\n group = get_group(df, building_type=building_type, cz=cz)\n # Get values\n building_eui = building[value].iloc[0]\n group_eui = group[value]\n group_eui = group_eui[group_eui.notnull()]\n group_eui_mean = group_eui.mean()\n percentile = stats.percentileofscore(group_eui, building_eui)\n\n # Define xlabel and title\n if xlabel is None:\n if 'fit' in value[1]:\n xlabel = 'Change in annual EUI from 2009-2015\\n(kBtu/ft2/year)'\n elif 'avg' in value[1]:\n xlabel = 'Average annual EUI from 2009-2015 \\n(kBtu/ft2)'\n title = full_addr + '\\nType = ' + building_type + ', CZ = ' + cz\n\n # Plot\n fig = plt.figure(figsize=figsize)\n ax = plt.gca()\n # num_bins = min(20, int(np.ceil(len(group_eui) / 3))) # to fix\n ax = sns.distplot(group_eui,\n hist_kws={'range': histrange},\n kde_kws={'clip': histrange})\n ylim = ax.get_ylim()\n ax.plot([building_eui, building_eui], ylim, color='r', linewidth=2,\n 
label='Current building')\n ax.plot([group_eui_mean, group_eui_mean], ylim, color='b', linewidth=2,\n label='Group average')\n ax.text(building_eui, ylim[1] * 1.05, '{:.1f}%'.format(percentile),\n ha=\"center\", fontsize=16)\n # Set miscell properties\n setproperties(xlabel=xlabel, ylabel='Density', title=title,\n ylim=(ylim[0], ylim[1] * 1.15),\n legend=True, legend_bbox_to_anchor=(1, 1), legendloc=2,\n tickfontsize=18, labelfontsize=18, legendfontsize=16)\n\n return fig, ax", "title": "" }, { "docid": "f25422d8e0a5901972e2539001f32064", "score": "0.5144724", "text": "def plot_all(img_features, label):\n feat_list = img_features[0].keys()\n feat_list.sort()\n f, axes = plt.subplots(nrows=3, ncols=len(feat_list)) \n #f.set_size_inches(8,15)\n for level in range(3): \n for ax, feature in zip(axes[level],feat_list): \n im = ax.imshow(img_features[level][feature]) # set vmin, vmax individually\n #ax.axis('off')\n #ax.tick_params(axis='both', left='off', bottom='off')\n ax.set_xticks([])\n ax.set_yticks([]) \n im.set_cmap('YlGnBu') \n #im.set_cmap('viridis')\n div = make_axes_locatable(ax)\n cax = div.append_axes(\"bottom\", size=\"10%\", pad=0.3)\n plt.colorbar(im, cax=cax, format=\"%.1g\", orientation='horizontal')\n # label rows and colums\n if label == '':\n labels = ['level 0', 'level 1', 'level 2']\n else:\n labels = ['Method 1', 'Method 2', 'Method 3']\n \n for ax, feature in zip(axes[0], feat_list):\n ax.set_title(feature)\n for ax, level in zip(axes[:, 0], labels):\n ax.set_ylabel(level)\n \n \n return(axes,f)", "title": "" }, { "docid": "a75c23a899a31619d44929a766c0b0f6", "score": "0.5117043", "text": "def print_rf_90th_percentile_SGonly_ANOM_plots(model, dest, optimal_k):\n\n rfstarttime = timer(); print(f'{utils.time_now()} - Plotting 90th-perc rainfall over SG now.\\nTotal of {optimal_k} clusters, now printing cluster: ')\n\n RFprec_to_ClusterLabels_dataset = utils.open_pickle(model.RFprec_to_ClusterLabels_dataset_path)\n\n w_lim_sg = 103.5\n e_lim_sg = 104.055\n s_lim_sg = 1.1\n n_lim_sg = 1.55\n\n RFprec_to_ClusterLabels_dataset = RFprec_to_ClusterLabels_dataset.sel(lon=slice(w_lim_sg, e_lim_sg),lat=slice(s_lim_sg, n_lim_sg))\n \n fig, gs_rf_plot = create_multisubplot_axes(optimal_k)\n rf_ds_lon = RFprec_to_ClusterLabels_dataset.lon\n rf_ds_lat = RFprec_to_ClusterLabels_dataset.lat\n\n w = rf_ds_lon.min().values\n e = rf_ds_lon.max().values\n s = rf_ds_lat.min().values\n n = rf_ds_lat.max().values\n\n baseline = np.percentile(RFprec_to_ClusterLabels_dataset.precipitationCal, 90, axis=0)\n \n all_colors = np.vstack(plt.cm.terrain_r(np.linspace(0,1,11)))\n terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)\n\n # fig.suptitle(f'Anomaly for 90th percentile RF over region: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E', fontweight='bold')\n fig.suptitle(f'Anomaly for 90th percentile RF over SG-only: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E', fontweight='bold')\n\n levels = [int(i) for i in np.linspace(-100,100,21)]\n\n\n for clus in range(optimal_k):\n time.sleep(1); gc.collect()\n # data = RFprec_to_ClusterLabels_dataset.where(RFprec_to_ClusterLabels_dataset.cluster==clus, drop=True).sel(\n # lon=slice(model.LON_W, model.LON_E), lat=slice(model.LAT_S, model.LAT_N)).precipitationCal.values\n data = RFprec_to_ClusterLabels_dataset.where(RFprec_to_ClusterLabels_dataset.cluster==clus, drop=True).precipitationCal.values\n mean = np.percentile(data, 90, axis=0)\n mean = mean-baseline\n 
time.sleep(1); gc.collect()\n\n ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())\n ax_rf_plot.xaxis.set_major_formatter(model.lon_formatter)\n ax_rf_plot.yaxis.set_major_formatter(model.lat_formatter)\n ax_rf_plot.set_facecolor('w')\n ax_rf_plot.set_extent([w,e,s,n])\n ax_rf_plot.coastlines(\"50m\", linewidth=.7, color='k')\n ax_rf_plot.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed')\n\n if clus < model.grid_width: # top ticks \n ax_rf_plot.set_xticks([np.ceil(w), np.floor(e)], crs=ccrs.PlateCarree())\n ax_rf_plot.set_xticklabels([np.ceil(w), np.floor(e)], rotation=55)\n ax_rf_plot.xaxis.tick_top()\n else: ax_rf_plot.set_xticks([])\n\n if clus % model.grid_width == model.grid_width - 1: # right-side ticks\n ax_rf_plot.set_yticks([s,n], crs=ccrs.PlateCarree())\n ax_rf_plot.yaxis.set_label_position(\"right\")\n ax_rf_plot.yaxis.tick_right()\n else: ax_rf_plot.set_yticks([])\n \n RF = ax_rf_plot.contourf(rf_ds_lon, rf_ds_lat, mean.T, \n levels,\n cmap=terrain_map, \n extend='neither')\n conts = ax_rf_plot.contour(RF, 'w', linewidths=0)\n ax_rf_plot.clabel(conts, conts.levels, colors='k', inline=True, fmt='%1.f', fontsize=8)\n\n ax_rf_plot.set_title(f\"cluster no.{clus+1}\", loc='left')\n\n time.sleep(1); gc.collect()\n\n if clus == model.cbar_pos: # cbar\n axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%',\n loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),\n bbox_transform=ax_rf_plot.transAxes)\n cbar_rf = fig.colorbar(RF, cax=axins_rf, label='Anomaly of 90th percentile RF (in mm) relative to baseline.', orientation='horizontal', pad=0.01, \n # ticks=np.arange(0,100,10)\n ticks=levels\n )\n cbar_rf.ax.xaxis.set_ticks_position('top')\n cbar_rf.ax.xaxis.set_label_position('top')\n\n print(f'\\n{utils.time_now()}: {clus}.. 
');\n\n print(f\"\\n -- Time taken is {utils.time_since(rfstarttime)}\\n\")\n\n fig.subplots_adjust(wspace=0.05,hspace=0.3)\n fn = f\"{dest}/{model.month_names_joined}_RFplot_90th_percentile_SGonly_ANOM_v1_{model.gridsize}x{model.gridsize}\"\n fig.savefig(fn, bbox_inches='tight', pad_inches=1)\n print(f'file saved @:\\n{fn}')\n plt.close('all')", "title": "" }, { "docid": "e2f4b7a7d1138382f210587f94a969a0", "score": "0.5110043", "text": "def test_creation_range_100__n_bins_8(self):\n # Create data\n x = list(range(100))\n # Calculate binning\n bins = create_binning(x, 8)\n labels = bins.label(x, '{simplei}')\n r = list(bins.labels('{simplei}'))\n r.sort()\n # Expected result\n e = ['88_99', '0_13', '63_76', '39_51', '51_63', '76_88', '26_39', '13_26']\n e.sort()\n # Comparison\n self.assertEqual(r, e)", "title": "" }, { "docid": "e1303e83332c178379f4f1fa4e8bdd72", "score": "0.51054406", "text": "def plot_bin_band_all(corr_fns, titles, plotname=\"binBandingAll.pdf\"):\n\n # Internal jet pTs, pre calibration\n min_pre = 0.5\n max_pre = 20\n pt_pre = np.arange(min_pre, max_pre, 0.5)\n\n n_cols = 3\n n_rows = len(corr_fns) / 3\n if len(corr_fns) % 3 != 0:\n n_rows += 1\n\n c1 = ROOT.TCanvas(\"c_all\",\"\", 600*n_cols, 600*n_rows)\n c1.SetTicks(1, 1)\n c1.Divide(n_cols, n_rows)\n shit_to_keep_showing = [] # need this otherwise python will auto garbage collect\n for ic, corr_fn in enumerate(corr_fns,1):\n c1.cd(ic)\n\n # Post calibration\n pt_post = np.array([pt * corr_fn.Eval(pt) for pt in pt_pre])\n\n # Make coloured blocks to show the bins\n blocks = []\n lower_bound = pt_pre[0] / 4\n if pt_post[-1] % 4 != 0:\n upper_bound = 4*(1+(pt_post[-1] / 4))\n else:\n upper_bound = pt_post[-1]\n gct_bins = np.arange(0, upper_bound+4, 4)\n for i, pt in enumerate(gct_bins):\n # skip if\n if pt+4 < pt_post[0] or pt > pt_post[-1]:\n continue\n b = ROOT.TBox(min_pre, pt, max_pre, pt+4)\n col = 30 if i % 2 else 38\n b.SetFillColorAlpha(col, 0.7)\n b.SetLineStyle(0)\n blocks.append(b)\n shit_to_keep_showing.append(blocks)\n\n # Plot\n gr = ROOT.TGraph(len(pt_pre), pt_pre, pt_post)\n gr.SetMarkerColor(ROOT.kRed)\n gr.SetMarkerStyle(2)\n gr.SetTitle(titles[ic-1]+\";p_{T}^{pre} [GeV];p_{T}^{post} [GeV]\")\n gr.Draw(\"AP\")\n [b.Draw() for b in blocks]\n gr.Draw(\"P\")\n # some helpful lines at 0.5, 5, 10\n for p in [0.5, 5, 10]:\n l_x = ROOT.TLine(p, 0, p, p*corr_fn.Eval(p))\n l_x.SetLineStyle(2)\n l_x.SetLineColor(ROOT.kBlue)\n l_x.Draw()\n shit_to_keep_showing.append(l_x)\n l_y = ROOT.TLine(0, p*corr_fn.Eval(p), p, p*corr_fn.Eval(p))\n l_y.SetLineStyle(2)\n l_y.SetLineColor(ROOT.kBlue)\n l_y.Draw()\n shit_to_keep_showing.append(l_y)\n\n shit_to_keep_showing.append(gr)\n c1.SaveAs(plotname)", "title": "" }, { "docid": "071fb1d245b53fc32f189296258cd1d6", "score": "0.5103337", "text": "def plot_image_completeness_vs_radius():\n radial_bins = [0.0, 2.0, 4.0, 8.0, 16.0]\n\n py.clf()\n for rr in range(len(radial_bins) - 1):\n rmin = radial_bins[rr]\n rmax = radial_bins[rr+1]\n image_completeness_by_radius(rmin=rmin, rmax=rmax, plot=False)\n d = load_image_completeness_by_radius(rmin=rmin, rmax=rmax)\n\n legLabel = '%d\" - %d\"' % (rmin, rmax)\n py.plot(d.mag, d.comp_no_ext, label=legLabel, linewidth=2)\n\n py.legend(loc='lower left')\n py.xlabel('Kp Magnitude')\n py.ylabel('Imaging Completeness')\n py.ylim(0, 1.05)\n py.savefig(workDir + 'plots/image_completeness_vs_radius.png')\n py.savefig(workDir + 'plots/image_completeness_vs_radius.eps')", "title": "" }, { "docid": "88b6987654c2c00cc319c94bdad25f50", "score": 
"0.5097529", "text": "def show_label_distribution(data_df, ind2class, title=''):\n\n labels = pd.Series(data_df.label).replace(ind2class)\n\n fig = plt.figure(figsize=(20,5))\n ax = plt.subplot(111)\n\n labels.value_counts().sort_index(0).plot(kind='bar')\n plt.xticks(color='r', rotation=90, fontsize=15)\n plt.yticks(color='r', fontsize=15)\n plt.title(title, color='red', fontsize=15)\n for p in ax.patches:\n ax.annotate(str(p.get_height()), (p.get_x(), p.get_height()))\n\n return fig", "title": "" }, { "docid": "111f6748a21cb6a5a8b07a0cb79a874e", "score": "0.508674", "text": "def scan_mv_pls_beta_plot_relevant(df_beta, labels_group, inp):\n if len(labels_group) > 2:\n # Create plot \n fig = plt.figure(figsize = inp['mv_figsize_vip'])\n # Add plots for multiple axes\n ax1=fig.add_subplot(111, label=\"1\", frameon = True)\n ax2=fig.add_subplot(111, label=\"2\", frameon = False)\n ax2.get_xaxis().set_visible(False)\n ax2.yaxis.tick_right()\n #ax2.get_yaxis().set_visible(False) \n\n mapper_sorting = dict(zip(inp['pre_list_relevant'],range(len(inp['pre_list_relevant']))))\n df_beta = df_beta[df_beta['Component Name'].isin(set(inp['pre_list_relevant']))].copy().reset_index(drop=True)\n df_beta['sorter'] = df_beta['Component Name'].map(mapper_sorting)\n df_beta = df_beta.sort_values(by = ['sorter'], ascending = False).copy().reset_index(drop=True)\n df_beta = df_beta.filter([item for item in df_beta.columns if item != 'sorter']).copy()\n\n for item in labels_group:\n df_hold = df_beta.filter(['Component Name', item+'_mean', item+'_ci_lower', item+'_ci_upper', item+'_interval', item+'_relevant']).copy()\n\n color = inp['mapper_group_color'][item]\n markersize = 3\n\n df_rel = df_hold[df_hold[item+'_relevant'] == True].copy()\n xerr_rel = [df_rel[item+'_mean']-df_rel[item+'_ci_lower'], df_rel[item+'_ci_upper']-df_rel[item+'_mean']]\n ax1.errorbar(x = df_rel[item+'_mean'], y = df_rel.index, xerr=xerr_rel, linewidth = 0.8, color = color, fmt = 'o', ecolor=color, markersize = markersize, elinewidth=1, capsize=markersize, label = item+r' ($\\beta$ $\\neq$ 0)')\n \n df_notrel = df_hold[df_hold[item+'_relevant'] == False].copy()\n xerr_notrel = [df_notrel[item+'_mean']-df_notrel[item+'_ci_lower'], df_notrel[item+'_ci_upper']-df_notrel[item+'_mean']]\n ax1.errorbar(x = df_notrel[item+'_mean'], y = df_notrel.index, xerr=xerr_notrel, linewidth = 0.8, color = 'grey', fmt = 'o', ecolor='grey', markersize = markersize, elinewidth=1, capsize=markersize, label = item+r' ($\\beta$ = 0)')\n\n if 0 in df_hold[item+'_interval']:\n ax1.axvline(0, linewidth = 0.8, color = 'red')\n\n # Set grid\n ax1.set_axisbelow(True)\n ax1.yaxis.grid() \n plt.locator_params(axis = 'x', nbins=4)\n\n # Scale axis\n ax1.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n ax2.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n\n plt.locator_params(axis='x', nbins=5)\n ax1.yaxis.set_ticks(df_hold.index)\n ax2.yaxis.set_ticks(df_hold.index)\n ax1.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n ax2.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n ax1.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n ax2.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n \n plt.draw()\n\n # Set standard label\n #labels1 = [label for label in df_hold['Component Name']]\n ax1.set_xticklabels(ax1.get_xticklabels(), fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_yticklabels([None]*len(ax1.get_yticklabels()), fontsize = inp['mv_labelsize'], va = 'center', ha = 'right', fontweight 
= 'bold')\n \n # Set full label\n if inp['mv_label_vip'] == True:\n labels2 = [inp['mapper_name'][label] for label in df_hold['Component Name']]\n else:\n labels2 = [label for label in df_hold['Component Name']]\n\n ax2.set_yticklabels(labels2, fontsize = inp['mv_labelsize'], va = 'center', ha = 'left', fontweight = 'bold')\n # Set label\n ax1.set_xlabel(r'Beta coefficients, $\\beta$', fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_ylabel(None)\n\n # Legend\n legendproperties = {'size': inp['mv_labelsize'], 'weight': 'bold'}\n # Sort both labels and handles by labels\n handles, labels = ax1.get_legend_handles_labels()\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n leg = ax1.legend(handles, labels, bbox_to_anchor=(1.0, 1.0), loc=\"lower right\", frameon = False, prop = legendproperties)\n \n # Name append \n if inp['mv_scaling'] == True:\n app_scale = f'_{inp[\"mv_scaling_method\"]}'\n else:\n app_scale = ''\n\n list_bbox = [leg]\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_SN{inp[\"pre_signal_noise\"]}_relevant{app_scale}.png'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', dpi = 800)\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_SN{inp[\"pre_signal_noise\"]}_relevant{app_scale}.svg'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', format = 'svg')\n plt.close(fig)\n else:\n # Create plot \n fig = plt.figure(figsize = inp['mv_figsize_vip'])\n # Add plots for multiple axes\n ax1=fig.add_subplot(111, label=\"1\", frameon = True)\n ax2=fig.add_subplot(111, label=\"2\", frameon = False)\n ax2.get_xaxis().set_visible(False)\n ax2.yaxis.tick_right()\n #ax2.get_yaxis().set_visible(False) \n\n mapper_sorting = dict(zip(inp['pre_list_relevant'],range(len(inp['pre_list_relevant']))))\n df_beta = df_beta[df_beta['Component Name'].isin(set(inp['pre_list_relevant']))].copy().reset_index(drop=True)\n df_beta['sorter'] = df_beta['Component Name'].map(mapper_sorting)\n df_beta = df_beta.sort_values(by = ['sorter'], ascending = False).copy().reset_index(drop=True)\n df_beta = df_beta.filter([item for item in df_beta.columns if item != 'sorter']).copy()\n\n df_hold = df_beta.filter(['Component Name', 'mean', 'ci_lower', 'ci_upper', 'interval', 'relevant']).copy()\n\n color = 'black'\n markersize = 3\n\n df_rel = df_hold[df_hold['relevant'] == True].copy()\n xerr_rel = [df_rel['mean']-df_rel['ci_lower'], df_rel['ci_upper']-df_rel['mean']]\n ax1.errorbar(x = df_rel['mean'], y = df_rel.index, xerr=xerr_rel, linewidth = 0.8, color = color, fmt = 'o', ecolor=color, markersize = markersize, elinewidth=1, capsize=markersize, label = '$\\beta$ $\\neq$ 0')\n \n df_notrel = df_hold[df_hold['relevant'] == False].copy()\n xerr_notrel = [df_notrel['mean']-df_notrel['ci_lower'], df_notrel['ci_upper']-df_notrel['mean']]\n ax1.errorbar(x = df_notrel['mean'], y = df_notrel.index, xerr=xerr_notrel, linewidth = 0.8, color = 'grey', fmt = 'o', ecolor='grey', markersize = markersize, elinewidth=1, capsize=markersize, label = '$\\beta$ = 0')\n\n if 0 in df_hold['interval']:\n ax1.axvline(0, linewidth = 0.8, color = 'red')\n\n # Set grid\n ax1.set_axisbelow(True)\n ax1.yaxis.grid() \n plt.locator_params(axis = 'x', nbins=4)\n\n # Scale axis\n ax1.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n ax2.set_ylim(df_hold.index[0]-1,df_hold.index[-1]+1)\n\n plt.locator_params(axis='x', nbins=5)\n ax1.yaxis.set_ticks(df_hold.index)\n ax2.yaxis.set_ticks(df_hold.index)\n ax1.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n 
ax2.tick_params(axis = 'both', labelsize = inp['mv_labelsize'])\n ax1.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n ax2.tick_params(axis = 'y', length = 0, labelsize = inp['mv_labelsize'])\n \n plt.draw()\n\n # Set standard label\n #labels1 = [label for label in df_hold['Component Name']]\n ax1.set_xticklabels(ax1.get_xticklabels(), fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_yticklabels([None]*len(ax1.get_yticklabels()), fontsize = inp['mv_labelsize'], va = 'center', ha = 'right', fontweight = 'bold')\n \n # Set full label\n if inp['mv_label_vip'] == True:\n labels2 = [inp['mapper_name'][label] for label in df_hold['Component Name']]\n else:\n labels2 = [label for label in df_hold['Component Name']]\n\n ax2.set_yticklabels(labels2, fontsize = inp['mv_labelsize'], va = 'center', ha = 'left', fontweight = 'bold')\n # Set label\n ax1.set_xlabel(r'Beta coefficients, $\\beta$', fontsize = inp['mv_labelsize'], fontweight = 'bold')\n ax1.set_ylabel(None)\n\n # Legend\n legendproperties = {'size': inp['mv_labelsize'], 'weight': 'bold'}\n # Sort both labels and handles by labels\n handles, labels = ax1.get_legend_handles_labels()\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n leg = ax1.legend(handles, labels, bbox_to_anchor=(1.0, 1.0), loc=\"lower right\", frameon = False, prop = legendproperties)\n \n # Name append \n if inp['mv_scaling'] == True:\n app_scale = f'_{inp[\"mv_scaling_method\"]}'\n else:\n app_scale = ''\n\n list_bbox = [leg]\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_SN{inp[\"pre_signal_noise\"]}_relevant{app_scale}.png'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', dpi = 800)\n fig.savefig(inp['path_evaluation_mv'].joinpath(f'beta_pls_SN{inp[\"pre_signal_noise\"]}_relevant{app_scale}.svg'), bbox_extra_artists = (list_bbox), bbox_inches = 'tight', format = 'svg')\n plt.close(fig)\n return", "title": "" }, { "docid": "6f1342dcdc75980f70276ab48879cc20", "score": "0.50851643", "text": "def test_creation_range_100__n_bins_20(self):\n # Create data\n x = list(range(100))\n\n # Calculate binning\n bins = create_binning(x, 20)\n r = bins.label(x, '{simplei}')\n\n # Expected result\n self.assertEqual(r[0], '0_5')\n self.assertEqual(r[6], '5_10')\n self.assertEqual(r[-1], '95_99')", "title": "" }, { "docid": "6111c40f258cd7268be6477e9d0d50cf", "score": "0.50843805", "text": "def labels(self):", "title": "" }, { "docid": "3fe1fcd8a65c4185e5af7c4daca7bd90", "score": "0.5076021", "text": "def showNLLContour(exp,obs,name,title,method='linear',levels=[2.30,4.61,9.21],levelLabels=['68.3%','90%','99%']):\n\n plt.clf()\n fig, ax = plt.subplots() \n fig.set_size_inches(10,10)\n\n cntr=[]\n for data,ls,c,label in [(exp,'--','gray','Exp.'),(obs,'-','k','Obs.')]:\n\n data=np.array(data)\n x=data[:,0]\n y=data[:,1]\n z=data[:,2]\n\n #interpolate in a regular grid and draw the contour\n xi = np.linspace(min(x),max(x),1000)\n yi = np.linspace(min(y),max(y),1000)\n zi = griddata((x, y), z, (xi[None,:], yi[:,None]), method=method)\n cntr.append( ax.contour(xi, yi, 2*zi, levels=levels, linewidths=1.0, linestyles=ls, colors=c) )\n cntr[-1].collections[0].set_label(r'%s'%label)\n\n #add contour level names\n fmt = {}\n for l, s in zip(levels,levelLabels):\n fmt[l] = s\n ax.clabel(cntr[-1], cntr[-1].levels[:], inline=True, fmt=fmt, fontsize=26)\n\n plt.xlabel(r'$\\mu_{t\\bar{t}}$', fontsize=28)\n plt.ylabel(r'$\\mu_{DY}$', fontsize=28)\n plt.ylim(0.6,1.4)\n plt.xlim(0,2)\n ax.legend(framealpha=0.0, 
fontsize=20, loc='upper right', numpoints=1)\n finalizePlot(plt,ax,name,title)", "title": "" }, { "docid": "8a2c1c942896d0daf8f4e79e10142e46", "score": "0.5068987", "text": "def get_labels(ab_img):\n pass", "title": "" }, { "docid": "8d624b98783b4c7045b9379ba3bf29d1", "score": "0.5068401", "text": "def show_opt_categories(df, feat, gbt, num_splits, sample_ratio = 0.9, bagging_rounds = 10, trg = \"TARGET\", verbose=False, figure_file=None):\n splits = {}\n auc_list = []\n for i in range(bagging_rounds):\n tr_arr, labels = get_data_sample(df,feat,trg,sample_ratio)\n gbt.fit(tr_arr,labels)\n pred = gbt.predict_proba(tr_arr)[:,1]\n auc_list.append(roc_auc_score(labels,pred))\n splits = extract_splits_from_gbt(tr_arr,gbt,splits)\n for val in splits:\n splits[val] = float(splits[val]) / bagging_rounds\n binned_values, splits = get_auto_categories(df[feat],splits,num_splits,verbose=verbose)\n stats, iv = calculate_stats_for_bins(binned_values,df[trg],sorted(splits.keys()),verbose=False)\n print(stats[[\"lower_bound\",\"upper_bound\",\"all_count\",\"def_count\",\"PD\",\"WOE\"]])\n #print(\"AUC: %f, IV:%f\" % (np.mean(auc_list),iv))\n plotter(df[feat],binned_values,splits,figure_file=figure_file)", "title": "" }, { "docid": "259041401adb77c4aca728dd78708e6a", "score": "0.50667465", "text": "def fill_ugc_counties(self, data, bins=np.arange(0,101,10), **kwargs):\n cmap = kwargs.get('cmap', maue())\n norm = mpcolors.BoundaryNorm(bins, cmap.N)\n \n pgconn = psycopg2.connect(database='postgis', host='iemdb', \n user='nobody')\n cursor = pgconn.cursor()\n\n cursor.execute(\"\"\"\n SELECT ugc, ST_asEWKB(simple_geom) from ugcs WHERE end_ts is null\n and substr(ugc,3,1) = 'C'\n \"\"\")\n akpatches = []\n hipatches = []\n prpatches = []\n patches = []\n for row in cursor:\n ugc = row[0]\n if data.get(ugc) is None:\n c = 'white'\n else:\n c = cmap( norm([data[ugc],]) )[0]\n geom = loads( str(row[1]) )\n for polygon in geom:\n if polygon.exterior is None:\n continue\n a = np.asarray(polygon.exterior)\n if ugc[:2] == 'AK':\n if self.ak_ax is None:\n continue\n x,y = self.ak_map(a[:,0], a[:,1])\n a = zip(x,y)\n p = Polygon(a, fc=c, ec='None', zorder=2, lw=.1)\n akpatches.append(p)\n pass\n elif ugc[:2] == 'HI':\n if self.hi_ax is None:\n continue\n x,y = self.hi_map(a[:,0], a[:,1])\n a = zip(x,y)\n p = Polygon(a, fc=c, ec='None', zorder=2, lw=.1)\n hipatches.append(p)\n elif ugc[:2] == 'PR':\n if self.pr_ax is None:\n continue\n x,y = self.pr_map(a[:,0], a[:,1])\n a = zip(x,y)\n p = Polygon(a, fc=c, ec='None', zorder=2, lw=.1)\n prpatches.append(p)\n else:\n x,y = self.map(a[:,0], a[:,1])\n a = zip(x,y)\n p = Polygon(a, fc=c, ec='None', zorder=2, lw=.1)\n patches.append(p)\n\n if len(patches) > 0:\n self.ax.add_collection(\n PatchCollection(patches,match_original=True))\n if len(akpatches) > 0 and self.ak_ax is not None:\n self.ak_ax.add_collection(\n PatchCollection(akpatches,match_original=True))\n if len(hipatches) > 0 and self.hi_ax is not None:\n self.hi_ax.add_collection(\n PatchCollection(hipatches,match_original=True))\n if len(prpatches) > 0 and self.pr_ax is not None:\n self.pr_ax.add_collection(\n PatchCollection(prpatches,match_original=True))\n if kwargs.has_key('cmap'):\n del kwargs['cmap']\n self.draw_colorbar(bins, cmap, norm, **kwargs)", "title": "" }, { "docid": "20d0c2b8ad55ff0fce2bb7c3777da600", "score": "0.5066545", "text": "def grapher(data, labels, per_channel):\r\n \r\n def autolabel(rects):\r\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\r\n for rect 
in rects:\r\n height = rect.get_height()\r\n ax.annotate('{0:.2f}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3 if height>0 else -18), # 3 points vertical offset\r\n size=8,\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')\r\n x = np.arange(len(data[0][0]))\r\n width = 0.2\r\n offset = len(labels)/2\r\n\r\n fig, ax = plt.subplots()\r\n title = \"Extrema DNL (12 bit)\" \r\n #ax.set_title(title, fontsize = 40)\r\n ax.set_ylabel(title, fontsize = 25)\r\n for i, lab in enumerate(labels):\r\n autolabel(ax.bar(x+(i-offset)*width, data[i][1], width, label=lab))\r\n for i, lab in enumerate(labels):\r\n autolabel(ax.bar(x+(i-offset)*width, data[i][0], width, label=lab))\r\n if not per_channel:\r\n for i, lab in enumerate(labels):\r\n ax.errorbar(x+(i-offset)*width, data[i][1], yerr=data[i][6], ecolor='black', fmt='none')\r\n ax.errorbar(x+(i-offset)*width, data[i][0], yerr=data[i][5], ecolor='black', fmt='none')\r\n if per_channel:\r\n ax.set_xlabel('ADC Channel #', fontsize = 25)\r\n ax.set_xticks(x)\r\n #ax.set_ylim(-.8, 0.8)\r\n ax.tick_params(axis = 'both', which = 'major', labelsize = 15)\r\n ax.legend(bbox_to_anchor=(1.1, 1.05))\r\n else:\r\n ax.set_xlabel('(Averaged over all channels)')\r\n #ax.set_xticks([])\r\n plt.xticks(x, [\"Warm\", \"Cold\"])\r\n ax.legend(bbox_to_anchor=(.9, .9))\r\n\r\n fig, ax = plt.subplots()\r\n title = \"Extrema INL (12 bit)\" \r\n #ax.set_title(title, fontsize = 40)\r\n ax.set_ylabel(title, fontsize = 25)\r\n #ax.set_ylabel(\"|Extrema_1-Extrema_5|/Extrema_5\")\r\n for i, lab in enumerate(labels):\r\n #autolabel(ax.bar(x+(i-offset)*width, data[i][3], width, label=lab, color='b'))\r\n autolabel(ax.bar(x+(i-offset)*width, data[i][3], width, label=lab))\r\n for i, lab in enumerate(labels):\r\n autolabel(ax.bar(x+(i-offset)*width, data[i][2], width, label=lab))\r\n if not per_channel:\r\n for i, lab in enumerate(labels):\r\n ax.errorbar(x+(i-offset)*width, data[i][3], yerr=data[i][8], ecolor='black', fmt='none')\r\n ax.errorbar(x+(i-offset)*width, data[i][2], yerr=data[i][7], ecolor='black', fmt='none')\r\n if per_channel:\r\n ax.set_xlabel('ADC Channel #', fontsize = 25)\r\n ax.set_xticks(x)\r\n #ax.set_ylim(-2.5, 2.5)\r\n ax.tick_params(axis = 'both', which = 'major', labelsize = 15)\r\n ax.legend(bbox_to_anchor=(1.1, 1.05))\r\n else:\r\n ax.set_xlabel('(Averaged over all channels)')\r\n ax.set_xticks([])\r\n #plt.xticks(x, [\"Warm\", \"Cold\"])\r\n ax.legend(bbox_to_anchor=(.9, .9))\r\n\r\n #plt.gcf().savefig('inl.png', dpi=500)\r\n plt.show()", "title": "" }, { "docid": "55ac8ea890f6017009d16cc81caee3f6", "score": "0.50662553", "text": "def binD(df1, df2, labels, prefix):\n bins=9\n suffixes = [' %s'%(x) for x in labels]\n dot = df1.merge(df2,on=['SNP_A', 'SNP_B'], suffixes=suffixes)\n vmax = np.max((np.max(dot.loc[:, 'MAF_A%s'%(suffixes[0])]),\n np.max(dot.loc[:, 'MAF_B%s'%(suffixes[0])]))) \n vmin = np.min((np.min(dot.loc[:, 'MAF_A%s'%(suffixes[0])]),\n np.min(dot.loc[:, 'MAF_B%s'%(suffixes[0])]))) \n step = (vmax - vmin)/bins\n MAF_mean = np.mean([dot.loc[:,'MAF_A%s'%(suffixes[0])], \n dot.loc[:,'MAF_B%s'%(suffixes[0])]], axis=0)\n MAF_meanp2 = np.mean([dot.loc[:,'MAF_A%s'%(suffixes[1])], \n dot.loc[:,'MAF_B%s'%(suffixes[1])]], axis=0)\n nor = norm(MAF_meanp2, 0, 1) \n rang = np.arange(vmin, vmax + step, step)\n subs = getsubsets(dot, rang, MAF_mean, vmin, nor)\n #ncols, nrows = setcolsrows(len(subs))\n nrows = int(np.ceil(len(subs)/3))\n fig, axes = plt.subplots(ncols=3, nrows=nrows, sharey=True, 
sharex=True)\n axs = axes.ravel()\n cNorm = plt.matplotlib.colors.Normalize(vmin=0, vmax=1)\n x, y ='Dprime%s'%(suffixes[0]), 'Dprime%s'%(suffixes[1])\n for p, subset in enumerate(subs):\n subset.plot(kind='scatter', x=x, y=y, c='mean2', colormap='inferno', \n ax=axs[p])\n #subset.plot(kind='scatter', x='D%s'%(suffixes[0]), y='D%s'%(suffixes[1]\n #),\n # c='mean2', colormap='inferno', ax=axs[p])\n plt.xlabel('D%s'%(suffixes[0]))\n plt.savefig('%s_binnedD.png'%prefix)\n plt.close()", "title": "" }, { "docid": "75bcfa794065f3188e45a69981c4771b", "score": "0.5065433", "text": "def plot1DReliability(ops,inv,metric,bins,s=[0.0,1.0],xlabel='metric'):\n bins=np.array(bins)\n opspcs=passes(ops,s=s)\n opsfps=~opspcs\n invpcs=passes(inv,s=s)\n invfps=~invpcs\n \n nopspcs,bb,patch=plt.hist(ops[metric][opspcs],bins=bins)\n nopsfps,bb,patch=plt.hist(ops[metric][opsfps],bins=bins)\n ninvpcs,bb,patch=plt.hist(inv[metric][invpcs],bins=bins)\n ninvfps,bb,patch=plt.hist(inv[metric][invfps],bins=bins)\n \n eff=ninvfps.astype(float)/(ninvfps+ninvpcs).astype(float)\n plt.clf()\n\n midbins=(bins[:-1]+bins[1:])/2.0 \n \n rel=rvs.arrayReliability(nopsfps.astype(float),nopspcs.astype(float),eff)\n print rel\n #plt.figure()\n plt.plot(midbins,rel,'-ko',lw=2.5,ms=5)\n if xlabel=='metric':\n plt.xlabel(metric)\n else:\n plt.xlabel(xlabel)\n plt.ylabel('Reliability')\n #plt.show()", "title": "" }, { "docid": "8971edb280418930436d0fbe0d267fd7", "score": "0.5064541", "text": "def occupancy_plif_plot():\n pass", "title": "" }, { "docid": "831a99831822ead394e32019e62ee822", "score": "0.5060745", "text": "def draw_constraints(self,titles,**kwargs):\n\n\t\tdata = np.stack([indiv[\"constraints\"] for indiv in self.aggregate],axis=1)\n\t\tsns.heatmap(data,vmin=0,vmax=1,cmap=\"coolwarm_r\",yticklabels=titles)\n\t\tplt.yticks(rotation=\"horizontal\")", "title": "" }, { "docid": "463072810e13fb0cb16f117e2617cc06", "score": "0.5051524", "text": "def plot_barh(self, amounts, labels, **kwargs):\n # if \"filename\" in kwargs:\n # # This allows to use without DISPLAY\n # import matplotlib\n # matplotlib.use(\"Agg\")\n if len(labels) == 0:\n # If no labels to print, return empty list.\n return []\n if kwargs.get(\"strict_n\", False):\n while len(amounts) < kwargs.get(\"n_most_prominent_detections_per_type\", len(amounts)):\n amounts.append(0)\n labels.append(\"\")\n\n # Override default value:\n if \"image_size\" not in kwargs:\n self.height = len(labels) / 3 + 2\n\n fig, ax = plt.subplots(figsize=(self.width, self.height), dpi=self.dpi)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.tick_params(axis='y',\n which='both',\n left='off',\n right='off')\n ax.tick_params(axis='x',\n direction='out')\n ax.set_yticklabels([])\n\n def delta_formatter(x, pos):\n \"\"\"Custom formatter\n\n :param x: a tick value as seconds\n :param pos: a position (required even if unused)\n :return: corresponding tick label\n \"\"\"\n if kwargs.get(\"sort_method\", None) == \"count\":\n return int(x)\n\n if self.number_format == \"seconds\":\n format_string = u'{}'.format(int(x))\n return format_string\n d = datetime.timedelta(seconds=x)\n format_string = u'{}'.format(str(d))\n return format_string\n\n def time_label_formatter(x, pos):\n \"\"\"Custom formatter time_label: ['timestamp', 'timestamp_and_percent', 'percent']\n\n :param x: a tick value as seconds\n :param pos: a position (required even if unused)\n :return: corresponding tick label\n \"\"\"\n if 
kwargs.get(\"sort_method\", None) == \"count\":\n return int(x)\n if kwargs.get('video_length') is not None:\n percent = 100.0*x/kwargs['video_length']\n if percent > 100.0:\n n = 0\n percent = 100.0\n elif percent > 10.0:\n n = 0\n elif percent > 1.0:\n n = 1\n elif percent > 0.1:\n n = 2\n else:\n n = 3\n else:\n percent = None\n\n if kwargs.get(\"time_label\") is not None:\n if kwargs[\"time_label\"] == \"timestamp_and_percent\" and percent is not None:\n format_string = u'{}, {:.{n}f}%'.format(str(datetime.timedelta(seconds=x)), percent, n=n)\n elif kwargs[\"time_label\"] == \"percent\" and percent is not None:\n format_string = u'{:.{n}f}%'.format(percent, n=n)\n else: # kwargs[\"time_label\"] == \"timestamp\":\n format_string = str(datetime.timedelta(seconds=x))\n elif kwargs.get('video_length') is not None:\n format_string = u'{:.{n}f}%'.format(percent, n=n)\n else:\n format_string = str(datetime.timedelta(seconds=x))\n return format_string\n\n ax.locator_params(axis='x', nbins=kwargs.get('max_ticks', 6), integer=True)\n\n formatter = matplotlib.ticker.FuncFormatter(delta_formatter)\n ax.xaxis.set_major_formatter(formatter)\n plt.xticks(fontproperties=self.ticklabel_prop)\n\n if \"title\" in kwargs:\n plt.title(kwargs[\"title\"], fontproperties=self.prop)\n\n # amounts is the amount of rest of the labels\n y = numpy.arange(len(amounts)) + 0.5\n rects = ax.barh(y, amounts,\n edgecolor=kwargs.get(\"bar_color\", config[\"bar_edge_color\"]),\n color=kwargs.get(\"bar_color\", config[\"bar_color\"]),\n align=\"center\")\n text_in_bar_color, text_out_bar_color = text_color(kwargs.get(\"bar_color\", config[\"bar_color\"]))\n\n # ax.set_xlabel(\"Total detection duration\")\n if kwargs.get(\"label_location\") == \"left_side\":\n plt.yticks(y, labels)\n bar_width = int(rects[0].get_width())\n for index, rect in enumerate(rects):\n width = rect.get_width()\n yloc = rect.get_y() + rect.get_height() * 0.5\n if kwargs.get(\"label_location\") == \"left_side\":\n if width > bar_width * 0.5:\n xloc = width * 0.98\n align = \"right\"\n #color = config[\"text_in_bar_color\"]\n color = text_in_bar_color\n else:\n xloc = width + bar_width / 100.0\n align = \"left\"\n #color = config[\"text_out_bar_color\"]\n color = text_out_bar_color\n\n ax.text(xloc, yloc, time_label_formatter(int(width), None), horizontalalignment=align,\n verticalalignment='center', color=color, weight='bold',\n fontproperties=self.prop)\n else: # on_bar\n\n # -- Label in bar, left alignment; Percent in bar, right alignment -------------------------------------\n if width > bar_width * 0.75:\n xloc_label = bar_width * 0.02\n xloc = width - bar_width * 0.02\n label_color = text_in_bar_color # config[\"text_in_bar_color\"]\n color = text_in_bar_color # config[\"text_in_bar_color\"]\n label_align = \"left\"\n ax.text(xloc, yloc, time_label_formatter(int(width), None), horizontalalignment=\"right\",\n verticalalignment='center', color=color, weight='bold',\n fontproperties=self.prop)\n\n # -- Label in bar, left alignment; Percent outside bar, left alignment ---------------------------------\n elif width > bar_width * 0.5:\n xloc_label = bar_width * 0.02\n xloc = width + bar_width * 0.02\n label_color = text_in_bar_color # config[\"text_in_bar_color\"]\n color = text_out_bar_color # config[\"text_out_bar_color\"]\n label_align = \"left\"\n ax.text(xloc, yloc, time_label_formatter(int(width), None), horizontalalignment=\"left\",\n verticalalignment='center', color=color, weight='bold',\n fontproperties=self.prop)\n\n # -- Label outside 
bar, left alignment; Percent inside bar, right alignment ----------------------------\n elif width > bar_width * 0.1:\n xloc_label = width + bar_width * 0.02\n xloc = width - bar_width * 0.02\n label_color = text_out_bar_color # config[\"text_out_bar_color\"]\n color = text_in_bar_color # config[\"text_in_bar_color\"]\n label_align = \"left\"\n ax.text(xloc, yloc, time_label_formatter(int(width), None), horizontalalignment=\"right\",\n verticalalignment='center', color=color, weight='bold',\n fontproperties=self.prop)\n\n # -- Label outside bar, left alignment; Percent not visible --------------------------------------------\n else: # width <= bar_width * 0.5\n xloc_label = width + bar_width * 0.02\n label_color = text_out_bar_color # config[\"text_out_bar_color\"]\n label_align = \"left\"\n ax.text(xloc_label, yloc, labels[index], horizontalalignment=label_align,\n verticalalignment='center', color=label_color, weight='bold',\n fontproperties=self.prop)\n\n fig.gca().invert_yaxis()\n fig.set_tight_layout(True)\n\n if self.filename is not None:\n try:\n plt.savefig(self.filename, transparent=self.transparency, dpi=self.dpi)\n except ValueError as msg:\n print(\"Invalid output-file: {}\".format(msg), file=sys.stderr)\n else:\n plt.close(fig)\n return [self.filename]\n plt.close(fig)\n else:\n fig.canvas.set_window_title(config[\"window_title\"])\n\n plt.show()", "title": "" }, { "docid": "d71c00a099b2f5f48943777d4563d980", "score": "0.50493354", "text": "def calc_and_plot(houses: List[str], features: List[str], house_data: List[np.array],\n idx: int) -> None:\n data_col = []\n for idx_house, _ in enumerate(houses):\n data_col.append(np.array(house_data[idx_house][:, idx], dtype=float))\n plot_histogram(data_col, legend=houses, title=features[idx], x_label='Marks',\n y_label='A number of students')", "title": "" }, { "docid": "48974e6859fdebd123635f83f80dbe95", "score": "0.5048141", "text": "def plot_results(metrics, classifiers, figname, *args):\r\n \r\n num_metrics = len(metrics)\r\n num_classifiers = len(args) - 1\r\n \r\n ind = np.arange(num_metrics) # the x locations for the groups\r\n width = 0.7 / num_classifiers # the width of the bars\r\n \r\n fig, ax = plt.subplots()\r\n \r\n # loop through classifiers\r\n rects_list = []\r\n for i in xrange(num_classifiers):\r\n results = args[i+1] # skip baseline\r\n \r\n # mean\r\n means = [it[0] for it in results]\r\n rects = ax.bar(ind + i * width, means, width, label=classifiers[i])\r\n rects_list.append(rects)\r\n \r\n # errors\r\n if len(it) == 3:\r\n errs = [(it[0] - it[1], it[2] - it[0]) for it in results]\r\n ax.errorbar(ind + i * width, means, yerr=np.array(errs).T, fmt='none', ecolor='k')\r\n \r\n # baseline\r\n results = args[0]\r\n for i in xrange(num_metrics) :\r\n xlim = (ind[i] - 0.8 * width, ind[i] + num_classifiers * width - 0.2 * width)\r\n \r\n # mean\r\n mean = results[i][0]\r\n plt.plot(xlim, [mean, mean], color='k', linestyle='-', linewidth=2)\r\n \r\n # errors\r\n if len(results[i]) == 3:\r\n err_low = results[i][1]\r\n err_high = results[i][2]\r\n plt.plot(xlim, [err_low, err_low], color='k', linestyle='--', linewidth=2)\r\n plt.plot(xlim, [err_high, err_high], color='k', linestyle='--', linewidth=2)\r\n \r\n ax.set_ylabel('Score')\r\n ax.set_ylim(0, 1)\r\n ax.set_xticks(ind + width / num_classifiers)\r\n ax.set_xticklabels(metrics)\r\n ax.legend()\r\n \r\n def autolabel(rects):\r\n \"\"\"Attach a text label above each bar displaying its height\"\"\"\r\n for rect in rects:\r\n height = rect.get_height()\r\n 
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\r\n '%.2f' % height, ha='center', va='bottom')\r\n \r\n for rects in rects_list:\r\n autolabel(rects)\r\n # save fig instead of show because show breaks my computer :(\r\n plt.savefig(figname)", "title": "" }, { "docid": "ca94c85dbd858341a410a2b51442b97f", "score": "0.504694", "text": "def plot_features(data, features):\n for n, f in enumerate(features):\n plt.subplot(numpy.ceil(len(features) / 5.), 5, n+1)\n mask = data[f].values > -999\n mask_label = data.label == 1\n x1 = max(numpy.min(data.loc[mask & mask_label, f]), numpy.min(data.loc[mask & ~mask_label, f]))\n x2 = min(numpy.max(data.loc[mask & mask_label, f]), numpy.max(data.loc[mask & ~mask_label, f]))\n plt.hist(data.loc[mask & mask_label, f].values, bins=80, alpha=0.3, normed=True, \n weights=data.loc[mask & mask_label, 'N_sig_sw'].values, range=(x1, x2), label='positive')\n plt.hist(data.loc[mask & ~mask_label, f].values, bins=80, alpha=0.3, normed=True, \n weights=data.loc[mask & ~mask_label, 'N_sig_sw'].values, range=(x1, x2), label='negative')\n plt.xlabel(f)\n plt.title('%1.5f' % roc_auc_score(data.label, data[f], sample_weight=data.N_sig_sw))\n plt.legend()", "title": "" }, { "docid": "dd57f53d38e21f1fa419790a7f07a7b5", "score": "0.50447005", "text": "def plot_p(maps, sub, title, nbin, coord, gr=30):\n sorted_maps = [np.sort(m) for m in maps]\n lims = np.arange(nbin) / nbin\n p = np.zeros(npix)\n for ilim, lim in enumerate(lims):\n for m, msorted in zip(maps, sorted_maps):\n val = msorted[int(npix * lim)]\n p[m > val] = lim\n hp.mollview(p, xsize=2400, sub=sub, title=title, coord=coord, cmap=\"bwr\")\n hp.graticule(gr)\n return p", "title": "" }, { "docid": "3b8a3056470b5a31e3d0794dc4b683e4", "score": "0.50443506", "text": "def group_lf(mbins=(12.00, 12.34, 12.68, 13.03, 13.37, 13.71, 14.05),\n mdbins=(12, 14, 14.5, 15, 16.5),\n ndbins=(1, 1.8, 2.2, 2.5, 5), nmin=5, nmax=500, edge_min=0.9,\n Mmin=-25, Mmax=-14, nbin=22, colname='ABSMAG_R'):\n# mbins=(12, 13, 13.5, 14, 16)\n samp = gs.GalSample()\n samp.read_gama()\n samp.add_vmax()\n samp.group_props()\n t = samp.t\n t['log_massden'] = t['log_mass'] - np.log10(math.pi*t['Rad50']**2)\n t['log_numden'] = np.log10(t['Nfof'] / (math.pi*t['Rad50']**2))\n\n plt.clf()\n sel = np.logical_not(t['log_mass'].mask)\n plt.hist((t['log_mass'][sel],\n t['log_mass'][sel * np.array(t['Nfof'] >= nmin)]),\n bins=12, range=(10, 16))\n plt.xlabel('log (M/M_sun)')\n plt.ylabel('Frequency')\n plt.show()\n\n plt.clf()\n sel = np.logical_not(t['log_mass'].mask) * np.array(t['Nfof'] >= nmin)\n plt.hist(t['log_massden'][sel])\n plt.xlabel(r'log Mass density [M_sun Mpc$^{-2}$]')\n plt.ylabel('Frequency')\n plt.show()\n\n# plt.clf()\n# plt.hist(np.log10(t['LumBfunc'][np.logical_not(t['log_mass'].mask)]))\n# plt.xlabel('log Lum')\n# plt.ylabel('Frequency')\n# plt.show()\n#\n plt.clf()\n plt.scatter(t['Nfof'] + np.random.random(len(t['Nfof'])) - 0.5,\n t['log_mass'], s=0.1, c=np.log10(t['Nfof']))\n plt.xlabel('Nfof')\n plt.ylabel('log (M/M_sun)')\n plt.semilogx(basex=10)\n plt.show()\n#\n# plt.clf()\n# plt.scatter(t['LumBfunc'], t['log_mass'], s=0.1, c=t['Nfof'])\n# plt.xlabel('Lum')\n# plt.ylabel('log mass')\n# plt.semilogx(basex=10)\n# plt.show()\n\n print(len(t), 'galaxies before reliable group selection')\n sel = (np.array(t['GroupEdge'] > edge_min) *\n np.logical_not(t['log_mass'].mask) *\n np.array(t['Nfof'] >= nmin))\n samp.t = t[sel]\n print(len(samp.t), 'galaxies after reliable group selection')\n\n plot_samples(samp, 'log_mass', mbins, 
'{} < log M < {}',\n outfile='group_lf_mass.txt')\n plot_samples(samp, 'log_massden', mdbins, '{} < log Mden < {}')\n\n samp.vol_limit(-17.5)\n samp.group_limit(nmin)\n print(len(samp.t), 'galaxies after volume limiting to ', samp.zlim)\n\n plt.clf()\n plt.hist(samp.t['log_numden'])\n plt.xlabel(r'log Number density [Mpc$^{-2}$]')\n plt.ylabel('Frequency')\n plt.show()\n\n plt.clf()\n plt.scatter(samp.t['z'], samp.t['log_numden'], s=0.1)\n plt.xlabel(r'Redshift')\n plt.ylabel(r'log Number density [Mpc$^{-2}$]')\n plt.show()\n\n plot_samples(samp, 'log_numden', ndbins, '{} < log nden < {}')", "title": "" }, { "docid": "cc5f37387877a492d638200a9bf3ba2f", "score": "0.50414634", "text": "def plot_categ(train_data, target, nominal_features,positive =1) :\n # Looping through and Plotting Categorical features\n for column in nominal_features:\n # Figure initiation\n fig = plt.figure(figsize=(18,12))\n \n ### Number of occurrences per categoty - target pair\n ax = sns.countplot(x=column, hue=target, data=train_data, ax = plt.subplot(211));\n # X-axis Label\n plt.xlabel(column, fontsize=14);\n # Y-axis Label\n plt.ylabel('Number of occurrences', fontsize=14);\n # Adding Super Title (One for a whole figure)\n plt.suptitle('Plots for '+column, fontsize=18);\n # Setting Legend location \n plt.legend(loc=1);\n\n ### Adding percents over bars\n # Getting heights of our bars\n height = [p.get_height() if np.isnan(p.get_height()) == 0 else 0 for p in ax.patches] # get nan if\n # Counting number of bar groups \n ncol = int(len(height)/2)\n # Counting total height of groups\n total = [height[i] + height[i + ncol] for i in range(ncol)] * 2\n # Looping through bars\n for i, p in enumerate(ax.patches): \n # Adding percentages\n ax.text(p.get_x()+p.get_width()/2, height[i]*1.01 + 10,\n '{:1.0%}'.format(height[i]/total[i]), ha=\"center\", size=14) \n\n negative = train_data[target].unique()[0] if train_data[target].unique()[0] != positive else train_data[target].unique()[1]\n ### Positive class percentage for every value of feature\n \n sns.pointplot(x=train_data[column], y=train_data[target].map({negative:0 , positive: 1}), ax = plt.subplot(212));\n # X-axis Label\n plt.xlabel(column, fontsize=14);\n # Y-axis Label\n plt.ylabel(' Positive class percentage', fontsize=14);\n # Printing Chart\n plt.show()", "title": "" }, { "docid": "e24fac4696378cf2f8df295b5964f383", "score": "0.5038497", "text": "def print_rf_90th_percentile_ANOM_plots(model, dest, optimal_k, too_large):\n\n rfstarttime = timer(); print(f'{utils.time_now()} - Plotting 90th-perc rainfall now.\\nTotal of {optimal_k} clusters.')\n\n # RFprec_to_ClusterLabels_dataset = utils.open_pickle(model.RFprec_to_ClusterLabels_dataset_path).sel(\n # lon=slice(model.LON_W, model.LON_E), lat=slice(model.LAT_S, model.LAT_N))\n \n \n # if not too_large:\n # fig, gs_rf_plot = create_multisubplot_axes(optimal_k)\n # else:\n # fig = plt.Figure(figsize=(10,10))\n\n fig, gs_rf_plot = create_multisubplot_axes(optimal_k)\n \n # rf_ds_lon = RFprec_to_ClusterLabels_dataset.lon\n # rf_ds_lat = RFprec_to_ClusterLabels_dataset.lat\n \n # baseline = get_baseline_90perc(RFprec_to_ClusterLabels_dataset)\n baseline = get_RF_calculations(model, criteria=\"90perc\", calculation=\"90perc\")\n print('Baseline calculated')\n rf_ds_lon = get_RF_calculations(model, criteria=\"rf_ds_lon\")\n rf_ds_lat = get_RF_calculations(model, criteria=\"rf_ds_lat\")\n \n all_colors = np.vstack(plt.cm.terrain_r(np.linspace(0,1,11)))\n terrain_map = 
colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)\n\n fig.suptitle(f'Anomaly for 90th percentile RF over region: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E', fontweight='bold')\n \n levels = [int(i) for i in np.linspace(-100,100,21)]\n\n for clus in range(optimal_k):\n print(f'{utils.time_now()}: Cluster {clus} now.. ')\n \n time.sleep(1); gc.collect()\n # data = RFprec_to_ClusterLabels_dataset.where(RFprec_to_ClusterLabels_dataset.cluster==clus, drop=True).precipitationCal.values\n # mean = np.percentile(data, 90, axis=0)\n mean = get_RF_calculations(model, criteria=\"90perc\", calculation=\"90perc\", clus=clus, too_large=too_large)\n mean = mean-baseline\n time.sleep(1); gc.collect()\n\n # if too_large:\n # ax_rf_plot = fig.add_subplot(111, projection=ccrs.PlateCarree())\n # else:\n # ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())\n ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())\n \n ax_rf_plot.xaxis.set_major_formatter(model.lon_formatter)\n ax_rf_plot.yaxis.set_major_formatter(model.lat_formatter)\n ax_rf_plot.set_facecolor('w')\n ax_rf_plot.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])\n ax_rf_plot.coastlines(\"50m\", linewidth=.7, color='k')\n ax_rf_plot.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed')\n ax_rf_plot.set_title(f\"cluster no.{clus+1}\", loc='left')\n\n # if too_large or not too_large and clus < model.grid_width: # top ticks \n if clus < model.grid_width: # top ticks \n ax_rf_plot.set_xticks(np.linspace(model.LON_W,model.LON_E,10), crs=ccrs.PlateCarree())\n ax_rf_plot.set_xticklabels([int(i) for i in np.linspace(model.LON_W,model.LON_E,10)], rotation=55)\n ax_rf_plot.xaxis.tick_top()\n else: ax_rf_plot.set_xticks([])\n\n # if too_large or not too_large and clus % model.grid_width == model.grid_width - 1: # right-side ticks\n if clus % model.grid_width == model.grid_width - 1: # right-side ticks\n ax_rf_plot.set_yticks([int(i) for i in np.linspace(model.LAT_S,model.LAT_N,10)], crs=ccrs.PlateCarree())\n ax_rf_plot.yaxis.set_label_position(\"right\")\n ax_rf_plot.yaxis.tick_right()\n else: ax_rf_plot.set_yticks([])\n \n RF = ax_rf_plot.contourf(rf_ds_lon, rf_ds_lat, mean.T, \n levels,\n cmap=terrain_map, \n extend='neither')\n conts = ax_rf_plot.contour(RF, 'w', linewidths=0)\n ax_rf_plot.clabel(conts, conts.levels, colors='k', inline=True, fmt='%1.f', fontsize=8)\n \n # if not too_large and clus == model.cbar_pos: # cbar\n if clus == model.cbar_pos: # cbar\n axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%',\n loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),\n bbox_transform=ax_rf_plot.transAxes)\n cbar_rf = fig.colorbar(RF, cax=axins_rf, ticks=levels, \n label='Anomaly of 90th percentile RF (in mm) relative to baseline.', orientation='horizontal', pad=0.01)\n cbar_rf.ax.xaxis.set_ticks_position('top')\n cbar_rf.ax.xaxis.set_label_position('top')\n # elif too_large:\n # cbar_rf = fig.colorbar(RF, ticks=[-2.58, -1.96, -1.65, -.67, 0, .67, 1.65, 1.96, 2.58], \n # label='Anomaly of 90th percentile RF (in mm) relative to baseline.', orientation='horizontal', pad=0.01)\n\n # fig.subplots_adjust(wspace=0.05,hspace=0.3)\n # fn = f\"{dest}/{model.month_names_joined}_RFplot_90th_percentile_ANOM_v1_cluster_{clus}_{model.gridsize}x{model.gridsize}\"\n # fig.savefig(fn, bbox_inches='tight', pad_inches=1)\n # print(f'file saved @:\\n{fn}')\n # plt.close('all')\n\n\n # if not too_large:\n # 
fig.subplots_adjust(wspace=0.05,hspace=0.3)\n # fn = f\"{dest}/{model.month_names_joined}_RFplot_90th_percentile_ANOM_v1_{model.gridsize}x{model.gridsize}\"\n # fig.savefig(fn, bbox_inches='tight', pad_inches=1)\n # print(f'file saved @:\\n{fn}')\n # plt.close('all')\n\n fig.subplots_adjust(wspace=0.05,hspace=0.3)\n fn = f\"{dest}/{model.month_names_joined}_RFplot_90th_percentile_ANOM_v1_{model.gridsize}x{model.gridsize}\"\n fig.savefig(fn, bbox_inches='tight', pad_inches=1)\n print(f'file saved @:\\n{fn}')\n plt.close('all')\n\n print(f\"\\n -- Time taken is {utils.time_since(rfstarttime)}\\n\")", "title": "" }, { "docid": "8b09be81d2a6dde0ab2852e604903dd0", "score": "0.5034748", "text": "def bar_chart_plotter(xs, ys):\r\n n = 8\r\n ind = np.arange(n) # the x locations for the groups\r\n width = 0.3 # the width of the bars\r\n\r\n fig, ax = plt.subplots()\r\n rects = ax.bar(ind, ys, width, color='b')\r\n ax.set_xticklabels(xs, rotation=45)\r\n ax.set_ylabel('percent')\r\n ax.set_title('percentage of examples in training set for each category')\r\n ax.set_xticks(ind + width / 2)\r\n plt.axis([-.1, 7.5, 0, .2])\r\n \r\n def autolabel(rects):\r\n \"\"\"\r\n Attach a text label above each bar displaying its height\r\n \"\"\"\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.text(rect.get_x() + rect.get_width() / 2., 1.01 * height,\r\n '%.2f' % (height*100),\r\n ha='center', va='bottom', rotation='vertical')\r\n\r\n autolabel(rects)\r\n plt.show()\r\n\r\n return None", "title": "" }, { "docid": "f5f2b0ebe3914c1d6fec1c914a221abd", "score": "0.5030243", "text": "def plot_comparison(comparison):\n N = 16\n ind = np.arange(N) # the x locations for the groups\n width = 0.2\n fig = plt.figure()\n ax = fig.add_subplot(111)\n rf = comparison.iloc[:, 0]\n rects1 = ax.bar(ind, rf, width, color='Coral')\n gb = comparison.iloc[:, 1]\n rects2 = ax.bar(ind + width, gb, width, color='LightSeaGreen')\n svm = comparison.iloc[:, 2]\n rects3 = ax.bar(ind + width * 2, svm, width, color='DodgerBlue')\n ax.set_ylabel('F-1 measures')\n ax.set_xlabel('Strains')\n ax.set_xticks(ind + width)\n ax.set_xticklabels(range(16))\n ax.legend((rects1[0], rects2[0], rects3[0]),\n ('RandomForest', 'GradientBoosting', 'SVM'))\n plt.show()", "title": "" }, { "docid": "538dbda30425826a31b6843bacc7db39", "score": "0.5029941", "text": "def apply_classification(g_array, ref_col):\n #get the values_list\n values_list = []\n for val in iter(g_array):\n values_list.append(val['properties'][ref_col])\n\n the_breaks = NaturalBreaks(values_list, 5)\n print(the_breaks, '\\n', the_breaks.bins, '\\n')\n\n break_list = []\n for list_value in iter(values_list):\n classed = False\n the_breaks_ref = 0\n for break_value in iter(the_breaks.bins):\n if list_value >= break_value and classed is False:\n the_breaks_ref = break_value\n classed = True\n break_list.append(the_breaks_ref)\n #old\n #break_list.append(classify(the_breaks.bins, val))\n\n # kml alpha format #AABBGGRR\n c_hex_a_ref = ['ZZ000099', 'ZZ001AA6', 'ZZ0033B3', 'ZZ004DBF', 'ZZ004DCC',\n 'ZZ0066CC', 'ZZ0080D9', 'ZZ0099E6', 'ZZ0320FB', 'ZZ00CCFF']\n c_hex_a = []\n for val in iter(c_hex_a_ref):\n c_hex_a.append(val.replace('ZZ', 'FF'))\n\n break_distinct = list(dict.fromkeys(break_list))\n\n #values_break = []\n old_val = []\n colour_breaks = []\n for val in iter(values_list):\n new_val = values_list.index(val) #[i for i, e in enumerate(values_n) if e is val]\n if new_val != old_val:\n #look up rgb colour values\n color_index = break_distinct.index(break_list[new_val])\n\n 
old_val = new_val\n #rgb_breaks.append(c_rgb[color_index])\n colour_breaks.append(c_hex_a[color_index])\n return colour_breaks", "title": "" }, { "docid": "a68cfe3c14da8c9ecc4e2c83e55097bf", "score": "0.5028299", "text": "def heatmap_phen(clusters_un,phen_ranked,ind_groups,ad_or_ped,nb_phen,figsize,vmin,vmax,figname):\r\n if ad_or_ped==\"adult\":\r\n cluster_list=[\"Cluster C\"+str(cluster+1)+\"A, N=\"+ \\\r\n str(len(clusters_un[cluster])) for cluster in ind_groups]\r\n elif ad_or_ped==\"pediatric\":\r\n cluster_list=[\"Cluster C\"+str(cluster+1)+\"P, N=\"+ \\\r\n str(len(clusters_un[cluster])) for cluster in ind_groups]\r\n list_phen_max=[]\r\n for cluster in ind_groups:\r\n i,j=0,0\r\n while j<nb_phen:\r\n if not(phen_ranked[cluster][0][i]) in list_phen_max:\r\n list_phen_max.append(phen_ranked[cluster][0][i])\r\n j+=1\r\n i+=1\r\n heatmap_mat=[[] for i in range(len(list_phen_max))]\r\n for i,phen in enumerate(list_phen_max):\r\n for cluster in ind_groups:\r\n if phen in phen_ranked[cluster][0]:\r\n indphen=np.where(phen_ranked[cluster][0]==phen)[0][0]\r\n heatmap_mat[i].append(phen_ranked[cluster][1][indphen]*100)\r\n else:\r\n heatmap_mat[i].append(0)\r\n sns.set()\r\n fig,ax=plt.subplots(figsize=(figsize,figsize))\r\n sns.heatmap(heatmap_mat,cbar=True,cmap=\"YlGnBu\",xticklabels=cluster_list,\r\n yticklabels=list_phen_max,ax=ax,vmin=vmin,vmax=vmax)\r\n plt.ylabel(\"Phenotypes\")\r\n plt.savefig(figname+\".svg\",bbox_inches=\"tight\",dpi=350)\r\n plt.show()", "title": "" }, { "docid": "8d657fa776fbb9895dbd47ee58bd6455", "score": "0.5027849", "text": "def plot_Hyperplan(method,numMetric,bags,labels_bags_c,labels_instance_c,\n StratifiedFold,opts,\n dataNormalizationWhen,dataNormalization,opts_MIMAX=None,\n verbose=False,prefixName='',end_name='',OnePtTraining=False):\n \n matplotlib.use('agg')\n \n nRep= 1\n nFolds = 1\n fold= 0\n r = 0\n perfObj=np.empty((nRep,nFolds,numMetric))\n perfObjB=np.empty((nRep,nFolds,numMetric))\n\n skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=r)\n train_index, test_index = skf.split(bags,labels_bags_c)\n train_index =train_index[0]\n test_index =test_index[0]\n labels_bags_c_train, labels_bags_c_test = \\\n getTest_and_Train_Sets(labels_bags_c,train_index,test_index)\n bags_train, bags_test = \\\n getTest_and_Train_Sets(bags,train_index,test_index)\n _ , labels_instance_c_test = \\\n getTest_and_Train_Sets(labels_instance_c,train_index,test_index)\n if OnePtTraining:\n index_pos_pt = np.random.choice(np.where(np.vstack(labels_bags_c_train)==1.)[0],1)\n index_neg_pts = np.where(np.vstack(labels_bags_c_train)==-1.)[0]\n indextotal = np.concatenate((index_pos_pt,index_neg_pts))\n local_index = 0\n labels_bags_c_train_tmp = []\n bags_train_tmp = []\n for local_index in range(len(labels_bags_c_train)):\n if local_index in indextotal:\n labels_bags_c_train_tmp += [labels_bags_c_train[local_index]]\n bags_train_tmp += [bags_train[local_index]]\n bags_train = bags_train_tmp\n labels_bags_c_train = labels_bags_c_train_tmp\n if dataNormalizationWhen=='onTrainSet':\n bags_train,bags_test = normalizeDataSetTrain(bags_train,bags_test,dataNormalization)\n\n gt_instances_labels_stack = np.hstack(labels_instance_c_test)\n\n bags_test_vstack = np.vstack(bags_test)\n X = bags_test_vstack\n\n grid_size= 100\n filename= prefixName +'_' + method\n if dataNormalizationWhen=='onTrainSet':\n filename += '_' +str(dataNormalization)\n filename += end_name + \".png\"\n filename = filename.replace('MISVM','bigMISVM')\n path_filename 
=os.path.join('MILbenchmark','ResultsToy',filename)\n\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, grid_size),\n np.linspace(y_min, y_max, grid_size),\n indexing='ij')\n flatten = lambda m: np.array(m).reshape(-1,)\n\n points = []\n for (i, j) in itertools.product(range(grid_size), range(grid_size)):\n point = np.array([xx[i, j], yy[i, j]]).reshape(1, 2)\n points.append(point)\n\n if method in list_of_MIMAXbasedAlgo:\n pred_bag_labels, pred_instance_labels,result,bestloss = train_and_test_MIL(bags_train,\\\n labels_bags_c_train,bags_test,labels_bags_c_test,\\\n method,opts,opts_MIMAX=opts_MIMAX,verbose=verbose,pointsPrediction=points,\\\n get_bestloss=True)\n else:\n pred_bag_labels, pred_instance_labels,result = train_and_test_MIL(bags_train,labels_bags_c_train,bags_test,labels_bags_c_test,\\\n method,opts,opts_MIMAX=opts_MIMAX,verbose=verbose,pointsPrediction=points)\n # result is the predited class for the points\n\n perfObj[r,fold,:]=getClassifierPerfomance(y_true=gt_instances_labels_stack,y_pred=pred_instance_labels,numMetric=numMetric)\n perfObjB[r,fold,:]=getClassifierPerfomance(y_true=labels_bags_c_test,y_pred=pred_bag_labels,numMetric=numMetric)\n\n y = np.sign(pred_instance_labels) # The class prediction +1 or -1\n Z = np.zeros_like(xx)\n index_r= 0\n for (i, j) in itertools.product(range(grid_size), range(grid_size)):\n Z[i,j] = np.sign(result[index_r])\n index_r+=1\n\n plt.contourf(xx, yy, Z,\n cmap=cm.Paired,\n levels=[-0.001, 0.001],\n extend='both',\n alpha=0.5)\n\n # Plot with the predictive value\n y_unique = np.unique(y)\n if len(y_unique) == 1:\n if y_unique[0]==1:\n color = 'r'\n else:\n color = 'b'\n else:\n color = y\n\n# plt.scatter(X[:, 0], X[:, 1],\n# c=color, cmap=cm.Paired,alpha=0.5)\n plt.scatter(flatten(X[:, 0]), flatten(X[:, 1]),\n c=flatten(color), cmap=cm.Paired)\n\n X_pos = X[np.where(gt_instances_labels_stack==1)[0],:]\n X_neg = X[np.where(gt_instances_labels_stack==-1)[0],:]\n plt.scatter(flatten(X_pos[:, 0]), flatten(X_pos[:, 1]), s=80, facecolors='none', edgecolors='r')\n plt.scatter(flatten(X_neg[:, 0]), flatten(X_neg[:, 1]), s=80, facecolors='none', edgecolors='b')\n\n dataset = 'GaussianToy'\n perf=getMeanPref(perfObj,dataset)\n perfB=getMeanPref(perfObjB,dataset)\n mPerf = perf[0]\n\n plt.xlim(x_min, x_max)\n plt.ylim(y_min, y_max)\n\n\n\n add_to_name = ''\n if method in list_of_MIMAXbasedAlgo and not(opts_MIMAX is None):\n C,C_Searching,CV_Mode,restarts,LR = opts_MIMAX\n if not(restarts==49):\n add_to_name += '_r'+str(restarts)\n if not(C==1.0):\n add_to_name += '_C'+str(C)\n if not(LR==0.01):\n add_to_name += '_LR'+str(LR)\n if C_Searching:\n add_to_name += '_C_Searching'\n if not(CV_Mode=='') or not(CV_Mode is None):\n add_to_name += '_' + CV_Mode\n\n if method=='MIMAXaddLayer':\n method_str = 'MI-max-HL'\n elif method=='MIMAX':\n method_str = 'MI-max'\n elif method=='IA_mi_model':\n method_str ='mi-perceptron' \n elif method=='MaxOfMax':\n method_str ='Polyhedral MI-max' \n else:\n method_str = method\n\n titlestr = method_str+' AUC: {0:.2f}, UAR: {1:.2f}, F1 : {2:.2f}'.format(mPerf[2],mPerf[1],mPerf[0])\n if method in list_of_MIMAXbasedAlgo:\n try:\n bestloss_length = len(bestloss)\n if bestloss_length==1:\n titlestr += ' BL : {0:.2f}'.format(bestloss[0])\n else:\n titlestr += ' BL : {0:.2f}'.format(bestloss)\n except TypeError:\n titlestr += ' BL : {0:.2f}'.format(bestloss)\n plt.title(titlestr)\n plt.savefig(path_filename)\n 
plt.show()\n plt.close()\n\n return(perf,perfB)", "title": "" }, { "docid": "2afe799ff653b7eff5eee587b3453df1", "score": "0.5026644", "text": "def __individual_tasks_plot(self, df, op_dict, pdf):\n def __kpmeier(x):\n x_uniq = np.unique(x)\n x0 = list([x_uniq[0]])\n x1 = list([1])\n for t in x_uniq[:-1]:\n s_hat_ti = list()\n for ti in x_uniq:\n if ti < t:\n d_i = (x == ti).sum()\n n_i = (x > ti).sum()\n s_hat_ti.append((1 - d_i/float(n_i)))\n x0.extend((t, t))\n x1.extend((x1[-1], np.array(s_hat_ti).prod()))\n x0.extend((x0[-1], x_uniq[-1], ))\n x1.extend((0, 0))\n return x0, x1\n\n def __individual_metric_kaplan_meier(df, op_task, pdf, title):\n df.is_copy = None\n # fig, ax = plt.subplots(figsize=(10, 5), ncols=2, nrows=1)\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n ax1 = fig.add_subplot(2, 1, 1)\n plt.suptitle(title, fontsize=16)\n\n # Draw a nested barplot to show Class. ACC for each methods\n g = sns.factorplot(x=\"Metric\", y=\"Value\", hue=\"Methods\",\n data=df, size=6, kind=\"bar\",\n palette=\"muted\", legend=False, ax=ax1)\n g.despine(left=True)\n g.set_ylabels('Value')\n plt.legend(loc='upper right')\n if not title:\n nb_tasks = len(df['Task'].unique())\n g.fig.suptitle('Average over all ({}) tasks'.format(nb_tasks))\n else:\n g.fig.suptitle(title)\n\n # kaplan meier\n if df['Metric'].iloc[0] in ('rmse', 'nmse'):\n # colors = ['azure', 'green', 'sienna', 'orchid', 'darkblue']\n colors = [\"windows blue\", \"amber\", \"greyish\", \"faded green\", \"dusty purple\"]\n leg_list = list()\n ax2 = fig.add_subplot(2, 1, 2)\n for i, k in enumerate(op_task.keys()):\n # plot observed curve\n if i == 0:\n y0, y1 = __kpmeier(op_task[k]['obs'])\n ax2.plot(y0, y1, color='xkcd:black')\n leg_list.append('Observed')\n\n # plot predicted value for all methods\n p0, p1 = __kpmeier(np.squeeze(op_task[k]['pred']))\n ax2.plot(p0, p1, color='xkcd:%s' % colors[i])\n leg_list.append(k[0:10])\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Survival Probability (%)')\n ax2.legend(leg_list)\n pdf.savefig(fig)\n\n df.is_copy = None\n tasks = df['Task'].unique()\n runs = op_dict.keys()\n for task in tasks:\n op_dict_task = dict()\n for met in op_dict.keys():\n print(op_dict[met].keys())\n op_dict_task[met] = op_dict[met][task]\n __individual_metric_kaplan_meier(df[df['Task'] == task],\n op_dict_task, pdf, task)", "title": "" }, { "docid": "601f6394fdc0be197ff04e5f284ecad8", "score": "0.5025597", "text": "def print_rf_heavy_gt50mm_ANOM_plots(model, dest, optimal_k):\n\n rfstarttime = timer(); print(f'{utils.time_now()} - Plotting ANOM (v2) proba of >50mm rainfall now.\\nTotal of {optimal_k} clusters, now printing cluster: ')\n\n # RFprec_to_ClusterLabels_dataset = utils.open_pickle(model.RFprec_to_ClusterLabels_dataset_path)\n rf_ds_lon = get_RF_calculations(model, 'rf_ds_lon')\n rf_ds_lat = get_RF_calculations(model, 'rf_ds_lat')\n baseline = (get_RF_calculations(model, criteria=\"gt50mm\", calculation=\"mean\", clus=\"whole\"))\n if baseline.max() > 100:\n baseline = baseline/100\n \n # RFprec_to_ClusterLabels_dataset = utils.open_pickle(model.RFprec_to_ClusterLabels_dataset_path).sel(\n # lon=slice(model.LON_W, model.LON_E), lat=slice(model.LAT_S, model.LAT_N))\n \n fig, gs_rf_plot = create_multisubplot_axes(optimal_k)\n # rf_ds_lon = RFprec_to_ClusterLabels_dataset.lon\n # rf_ds_lat = RFprec_to_ClusterLabels_dataset.lat\n\n # baseline = np.mean(RFprec_to_ClusterLabels_dataset.precipitationCal > 50, axis=0) * 100\n \n all_colors = np.vstack(plt.cm.BrBG(np.linspace(0,1,11)))\n 
terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)\n\n fig.suptitle(f'Anomaly for rainfall above 50mm, over region: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E', fontweight='bold')\n \n levels1 = np.linspace(-20,20,81)\n levels2 = [int(i) for i in np.arange(-20, 21, 5)]\n\n for clus in range(optimal_k):\n print(f'\\n{utils.time_now()}: {clus}.. ');\n time.sleep(1); gc.collect()\n data = get_RF_calculations(model, criteria=\"gt50mm\", calculation=\"mean\", clus=clus)\n # data = RFprec_to_ClusterLabels_dataset.where(RFprec_to_ClusterLabels_dataset.cluster==clus, drop=True).precipitationCal\n # mean = np.mean(data > 50, axis=0)*100\n mean = data-baseline\n # print(mean)\n # print(mean.min())\n # print(mean.max())\n # sys.exit()\n time.sleep(1); gc.collect()\n\n ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())\n ax_rf_plot.xaxis.set_major_formatter(model.lon_formatter)\n ax_rf_plot.yaxis.set_major_formatter(model.lat_formatter)\n ax_rf_plot.set_facecolor('k')\n ax_rf_plot.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])\n ax_rf_plot.coastlines(\"50m\", linewidth=.7, color='k')\n ax_rf_plot.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed')\n\n if clus < model.grid_width: # top ticks \n ax_rf_plot.set_xticks(np.linspace(model.LON_W,model.LON_E,10), crs=ccrs.PlateCarree())\n ax_rf_plot.set_xticklabels([int(i) for i in np.linspace(model.LON_W,model.LON_E,10)], rotation=55)\n # ax_rf_plot.set_xticks([model.LON_W, (model.LON_E - model.LON_W)/2 + model.LON_W, model.LON_E], crs=ccrs.PlateCarree())\n # ax_rf_plot.set_xticklabels([model.LON_W, (model.LON_E - model.LON_W)/2 + model.LON_W, model.LON_E], rotation=55)\n ax_rf_plot.xaxis.tick_top()\n else: ax_rf_plot.set_xticks([])\n\n if clus % model.grid_width == model.grid_width - 1: # right-side ticks\n ax_rf_plot.set_yticks([int(i) for i in np.linspace(model.LAT_S,model.LAT_N,10)], crs=ccrs.PlateCarree())\n # ax_rf_plot.set_yticklabels([int(i) for i in np.linspace(model.LAT_S,model.LAT_N,10)], rotation=55)\n ax_rf_plot.yaxis.set_label_position(\"right\")\n ax_rf_plot.yaxis.tick_right()\n else: ax_rf_plot.set_yticks([])\n \n RF = ax_rf_plot.contourf(rf_ds_lon, rf_ds_lat, mean.T, \n # np.linspace(0,100,11), \n levels1,\n cmap=terrain_map, \n extend='both')\n\n conts = ax_rf_plot.contour(RF, 'w', linewidths=0)\n\n ax_rf_plot.clabel(conts, \n # conts.levels, \n np.concatenate([levels2[:4],levels2[5:]]),\n colors='grey', inline=True, fmt='%1.f', fontsize=7)\n\n ax_rf_plot.set_title(f\"cluster no.{clus+1}\", loc='left')\n\n time.sleep(1); gc.collect()\n\n if clus == model.cbar_pos: # cbar\n axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%',\n loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),\n bbox_transform=ax_rf_plot.transAxes)\n cbar_rf = fig.colorbar(RF, cax=axins_rf, label='Proportion of grid with >50 mm rainfall (%) relative to whole dataset baseline', orientation='horizontal', pad=0.01, \n # ticks=np.arange(0,100,10)\n ticks=levels2\n )\n cbar_rf.ax.xaxis.set_ticks_position('top')\n cbar_rf.ax.xaxis.set_label_position('top')\n \n print(f\"\\n -- Time taken is {utils.time_since(rfstarttime)}\\n\")\n\n fig.subplots_adjust(wspace=0.05,hspace=0.3)\n fn = f\"{dest}/{model.month_names_joined}_RFplot_heavy_gt50mm_ANOM_v2_{model.gridsize}x{model.gridsize}\"\n fig.savefig(fn, bbox_inches='tight', pad_inches=1)\n print(f'file saved @:\\n{fn}')\n plt.close('all')", "title": "" }, { "docid": 
"b8b646a5dda9033e053cbae55eb8e480", "score": "0.5024064", "text": "def montageOne(objid, fig=None, bands=['f435w','f850lp','f160w','ch1','ch2'], size=3, nrows=1, ncols=1, rownum=1, colnum=1, xmin=0.1, xmax=0.9, colpad=0.1, ymin=0.1, ymax=0.9, rowpad=0.05, labelsize='large', add_label=True):\n if fig == None:\n fig = plt.figure()\n assert rownum <= nrows, \"rownum (%d) should not be larger than nrows (%d)!\" % (rownum, nrows)\n assert colnum <= ncols, \"colnum (%d) should not be larger than ncols (%d)!\" % (colnum, ncols)\n figures = []\n nbands = len(bands)\n # calculate dx, dy, which are the width and height of the montage of \n # each object\n dx = (xmax - xmin - (ncols-1)*colpad) / ncols\n dy = (ymax - ymin - (nrows-1)*rowpad) / nrows\n # Also calculate the width and height of each cutout in figure coordinate\n cutdx = dx / float(nbands)\n cutdy = dy \n for i in range(nbands):\n b = bands[i]\n sub = [xmin + (colnum-1) * (dx + colpad) + i * cutdx,\n ymax - rownum * cutdy - (rownum - 1) * rowpad,\n cutdx, cutdy]\n # f = cutoutOne(objid, b, figure=fig, subplot=(1, nbands, i+1), size=size)\n f = cutoutOne(objid, b, figure=fig, subplot=sub, size=size)\n f.set_axis_labels(xlabel='', ylabel='')\n f.tick_labels.hide()\n if add_label:\n f.add_label(0.5, 0.1, b.upper(), relative=True, size='x-large', \n family='monospace', weight='bold', color='Lime')\n if i == 0:\n f.axis_labels.set_ytext(objid)\n f.axis_labels.set_font(size=labelsize, weight='bold', \n family='monospace')\n f.axis_labels.show_y()\n figures.append(f)\n return figures", "title": "" }, { "docid": "66973508d0e5ad940c8208f7db779c7b", "score": "0.50210756", "text": "def plot_group(gid, group, group_joined, start_stop, sign,\n savefolder, thresholds):\n\n print('Plotting group {} ({} clusters)'.format(gid, len(group)))\n\n panels = PANELS\n\n # timelim = (start_time/60, stop_time/60)\n\n# if sign == 'neg':\n# ylim = (-200, 0)\n# elif sign == 'pos':\n# ylim = (0, 200)\n#\n # maxima over time\n plot = panels['maxima']\n\n plot_maxima_over_time(plot, group, start_stop, sign, thresholds)\n plot.text(.5, 1.05,\n 'Group {} ({}) Firing over time'.format(gid,\n TYPE_NAMES[group_joined['type']]),\n va='bottom', ha='center', transform=plot.transAxes,\n backgroundcolor='w')\n\n #plot.text(0, 1, '{} ({})'.format(gid, group['type']),\n # transform=plot.transAxes, va='bottom', ha='left') \n\n # ISI\n times = group_joined['times']\n spikes = group_joined['spikes']\n timelim = (start_stop[0]/1000/60, start_stop[1]/1000/60)\n\n plot = panels['isi']\n data = np.diff(times) # to ms\n data = data[data <= 100]\n plot.cla()\n if data.shape[0] > 10:\n plot.hist(data, 100, edgecolor='none')\n plot.set_xlim([0, 100])\n under3 = (data <= 3).sum()/data.shape[0]\n plot.text(.5, 1.1, '{:.1%} < 3 ms'.format(under3),\n va='top', ha='center', transform=plot.transAxes,\n backgroundcolor='w')\n else:\n plot.axis('off')\n plot.set_ylabel('# lags')\n plot.set_xlabel('ms')\n plot.text(.95, .97, 'Inter-Spike Intervals',\n va='top', ha='right', transform=plot.transAxes,\n backgroundcolor='w')\n\n # all means?\n\n # count over time\n plot = panels['cumulative']\n plot.cla()\n plot.plot(times/1000/60, range(len(times)))\n plot.set_xticklabels([])\n plot.set_xlim(timelim)\n plot.set_ylabel('# spikes')\n plot.grid(True)\n #plot.set_xticks(tickpos)\n# add_events(plot, events)\n# plot.text(.5, -.15, u'Propofol concentration [µg/mL]', va='top', ha='center',\n# transform=plot.transAxes, backgroundcolor='w')\n plot.text(.5, .95, 'Cumulative spike count',\n va='top', ha='center', 
transform=plot.transAxes,\n backgroundcolor='w')\n\n # density\n plot = panels['density']\n plot.cla()\n spike_heatmap(plot, spikes)\n plot.set_xticks([])\n plot.set_ylabel(u'µV')\n\n # other density\n data = np.array([np.histogram(row, bins=DENSITY_BINS)[0]\n for row in spikes.T])\n plot = panels['density2']\n plot.cla()\n plot.axis('off')\n plot.imshow(data.T, aspect='auto', origin='lower', cmap=cm.hot)\n\n # now the images", "title": "" }, { "docid": "b249d4ce62c1a49540b83ea1649a04c7", "score": "0.5018867", "text": "def labelTopo(self, Y):\n topo = np.copy(Y)\n height, binedge = np.histogram(topo, 100)\n peak = []\n points = topo.shape[1]\n lines = topo.shape[0]\n for number in range(1, len(height)-1):\n if height[number+1]<height[number] \\\n and height[number-1]<height[number] \\\n and height[number]>900/(256*256)*points*lines:\n peak.append(binedge[number])\n print(peak)\n plt.plot(height, binedge[:-1])\n plt.xlabel('Count')\n plt.ylabel('Apparent Height')\n plt.show()\n with np.nditer(topo, op_flags=['readwrite']) as matrix:\n for point in matrix:\n if point<peak[0]+700000:\n point[...] = 1\n elif point>peak[-1]-400000 and point<peak[-1]+400000:\n point[...] = 2\n else:\n point[...] = 0\n s = np.ones((3,3), dtype = int)\n labeled_topo, features = label(topo, s)\n print(features)\n plt.matshow(topo)\n plt.colorbar()\n plt.show()", "title": "" }, { "docid": "2081166ac5721f361560396d2b5fd2eb", "score": "0.5015301", "text": "def plotDistribution( labels, title=\"\" ):\n bins = [0,1,2,3,4,5,6,7,8,9,10]\n heights, _ = np.histogram( asDigits( labels ), bins )\n percent = [ i / sum( heights ) * 100 for i in heights ]\n\n f, ax = plt.subplots( 1, 1 )\n ax.bar( bins[:-1], percent, width=0.8, color=\"grey\" )\n ax.set_ylim( [0, 15] )\n\n # axis labels\n ax.set_ylabel( \"Percentage of entire dataset in %\" )\n ax.set_xlabel( \"image labels\" )\n\n # x ticks\n ax.set_xticks( bins[:-1] )\n\n # numbers above bars\n for i, v in enumerate( percent ):\n plt.text( bins[i] - 0.4, v + 0.4, f\"{v:.2f}%\", rotation=45)\n plt.title( title )\n plt.show()", "title": "" }, { "docid": "ccf5f6fbce9d4393400b29c12c360973", "score": "0.50151396", "text": "def grand_L2_plot(data,n,bin_size,save): \n \"rotate frame 90 degrees so population on x axis\"\n data = np.rot90(data,k=1) \n\n \"initiate plot\"\n f,ax=plt.subplots(figsize=(8,8))\n \"colourmap\"\n cmap = cm.viridis\n \"set nans for 0 agents unobserved to white (not black because black text)\"\n cmap.set_bad(\"white\") \n \n \" mask needed to get bad white squares in imshow\"\n data2 = np.ma.masked_where(np.isnan(data),data)\n \"rotate again so imshow right way up (origin bottom left i.e. 
lower)\"\n data2=np.flip(data2,axis=0) \n im=ax.imshow(data2,interpolation=\"nearest\",cmap=cmap,origin=\"lower\")\n \n \"labelling\"\n ax.set_xticks(np.arange(len(n)))\n ax.set_yticks(np.arange(len(bin_size)))\n ax.set_xticklabels(n)\n ax.set_yticklabels(bin_size)\n ax.set_xticks(np.arange(-.5,len(n),1),minor=True)\n ax.set_yticks(np.arange(-.5,len(bin_size),1),minor=True)\n ax.grid(which=\"minor\",color=\"k\",linestyle=\"-\",linewidth=2)\n ax.set_xlabel(\"Number of Agents\")\n ax.set_ylabel(\"Aggregate Grid Square Size\")\n #plt.title(\"Grand L2s Over Varying Agents and Percentage Observed\")\n\n\n \"text on top of squares for clarity\"\n data = np.flip(data,axis=0)\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n plt.text(j,i,str(data[i,j].round(2)),ha=\"center\",va=\"center\",color=\"w\",\n path_effects=[pe.Stroke(linewidth = 0.7,foreground='k')])\n \n \"colourbar alignment and labelling\"\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\",size=\"5%\",pad=0.05)\n cbar=plt.colorbar(im,cax,cax)\n cbar.set_label(\"Grand Mean L2 Error\")\n \n \"further labelling and saving\"\n cbar.set_label(\"Aggregate Median L2s\")\n ax.set_ylabel(\"Aggregate Grid Square Width\")\n if save:\n plt.savefig(\"Aggregate_Grand_L2s.pdf\")", "title": "" }, { "docid": "335b7231e0cc7bf1eab1c032a88da513", "score": "0.500402", "text": "def plot_result(result,\n                threshold=None,\n                group_by=0,\n                dataset=None,\n                model=None,\n                metric=None,\n                fixed_category=\"auto\",\n                x_label=\"\",\n                y_label=\"\",\n                y_range=None,\n                add_label=True,\n                font_size=12,\n                file_name=None,\n                bar_size=2,\n                add_line=False,\n                **kwargs):\n    import matplotlib.pyplot as plt\n    from numpy import arange\n\n    plt.rcParams.update({'font.size': font_size})\n\n    def autolabel(rects):\n        \"\"\"\n        Attach a text label above each bar displaying its height\n        \"\"\"\n        for rect in rects:\n            height = rect.get_height()\n            ax.text(rect.get_x() + rect.get_width() / 2., 1.01 * height,\n                    '%.2f' % height,\n                    ha='center', va='bottom')\n\n    if dataset is None and model is None and metric is None:\n        raise ValueError(\"You have to set one value\")\n    if group_by not in (0, 1):\n        raise ValueError(\"group_by can only be 0 or 1\")\n\n    result = list(result.get_result(dataset=dataset, model=model, metric=metric, fixed=fixed_category))\n\n    if group_by:\n        x_labels = result[1]\n        legends = result[0]\n        result[2] = result[2].T\n    else:\n        x_labels = result[0]\n        legends = result[1]\n\n    if threshold is None:\n        valid = [True] * len(legends)\n    else:\n        valid = eval(\"result[2]\" + threshold).sum(axis=0) == result[2].shape[0]\n    legends = [legends[i] for i in range(len(legends)) if valid[i]]\n\n    result[2] = result[2][:, valid]\n\n    w = 1 / (len(legends) + bar_size)\n    x = arange(len(x_labels), dtype=float)\n\n    fig, ax = plt.subplots(figsize=(12, 8))\n    hatches = ['/', '\\\\', '', '-', '+', 'x', 'o', 'O', '.', '*']\n    markers = ['o', 'D', '+', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', 'P', '*', 'h', 'H', '.',\n               'X', 'x', ',', 'd', '|', '_']\n\n    for i in range(len(legends)):\n\n        if add_line:\n            line = ax.plot(result[2][:, i], label=legends[i],\n                           marker=markers[i% len(markers)],\n                           ms=15,\n                           markeredgecolor=\"#000000ff\",\n                           markerfacecolor=\"#00000000\")\n        else:\n            bars = ax.bar(x, result[2][:, i], width=w, label=legends[i], hatch=hatches[i % len(hatches)], **kwargs)\n            if add_label:\n                autolabel(bars)\n        x += w\n\n    plt.xlabel(x_label, fontweight='bold')\n    plt.ylabel(y_label, fontweight='bold')\n    if y_range is not None:\n        plt.ylim(y_range)\n    if add_line:\n        
plt.xticks(x, x_labels)\n else:\n plt.xticks(x - (len(legends) + 1) / 2 * w, x_labels)\n plt.legend()\n if file_name is not None:\n plt.savefig(file_name, transparent=True, pad_inches=0)\n plt.show()", "title": "" }, { "docid": "c59df6f761fb8e0ddd112206f819a023", "score": "0.49908143", "text": "def income_classifier(grid):\n # Defining thresholds for income\n breaks = [x for x in range(55000, 110000, 5000)]\n\n # Initialize the classifier and apply it\n classifier = ps.User_Defined.make(bins=breaks)\n pt_classif = grid[['income']].apply(classifier)\n # Rename the classified column\n pt_classif.columns = ['incomeb']\n # Join it back to the grid layer\n grid = grid.join(pt_classif)\n # Adding new column with bin names to be used in legend\n grid['bin'] = pd.np.where(\n grid.incomeb.astype(str) == '1',\n \"[55000-60000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '2',\n \"[60000-65000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '3',\n \"[65000-70000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '4',\n \"[70000-75000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '5',\n \"[75000-80000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '6',\n \"[80000-85000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '7',\n \"[85000-90000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '8',\n \"[90000-95000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '9',\n \"[95000-100000]\",\n pd.np.where(\n grid.incomeb.astype(str) == '10',\n \"[100000-105000]\",\n pd.np.where(\n grid.incomeb.astype(str) ==\n '11',\n \"[105000-110000]\",\n 'NA')))))))))))\n return(grid)", "title": "" }, { "docid": "2d1ee44a169a5e2b29bb8d432e735bd0", "score": "0.49892685", "text": "def r_metric(i,j,labels):\n return 1.0*sum(labels[i] & labels[j]) / sum(labels[i] | labels[j])", "title": "" }, { "docid": "94fc15d9eb6a30335d2adc9c87630d7f", "score": "0.4984953", "text": "def plot_ratio_occupancy(\n occ_df,\n xlabel,\n f_name,\n occ_column_name,\n col='b',\n b_col_name=\"B_mean\",\n min_cond=0.0,\n plot_fit=True,\n two_legend=True,\n single_legend=False,\n):\n # Data to be plotted\n x = occ_df[occ_column_name]\n y = occ_df[\"Expected Ratio\"]\n\n # Independent scale for the B factor\n # marker 10 = B factor 50\n # marker 20 = B factor 100\n # Squaring is used so the scaling is in area\n marker_area = (occ_df[b_col_name] / 5) ** 2\n\n # Create figure to plot onto\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n\n # Change to left and bottom axis, with zero point not at bottom of graph\n # This is for showing markers near zero\n ax.spines[\"bottom\"].set_position(\"zero\")\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.xaxis.set_label_coords(0.5, -0.05)\n plt.xlim(0, 1.2)\n plt.ylim(-0.1, 1.1)\n\n plt.xlabel(xlabel)\n plt.ylabel(\"Ratio of labelled species\")\n\n # Plot x=y\n xline = np.linspace(0, 1, 100)\n yline = xline\n xy_line, = ax.plot(xline, yline, \"k:\")\n\n # print(xline)\n # print(type(xline))\n # print(y.values)\n # print(type(y.values))\n\n print(\"------------\")\n #print(y.values)\n #print(x.values)\n print(np.mean(np.sqrt((y.values-x.values)**2)))\n print(\"------------\")\n #exit()\n\n #rmsd = np.sqrt(x^2-y^2)\n #mean_rmsd = np.mean(rmsd)\n\n # Set up condition for occupancy given the min_cond\n #cond = (x <= 1.0) & (x >= 0.5) & (y <= 1.0) & (y >= 0)\n #xFit = x[cond]\n #yFit = y[cond]\n\n yFit=y\n xFit=x\n\n # # Split the markers into those that have been fitted and those that haven't\n # marker_area_fit = marker_area[cond]\n # marker_area_other = 
marker_area[~cond]\n\n # Linear fit (mx+c) to occupancy vs ratio when occ >= min_cond\n\n fit = sm.OLS(yFit, sm.add_constant(xFit)).fit()\n #print(fit.params)\n\n # Plot of linear fit\n if plot_fit:\n try:\n fit_line = plt.plot(\n np.linspace(0, 1, 100), np.linspace(0, 1, 100) * fit.params[1] + fit.params[0],\n color='k'\n )\n except IndexError:\n fit_line = plt.plot(\n np.linspace(0, 1, 100), np.linspace(0, 1, 100) * fit.params[0],\n color='k'\n )\n\n # Scatter plots showing the occupancy vs ratio data\n scatter_1 = ax.scatter(x, y, s=marker_area, color=col)\n\n #fill region\n per = np.percentile(x,5)\n print(f'percentile {per}')\n ax.axvspan(0,np.percentile(x,5),facecolor='grey',alpha=0.3)\n\n # print(\"ALLA\")\n # print(fit.params[0], fit.params[1])\n # from scipy.stats import linregress\n # print(linregress(x,y))\n # print(linregress(x,y).slope)\n #\n # fit_line_2 = plt.plot(\n # np.linspace(0, 1, 100), np.linspace(0, 1, 100) * (linregress(x,y).slope + linregress(x,y).intercept),\n # color='g'\n # )\n\n #ax.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color='r')\n\n # scatter_2 = ax.scatter(x[~cond], y[~cond], marker_area_other, color=\"C0\", alpha=0.3)\n\n # Artist objects for use with the legend\n blue_circ = mlines.Line2D(\n [], [], color=col, marker=\"o\", linestyle=\"None\", markersize=10\n )\n\n blue_circ_small = mlines.Line2D(\n [], [], color=col, marker=\"o\", linestyle=\"None\", markersize=4.8\n )\n\n blue_circ_2 = mlines.Line2D(\n [], [], color=col, marker=\"o\", linestyle=\"None\", markersize=20\n )\n\n trans_blue_circ = mlines.Line2D(\n [], [], color=col, marker=\"o\", linestyle=\"None\", markersize=10, alpha=0.3\n )\n\n # Shrink current axis's height by 20% on the bottom\n box = ax.get_position()\n\n # legend usind defined artist objects and x=y line and fit_line\n if two_legend:\n legend = plt.legend(\n (xy_line, blue_circ, blue_circ_2),\n (\n \"Refined Occupancy\\n= Ratio of Labelled Species\",\n \"B factor = 50\",\n \"\\nB factor = 100\",\n ),\n prop={\"size\": 8},\n loc=\"upper left\",\n frameon=False,\n )\n\n # Single point in legend\n elif single_legend:\n legend = plt.legend(\n (xy_line, blue_circ_small),\n (\n \"Refined Occupancy\\n= Ratio of Labelled Species\",\n \"B factor = 24\",\n ),\n prop={\"size\": 8},\n loc=\"upper left\",\n frameon=False,\n )\n\n\n plt.savefig(f_name, dpi=600)\n plt.close()", "title": "" }, { "docid": "d2586e3656f9f4b95d1bbcedf505a8b4", "score": "0.49845013", "text": "def candidate_stats(cands, title=None):\n cands_by_model = cands.groupby('model')\n\n fig, axes = plt.subplots(2, 3, figsize=(17, 2 + 0.4 * len(cands_by_model)))\n ax = sns.barplot(0, 'model', data=cands_by_model.size().reset_index(), ax=axes[0][0], color='grey')\n ax.set_xlabel('# BGC candidates total')\n\n print(cands_by_model['candidate_hash'].nunique().reset_index().head())\n\n ax = sns.barplot('candidate_hash', 'model', data=cands_by_model['candidate_hash'].nunique().reset_index(), ax=axes[0][1], color='grey')\n ax.set_xlabel('# Unique BGC candidates')\n\n ax = sns.boxplot('num_proteins', 'model', ax=axes[0][2], data=cands, color='white', linewidth=1, showfliers=False)\n ax.set_xlabel('# proteins per BGC candidate')\n\n ax = sns.boxplot('num_all_domains', 'model', ax=axes[1][0], data=cands, color='white', linewidth=1, showfliers=False)\n ax.set_xlabel('# protein domains per BGC candidate')\n\n ax = sns.boxplot('num_bio_domains', 'model', ax=axes[1][1], data=cands, color='white', linewidth=1, showfliers=False)\n ax.set_xlabel('# biosynthetic protein 
domains per BGC candidate')\n\n ax = sns.boxplot('nucl_length', 'model', ax=axes[1][2], data=cands, color='white', linewidth=1, showfliers=False)\n ax.set_xlabel('nucleotide length per BGC candidate')\n\n fig.tight_layout()\n\n if title:\n fig.suptitle(title, fontsize=15)\n fig.subplots_adjust(top=0.90)\n\n return fig", "title": "" }, { "docid": "66ad33b515f55a657aa624d057f664b9", "score": "0.4984322", "text": "def barplot(ycsb, maxcutoff, dototals):\n updates = ycsb[0]\n reads = ycsb[1]\n checks = ycsb[2]\n\n hatches = ['/', 'x', '.' ]\n\n utime, ulatency = splitbyrecordcount(updates)\n rtime, rlatency = splitbyrecordcount(reads)\n ctime, clatency = splitbyrecordcount(checks)\n\n # Set up record counts for x-axis titles\n maxcutoff = round(maxcutoff, 0)\n recordcounts = np.arange(maxcutoff/9, maxcutoff + maxcutoff/9, maxcutoff/9)\n for i in range(0, len(recordcounts)):\n recordcounts[i] = int(round_sigfigs(recordcounts[i], 3))\n\n #for i in range(0, len(clatency)):\n #clatency[i] = clatency[i]/1000\n\n uheights = []\n rheights = []\n cheights = []\n\n if dototals:\n for i in range(1, len(utime)):\n uheights.append((integralof(ulatency[i], utime[i])/1000)/5)\n for i in range(1, len(rtime)):\n rheights.append((integralof(rlatency[i], rtime[i])/1000)/5)\n for i in range(1, len(ctime)):\n cheights.append((integralof(clatency[i], ctime[i])/1000)/5)\n else:\n for i in range(1, len(utime)):\n uheights.append(((sum(ulatency[i])/1000)/5)/len(ulatency[i]))\n for i in range(1, len(rtime)):\n rheights.append(((integralof(rlatency[i], rtime[i])/1000)/5)/len(rlatency[i]))\n for i in range(1, len(ctime)):\n cheights.append(((integralof(clatency[i], ctime[i])/1000)/5)/len(clatency[i]))\n\n if debug:\n print \"U\", len(ulatency[i]), len(utime[i])\n print \"R\", len(rlatency[i]), len(rtime[i])\n print \"C\", len(clatency[i]), len(ctime[i])\n\n btm = 0\n fig = plt.figure()\n font = {'family' : 'serif',\n 'weight' : 'normal',\n 'size' : 10}\n plt.rc('font', **font)\n fig.add_subplot(111)\n\n heights = [ uheights, rheights, cheights ]\n legend = [ \"Update\", \"Read\", \"Verification\" ]\n\n for i in range(0, len(heights)):\n width = 0.5 # Bar width\n N = 9 # Number of bars\n ind = np.arange(N) # X-coords of left side of bars\n\n #c = colmap.gray((i+1)/3.,1)\n c = 'w'\n\n if i == 0:\n btm = [ 0 for x in range(0, len(heights[i])) ]\n elif i == 1:\n btm = heights[0]\n elif i == 2:\n btm = [ sum(a) for a in zip(heights[0], heights[1]) ]\n\n if debug:\n print \"i = \", i\n print \"heights:\", heights[i]\n print \"btm\", btm\n\n plt.bar( ind, heights[i], width, color = c, hatch = hatches[i], bottom = btm )\n maxplotheight = max([ sum(a) for a in\n zip(heights[0], heights[1], heights[2]) ])\n locs, labels = plt.yticks()\n #print locs\n #labs = [ l/1000 for l in locs ]\n #print locs\n #plt.yticks(locs, labs)\n plt.yticks()\n\n plt.suptitle('Latency vs. Number of Records' + getTestInfo())\n plt.xlabel('Number of Records (Approx.)')\n plt.xticks(ind+width/2., recordcounts, rotation = 35)\n\n if prunedtoken == 'P':\n plt.legend( legend, loc=2, bbox_to_anchor=(1.05, 1),\n borderaxespad=0. 
)\n\n    fig.subplots_adjust( bottom=0.20, hspace=0.20, right=0.75 )\n\n    if dototals:\n        plt.ylabel('Total Latency (ms)')\n        plt.savefig( getfilename(\"barplot\", i),\n                     format='png', dpi=300, bbox_inches='tight',\n                     transparent=True )\n    else:\n        plt.ylabel('Average Latency (ms)')\n        plt.savefig( getfilename(\"barplotAVG\", i),\n                     format='png', dpi=300, bbox_inches='tight',\n                     transparent=True )", "title": "" }, { "docid": "cb770970683d0003fb25865d90471d5b", "score": "0.4982604", "text": "def show_patches_per_bucket(patches):\n    scaled_imgs = len(patches)\n    num_bins = 100\n    x = []\n    for k in range(scaled_imgs):\n        for patch in patches[k]:\n            x.append(patch.bucket)\n\n    n, bins, patches = plt.hist(x, num_bins, facecolor='black', alpha=0.5)\n\n    plt.xlabel('Bucket numbers')\n    plt.ylabel('Number of patches')\n    plt.title(r'$\\mathrm{Histogram}$')\n    plt.grid(True)\n    plt.xlim([0, 10])\n    plt.show()", "title": "" }, { "docid": "5b406960928cc091943ee065dde128ec", "score": "0.4978723", "text": "def plotVisDistribution(nvisMap,idNvisDistributionPerSubMs,filename,idLabel,plotMode=1):\n    \n    # Create a new figure\n    plt.ioff()\n    \n    \n    # If plot is not to be shown then use a pre-defined figure size of 1585x1170 pixels with 75 DPI\n    # (we cannot maximize the window to the screen size)\n    if plotMode==2:\n        plt.figure(figsize=(21.13,15.6),dpi=75) # Size is given in inches\n    else:\n        plt.figure()\n    \n    \n    # Sort the id according to the total number of visibilities so that we can\n    # represent the bigger groups at the bottom and the smaller ones at the top\n    idx = 0\n    idArray = np.zeros(len(nvisMap))\n    idNvisArray = np.zeros(len(nvisMap))\n    for id in nvisMap:\n        idArray[idx] = int(id)\n        idNvisArray[idx] = nvisMap[id]\n        idx = idx + 1\n    \n    idArraySortIndex = np.argsort(idNvisArray)\n    idArraySortIndex[:] = idArraySortIndex[::-1]\n    idArraySorted = idArray[idArraySortIndex]\n    \n    \n    # Initialize color vector to alternate cold/warm colors\n    nid = len(nvisMap)\n    colorVector = list()\n    colorRange = range(nid)\n    colorVectorEven = colorRange[::2]\n    colorVectorOdd = colorRange[1::2]\n    colorVectorOdd.reverse()\n    while len(colorVectorOdd) > 0 or len(colorVectorEven) > 0:\n        if len(colorVectorOdd) > 0: colorVector.append(colorVectorOdd.pop())\n        if len(colorVectorEven) > 0: colorVector.append(colorVectorEven.pop())\n    \n    \n    # Generate stacked bar plot\n    coloridx = 0 # color index\n    width = 0.35 # bar width\n    nsubms = len(idNvisDistributionPerSubMs[idNvisDistributionPerSubMs.keys()[0]])\n    idx = np.arange(nsubms) # location of the bar centers in the horizontal axis\n    bottomLevel = np.zeros(nsubms) # Reference level for the bars to be stacked after the previous ones\n    legendidLabels = list() # List of legend idLabels\n    plotHandles=list() # List of plot handles for the legend\n    for id in idArraySorted:\n        \n        id = str(int(id))\n        \n        idplot = plt.bar(idx, idNvisDistributionPerSubMs[id], width, bottom=bottomLevel, color=plt.cm.Paired(1.*colorVector[coloridx]/nid))\n        \n        # Update color index \n        coloridx = coloridx + 1\n        \n        # Update legend lists\n        plotHandles.append(idplot)\n        legendidLabels.append(idLabel + ' ' + id)\n        \n        # Update reference level\n        bottomLevel = bottomLevel + idNvisDistributionPerSubMs[id]\n\n    \n    # Add legend\n    plt.legend( plotHandles, legendidLabels, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)\n    \n    \n    # Add label for y axis\n    plt.ylabel('nVis')\n    \n    \n    # Add x-ticks \n    xticks = list()\n    for subms in range(0,nsubms):\n        xticks.append('subMS-' + str(subms)) \n    plt.xticks(idx+width/2., xticks )\n    \n    \n    # Add title\n    title = filename + ' 
distribution of ' + idLabel + ' visibilities across sub-MSs'\n plt.title(title)\n \n \n # Resize to full screen\n if plotMode==1 or plotMode==3:\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n \n \n # Show figure\n if plotMode==1 or plotMode==3:\n plt.ion()\n plt.show()\n \n \n # Save plot\n if plotMode>1:\n title = title.replace(' ','-') + '.png'\n plt.savefig(title)\n \n \n # If plot is not to be shown then close it\n if plotMode==2:\n plt.close()", "title": "" } ]
55d49f29e95ea569ff907a293f75ed60
Generates summary statistics for the extracted unmapped regions
[ { "docid": "491efe2883d60911dd457945db5f1089", "score": "0.58510685", "text": "def unmapsum(unmappeddict, idunmap):\n \n # Create GC content, length and amino acid residues list to store values for each unmapped region\n gc_unmap = list()\n len_unmap = list()\n amino = pd.DataFrame(columns = ['A', 'D','E', 'G','F', 'L', 'Y', 'C', 'W', 'P', 'H', 'Q','I', 'M', 'T', 'N', 'S', 'K', 'R', 'V'])\n \n # Calculate values for each unmapped sequence\n for seq in unmappeddict.values():\n gc_unmap.append(SeqUtils.GC(str(seq)))\n len_unmap.append(len(seq))\n dna = Seq(seq)\n dna_seq = [dna, dna.reverse_complement()]\n codes = list()\n for s in dna_seq:\n for frame in range(3):\n pro = s[frame:].translate(table = 11)\n codes.append(pro._data)\n \n A = 0\n D = 0\n E = 0\n G = 0\n F = 0\n L = 0\n Y = 0\n C = 0\n W = 0\n P = 0\n H = 0\n Q = 0\n I = 0\n M = 0\n T = 0\n N = 0\n S = 0\n K = 0\n R = 0\n V = 0\n Stop = 0\n len_seq = 0\n \n for pro_seq in codes:\n \n # Counts\n A = (str(pro_seq).count('A')) + A\n D = (str(pro_seq).count('D')) + D\n E = (str(pro_seq).count('E')) + E\n G = (str(pro_seq).count('G')) + G\n F = (str(pro_seq).count('F')) + F\n L = (str(pro_seq).count('L')) + L\n Y = (str(pro_seq).count('Y')) + Y\n C = (str(pro_seq).count('C')) + C\n W = (str(pro_seq).count('W')) + W\n P = (str(pro_seq).count('P')) + P\n H = (str(pro_seq).count('H')) + H\n Q = (str(pro_seq).count('Q')) + Q\n I = (str(pro_seq).count('I')) + I\n M = (str(pro_seq).count('M')) + M\n T = (str(pro_seq).count('T')) + T\n N = (str(pro_seq).count('N')) + N\n S = (str(pro_seq).count('S')) + S\n K = (str(pro_seq).count('K')) + K\n R = (str(pro_seq).count('R')) + R\n V = (str(pro_seq).count('V')) + V\n Stop = (str(pro_seq).count('*')) + Stop\n len_seq = len(pro_seq) + len_seq\n \n A = A/len_seq\n D = D/len_seq\n E = E/len_seq\n G = G/len_seq\n F = F/len_seq\n L = L/len_seq\n Y = Y/len_seq\n C = C/len_seq\n W = W/len_seq\n P = P/len_seq\n H = H/len_seq\n Q = Q/len_seq\n I = I/len_seq\n M = M/len_seq\n T = T/len_seq\n N = N/len_seq\n S = S/len_seq\n K = K/len_seq\n R = R/len_seq\n V = V/len_seq\n Stop = Stop/len_seq\n \n amino = amino.append({'A':A*100,\n 'D':D*100,\n 'E':E*100,\n 'G':G*100,\n 'F':F*100,\n 'L':L*100,\n 'Y':Y*100,\n 'C':C*100,\n 'W':W*100,\n 'P':P*100,\n 'H':H*100,\n 'Q':Q*100,\n 'I':I*100,\n 'M':M*100,\n 'T':T*100,\n 'N':N*100,\n 'S':S*100,\n 'K':K*100,\n 'R':R*100,\n 'V':V*100,\n 'Stop':Stop*100}, ignore_index = True)\n codes.clear()\n \n \n # Create unmapped region summary dataframe: Region, GC content, length and total amino acid frequency for all six reading frames \n unmap_stats = pd.DataFrame(list(zip(idunmap, gc_unmap, len_unmap)), columns = ['Region', 'GCContent', 'Length'])\n unmap_stats = pd.concat([unmap_stats, amino], axis = 1)\n unmap_stats.reset_index(drop = True, inplace = True)\n unmap_stats.sort_index(inplace = True)\n \n return unmap_stats", "title": "" } ]
[ { "docid": "0b7767a929343f878d5391ab16351e29", "score": "0.67602116", "text": "def compute_statistics(self, region_dict):\n min_area = 15\n stats = {}\n for k,pixels in region_dict.items():\n area = len(pixels)\n if area < min_area:\n continue\n (ci,cj) = (0,0)\n for (i,j) in pixels:\n ci += i\n cj += j\n ci /= area\n cj /= area\n stats[k] = {'x':cj,'y':ci, 'area':area}\n\n print(stats)\n\n\n\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return 0", "title": "" }, { "docid": "d8f95a8591ce1c62654e6e736dd030aa", "score": "0.66127986", "text": "def _basic_mapping_statistics_info(self,bowtiesummary = ''):\n summary = []\n names, totalReads,mappedReads,uniqueReads,uniqueLocation,mapRatio =[],[],[],[],[],[]\n redundant = []\n mapRatio = []\n with open(bowtiesummary) as fhd:\n for line in fhd:\n line.strip()\n if line.startswith('sam file'):\n s = line.split('=')[1].strip().split('.')[0]\n names.append(s)\n if line.startswith('total reads'):\n total = line.split('=')[1].strip()\n totalReads.append(total)\n if line.startswith('mapped reads'):\n mapped = line.split('=')[1].strip()\n mappedReads.append(mapped)\n if line.startswith('unique location'):\n uniqueLocation.append(line.split('=')[1].strip())\n mapRatio.append(round(float(mapped)/float(total),3))\n\n with open(self.rule.qc.filterdup) as frddt:\n for line in frddt:\n score = round(float(line.strip().split('=')[1]),3)\n redundant.append(score)\n\n # formating\n name_sp = map(underline_to_space, names)\n digit_tex = lambda x:digit_format(x,sep=\"\\,\") # comma in Tex file should be \"/,\"\n for i in range(len(name_sp)):\n self.checker.append({\"desc\": 'Unique mappable reads',\n \"data\": name_sp[i],\n \"value\": mappedReads[i],\n \"cutoff\": 5000000})\n # index problem\n try:\n summary.append([name_sp[i],\n digit_tex(totalReads[i]),\n digit_tex(mappedReads[i]),\n percent_format(mapRatio[i]),\n digit_tex(uniqueLocation[i]),\n percent_format(redundant[i])])\n except:\n pass\n for i in range(len(name_sp)):\n self.checker.append({\"desc\": 'Unique location',\n \"data\": name_sp[i],\n \"value\": uniqueLocation[i],\n \"cutoff\": 5000000}) \n \n return summary,name_sp,mapRatio", "title": "" }, { "docid": "50c333debdab323a7ba3f58e23867df1", "score": "0.6474591", "text": "def compute_statistics(self, region):\n finalreg=dict()\n count=1\n\n for key, value in region.items():\n x = 0\n y = 0\n\n for i in range(0, len(value)):\n x = x + value[i][0]\n y = y + value[i][1]\n x = round(x / len(value))\n y = round(y / len(value))\n centroid = [x, y]\n if (len(value) >= 15):\n finalreg[count] = [centroid, len(value)]\n count=count+1\n\n print(len(finalreg))\n print(finalreg)\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return finalreg", "title": "" }, { "docid": "35673f37ff62a1b42786160c590e04d0", "score": "0.64497846", "text": "def summarize_mapped_reads(in_mapped, out_summary):\n with open(out_summary, 'w') as outfile:\n outfile.write(\"\"\"\n{| class=\"wikitable\"\n|+ Summary of mapped read counts\n!scope=\"col\" | Dataset\n!scope=\"col\" | Number of mapped reads\n|-\n\"\"\")\n for infile in in_mapped:\n for count, line in enumerate(open(infile)):\n pass\n outfile.write(\"| %s || %s\\n|-\\n\" % (infile, count))\n outfile.write('|}\\n')", "title": "" }, { "docid": "6b64699bcb8cb0b8a03d190517b40eb9", "score": "0.6340288", "text": "def compute_statistics(self, region):\n count = 0\n stats = []\n for key in 
region.keys():\n if len(region[key]) > 15:\n area = len(region[key])\n pixels = region[key]\n x = 0\n y = 0\n count += 1\n for points in pixels:\n x += points[0]\n y += points[1]\n x /= len(region[key])\n y /= len(region[key])\n centroid =(int(x),int(y))\n stats.append([count,area,centroid])\n print(\"Region: \", count, \"Area: \", area, \"Centroid\", centroid)\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return stats", "title": "" }, { "docid": "5aac776e606982cb4064b38467c600fd", "score": "0.6234284", "text": "def summarize_maps_data(fs_data):\n num_abnormal_ip = len(fs_data['abnormal_ips'].keys())\n num_abnormal_osts = 0\n if num_abnormal_ip:\n for _, ost_list in fs_data['abnormal_ips'].iteritems():\n num_abnormal_osts += len(ost_list)\n avg_overload = float(num_abnormal_osts) / float(num_abnormal_ip)\n avg_overload_factor = avg_overload / float(fs_data['mode'])\n else:\n avg_overload = 0.0\n avg_overload_factor = 1.0\n\n return {\n 'ost_overloaded_oss_count': num_abnormal_ip,\n 'ost_overloaded_ost_count': num_abnormal_osts,\n 'ost_avg_overloaded_ost_per_oss': avg_overload,\n 'ost_avg_overloaded_overload_factor': avg_overload_factor,\n }", "title": "" }, { "docid": "fc3557cf2ef042a44d2e32144ecfd22d", "score": "0.6191468", "text": "def gather_map_metrics():\n pass", "title": "" }, { "docid": "8a3f1923fd756bbbec91d6710d8c316a", "score": "0.610667", "text": "def compute_statistics(self, region):\n stats = dict()\n b = {}\n x_coord=[]\n y_coord=[]\n i = 0\n l = []\n counter = []\n counter2 = 1\n for k,v in region.items():\n\n if not v in b:\n l.append(v)\n b[v] = 1\n x_coord.append(k[0])\n y_coord.append(k[1])\n counter.append(1)\n i += 1\n\n else:\n b[v] +=1\n u = x_coord.pop()\n z = y_coord.pop()\n\n counter2 += 1\n\n x_coord.append(u+k[0])\n y_coord.append(z+k[1])\n\n centroid = len(x_coord)*[0]\n\n\n\n for ele in range(0,len(x_coord)):\n centroid_x = round((x_coord[ele]/len(x_coord)))\n centroid_y = round((y_coord[ele]/len(y_coord)))\n centroid[ele] = (centroid_x, centroid_y)\n\n\n q = dict(zip(l[:len(centroid)],centroid[:len(x_coord)]))\n print('q:')\n print(q)\n\n for k,v in b.items():\n if b[k] >= 15:\n stats[k] = q[k], b[k]\n print('Region: ' + str(k) + ', Centroid: ' + str(stats[k][0]) + ', Area: ' + str(stats[k][1]))\n print(stats)\n\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return stats", "title": "" }, { "docid": "5091d5786a05f831da2f5273d6408b92", "score": "0.6026859", "text": "def summarize(self):\n LOGGER.info('summarizing results')\n\n # feedstock-tillage type-region_production\n _summarize_by_region_production = self.results.groupby(['feedstock',\n 'tillage_type',\n 'region_production',\n 'pollutant'],\n as_index=False).sum()\n\n _summarize_by_region_production['unit_numerator'] = 'lb pollutant' # @TODO: should we be reading this in from the data?\n _summarize_by_region_production['unit_denominator'] = 'county-year'\n\n _summarize_by_region_production[['feedstock', 'tillage_type',\n 'region_production', 'pollutant',\n 'pollutant_amount',\n 'unit_numerator',\n 'unit_denominator']].to_csv(\n os.path.join(self.config.get('project_path'),\n '%s' %\n self.config.get('scenario_name') +\n '_total_emissions_by_production_region.csv'),\n index=False)\n\n # cache for results tab graphics\n self.summaries['by_region_production'] = _summarize_by_region_production[['feedstock',\n 'tillage_type',\n 'region_production',\n 'pollutant',\n 
'pollutant_amount',\n 'unit_numerator',\n 'unit_denominator']]\n if self.config['fpeam'].get('inmap_county_export', False) is True:\n # save InMAP county-level output\n _shp_fpath_in = resource_filename('FPEAM', 'data/inputs/tl_2019_us_county/tl_2019_us_county.shp')\n _df = geopandas.read_file(_shp_fpath_in, dtype={'STATEFP': str, 'COUNTYFP': str, 'geometry': MultiPolygon})[['STATEFP', 'COUNTYFP', 'NAME', 'geometry']]\n _df['region_production'] = _df.STATEFP + _df.COUNTYFP\n\n _grp_df = _summarize_by_region_production[['region_production', 'pollutant', 'pollutant_amount']].groupby(['region_production', 'pollutant'], as_index=False).sum()\n _grp_df_pivot = _grp_df.pivot(index='region_production', columns='pollutant', values='pollutant_amount')\n\n _gdf_join = _df[['region_production', 'NAME', 'geometry']].join(other=_grp_df_pivot, on='region_production', how='right')\n\n _gdf_join.rename(columns={'region_production': 'cnty_fips',\n 'NAME': 'cnty_name',\n 'co': 'CO',\n 'nh3': 'NH3',\n 'nox': 'NOx',\n 'pm10': 'PM10',\n 'pm25': 'PM2_5',\n 'so2': 'SO2',\n 'voc': 'VOC'}, inplace=True)\n\n _shp_fname_out = '%s_county_inmap.shp' % self.config.get('scenario_name')\n _shp_fpath_out = os.path.join(self.config.get('project_path'), _shp_fname_out)\n _gdf_join.to_file(_shp_fpath_out)\n\n # feedstock-tillage type-region_production\n _results_to_normalize = self.results\n\n # calculate raw normalized pollutant amounts\n _results_to_normalize.eval('normalized_pollutant_amount = '\n 'pollutant_amount / feedstock_amount',\n inplace=True)\n\n # add unit columns for normalized pollutant amounts\n _results_to_normalize['normalized_pollutant_unit_numerator'] = \\\n _results_to_normalize['unit_numerator']\n _results_to_normalize['normalized_pollutant_unit_denominator'] = \\\n _results_to_normalize['feedstock_unit_numerator']\n\n # sum normalized pollutant amounts over modules and activities\n _results_normalized = _results_to_normalize.groupby(['feedstock',\n 'feedstock_measure',\n 'tillage_type',\n 'region_production',\n 'feedstock_amount',\n 'feedstock_unit_numerator',\n 'pollutant',\n 'unit_numerator',\n 'normalized_pollutant_unit_numerator',\n 'normalized_pollutant_unit_denominator'],\n as_index=False).sum()\n # cache for results tab graphics\n self.summaries['normalized'] = _results_normalized\n\n # save to csv\n _results_normalized.to_csv(os.path.join(self.config.get('project_path'),\n '%s' % self.config.get('scenario_name') +\n '_normalized_total_emissions_by_production_region.csv'),\n index=False)\n\n if 'region_transportation' in self.results.columns:\n # feedstock-tillage type-region_transportation\n _summarize_by_region_transportation = self.results.groupby(['feedstock',\n 'tillage_type',\n 'region_transportation',\n 'pollutant'],\n as_index=False).sum()\n\n _summarize_by_region_transportation['unit_numerator'] = 'lb pollutant'\n _summarize_by_region_transportation['unit_denominator'] = 'transportation county-year'\n\n self.summaries['by_region_transportation'] = _summarize_by_region_transportation\n\n _summarize_by_region_transportation[['feedstock', 'tillage_type',\n 'region_transportation',\n 'pollutant',\n 'pollutant_amount']].to_csv(\n os.path.join(self.config.get('project_path'),\n '%s' %\n self.config.get('scenario_name') +\\\n '_transportation_emissions_by_region.csv'),\n index=False)\n\n # feedstock-tillage type-module\n _summarize_by_module = self.results.groupby(['feedstock',\n 'tillage_type',\n 'module',\n 'pollutant'],\n as_index=False).sum()\n\n _summarize_by_module['unit_numerator'] = 
'lb pollutant'\n _summarize_by_module['unit_denominator'] = 'county-year'\n\n self.summaries['by_module'] = _summarize_by_module\n\n _fpath_summary = os.path.join(self.config.get('project_path'),\n '%s_total_emissions_by_module.csv'\n % self.config.get('scenario_name'))\n\n _summarize_by_module[['feedstock', 'tillage_type', 'module', 'pollutant',\n 'pollutant_amount']].to_csv(_fpath_summary, index=False)", "title": "" }, { "docid": "acdc05adcd2a5a0ec1c866e33bb35386", "score": "0.60142785", "text": "def output(mappeddict, unmappeddict, conflictdict, refstats, unmap_stats, prefix, out):\n \n path_sum = '{out}'.format(out = out)\n refstats.to_csv(os.path.join(path_sum,'{genome_id}_referencesummary.tsv'.format(genome_id = prefix)), sep = '\\t', index = False)\n unmap_stats.to_csv(os.path.join(path_sum,'{genome_id}_unmapsummary.tsv'.format(genome_id = prefix)), sep = '\\t', index = False)\n \n # Write mapped regions FASTA file\n with open(os.path.join(out,'{prefix}_mappedregions.fasta'.format(prefix = prefix)), 'w+') as fasta:\n for key, value in mappeddict.items():\n fasta.write('>' + key + '\\n' + value + '\\n')\n \n # Write unmapped regions FASTA file\n with open(os.path.join(out,'{prefix}_unmappedregions.fasta'.format(prefix = prefix)), 'w+') as fasta:\n for key, value in unmappeddict.items():\n fasta.write('>' + key + '\\n' + value + '\\n')\n \n # Write mapped regions FASTA file\n with open(os.path.join(out,'{prefix}_conflictregions.fasta'.format(prefix = prefix)), 'w+') as fasta:\n for key, value in conflictdict.items():\n fasta.write('>' + key + '\\n' + value + '\\n')", "title": "" }, { "docid": "b044fbb11a869f1d5107c38549dd47ab", "score": "0.5890273", "text": "def show_map(self):\n\n #TODO must sort by real start address first, otherwise unused region calcs are wrong!\n print(\"MEMORY MAP\")\n last_end = 0\n for i in self.map:\n name, start, size, handler = i\n if start != last_end:\n uname = \"UNUSED\"\n ustart = last_end\n uend = start-1\n usize = uend-ustart-1\n print(\"%10s %5x %5x %5x\" % (uname, ustart, uend, usize))\n print(\"%10s %5x %5x %5x %s\" % (name, start, start+size-1, size, str(handler)))\n last_end = start + size\n #TODO: show any final unused space up to FFFF at end", "title": "" }, { "docid": "816ee64ab7afe323e19dfa1c923ba078", "score": "0.575683", "text": "def region_statistics(labels):\n all_labels = np.unique(labels);\n all_labels = np.setdiff1d(all_labels,[0,1])\n \n # Use NDarray to contain a defined tuple\n stats = np.zeros(len(all_labels) , dtype = [('num_pixel',int),\n ('num_z_slice',int),\n ('label',int)])\n \n idx = 0;\n for l in all_labels:\n # Find statistics\n num_pixel = labels[labels == l].size\n num_z_slice = np.sum(np.any(labels ==l,(1,2)))\n \n stats[idx] = (num_pixel,num_z_slice,l)\n idx += 1 # Advance counter for preallocation\n \n return stats", "title": "" }, { "docid": "6de792de5c5a7d09a9606c53cf6ad10d", "score": "0.57393247", "text": "def refstats(reference, mappedlocations, unmappedlocations, conflictlocations, reverselocations, unmappeddict):\n # Calculate genome fraction\n sum_map = 0\n for i in range(0, mappedlocations.shape[0]):\n sum_map = sum_map + abs(mappedlocations.iloc[i,1] - mappedlocations.iloc[i,0])\n \n sum_confl = 0\n for i in range(0, conflictlocations.shape[0]):\n sum_confl = sum_confl + abs(conflictlocations.iloc[i,1] - conflictlocations.iloc[i,0])\n \n sum_rev = 0\n for i in range(0, reverselocations.shape[0]):\n sum_rev = sum_rev + abs(reverselocations.iloc[i,1] - reverselocations.iloc[i,0])\n \n total_map = sum_map + 
sum_confl + sum_rev\n \n sum_unmap = 0\n for i in range(0, unmappedlocations.shape[0]):\n sum_unmap = sum_unmap + abs(unmappedlocations.iloc[i,1] - unmappedlocations.iloc[i,0])\n \n read = SeqIO.read(reference, format = 'fasta')\n refstats_dict = dict()\n refstats_dict = [{'GCContent': SeqUtils.GC(read.seq),\n 'Length': len(str(read.seq)),\n 'NumberMappedRegions': mappedlocations.shape[0] + reverselocations.shape[0] + conflictlocations.shape[0],\n 'NumberUnmappedRegions': unmappedlocations.shape[0],\n 'FilteredUnmappedRegions': len(unmappeddict),\n 'FractionMapped': (total_map/len(str(read.seq)))*100,\n 'FractionUnmapped': (sum_unmap/len(str(read.seq)))*100}]\n \n # Create reference summary dataframe\n refstats_t = pd.DataFrame.from_dict(refstats_dict)\n refstats_t.reset_index(drop = True, inplace = True)\n refstats_t.sort_index(inplace = True)\n \n return refstats_t", "title": "" }, { "docid": "0f517f41cd1d72fdf9014d67f229fdb5", "score": "0.57301086", "text": "def region_summary(self, where=st):\n st = where\n region = self.region\n\n st.title(f\"{region.name} ({region.id})\")\n st.header(_(\"Basic information\"))\n\n extra = {}\n if region.icu_capacity is not None:\n extra[_(\"ICU beds\")] = fmt(region.icu_capacity)\n if region.hospital_capacity is not None:\n extra[_(\"Hospital beds\")] = fmt(region.icu_capacity)\n\n st.cards({_(\"Population\"): fmt(region.population), **extra})", "title": "" }, { "docid": "bcce0851d9b282c0413716da897ff75a", "score": "0.5650829", "text": "def summary_stats(self, data):", "title": "" }, { "docid": "4500828f489083730ade761d52a25e9e", "score": "0.5563425", "text": "def summarize(abbreviations, verbose, summary, municipal):\n if not abbreviations:\n abbreviations = get_all_abbreviations()\n\n summarizer = Summarizer()\n for abbr in abbreviations:\n summarizer.process_legislature(abbr)\n summarizer.print_summary()", "title": "" }, { "docid": "271f01e660a16cdb2bbc60fcbf941a5d", "score": "0.55519027", "text": "def compute_built_in_summary_results(self):", "title": "" }, { "docid": "6b909f756653c6061b4fc986e95d2f5c", "score": "0.5520262", "text": "def summary(self):\n if not self.valid:\n return {}\n else:\n return {\n \"source\": self.source,\n \"assembly\": self.assembly,\n \"build\": self.build,\n \"build_detected\": self.build_detected,\n \"count\": self.count,\n \"chromosomes\": self.chromosomes_summary,\n \"sex\": self.sex,\n }", "title": "" }, { "docid": "bfcb2214b7adfd7dc3ac9a0f7936eacc", "score": "0.5459296", "text": "def _computeStatistics(self):", "title": "" }, { "docid": "8db459a3042b1609ff9fcbd0f604ca12", "score": "0.544318", "text": "def summarize_offtake():\n\n site_csv = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\CENTURY4.6\\Kenya\\input\\regional_properties\\regional_properties.csv\"\n site_list = pd.read_csv(site_csv).to_dict(orient=\"records\")\n\n outer_dir = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\Forage_model\\model_results\\regional_properties\"\n cp_opts = ['varying', 'constant']\n marg_dict = {'site': [], 'avg_offtake_varying_cp': [],\n 'avg_offtake_constant_cp': []}\n for site in site_list:\n marg_dict['site'].append(site['name'])\n for cp_o in cp_opts:\n inner_folder_name = \"herd_avg_uncalibrated_0.3_{}_cp_GL\".format(cp_o)\n inner_dir = os.path.join(outer_dir, inner_folder_name)\n outdir = os.path.join(inner_dir,\n 'site_{:d}'.format(int(site['name'])))\n sum_csv = os.path.join(outdir, 'summary_results.csv')\n sum_df = pd.read_csv(sum_csv)\n subset = sum_df.loc[sum_df['year'] > 2013]\n 
avg_offtake = subset.total_offtake.mean()\n if cp_o == 'varying':\n marg_dict['avg_offtake_varying_cp'].append(avg_offtake)\n else:\n marg_dict['avg_offtake_constant_cp'].append(avg_offtake)\n df = pd.DataFrame(marg_dict)\n summary_csv = os.path.join(outer_dir, 'offtake_summary.csv')\n df.to_csv(summary_csv)", "title": "" }, { "docid": "98e46882b7144c35d5eee8210b8484ff", "score": "0.5420453", "text": "def summarize(self):\n \n try:\n chroms=self.get_chroms()\n if not chroms: raise ValueError\n except (AttributeError,ValueError):\n return\n \n # obtain a summary statistics\n self.init_table('whole')\n array_adder=lambda x,y: [x[i]+y[i] for i in range(0,len(x))]\n for chrom in chroms:\n if chrom!='whole':\n self['whole']['promoter']=array_adder(self['whole']['promoter'],self[chrom]['promoter'])\n self['whole']['bipromoter']=array_adder(self['whole']['bipromoter'],self[chrom]['bipromoter'])\n self['whole']['downstream']=array_adder(self['whole']['downstream'],self[chrom]['downstream'])\n self['whole']['gene']=array_adder(self['whole']['gene'],self[chrom]['gene'])\n self['whole']['rel_loc'][0]=array_adder(self['whole']['rel_loc'][0],self[chrom]['rel_loc'][0])\n self['whole']['rel_loc'][1]=array_adder(self['whole']['rel_loc'][1],self[chrom]['rel_loc'][1])\n self['whole']['rel_loc_cds'][0]=array_adder(self['whole']['rel_loc_cds'][0],self[chrom]['rel_loc_cds'][0])\n self['whole']['rel_loc_cds'][1]=array_adder(self['whole']['rel_loc_cds'][1],self[chrom]['rel_loc_cds'][1])\n self['whole']['roi']+=self[chrom]['roi']\n self['whole']['Ns']+=self[chrom]['Ns']", "title": "" }, { "docid": "b83f6a550efce245c93d41100eeda069", "score": "0.5419295", "text": "def area(seg_img, units, *args): \n data_dict1 = [region.area for region in regions]\n if unitLength and not embeddedpixelsize:\n data_dict = [dt_pixel / pixelsPerunit**2 for dt_pixel in data_dict1]\n else:\n data_dict = data_dict1\n logger.debug('Completed extracting area for ' + seg_file_names1.name)\n return data_dict", "title": "" }, { "docid": "b35ce98ad8344ea2a087eaab5acf9bfc", "score": "0.54011685", "text": "def calculate_mapping_coverage(coverage_dict):\r\n\r\n ## Create an empty list that will hold all data to save\r\n statistics = []\r\n\r\n # Adding the necessary information to a list in tuples\r\n for gene in coverage_dict:\r\n # Amount of low coverage bases\r\n low_coverage = len([coverage_dict[gene] for i in coverage_dict[gene] if i < 30])\r\n # Coverage length per gene\r\n coverage = coverage_dict[gene]\r\n # Adding the gene name, coverage length, average coverage and amount of low coverage scores\r\n statistics.append((gene, len(coverage),\r\n round(sum(coverage) / len(coverage), 1), low_coverage))\r\n\r\n return statistics", "title": "" }, { "docid": "056b1a43c90bce3e0bfb4f81f3e8a985", "score": "0.5364049", "text": "def summary(self) :", "title": "" }, { "docid": "ce8bdd72f1c91025639e770f92804b2c", "score": "0.5347609", "text": "def make_summary(self): \n sumWT = 0\n OT = 0\n IT = 0\n WT_by_group = {}\n crowding_by_group = {}\n for g in self.patient_groups:\n WT_by_group[g] = 0\n crowding_by_group = 0\n for iter in range(self.n_iterations):\n IT += self.idle_time_observations[iter]\n OT += self.overtime_observations[iter]\n for id in self.wait_time_observations_by_patient:\n obs = self.wait_time_observations_by_patient[id]\n g = self.patient_groups[id]", "title": "" }, { "docid": "f200e668c00d1666d7684c3f9b200e61", "score": "0.5345776", "text": "def summaries(self):\n ...", "title": "" }, { "docid": "fc78289ca65214f0bc4024a7c2246994", 
"score": "0.5332844", "text": "def _extract_regions(self, sample):\n\n if self.regions is None:\n return sample\n\n # get the pileup\n\n bam = AlignmentFile(sample.sample_bam)\n region_counts = []\n\n for i in self.regions.index:\n\n chrom = self.regions.at[i, 0]\n start = int(self.regions.at[i, 1])\n end = int(self.regions.at[i, 2])\n\n count = bam.count(chrom, start, end)\n\n region_counts.append({\n 'chrom': chrom,\n 'start': start,\n 'end': end,\n 'count': count})\n\n if len(region_counts) > 0:\n region_counts = pd.DataFrame(region_counts)\n sample.region_counts = region_counts\n\n return sample", "title": "" }, { "docid": "1afb6a864e81617b2bf4132fb6b80220", "score": "0.53124833", "text": "def print_sum(self):\n return '{} {} {} {}, {} {} {}'.format(\"Station covers\", self.index, \"to\", \\\n (self.extent()[2],self.extent()[3]), \"and contains\", len(self.population), \"souls\")", "title": "" }, { "docid": "1409e1e43523806c1e3b12cbe2af89be", "score": "0.53043664", "text": "def region_dist(dst, region):\r\n plc_lst = dataset.qloadrows(GEOTWEET, 'SELECT place_id, place.name, count(place_id) as cnt, place.lat, place.lng from sample left join place on sample.place_id = place.id where MBRContains({0}, place.geo) group by place_id'.format(text_util.geo_rect(*region)))\r\n for plc in plc_lst:\r\n plc['text'] = plc['name'] + ',' + str(plc['cnt'])\r\n geo_map(dst, plc_lst)", "title": "" }, { "docid": "f8569493b2a7a933aeaf9fb225b4c802", "score": "0.52916706", "text": "def summary(self):\n\n\t\tcount = 0\n\t\tdetail = dict()\n\t\tfor i in self.data:\n\t\t\tdetail[i]={}\n\t\t\tif i=='votes' or i=='fips' or i =='fraction_votes':\n\t\t\t\tdetail[i][\"Min\"]=np.min(self.data[i], axis=0)\n\t\t\t\tdetail[i][\"Max\"]=np.max(self.data[i], axis=0)\n\t\t\t\tdetail[i][\"1st Qu\"]=np.percentile(self.data[i], 25)\n\t\t\t\tdetail[i][\"Median\"]= np.median(self.data[i], axis=0)\n\t\t\t\tdetail[i][\"Mean\"]=np.mean(self.data[i], axis=0)\n\t\t\t\tdetail[i][\"2nd Qu\"]=np.percentile(self.data[i], 50)\n\t\t\t\tdetail[i][\"StdDev\"]=np.std(self.data[i], axis=0)\n\t\t\telse:\n\t\t\t\tpass\n\t\t\tcount += 1\n\t\treturn detail", "title": "" }, { "docid": "2a0d7081f6e4f1b30e28ce21382a3124", "score": "0.52798945", "text": "def summarize(self):\n \n try:\n chroms=self.get_chroms()\n if not chroms: raise ValueError\n except (AttributeError,ValueError):\n return\n \n # obtain a summary statistics\n self.init_table('whole')\n array_adder=lambda x,y: [x[i]+y[i] for i in range(0,len(x))]\n for chrom in chroms:\n if chrom!='whole':\n self['whole']['promoter']=array_adder(self['whole']['promoter'],self[chrom]['promoter'])\n self['whole']['downstream']=array_adder(self['whole']['downstream'],self[chrom]['downstream'])\n self['whole']['gene']=array_adder(self['whole']['gene'],self[chrom]['gene'])\n self['whole']['enhancer']+=self[chrom]['enhancer']\n self['whole']['total']+=self[chrom]['total']", "title": "" }, { "docid": "c3954b7352f546853f3849a6916f00c8", "score": "0.5270878", "text": "def analyze():\n print('[STATS]')\n stats = {\n 'All species': len(db.all()),\n 'Largest genome': _largest_genome(),\n }\n\n _print_dictionary(stats.items())\n\n # @TODO rewrite into single entire db iteration\n print('Groups')\n _print_dictionary(_count_groups().items())", "title": "" }, { "docid": "6d48182305927bf2fb4acf59cb61a6ed", "score": "0.52622867", "text": "def run_all(self):\n metrics = {}\n metrics[\"mean\"] = self.mean()\n metrics[\"spread\"] = self.spread()\n metrics[\"coverage\"] = self.coverage(num_bins=self.num_bins)\n 
metrics[\"num_groups\"] = self.num_groups()\n metrics[\"dispersion\"] = self.dispersion()\n #metrics[\"distinctness\"] = self.distinctness()\n metrics[\"size_parity\"] = self.size_parity()\n metrics[\"group_consensus\"] = self.group_consensus()\n metrics[\"histogram\"] = self.bin(num_bins=self.num_bins)\n return metrics", "title": "" }, { "docid": "957ba301c492a0df06a049516cf8315b", "score": "0.52572113", "text": "def _land_sea_stats(self, n_clusters, seed):\n all_lat = self.X_latlon[0]\n all_lon = self.X_latlon[1]\n bins = (39, 192)\n geog_range = [[-24, 24], [0, 360]]\n\n land_mask_fn = os.path.join(self.suite.suite_dir, 'land_sea_mask', 'qrparm.mask')\n land_mask = iris.load(land_mask_fn)[0]\n\n land_mask_tropics = land_mask.data[self.settings.TROPICS_SLICE, :].astype(bool)\n label_key = 'nc-{}_seed-{}'.format(n_clusters, seed)\n path = self.file_path('land_sea_percentages_nclust-{}_seed-{}.txt'.format(n_clusters, seed))\n with open(path, 'w') as f:\n f.write('RWP, land %, sea %, # prof\\n')\n for i in range(10):\n keep = self.df_remapped_labels[label_key].values == i\n cluster_lat = all_lat[keep]\n cluster_lon = all_lon[keep]\n hist, lat, lon = np.histogram2d(cluster_lat, cluster_lon,\n bins=bins, range=geog_range)\n land_frac = hist[land_mask_tropics].sum() / hist.sum()\n sea_frac = hist[~land_mask_tropics].sum() / hist.sum()\n f.write('C{}, {:.2f}, {:.2f}, {}\\n'.format(i + 1,\n land_frac * 100,\n sea_frac * 100,\n keep.sum()))", "title": "" }, { "docid": "46c92faa3b644e96ee88da7055618e86", "score": "0.52535325", "text": "def print_maps(self):\n for k in self.pagemap.keys():\n pfn=k\n vmentry = self.pagemap[k]\n if vmentry.map_count > 1:\n print \"List of processes sharing PFN %x(count=%d)\"%(pfn,vmentry.map_count)\n for x in vmentry.vma_list:\n (vma_start,vma_end,vma,flags)=x[1]\n print \"--->%d in vmrange[0%x-0%x]@0x%x:[%s]\"%(x[0],vma_start,vma_end,vma,PrintFlags(flags))", "title": "" }, { "docid": "c0c4a8fe0be6c3e0445a31f7198a0403", "score": "0.52258307", "text": "def sbs_class_areas(self):\n ha__px = self.sbs_wgs_area_ha / self.sbs_wgs_n\n counts = self.sbs_class_counts\n areas = {}\n tot_px = sum(counts.values()) # total count of non-nodata pixels \n for k in counts:\n areas[k] = counts[k] * ha__px \n\n return areas", "title": "" }, { "docid": "9b2dda435c5861ddca9eb9c6a8496e2b", "score": "0.5217433", "text": "def summarizegeom(*args, **kwargs):\n\n getgeom(*args, **kwargs).summary()", "title": "" }, { "docid": "d2da09511c19c775e97defc30c05a88b", "score": "0.5193722", "text": "def test_sum_rasters(self):\n\n # arr = sr.sum_rasters(TestSpatialReader.BIOMASS_DIR)\n #\n # print(arr.shape)\n # print(arr.min())\n # print(arr.max())\n # print(arr.unique())\n\n self.assertEqual(2, 2)\n pass", "title": "" }, { "docid": "1f37e9249c21bc3e7ccfab87eb990b6c", "score": "0.5190719", "text": "def aggregate_data(business_data, estimated_data, area_id, lookup, lad_id):\n area_km2 = lookup[area_id]['area_km2']\n\n households = 0\n hh_fixed_access = 0\n hh_wifi_access = 0\n\n for item in estimated_data:\n if area_id == item['Area']:\n\n households += 1\n\n if item['hh_fixed_access'] == 1:\n hh_fixed_access += 1\n if item['hh_wifi_access'] == 1:\n hh_wifi_access += 1\n\n if hh_fixed_access > 0 or households > 0:\n perc_hh_fixed_access = (hh_fixed_access / households) * 100\n else:\n perc_hh_fixed_access = 0\n\n if hh_fixed_access > 0 or households > 0:\n perc_hh_wifi_access = (hh_wifi_access / households) * 100\n else:\n perc_hh_wifi_access = 0\n\n return {\n 'msoa': area_id,\n 'area_km2': 
area_km2,\n 'population': lookup[area_id]['population'],\n 'population_km2': lookup[area_id]['population'] / area_km2,\n 'urban_rural': lookup[area_id]['geotype'],\n 'households': households,\n 'households_km2': households / area_km2,\n 'hh_fixed_access': hh_fixed_access,\n 'hh_wifi_access': hh_wifi_access,\n 'hh_fixed_access_km2': hh_fixed_access / area_km2,\n 'hh_wifi_access_km2': hh_wifi_access / area_km2,\n 'perc_hh_fixed_access': perc_hh_fixed_access,\n 'perc_hh_wifi_access': perc_hh_wifi_access,\n 'region': lookup[area_id]['region'],\n 'lad_id': lad_id,\n 'businesses': business_data['businesses'],\n 'business_density_km2': business_data['businesses'] / area_km2,\n #busines adoption - ba_\n 'ba_micro': business_data['ba_micro'],\n 'ba_small': business_data['ba_small'],\n 'ba_medium': business_data['ba_medium'],\n 'ba_large': business_data['ba_large'],\n 'ba_very_large': business_data['ba_very_large'],\n 'ba_total': business_data['ba_total'],\n #busines adoption floor area - bafa_\n 'bafa_micro': business_data['bafa_micro'],\n 'bafa_small': business_data['bafa_small'],\n 'bafa_medium': business_data['bafa_medium'],\n 'bafa_large': business_data['bafa_large'],\n 'bafa_very_large': business_data['bafa_very_large'],\n 'bafa_total': business_data['bafa_total'],\n #business access points - baps_\n 'baps_total_low': business_data['baps_total_low'],\n 'baps_density_km2_low': business_data['baps_total_low'] / area_km2,\n 'baps_total_baseline': business_data['baps_total_baseline'],\n 'baps_density_km2_baseline': business_data['baps_total_baseline'] / area_km2,\n 'baps_total_high': business_data['baps_total_high'],\n 'baps_density_km2_high': business_data['baps_total_high'] / area_km2,\n }", "title": "" }, { "docid": "250b5b8ec648b37df61f9786edfa967a", "score": "0.518644", "text": "def search_regions(self):\n regions = {}\n region_i = 0\n for chrno, size in sorted(self.chr_max_len.items()):\n if chrno == 6:\n # Avoid human leukocyte antigen loci\n continue\n\n chros = {}\n for group in self.groups:\n chros[group] = self.load_chromosome(size, self.groups[group][chrno])\n\n print \"[SRCH] Chr:%d\" % (chrno)\n # Ignore 0 position\n for i, region_s in enumerate(range(1, size+1-self.LENGTH+1, self.STRIDE)):\n region_e = region_s + self.LENGTH - 1\n regions[region_i] = {\n \"ichr\": i,\n \"group_counts\": {},\n \"chr\": chrno,\n \"pos_start\": region_s,\n \"pos_end\": region_e\n }\n\n for group in self.groups:\n num_variants = np.sum(chros[group][region_s:region_e+1])\n regions[region_i][\"group_counts\"][group] = num_variants\n\n # TODO Should we be ignoring these regions?\n # Record this region (if it contained variants in this group)\n if num_variants > 0:\n if num_variants not in self.group_buckets[group]:\n # Add this particular number of variants as a bucket\n self.group_buckets[group][num_variants] = []\n\n # Add the region id to the bucket\n self.group_buckets[group][num_variants].append(region_i)\n\n # Append the number of variants counted in this region\n # for this group to a list used to calculate the median\n self.group_counts[group].append(num_variants)\n\n if self.GRAPHING:\n # NOTE Use i not region_i so regions in the plot start\n # at 0 for each chromosome rather than cascading\n print \"%s\\t%d\\t%d\\t%d\" % (group, chrno, i, num_variants)\n\n region_i += 1\n return regions", "title": "" }, { "docid": "5e76417215a21949835e624765836529", "score": "0.51792777", "text": "def get_statistics_for_year(rasters, year, mean_path, std_path, sd_mean_path, deviation_path, land_use, 
raster_base_path=os.path.join(base_folder, \"spatial_comparisons\",), debug=False):\n\tsummed_rasters = []\n\tfor raster in rasters:\n\t\tif type(raster) is not arcpy.Raster:\n\t\t\traster_path = os.path.join(raster_base_path, raster)\n\t\telse:\n\t\t\traster_path = raster\n\n\t\tsummed_rasters.append(make_annual(raster_path, year))\n\n\tif debug:\n\t\tfor raster in summed_rasters:\n\t\t\toutput = tempfile.mktemp(prefix=\"summed_\", suffix=os.path.split(str(raster))[1]) # add the original filename to the end\n\t\t\traster.save(output)\n\t\t\tprint(\"Composite Output at {}\".format(output))\n\n\t# create the mask of features we actually want\n\tif use_backup_mask is False:\n\t\tmask = make_mask(land_use, dsa=dsa_feature, mask_query=land_use_mask_queries[year])\n\telse:\n\t\tmask = backup_masks[year]\n\n\twith Env(\"mask\", mask):\n\n\t\tarcpy.CheckOutExtension(\"Spatial\")\n\t\ttry:\n\n\t\t\tmean_raster = arcpy.sa.CellStatistics(summed_rasters, \"MEAN\", \"NODATA\")\n\t\t\tstd_raster = arcpy.sa.CellStatistics(summed_rasters, \"STD\", \"NODATA\")\n\n\t\t\thistogram_from_raster(mean_raster, \"Histogram of mean ET for {}\".format(year), output_folder=output_folder)\n\n\t\t\tmean_raster.save(mean_path)\n\t\t\tstd_raster.save(std_path)\n\n\t\t\toverall_mean = get_overall_mean(mean_raster) # get the mean value across the whole raster\n\t\t\tdeviation_from_mean_raster = (mean_raster - overall_mean)/overall_mean\n\t\t\tdeviation_from_mean_raster.save(deviation_path)\n\n\t\t\tsd_mean = std_raster / mean_raster\n\t\t\tsd_mean.save(sd_mean_path)\n\t\tfinally:\n\t\t\tarcpy.CheckInExtension(\"Spatial\")\n\n\treturn mean_raster, std_raster", "title": "" }, { "docid": "8baf643c8a60415d5ddb88167e59199d", "score": "0.5167541", "text": "def test_total_population():\n island_map = \"\"\"\\\n OOOOOOO\n OSSSSSO\n OSJJJSO\n OSJMJSO\n OSDDJSO\n OSJJJSO\n OSSSSSO\n OOOOOOO\"\"\"\n\n island = bi.Island(island_map)\n popgen = pg.Population(n_herbivores=3,\n coord_herb=[(5, 2), (2, 5), (4, 3)],\n n_carnivores=2, coord_carn=[(5, 3), (1, 5)])\n pop = popgen.get_animals()\n island.populate_the_island(pop)\n species_population = island.total_species_population\n total_population = island.total_island_population\n assert species_population == (9, 4)\n assert total_population == 13", "title": "" }, { "docid": "d822f11fe8ab02e59c95f7c4406df749", "score": "0.51577467", "text": "def summary_stat1(all_data_features, sample_name, nb ,chromosomes = HUMAN, write=False, f=''):\n covered = 0\n tot = 0\n for c in HUMAN:\n for element in all_data_features[sample_name][c]:\n tot += 1\n if element[1] != 0:\n covered +=1\n\n print(nb, ' - ', sample_name, ' - ', covered*100/float(tot))\n if write == True:\n t = ''.join([nb, ' - ', sample_name, ' - ', covered*100/float(tot)])\n #t = str(t)\n f.write(t)\n #return() ", "title": "" }, { "docid": "0f8488589379e4a866d8bea56c347d3f", "score": "0.51544416", "text": "def summarize_all_from_directory(dir='.'):\n full_path = os.path.abspath(dir)\n (clmaps, clims, hdf5s, txts, sifs) = sortfnames(dir=full_path)\n print('CL Maps', clmaps)\n print('CL Images', clims)\n #make_image_summaries(clims, full_path)\n #make_si_summary(clmaps, full_path)", "title": "" }, { "docid": "2e7265719138a1ff9eabe3b80c68fd16", "score": "0.5148203", "text": "def MAP(y_true,y_pred,region_mask):\n region_mask = np.where(region_mask >= 1,0,-1000)\n tmp_y_true = y_true + region_mask\n tmp_y_pred = y_pred + region_mask\n \n accident_grids_nums = nonzero_num(tmp_y_true)\n \n true_top_k = 
get_top(tmp_y_true,accident_grids_nums)\n pred_top_k = get_top(tmp_y_pred,accident_grids_nums)\n\n all_k_AP = []\n for sample in range(len(true_top_k)):\n all_k_AP.append(AP(list(true_top_k[sample]),list(pred_top_k[sample])))\n return sum(all_k_AP)/len(all_k_AP)", "title": "" }, { "docid": "a26f1d94fc5bbb5b6a987a57cdb8dd3c", "score": "0.5145101", "text": "def get_stats(args):\n mapping_dict = nested_dict.nested_dict()\n try:\n handle = pysam.AlignmentFile(args.input, 'rb')\n except OSError:\n print 'error'\n #Samples can be added from several lanes, which will results in different read groups\n #in order to only account for samples here, make a dict mapping RG_ID to sample\n RG_to_sample = dict([(r['ID'],r['SM']) for r in handle.header['RG']])\n count = 0\n for read in handle:\n count += 1\n if not count%1000000:\n print '%s reads processed' % count\n if not read.is_duplicate and not read.is_qcfail:\n #make dict of read tag objects\n tag_dict = dict(read.tags)\n sample = RG_to_sample[tag_dict['RG']]\n #add count of valid read tot total for this sample\n try:\n mapping_dict['total'][sample] += 1\n except TypeError:\n mapping_dict['total'][sample] = 1\n if 'mono' in sample:\n if sample.replace(' ','_') not in read.reference_name:\n try:\n if read.reference_name not in mapping_dict['discard']:\n mapping_dict['discard'].append(read.reference_name)\n except AttributeError:\n mapping_dict['discard'] = [read.reference_name]\n except KeyError:\n mapping_dict['discard'] = [read.reference_name]\n try:\n mapping_dict[read.reference_name][sample] += 1\n except TypeError:\n mapping_dict[read.reference_name][sample] = 1\n return mapping_dict", "title": "" }, { "docid": "337844698f1c3ad7a32a7e373c9edcf1", "score": "0.5129482", "text": "def region_map(request):\n app_workspace=app.get_app_workspace()\n dirs=next(os.walk(app_workspace.path))[1]\n regions=[]\n for entry in dirs:\n # One time code to fix aquifer names\n # names_list = ['Name', 'AQ_NAME', 'AQU_NAME', 'Hydro_Zone', 'altName', 'WMU_NAME']\n # directory = os.path.join(app_workspace.path,entry)\n # the_csv=os.path.join(directory,entry+'_Aquifers.csv')\n # majorfile=os.path.join(directory,'MajorAquifers.json')\n # aquifercsv=[]\n # with open(the_csv) as csvfile:\n # reader = csv.DictReader(csvfile)\n # for row in reader:\n # if row:\n # aq=((row['ID']),(row[\"Name\"]),(row['CapsName']))\n # aquifercsv.append(aq)\n # with open(majorfile) as f:\n # json_object=json.load(f)\n # for aq in aquifercsv:\n # for aquifer in json_object['features']:\n # if aq[2]==aquifer['properties']['Aquifer_Name']:\n # if 'DisplayName' in aquifer:\n # del aquifer['DisplayName']\n # aquifer['properties']['AquiferID']=aq[0]\n # aquifer['properties']['DisplayName']=aq[1]\n # print(aq[1])\n # break\n # with open(majorfile,'w') as f:\n # json.dump(json_object,f)\n # for filename in os.listdir(directory):\n # if filename.startswith('MinorAquifers.json'):\n # minorfile=os.path.join(directory,'MinorAquifers.json')\n # with open(minorfile) as f:\n # json_object = json.load(f)\n # for aq in aquifercsv:\n # for aquifer in json_object['features']:\n # if aq[2] == aquifer['properties']['Aquifer_Name']:\n # if 'DisplayName' in aquifer:\n # del aquifer['DisplayName']\n # aquifer['properties']['AquiferID'] = aq[0]\n # aquifer['properties']['DisplayName'] = aq[1]\n # break\n # with open(minorfile, 'w') as f:\n # json.dump(json_object, f)\n # for filename in os.listdir(directory):\n # if filename.startswith('MajorAquifers.json'):\n # myfile=os.path.join(directory,'MajorAquifers.json')\n # 
with open(myfile) as f:\n # json_object=json.load(f)\n # for aquifer in json_object['features']:\n # for name in names_list:\n # if name in aquifer['properties']:\n # aquifer['properties']['Aquifer_Name']=aquifer['properties'][name]\n # break\n # with open(myfile, 'w') as f:\n # json.dump(json_object, f)\n # for filename in os.listdir(directory):\n # myfile = os.path.join(directory, 'MinorAquifers.json')\n # if filename.startswith('MinorAquifers.json'):\n # with open(myfile) as f:\n # json_object=json.load(f)\n # for aquifer in json_object['features']:\n # for name in names_list:\n # if name in aquifer['properties']:\n # aquifer['properties']['Aquifer_Name']=aquifer['properties'][name]\n # break\n # with open(myfile, 'w') as f:\n # json.dump(json_object, f)\n # #end one time fix code\n\n region=(entry.replace(\"_\",\" \"),entry)\n regions.append(region)\n select_region = SelectInput(display_text='Select Region',\n name='select_region',\n multiple=False,\n options=regions,\n initial='Texas',\n attributes={\n 'onchange':'list_aquifer()'\n }\n )\n region_home=Button(display_text='Region Home',\n name='region_home',\n icon='glyphicon glyphicon-home',\n attributes={\n 'data-toggle': 'tooltip',\n 'data-placement': 'top',\n 'title': 'Jump to Home View for Region',\n 'onclick':\"list_aquifer()\",\n }\n )\n\n select_aquifer=SelectInput(display_text='Select Aquifer',\n name='select_aquifer',\n multiple=False,\n options=[('',9999),('Carrizo',10),('Edwards',11),('Edwards-Trinity',13),('Gulf Coast',15),('Hueco Bolson',1),('Ogallala',21),\n ('Pecos Valley',3),('Seymour',4),('Trinity',28),('Blaine',6),('Blossom',7),('Bone Spring-Victorio Peak',8),\n ('Brazos River Alluvium',5),('Capitan Reef Complex',9),('Dockum',26),('Edwards-Trinity-High Plains',12),\n ('Ellenburger-San Saba',14),('Hickory',16),('Igneous',17),('Lipan', 30),('Marathon',18),\n ('Marble Falls',19),('Nacatoch',20),('Queen City',24),('Rita Blanca',23),('Rustler',25),\n ('Sparta',27),('West Texas Bolsons',2),('Woodbine',29),('Yegua Jackson',31),('None',22),('Texas',32)],\n initial='',\n attributes={\n 'onchange':'list_dates(2)' #this calls list_dates, which then calls change_aquifer\n }\n )\n\n select_view=SelectInput(display_text='Select Data Type',\n name='select_view',\n multiple=False,\n options=[(\"Depth to Groundwater\", 'depth'), ('Elevation of Groundwater', 'elevation'),(\"Well Drawdown\",\"drawdown\")],\n attributes={\n 'onchange':'changeWMS()'\n }\n )\n\n required_data = SelectInput(display_text='Minimum Samples per Well',\n name='required_data',\n multiple=False,\n options=[(\"0\",\"0\"),(\"1\",\"1\"),(\"2\",\"2\"),(\"3\",\"3\"),(\"4\",\"4\"),(\"5\",\"5\"),(\"6\",\"6\"),\n (\"7\", \"7\"),(\"8\",\"8\"),(\"9\",\"9\"),(\"10\",\"10\"),(\"11\",\"11\"),(\"12\",\"12\"),(\"13\",\"13\"),\n (\"14\", \"14\"),(\"15\",\"15\"),(\"16\",\"16\"),(\"17\",\"17\"),(\"18\",\"18\"),(\"19\",\"19\"),(\"20\",\"20\"),\n (\"21\", \"21\"),(\"22\",\"22\"),(\"23\",\"23\"),(\"24\",\"24\"),(\"25\",\"25\"),(\"26\",\"26\"),(\"27\",\"27\"),\n (\"28\", \"28\"),(\"29\",\"29\"),(\"30\",\"30\"),(\"31\",\"31\"),(\"32\",\"32\"),(\"33\",\"33\"),(\"34\",\"34\"),\n (\"35\", \"35\"),(\"36\",\"36\"),(\"37\",\"37\"),(\"38\",\"38\"),(\"39\",\"39\"),(\"40\",\"40\"),(\"41\",\"41\"),\n (\"42\", \"42\"),(\"43\",\"43\"),(\"44\",\"44\"),(\"45\",\"45\"),(\"46\",\"46\"),(\"47\",\"47\"),(\"48\",\"48\"),\n (\"49\", \"49\"),(\"50\",\"50\"),],\n initial=\"5\",\n attributes={\n 'onchange': 'change_filter()'\n }\n )\n\n available_dates=SelectInput(display_text='Available Raster 
Animations',\n name='available_dates',\n multiple=False,\n options=[],\n attributes={\n 'onchange': 'changeWMS();toggleButtons()'\n }\n )\n delete_button=Button(display_text='Delete Raster',\n name='delete_button',\n icon='glyphicon glyphicon-remove',\n style='danger',\n disabled=False,\n attributes={\n 'data-toggle': 'tooltip',\n 'data-placement': 'top',\n 'title': 'Delete Selected Raster Animation',\n 'onclick':\"confirm_delete()\",\n }\n )\n default_button = Button(display_text='Make Raster Default',\n name='default_button',\n icon='glyphicon glyphicon-menu-right',\n style='default',\n disabled=False,\n attributes={\n 'data-toggle': 'tooltip',\n 'data-placement': 'top',\n 'title': 'Set Selected Raster Animation as Default',\n 'onclick': \"confirm_default()\",\n }\n )\n volume_button=Button(display_text='Aquifer Storage',\n name='default_button',\n icon='glyphicon glyphicon-stats',\n style='default',\n disabled=False,\n attributes={\n 'data-toggle': 'tooltip',\n 'data-placement': 'top',\n 'title': 'Display Change in Aquifer Storage',\n 'onclick': \"totalvolume()\",\n }\n )\n\n context = {\n \"select_region\":select_region,\n \"select_aquifer\":select_aquifer,\n \"required_data\": required_data,\n \"select_view\":select_view,\n \"available_dates\":available_dates,\n 'delete_button':delete_button,\n 'default_button':default_button,\n 'region_home':region_home,\n 'volume_button':volume_button\n }\n\n return render(request, 'gw/region_map.html', context)", "title": "" }, { "docid": "c9c56db64d19b5163db4f074c5e6de2b", "score": "0.512753", "text": "def summary(self):\n pass", "title": "" }, { "docid": "8783f12e7a3faa887d8020e8f3fa2590", "score": "0.51163983", "text": "def summarize_sample(possible_worlds, problem_spec):\n summary_grid=[]\n for i in range(problem_spec.x):\n summary_grid.append([])\n for j in range(problem_spec.y):\n summary_grid[i].append(0)\n for world in possible_worlds:\n\n for coordinate, unit in world.grid.iteritems():\n if unit and unit in 'F':\n summary_grid[coordinate[0]][coordinate[1]] += 1\n return summary_grid", "title": "" }, { "docid": "58e2afb420f0eac12334b27b39773fed", "score": "0.510907", "text": "def draw_maps_by_range(station_info_dict, \n valid_temperature_data_dict, \n start_year, \n years_per_map, \n num_top_values,\n num_maps):", "title": "" }, { "docid": "2088b6a07783e06eeb7a46d6c1fa37bf", "score": "0.51041603", "text": "def report_normalized_raw_data_map(self):\n copy_map = defaultdict(list)\n for para in self.block_map:\n for i in xrange(len(self.block_map[para])):\n start, stop, var, block = self.block_map[para][i]\n if var is not None:\n copy_map[para].append([start, stop, block.adjusted_count / len(block.variables)])\n prev_var = block.adjusted_count / len(block.variables)\n else:\n copy_map[para].append([start, stop, prev_var])\n return copy_map", "title": "" }, { "docid": "9698c70089bf2257045ed33fcf04d1ee", "score": "0.51017535", "text": "def biome_zonal_stats():\n biome_shp_path = \"F:/Data_terrestrial_ecoregions/wwf_terr_ecos_diss.shp\"\n fid_field = 'OBJECTID'\n output_dir = \"C:/Users/ginge/Dropbox/NatCap_backup/KBA+ES/processing_2020/zonal_stats_biome\"\n save_as = os.path.join(output_dir, 'zonal_stat_biome_combined.csv')\n service_zonal_stats(biome_shp_path, fid_field, output_dir, save_as)", "title": "" }, { "docid": "1b7fd8816f8b49115e81d6ace01460dc", "score": "0.5099861", "text": "def calc_export_stats(feat,img):\r\n\t# var get_ic_counts = comp_ic.map(function(img){ \r\n\tpixel_ct_dict = img.reduceRegion(\r\n\t\treducer=ee.Reducer.sum(), 
#TESTING the mean values to determine bias btwn MODIS and VIIRS (8/22/2021). Otherwise should be sum gets the basin sum for a binary raster \r\n\t\tgeometry=feat.geometry(),\r\n\t\tscale=500,\r\n\t\ttileScale=4,\r\n\t\tmaxPixels=1e13\r\n\t\t)\r\n\tdict_out = pixel_ct_dict.set('basin',feat.get('huc8')).set('date',ee.Date(img.get('system:time_start')))\r\n\tdict_feat = ee.Feature(None, dict_out) \r\n\treturn dict_feat", "title": "" }, { "docid": "95428ccef3d9b02a8a427320b074cbac", "score": "0.50990355", "text": "def update_regions(self):\n self.clear_all_regions()\n for unit, region in self.results.items():\n self.regions[region].unit = unit", "title": "" }, { "docid": "c6d2665be1278e9d93ce40e0eb1fb25f", "score": "0.50967264", "text": "def get_salvage_locations(self, image):", "title": "" }, { "docid": "5c2ffb3b63d984ac241a57e5d9033235", "score": "0.5089791", "text": "def test_map_metrics():\n pass", "title": "" }, { "docid": "1d1bb8d55f0ff16fd945c0f6ff34facb", "score": "0.5088398", "text": "def summarize_results(\r\n risk_factor_vector_list, tm_world_borders_path,\r\n target_result_point_vector_path):\r\n if os.path.exists(target_result_point_vector_path):\r\n os.remove(target_result_point_vector_path)\r\n\r\n countries_myregions_df = pandas.read_csv(\r\n 'countries_myregions_final_md5_7e35a0775335f9aaf9a28adbac0b8895.csv',\r\n usecols=['country', 'myregions'], sep=',')\r\n country_to_region_dict = {\r\n row[1][1]: row[1][0] for row in countries_myregions_df.iterrows()}\r\n\r\n\r\n LOGGER.debug(\"build country spatial index\")\r\n country_rtree, country_geom_fid_map = build_spatial_index(\r\n tm_world_borders_path)\r\n country_vector = gdal.OpenEx(tm_world_borders_path, gdal.OF_VECTOR)\r\n country_layer = country_vector.GetLayer()\r\n\r\n base_point_vector_path = risk_factor_vector_list[0][0]\r\n base_ref_wkt = pygeoprocessing.get_vector_info(\r\n base_point_vector_path)['projection']\r\n base_spatial_reference = osr.SpatialReference()\r\n base_spatial_reference.ImportFromWkt(base_ref_wkt)\r\n\r\n gpkg_driver = ogr.GetDriverByName(\"GPKG\")\r\n target_result_point_vector = gpkg_driver.CreateDataSource(\r\n target_result_point_vector_path)\r\n target_result_point_layer = target_result_point_vector.CreateLayer(\r\n os.path.splitext(os.path.basename(\r\n target_result_point_vector_path))[0],\r\n base_spatial_reference, ogr.wkbPoint)\r\n risk_id_list = []\r\n for _, _, risk_id in risk_factor_vector_list:\r\n target_result_point_layer.CreateField(ogr.FieldDefn(\r\n risk_id, ogr.OFTReal))\r\n risk_id_list.append(risk_id)\r\n target_result_point_layer.CreateField(\r\n ogr.FieldDefn('country', ogr.OFTString))\r\n target_result_point_layer.CreateField(\r\n ogr.FieldDefn('region', ogr.OFTString))\r\n target_result_point_layer_defn = target_result_point_layer.GetLayerDefn()\r\n\r\n # define initial geometry and fid lookup\r\n fid_lookup = {}\r\n risk_factor_vector = ogr.Open(risk_factor_vector_list[0][0])\r\n risk_factor_layer = risk_factor_vector.GetLayer()\r\n target_result_point_layer.StartTransaction()\r\n LOGGER.debug(\"copying layer\")\r\n for base_point_feature in risk_factor_layer:\r\n grid_id = base_point_feature.GetField('grid_id')\r\n point_id = base_point_feature.GetField('point_id')\r\n fid_lookup[(grid_id, point_id)] = base_point_feature.GetFID()\r\n target_feature = ogr.Feature(target_result_point_layer_defn)\r\n target_feature.SetGeometry(\r\n base_point_feature.GetGeometryRef().Clone())\r\n point_geom = shapely.wkb.loads(\r\n target_feature.GetGeometryRef().ExportToWkb())\r\n # 
picking 4 because that seems pretty reasonable for nearest countries\r\n intersection_list = list(country_rtree.nearest(point_geom.bounds, 4))\r\n min_feature_index = intersection_list[0]\r\n min_dist = country_geom_fid_map[min_feature_index].distance(\r\n point_geom)\r\n for feature_index in intersection_list[1::]:\r\n dist = country_geom_fid_map[feature_index].distance(point_geom)\r\n if dist < min_dist:\r\n min_dist = dist\r\n min_feature_index = feature_index\r\n country_name = country_layer.GetFeature(\r\n min_feature_index).GetField('name')\r\n target_feature.SetField('country', country_name)\r\n try:\r\n target_feature.SetField(\r\n 'region', country_to_region_dict[country_name])\r\n except KeyError:\r\n target_feature.SetField('region', 'UNKNOWN')\r\n target_result_point_layer.CreateFeature(target_feature)\r\n\r\n target_result_point_layer.CommitTransaction()\r\n target_result_point_layer.SyncToDisk()\r\n\r\n for risk_count, (risk_factor_path, field_id, risk_id) in enumerate(\r\n risk_factor_vector_list):\r\n LOGGER.debug(\r\n \"processing risk factor %d of %d %s\", risk_count+1,\r\n target_result_point_vector.GetLayerCount(), risk_factor_path)\r\n risk_vector = ogr.Open(risk_factor_path)\r\n risk_layer = risk_vector.GetLayer()\r\n n_features = target_result_point_layer.GetFeatureCount()\r\n if n_features == 0:\r\n continue\r\n base_risk_values = numpy.empty(n_features)\r\n fid_index_map = {}\r\n risk_feature = None\r\n for feature_index, risk_feature in enumerate(risk_layer):\r\n risk_value = risk_feature.GetField(field_id)\r\n point_id = risk_feature.GetField('point_id')\r\n grid_id = risk_feature.GetField('grid_id')\r\n target_fid = fid_lookup[(grid_id, point_id)]\r\n fid_index_map[feature_index] = target_fid\r\n base_risk_values[feature_index] = float(risk_value)\r\n # use the last feature to get the grid_id\r\n if field_id != risk_id:\r\n # convert to risk\r\n target_risk_array = numpy.searchsorted(\r\n numpy.percentile(base_risk_values, [20, 40, 60, 80, 100]),\r\n base_risk_values) + 1\r\n else:\r\n # it's already a risk\r\n target_risk_array = base_risk_values\r\n target_result_point_layer.ResetReading()\r\n target_result_point_layer.StartTransaction()\r\n for target_index in range(len(fid_index_map)):\r\n target_fid = fid_index_map[target_index]\r\n target_feature = target_result_point_layer.GetFeature(\r\n target_fid)\r\n target_feature.SetField(\r\n risk_id, float(target_risk_array[target_index]))\r\n target_result_point_layer.SetFeature(target_feature)\r\n target_feature = None\r\n target_result_point_layer.CommitTransaction()\r\n target_result_point_layer.SyncToDisk()\r\n target_result_point_layer = None\r\n target_result_point_vector = None", "title": "" }, { "docid": "ecd6ad2ee036403609832a21a00fefd4", "score": "0.50881207", "text": "def summary(self, info: bool = True):\n\n # Add Entropy if scipy is installed\n stats = import_optional_dependency(\"scipy.stats\")\n\n sum_info = {}\n length = self.shape[0]\n if str(self.dtype)[:3] == \"int\":\n sum_info['Unique'] = len(self.unique())\n sum_info['Unique(%)'] = sum_info['Unique'] / length\n sum_info['Missing'] = self.isnull().sum()\n sum_info['Missing(%)'] = sum_info['Missing'] / length\n sum_info['Means'] = self.sum() / length\n sum_info['Minimum'] = self.min()\n sum_info['Maximum'] = self.max()\n sum_info['Zeros'] = (self == 0).sum()\n sum_info['Zeros(%)'] = sum_info['Zeros'] / length\n if stats:\n sum_info['Entropy'] = round(\n stats.entropy(self.value_counts(normalize=True), base=2), 2)\n sum_info['Memory Size(KB)'] 
= self.memory_usage() / 1024\n\n if info:\n print('Unique: {}({:.2f}%)'.format(sum_info['Unique'], sum_info['Unique(%)'] * 100))\n print('Missing: {}({:.2f}%)'.format(sum_info['Missing'], sum_info['Missing(%)'] * 100))\n print('Zeros: {}({:.2f}%)'.format(sum_info['Zeros'], sum_info['Zeros(%)'] * 100))\n print('Means: {:.2f}'.format(sum_info['Means']))\n print('Minimum: {}'.format(sum_info['Minimum']))\n print('Maximum: {}'.format(sum_info['Maximum']))\n if stats:\n print('Entropy: {}'.format(sum_info['Entropy']))\n print('Memory Size: {:.1f}KB'.format(sum_info['Memory Size(KB)']))\n\n elif str(self.dtype) == \"object\":\n sum_info['Unique'] = len(self.unique())\n sum_info['Unique(%)'] = sum_info['Unique'] / length\n sum_info['Missing'] = self.isnull().sum()\n sum_info['Missing(%)'] = sum_info['Missing'] / length\n if stats:\n sum_info['Entropy'] = round(\n stats.entropy(self.value_counts(normalize=True), base=2), 2)\n sum_info['Memory Size(KB)'] = self.memory_usage() / 1024\n\n if info:\n print('Unique: {}({:.2f}%)'.format(sum_info['Unique'], sum_info['Unique(%)'] * 100))\n print('Missing: {}({:.2f}%)'.format(sum_info['Missing'], sum_info['Missing(%)'] * 100))\n if stats:\n print('Entropy: {}'.format(sum_info['Entropy']))\n print('Memory Size: {:.2f}KB'.format(sum_info['Memory Size(KB)']))\n\n return self._one_data(sum_info, index=[0])", "title": "" }, { "docid": "e28c4754dd9061efe4eca11204f9e374", "score": "0.50871307", "text": "def _agg_data(self):\n self.data_agg = self.data.groupby('aortic zone').agg({'bbh': ['count', 'median']})\n self.data_agg_counts = self.data.groupby('aortic zone').agg({'group': 'value_counts'}).sort_index().unstack()", "title": "" }, { "docid": "ae9f635d5456905243ea0fbe619c7179", "score": "0.507853", "text": "def summarize_remaining_biomass():\n\n site_csv = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\CENTURY4.6\\Kenya\\input\\regional_properties\\regional_properties.csv\"\n site_list = pd.read_csv(site_csv).to_dict(orient=\"records\")\n\n outer_dir = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\Forage_model\\model_results\\regional_properties\"\n cp_opts = ['cp'] # ['varying', 'constant']\n marg_dict = {'site': [], 'month': [], 'year': [], 'remaining_biomass': [],\n 'gazelle_equivalents': [], 'cp_option': []}\n for site in site_list:\n for cp_o in cp_opts:\n # inner_folder_name = \"herd_avg_uncalibrated_0.3_{}_cp_GL\".format(cp_o)\n inner_folder_name = \"herd_avg_uncalibrated_constant_cp_GL_est_densities\"\n inner_dir = os.path.join(outer_dir, inner_folder_name)\n outdir_folder = [f for f in os.listdir(inner_dir) if\n f.startswith('site_{:d}_'.format(int(site['name'])))]\n try:\n outdir = os.path.join(inner_dir, outdir_folder[0])\n except:\n continue\n # outdir = os.path.join(inner_dir,\n # 'site_{:d}'.format(int(site['name'])))\n sum_csv = os.path.join(outdir, 'summary_results.csv')\n sum_df = pd.read_csv(sum_csv)\n subset = sum_df.loc[sum_df['year'] > 2013]\n subset.total_biomass = subset['{:.0f}_green_kgha'.format(site['name'])] + \\\n subset['{:.0f}_dead_kgha'.format(site['name'])]\n subset.available = subset.total_biomass / 2\n subset.remaining = subset.available - subset.total_offtake\n gazelle_equiv = subset.remaining / 56.29\n marg_dict['site'].extend([site['name']] * len(subset.month))\n marg_dict['month'].extend(subset.month.tolist())\n marg_dict['year'].extend(subset.year.tolist())\n marg_dict['remaining_biomass'].extend(subset.remaining.tolist())\n marg_dict['gazelle_equivalents'].extend(gazelle_equiv.tolist())\n 
marg_dict['cp_option'].extend([cp_o] * len(subset.month))\n import pdb; pdb.set_trace()\n df = pd.DataFrame(marg_dict)\n summary_csv = os.path.join(outer_dir, 'biomass_remaining_summary.csv')\n df.to_csv(summary_csv)", "title": "" }, { "docid": "168408d95780e2e477a32b7865e6f098", "score": "0.50758284", "text": "def summarize(self, doc_paths):\n\n\t\traise NotImplementedError", "title": "" }, { "docid": "32cb8bc5c7ad8dbd4fdbe8c8d3ca9914", "score": "0.50697404", "text": "def generate_map_xml(self):\n from hs_core.hydroshare import encode_resource_url\n from hs_core.hydroshare.utils import current_site_url, get_file_mime_type\n\n current_site_url = current_site_url()\n # This is the qualified resource url.\n hs_res_url = os.path.join(current_site_url, 'resource', self.resource.file_path)\n # this is the path to the resource metadata file for download\n aggr_metadata_file_path = self.metadata_short_file_path\n metadata_url = os.path.join(hs_res_url, aggr_metadata_file_path)\n metadata_url = encode_resource_url(metadata_url)\n # this is the path to the aggregation resourcemap file for download\n aggr_map_file_path = self.map_short_file_path\n res_map_url = os.path.join(hs_res_url, aggr_map_file_path)\n res_map_url = encode_resource_url(res_map_url)\n\n # make the resource map:\n utils.namespaces['citoterms'] = Namespace('http://purl.org/spar/cito/')\n utils.namespaceSearchOrder.append('citoterms')\n\n ag_url = res_map_url + '#aggregation'\n a = Aggregation(ag_url)\n\n # Set properties of the aggregation\n a._dc.title = self.dataset_name\n agg_type_url = \"{site}/terms/{aggr_type}\"\\\n .format(site=current_site_url, aggr_type=self.get_aggregation_type_name())\n a._dcterms.type = URIRef(agg_type_url)\n a._citoterms.isDocumentedBy = metadata_url\n a._ore.isDescribedBy = res_map_url\n\n res_type_aggregation = AggregatedResource(agg_type_url)\n res_type_aggregation._rdfs.label = self.get_aggregation_term_label()\n res_type_aggregation._rdfs.isDefinedBy = current_site_url + \"/terms\"\n\n a.add_resource(res_type_aggregation)\n\n # Create a description of the metadata document that describes the whole resource and add it\n # to the aggregation\n resMetaFile = AggregatedResource(metadata_url)\n resMetaFile._citoterms.documents = ag_url\n resMetaFile._ore.isAggregatedBy = ag_url\n resMetaFile._dc.format = \"application/rdf+xml\"\n\n # Create a description of the content file and add it to the aggregation\n files = self.files.all()\n resFiles = []\n for n, f in enumerate(files):\n res_uri = '{hs_url}/resource/{res_id}/data/contents/{file_name}'.format(\n hs_url=current_site_url,\n res_id=self.resource.short_id,\n file_name=f.short_path)\n res_uri = encode_resource_url(res_uri)\n resFiles.append(AggregatedResource(res_uri))\n resFiles[n]._ore.isAggregatedBy = ag_url\n resFiles[n]._dc.format = get_file_mime_type(os.path.basename(f.short_path))\n\n # Add the resource files to the aggregation\n a.add_resource(resMetaFile)\n for f in resFiles:\n a.add_resource(f)\n\n # Create a description of the contained aggregations and add it to the aggregation\n child_ore_aggregations = []\n for n, child_aggr in enumerate(self.get_children()):\n res_uri = '{hs_url}/resource/{res_id}/data/contents/{aggr_name}'.format(\n hs_url=current_site_url,\n res_id=self.resource.short_id,\n aggr_name=child_aggr.map_short_file_path + '#aggregation')\n res_uri = encode_resource_url(res_uri)\n child_ore_aggr = Aggregation(res_uri)\n child_ore_aggregations.append(child_ore_aggr)\n child_ore_aggregations[n]._ore.isAggregatedBy = ag_url\n 
child_agg_type_url = \"{site}/terms/{aggr_type}\"\n child_agg_type_url = child_agg_type_url.format(\n site=current_site_url, aggr_type=child_aggr.get_aggregation_type_name())\n child_ore_aggregations[n]._dcterms.type = URIRef(child_agg_type_url)\n\n # Add contained aggregations to the aggregation\n for aggr in child_ore_aggregations:\n a.add_resource(aggr)\n\n # Register a serializer with the aggregation, which creates a new ResourceMap that\n # needs a URI\n serializer = RdfLibSerializer('xml')\n # resMap = a.register_serialization(serializer, res_map_url)\n a.register_serialization(serializer, res_map_url)\n\n # Fetch the serialization\n remdoc = a.get_serialization()\n # remove this additional xml element - not sure why it gets added\n # <ore:aggregates rdf:resource=\"https://www.hydroshare.org/terms/[aggregation name]\"/>\n xml_element_to_replace = '<ore:aggregates rdf:resource=\"{}\"/>\\n'.format(agg_type_url)\n xml_string = remdoc.data.replace(xml_element_to_replace, '')\n return xml_string", "title": "" }, { "docid": "584cc805e18d197059a9d5ccdca19324", "score": "0.5069441", "text": "def index(self):\n max_usage = self.configuration.get_float(\n 'indexing.saturation', default=0.5)\n log.debug(f\"Indexing maps (up to {100 * max_usage}% \"\n f\"of RAM saturation).\")\n\n max_available = (psutil.virtual_memory().total\n - self.get_reduction_footprint(self.pixels()))\n check_memory = self.configuration.get_bool(\n 'indexing.check_memory', default=True)\n\n max_used = int(max_available * max_usage)\n for scan in self.scans:\n for integration in scan.integrations:\n if check_memory and psutil.virtual_memory().used > max_used:\n return\n self.create_lookup(integration)", "title": "" }, { "docid": "9dd031ec5ec94c780cc162e0daa4e330", "score": "0.5065046", "text": "def get_brief_statistics(self):\n healthy_people = 0\n infectious_people = 0\n sick_people = 0\n recovered_people = 0\n dead_people = 0\n\n for i in range(self.persons.shape[0]):\n for j in range(self.persons.shape[1]):\n if self.persons[i, j].state == PersonState.HEALTHY:\n healthy_people += 1\n elif self.persons[i, j].state == PersonState.INFECTIOUS:\n infectious_people += 1\n elif self.persons[i, j].state == PersonState.SICK:\n sick_people += 1\n elif self.persons[i, j].state == PersonState.RECOVERED:\n recovered_people += 1\n elif self.persons[i, j].state == PersonState.DEATH:\n dead_people += 1\n return {\n 'fitness': int(self.fitness),\n 'healthy': healthy_people,\n 'infectious': infectious_people,\n 'sick': sick_people,\n 'recovered': recovered_people,\n 'dead': dead_people,\n 'healthcare': self.healthcare,\n 'hygiene': self.hygiene,\n 'mask': self.mask,\n 'distancing': self.distancing,\n 'curfew': self.curfew,\n 'test_rate': self.test_rate,\n 'quarantine_rules': self.quarantine_rules,\n 'isolation_rules': self.isolation_rules,\n }", "title": "" }, { "docid": "5e47c9a64df03e11c730c1b41ae38c22", "score": "0.50573915", "text": "def service_zonal_stats(aggregate_vector_path, fid_field, output_dir, save_as):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n service_raster_dir = \"F:/Data_service_rasters\"\n service_raster_bn_list = [\n f for f in os.listdir(service_raster_dir) if f.endswith('.tif')]\n service_raster_bn_list.remove(\n 'realized_natureaccess10_nathab_md5_af07e76ecea7fb5be0fa307dc7ff4eed.tif')\n service_raster_bn_list.remove(\n 'realized_natureaccess100_nathab_md5_ac72bb0f6c0460d7d48f7ee25e161b0f.tif')\n service_raster_path_list = [\n os.path.join(service_raster_dir, bn) for bn in 
service_raster_bn_list]\n service_raster_path_list.append(\n \"F:/Data_service_rasters/natureaccess_aligned/realized_natureaccess100_nathab_md5_ac72bb0f6c0460d7d48f7ee25e161b0f.tif\")\n service_raster_path_list.append(\n \"F:/Data_service_rasters/natureaccess_aligned/realized_natureaccess10_nathab_md5_af07e76ecea7fb5be0fa307dc7ff4eed.tif\")\n fid_to_objectid = map_FID_to_field(aggregate_vector_path, fid_field)\n df_path_list = []\n for raster_path in service_raster_path_list:\n if os.path.basename(raster_path).startswith('realized_nature'):\n colname = os.path.basename(raster_path)[9:24]\n else:\n colname = os.path.basename(raster_path)[9:15]\n intermediate_path = os.path.join(\n output_dir, 'zonal_stat_biome_{}.csv'.format(colname))\n df_path_list.append(intermediate_path)\n if not os.path.exists(intermediate_path):\n print(\"processing {} under {}\".format(\n colname, aggregate_vector_path))\n zonal_stat_dict = pygeoprocessing.zonal_statistics(\n (raster_path, 1), aggregate_vector_path,\n polygons_might_overlap=False)\n objectid_zonal_stats_dict = {\n objectid: zonal_stat_dict[fid] for (fid, objectid) in\n fid_to_objectid.items()\n }\n zonal_df = pandas.DataFrame(\n {\n fid_field: [\n key for key, value in sorted(\n objectid_zonal_stats_dict.items())],\n '{}_sum'.format(colname): [\n value['sum'] for key, value in\n sorted(objectid_zonal_stats_dict.items())],\n '{}_count'.format(colname): [\n value['count'] for key, value in\n sorted(objectid_zonal_stats_dict.items())]\n })\n zonal_df['{}_mean'.format(colname)] = (\n zonal_df['{}_sum'.format(colname)] /\n zonal_df['{}_count'.format(colname)])\n zonal_df.to_csv(intermediate_path, index=False)\n merge_data_frame_list(df_path_list, fid_field, save_as)", "title": "" }, { "docid": "c6754ced7a08bec41d60320cf7fcf104", "score": "0.50510323", "text": "def calculate_zonal_statistics(self):\n counter = 1\n for layer in self.input_elevation_rasters:\n QgsMapLayerRegistry.instance().addMapLayer(layer)\n for polygon in self.copied_groyne_cell_polygons:\n QgsMapLayerRegistry.instance().addMapLayer(polygon)\n # Arguments - (polygon, raster, attribute prefix, band, stat to calculate\n zonal_stats = QgsZonalStatistics(polygon, layer.source(), \"GR_{0!s}_\".format(counter), 1,\n QgsZonalStatistics.Mean)\n zonal_stats.calculateStatistics(None)\n counter += 1\n\n self.add_height_adjustment()", "title": "" }, { "docid": "8ccbfc09d33500a1ac54ae6c26423f8c", "score": "0.50365293", "text": "def regions_of_interest(self):\n subseq = [self.start, self.stop]\n\n low_roi = self.rois.get_low_rois(subseq)\n high_roi = self.rois.get_high_rois(subseq)\n js = self.datatable.create_javascript_function()\n lroi = DataTable(low_roi, \"lroi\", self.datatable)\n hroi = DataTable(high_roi, \"hroi\", self.datatable)\n html_low_roi = lroi.create_datatable(float_format=\"%.3g\")\n html_high_roi = hroi.create_datatable(float_format=\"%.3g\")\n roi_paragraph = (\n \"<p>Regions with a z-score {0}er than {1:.2f} and at \"\n \"least one base with a z-score {0}er than {2:.2f} are detected as \"\n \"{0} coverage region. 
Thus, there are {3} {0} coverage regions \"\n \"between the position {4} and the position {5}</p>\"\n )\n low_paragraph = roi_paragraph.format(\n \"low\",\n self.chromosome.thresholds.low2,\n self.chromosome.thresholds.low,\n len(low_roi),\n self.start,\n self.stop,\n )\n high_paragraph = roi_paragraph.format(\n \"high\",\n self.chromosome.thresholds.high2,\n self.chromosome.thresholds.high,\n len(high_roi),\n self.start,\n self.stop,\n )\n\n self.sections.append(\n {\n \"name\": \"Regions Of Interest (ROI)\",\n \"anchor\": \"roi\",\n \"content\": \"{4}\\n\"\n \"<p>Running median is the median computed along the genome \"\n \"using a sliding window. The following tables give regions of \"\n \"interest detected by sequana. Here are some definition of the \"\n \"table's columns:</p>\\n\"\n \"<ul><li><b>mean_cov</b>: the average of coverage</li>\\n\"\n \"<li><b>mean_rm</b>: the average of running median</li>\\n\"\n \"<li><b>mean_zscore</b>: the average of zscore</li>\\n\"\n \"<li><b>max_zscore</b>: the higher zscore contains in the \"\n \"region</li>\\n\"\n \"<li><b>log2_ratio</b>:log2(mean_cov/mean_rm)</li></ul>\\n\"\n \"<h3>Low coverage region</h3>\\n{0}\\n{1}\\n\"\n \"<h3>High coverage region</h3>\\n{2}\\n{3}\\n\".format(\n low_paragraph, html_low_roi, high_paragraph, html_high_roi, js\n ),\n }\n )", "title": "" }, { "docid": "a61bf8f79305d9c31f697355b40ddaf5", "score": "0.5031337", "text": "def Regions(self,\n skip_executable_regions=False,\n skip_shared_regions=False,\n skip_readonly_regions=False):\n\n address = ctypes.c_ulong(0)\n mapsize = ctypes.c_ulong(0)\n count = ctypes.c_uint32(submap_info_size)\n sub_info = vm_region_submap_short_info_data_64()\n depth = 0\n depth_end_addresses = {}\n\n while True:\n c_depth = ctypes.c_uint32(depth)\n\n r = libc.mach_vm_region_recurse(self.task, ctypes.pointer(address),\n ctypes.pointer(mapsize),\n ctypes.pointer(c_depth),\n ctypes.pointer(sub_info),\n ctypes.pointer(count))\n\n # If we get told \"invalid address\", we have crossed into kernel land...\n if r == 1:\n break\n\n if r != 0:\n raise process_error.ProcessError(\"Error in mach_vm_region, ret=%s\" % r)\n\n if depth > 0 and address.value >= depth_end_addresses[depth]:\n del depth_end_addresses[depth]\n depth -= 1\n continue\n\n p = sub_info.protection\n is_executable = p & VM_PROT_EXECUTE\n if skip_executable_regions and is_executable:\n address.value += mapsize.value\n continue\n\n if skip_shared_regions and sub_info.share_mode in [\n SM_COW, SM_SHARED, SM_TRUESHARED\n ]:\n address.value += mapsize.value\n continue\n\n if not p & VM_PROT_READ:\n address.value += mapsize.value\n continue\n\n is_writable = p & VM_PROT_WRITE\n if skip_readonly_regions and not is_writable:\n address.value += mapsize.value\n continue\n\n if sub_info.is_submap:\n depth += 1\n depth_end_addresses[depth] = address.value + mapsize.value\n else:\n yield rdf_memory.ProcessMemoryRegion(\n start=address.value,\n size=mapsize.value,\n is_readable=True,\n is_executable=is_executable,\n is_writable=is_writable)\n address.value += mapsize.value", "title": "" }, { "docid": "36a6e4ffbc045236f47606811f786fc2", "score": "0.5028124", "text": "def print_map_summary(mean_ap, results, dataset=None):\n\tnum_scales = len(results[0]['ap']) if isinstance(results[0]['ap'],np.ndarray) else 1\n\tnum_classes = len(results)\n\n\trecalls = np.zeros((num_scales, num_classes), dtype=np.float32)\n\tprecisions = np.zeros((num_scales, num_classes), dtype=np.float32)\n\taps = np.zeros((num_scales, num_classes), dtype=np.float32)\n\tnum_gts = 
np.zeros((num_scales, num_classes), dtype=int)\n\tfor i, cls_result in enumerate(results):\n\t\tif cls_result['recall'].size > 0:\n\t\t\trecalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]\n\t\t\tprecisions[:, i] = np.array(\n\t\t\t\tcls_result['precision'], ndmin=2)[:, -1]\n\t\taps[:, i] = cls_result['ap']\n\t\tnum_gts[:, i] = cls_result['num_gts']\n\n\tif dataset is None:\n\t\tlabel_names = [str(i) for i in range(1, num_classes + 1)]\n\telif mmcv.is_str(dataset):\n\t\tlabel_names = get_classes(dataset)\n\telse:\n\t\tlabel_names = dataset\n\n\tif not isinstance(mean_ap, list):\n\t\tmean_ap = [mean_ap]\n\theader = ['class', 'gts', 'dets', 'recall', 'precision', 'ap']\n\tfor i in range(num_scales):\n\t\ttable_data = [header]\n\t\tfor j in range(num_classes):\n\t\t\trow_data = [\n\t\t\t\tlabel_names[j], num_gts[i, j], results[j]['num_dets'],\n\t\t\t\t'{:.3f}'.format(recalls[i, j]), '{:.3f}'.format(\n\t\t\t\t\tprecisions[i, j]), '{:.3f}'.format(aps[i, j])\n\t\t\t]\n\t\t\ttable_data.append(row_data)\n\t\ttable_data.append(['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])])\n\t\ttable = AsciiTable(table_data)\n\t\ttable.inner_footing_row_border = True\n\n\t\t#print(table.table)", "title": "" }, { "docid": "9c05511e42c399e3bac69e60ecf01810", "score": "0.50123817", "text": "def add_pruning_summaries(self):\n with tf.name_scope(self._spec.name + '_summaries'):\n tf.summary.scalar('sparsity', self._sparsity)\n tf.summary.scalar('last_mask_update_step', self._last_update_step)\n tf.summary.scalar('last_gradient_update_step',\n self._last_gradient_update_step)\n masks = get_masks()\n thresholds = get_thresholds()\n gradients = get_gradients()\n\n for mask, threshold, gradient in zip(masks, thresholds, gradients):\n tf.summary.scalar(mask.op.name + '/sparsity', tf.nn.zero_fraction(mask))\n tf.summary.scalar(threshold.op.name + '/threshold', threshold)\n tf.summary.scalar(gradient.op.name + '/gradient', tf.norm(gradient))\n tf.summary.scalar(gradient.op.name + '/gradient-sparsity',\n tf.nn.zero_fraction(gradient))\n tf.summary.histogram(gradient.op.name + '/abs.gradient', gradient)", "title": "" }, { "docid": "6fabc4abbac7c4c83e3b1e447ee9ab0a", "score": "0.5006452", "text": "def global_kba_summary(service_raster_path, kba_raster_path):\n service_nodata = pygeoprocessing.get_raster_info(\n service_raster_path)['nodata'][0]\n kba_nodata = pygeoprocessing.get_raster_info(\n kba_raster_path)['nodata'][0]\n\n service_raster = gdal.OpenEx(service_raster_path)\n service_band = service_raster.GetRasterBand(1)\n\n kba_raster = gdal.OpenEx(kba_raster_path)\n kba_band = kba_raster.GetRasterBand(1)\n\n try:\n summary_dict = {\n 'global_service_sum': 0,\n 'global_service_sum_in_KBA': 0,\n }\n last_blocksize = None\n for block_offset in pygeoprocessing.iterblocks(\n (service_raster_path, 1), offset_only=True):\n blocksize = (block_offset['win_ysize'], block_offset['win_xsize'])\n\n if last_blocksize != blocksize:\n service_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(service_band))\n kba_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(kba_band))\n last_blocksize = blocksize\n\n service_data = block_offset.copy()\n service_data['buf_obj'] = service_array\n service_band.ReadAsArray(**service_data)\n\n kba_data = block_offset.copy()\n kba_data['buf_obj'] = kba_array\n kba_band.ReadAsArray(**kba_data)\n\n valid_mask = (service_array != service_nodata)\n # the below was necessary for 10km CV raster\n # valid_mask = (service_array > 0)\n 
summary_dict['global_service_sum'] += (\n numpy.sum(service_array[valid_mask]))\n\n kba_mask = (\n valid_mask &\n (kba_array != kba_nodata) &\n (kba_array > 0))\n summary_dict['global_service_sum_in_KBA'] += (\n numpy.sum(service_array[kba_mask]))\n\n summary_dict['global_service_percent_in_KBA'] = (\n float(summary_dict['global_service_sum_in_KBA']) /\n summary_dict['global_service_sum'] * 100)\n\n finally:\n service_band = None\n kba_band = None\n gdal.Dataset.__swig_destroy__(service_raster)\n gdal.Dataset.__swig_destroy__(kba_raster)\n\n return summary_dict", "title": "" }, { "docid": "bf257628ed351db20ba2c61bebd236dd", "score": "0.5000245", "text": "def test_regions_get(self):\n pass", "title": "" }, { "docid": "b299ed69cae6fca273c34342b96f7657", "score": "0.49989748", "text": "def unmap_all(self):\n\n # clear out the processed / computed coverage data structures\n self.nodes = {}\n self.functions = {}\n self.partial_nodes = set()\n self.partial_instructions = set()\n self.orphan_addresses = set()\n\n # dump the source coverage data back into an 'unmapped' state\n self.unmapped_addresses = set(self._hitmap.keys())", "title": "" }, { "docid": "4d665680bf302042f72bdf49517afa1c", "score": "0.49922562", "text": "def perform_statistics(image, data_table):\n\n print('Calculating statistics')\n\n num_objects = int(np.max(data) - 1)\n\n for object_number in range(2, num_objects+2):\n\n # Determine the area\n area = image[np.where(image == object_number)].size\n data_table[object_number-2]['Area'] = area\n\n # Classify the cosmic ray\n if area < 5:\n \tdata_table[object_number-2]['Classification'] = 'S'\n elif 5 <= area <= 12:\n \tdata_table[object_number-2]['Classification'] = 'M'\n else:\n \tdata_table[object_number-2]['Classification'] = 'L'", "title": "" }, { "docid": "162c3a07e7aa7d8f96537b7c9cb6632d", "score": "0.4988535", "text": "def generate_p_value_maps(src):\n #Load the data (heat maps generated previously )\n #make groups\n groupA = [os.path.join(flds, fld) for fld in os.listdir(flds) if conditions[os.path.basename(fld)] == \"homecage_control\"]; groupA.sort() \n groupB = [os.path.join(flds, fld) for fld in os.listdir(flds) if conditions[os.path.basename(fld)] == \"CNO_control_no_reversal\"]; groupB.sort()\n groupC = [os.path.join(flds, fld) for fld in os.listdir(flds)if conditions[os.path.basename(fld)] == \"CNO_control_reversal\"]; groupC.sort()\n groupD = [os.path.join(flds, fld) for fld in os.listdir(flds) if conditions[os.path.basename(fld)] == \"DREADDs\"]; groupC.sort()\n\n group_a = [xx+\"/cells_heatmap_60um_erosion.tif\" for xx in groupA]\n group_b = [xx+\"/cells_heatmap_60um_erosion.tif\" for xx in groupB] \n group_c = [xx+\"/cells_heatmap_60um_erosion.tif\" for xx in groupC] \n group_d = [xx+\"/cells_heatmap_60um_erosion.tif\" for xx in groupD] \n \n \n grp_a = stat.readDataGroup(group_a)\n grp_b = stat.readDataGroup(group_b)\n grp_c = stat.readDataGroup(group_c)\n grp_d = stat.readDataGroup(group_d)\n \n #Generated average and standard deviation maps\n ##############################################\n grp_aa = np.mean(grp_a, axis = 0)\n grp_as = np.std(grp_a, axis = 0)\n \n grp_ba = np.mean(grp_b, axis = 0)\n grp_bs = np.std(grp_b, axis = 0)\n \n grp_ca = np.mean(grp_c, axis = 0)\n grp_cs = np.std(grp_c, axis = 0)\n \n grp_da = np.mean(grp_d, axis = 0)\n grp_ds = np.std(grp_d, axis = 0)\n \n io.writeData(os.path.join(src, \"group_a_mean.raw\"), rsp.sagittalToCoronalData(grp_aa))\n io.writeData(os.path.join(src, \"group_a_std.raw\"), rsp.sagittalToCoronalData(grp_as))\n 
\n io.writeData(os.path.join(src, \"group_b_mean.raw\"), rsp.sagittalToCoronalData(grp_ba))\n io.writeData(os.path.join(src, \"group_b_std.raw\"), rsp.sagittalToCoronalData(grp_bs))\n \n io.writeData(os.path.join(src, \"group_c_mean.raw\"), rsp.sagittalToCoronalData(grp_ca))\n io.writeData(os.path.join(src, \"group_c_std.raw\"), rsp.sagittalToCoronalData(grp_cs))\n \n io.writeData(os.path.join(src, \"group_d_mean.raw\"), rsp.sagittalToCoronalData(grp_da))\n io.writeData(os.path.join(src, \"group_d_std.raw\"), rsp.sagittalToCoronalData(grp_ds))\n \n #Generate the p-values map\n ##########################\n #first comparison\n #pcutoff: only display pixels below this level of significance\n pvals, psign = stat.tTestVoxelization(grp_a.astype(\"float\"), grp_d.astype(\"float\"), signed = True, pcutoff = 0.05)\n \n #color the p-values according to their sign (defined by the sign of the difference of the means between the 2 groups)\n pvalsc = stat.colorPValues(pvals, psign, positive = [0,1], negative = [1,0]);\n io.writeData(os.path.join(src, \"pvalues_homecage_control_vs_DREADDs.tif\"), rsp.sagittalToCoronalData(pvalsc.astype(\"float32\")));\n \n #second comparison\n pvals, psign = stat.tTestVoxelization(grp_a.astype(\"float\"), grp_b.astype(\"float\"), signed = True, pcutoff = 0.05)\n pvalsc = stat.colorPValues(pvals, psign, positive = [0,1], negative = [1,0]);\n io.writeData(os.path.join(src, \"pvalues_homecage_control_vs_CNO_control_no_reversal.tif\"), rsp.sagittalToCoronalData(pvalsc.astype(\"float32\")))\n \n #third comparison\n pvals, psign = stat.tTestVoxelization(grp_b.astype(\"float\"), grp_c.astype(\"float\"), signed = True, pcutoff = 0.05)\n pvalsc = stat.colorPValues(pvals, psign, positive = [0,1], negative = [1,0]);\n io.writeData(os.path.join(src, \"pvalues_CNO_control_no_reversal_vs_CNO_control_reversal.tif\"), rsp.sagittalToCoronalData(pvalsc.astype(\"float32\")))\n \n #fourth comparison\n pvals, psign = stat.tTestVoxelization(grp_c.astype(\"float\"), grp_d.astype(\"float\"), signed = True, pcutoff = 0.05)\n pvalsc = stat.colorPValues(pvals, psign, positive = [0,1], negative = [1,0]);\n io.writeData(os.path.join(src, \"pvalues_CNO_control_reversal_vs_DREADDs.tif\"), rsp.sagittalToCoronalData(pvalsc.astype(\"float32\")))", "title": "" }, { "docid": "5f3a71daf8aa44a4c57a035a5f861631", "score": "0.49882853", "text": "def create_reg_mean_txtfile(mmvalues,filename,lat,lon):\n\n plotregions = ['NE US', 'SE US', 'IMW US', \\\n 'C. Europe', 'S. Asia', 'E. Asia','midlatplusboreal']\n reg_minlat = np.array([36., 30., 36., 45.,25., 25.,40])\n reg_maxlat = np.array([46., 38., 46., 54.,40., 40.,65])\n reg_minlon = np.array([-80., -95.,-110., 0., 65,105.,0])\n reg_maxlon = np.array([-70., -80.,-100., 25.,90, 120.,360])\n nregions = 7\n\n reg_minlon[reg_minlon<0.] = reg_minlon[reg_minlon<0.]+360.\n reg_maxlon[reg_maxlon<0.] 
= reg_maxlon[reg_maxlon<0.]+360.\n\n outF = open(\"./txt/\"+filename,\"w\")\n for r in range(0,nregions):\n # get indices for lat/lon bounds for a given region\n reg_minlat_idx = geo_idx(reg_minlat[r],lat)\n reg_maxlat_idx = geo_idx(reg_maxlat[r],lat)\n reg_minlon_idx = geo_idx(reg_minlon[r],lon)\n reg_maxlon_idx = geo_idx(reg_maxlon[r],lon)\n data = np.nanmean(mmvalues[reg_minlat_idx:reg_maxlat_idx,reg_minlon_idx:reg_maxlon_idx])\n outF.write(plotregions[r]+\" \")\n outF.write(str(round(data,3)))\n outF.write(\"\\n\")\n outF.close()", "title": "" }, { "docid": "b843cb445c37dda154743ac75cbc395f", "score": "0.4978564", "text": "def aggregateUsersByTweetCountByRegion(self):", "title": "" }, { "docid": "1f96e2d30d2bbeada7872b3502accb20", "score": "0.49783677", "text": "def get_crop_statistics_for_year(mean_raster, std_raster, land_use_raster, crop_codes, crop_means_output=None, crop_mean_difference_output=None, crop_mean_difference_pct_output=None, crop_variation_output=None):\n\n\t# Alternative approach to get one raster back - Extract by attributes the land use mask per crop - then extract\n\t# by mask the ET data for those locations, and get the mean value (use function for that)\n\t# Store the mean value in a dictionary per crop, and then, when done getting all mean values, use them to do a single\n\t# Reclassify on the land use mask to make a crop means raster. Use that in the output to get deviation from mean\n\n\tarcpy.CheckOutExtension(\"Spatial\")\n\n\tmeans = [] # we'll need a list of lists for this to work - something of the form [[1,9],[2,16]] - maps values that were one to new value of nine, etc\n\ttry:\n\t\tfor crop in crop_codes[\"values\"]:\n\t\t\tif type(crop) in (six.text_type, six.binary_type): # if it's text, rather than numbers, make sure to quote it both for expressions and for Reclassify\n\t\t\t\tcrop_code = \"'{}'\".format(crop)\n\t\t\telse:\n\t\t\t\tcrop_code = crop\n\n\t\t\tcrop_mean = get_crop_mean(mean_raster, land_use_raster, crop_codes[\"variable\"], crop_code)\n\t\t\tif crop_mean is None or np.isnan(crop_mean):\n\t\t\t\tcontinue\n\n\t\t\tmeans.append([crop_code, int(crop_mean)]) # get the mean for the crop and store it - make it an int, so it'll work in reclassify (adds a very tiny amount of error)\n\n\t\tcrop_means_raster = arcpy.sa.Reclassify(land_use_raster, crop_codes[\"variable\"], arcpy.sa.RemapValue(means), missing_values=\"NODATA\")\n\t\tcrop_mean_difference_raster = crop_means_raster - mean_raster\n\t\tcrop_variation = std_raster / crop_means_raster\n\t\tcrop_mean_difference_pct_raster = crop_mean_difference_raster / mean_raster\n\n\t\tif crop_means_output:\n\t\t\tcrop_means_raster.save(crop_means_output)\n\t\tif crop_mean_difference_output:\n\t\t\tcrop_mean_difference_raster.save(crop_mean_difference_output)\n\t\tif crop_variation_output:\n\t\t\tcrop_variation.save(crop_variation_output)\n\t\tif crop_mean_difference_pct_output:\n\t\t\tcrop_mean_difference_pct_raster.save(crop_mean_difference_pct_output)\n\tfinally:\n\t\tarcpy.CheckInExtension(\"Spatial\")\n\n\treturn crop_variation", "title": "" }, { "docid": "e5ed3c93fabbc74d14442aafc8c537a1", "score": "0.4976454", "text": "def write_output(mapping_dict, args):\n handle = pysam.AlignmentFile(args.input, 'rb')\n out_handle = open(args.output,'w')\n header = ['Species','Locus']\n sample_order = []\n for rg_dict in handle.header['RG']:\n header.append(rg_dict['SM'])\n sample_order.append(rg_dict['SM'])\n out_handle.write('\\t'.join(header)+'\\n')\n for contig in handle.header['SQ']:\n contig_name = 
contig['SN']\n out_line = ['_'.join(contig_name.split('_')[:-1]),contig_name]\n if contig_name in mapping_dict and contig_name not in mapping_dict['discard']:\n subdict = mapping_dict[contig['SN']]\n if sum(subdict.values()) > 50:\n for sample in sample_order:\n if sample in subdict:\n out_line.append(str(subdict[sample]))\n else:\n out_line.append('0')\n out_handle.write('\\t'.join(out_line)+'\\n')\n out_handle.close()", "title": "" }, { "docid": "b9f432c3100ed554b67f687a509cdcae", "score": "0.49720514", "text": "def gen_analysis(states):\n\n ret = {} # dictionary to hold all the graphs\n df = _get_df(states)\n\n # use these keys to get the correponding graphs\n ret['prediction'] = _gen_predictive_model(df, use_outliers=True)\n ret['top10'] = _significant_months_barchart(df, metric='damage')\n ret['month_dist'] = _monthly_dist(df, metric='damage')\n ret['total_injuries'] = df['INJURIES'].sum()\n ret['total_deaths'] = df['DEATHS'].sum()\n \n temp = df['DAMAGE'].sum()\n magnitude = 0\n while temp >= 1000:\n magnitude += 1\n temp /= 1000.0\n # add more suffixes if you need them\n ret['total_damage'] = ('%.2f%s' % (temp, ['', 'K', 'M', 'B', 'T', 'Q'][magnitude]))\n\n return ret", "title": "" }, { "docid": "5a4049eb7ccd2786bc735fcdc4770ae5", "score": "0.49715474", "text": "def bar_print_by_country(l_members_by_country, l_data_range, l_mappers_file):\n\n # minimum percent of a country to be labeled in the chart\n label_min = 0.3 / 100\n\n day = 1\n months = (l_data_range - 1) % 12 + 1\n year = 2016 + int(l_data_range - 1) / 12\n date = str(int(year)) + '/' + str(months) + '/' + str(day)\n\n labels = list()\n osmf_sizes = list()\n mapper_sizes = list()\n mappers = dict()\n\n total = 0\n for l_line in l_members_by_country:\n total = total + l_line[1]\n\n with open(l_mappers_file) as f:\n content = f.readlines()\n # you may also want to remove whitespace characters like `\\n` at the end of each line\n content = [x.strip() for x in content]\n for l_line in content:\n country = l_line.split(\"\\t\")\n mappers[country[0]] = country[1]\n\n for l_line in l_members_by_country:\n if float(l_line[1]) / total >= label_min:\n labels.append((l_line[0] + ' (' + '{:.1%}'.format(float(l_line[1]) / total) + ')'))\n else:\n labels.append(' ')\n osmf_sizes.append(l_line[1])\n\n\n if l_line[0] == \"Iran, Islamic Republic of\":\n mapper_sizes.append(mappers.get('Iran', 0))\n elif l_line[0] == \"Russian Federation\":\n mapper_sizes.append(mappers.get('Russia', 0))\n elif l_line[0] == \"Côte d'Ivoire\":\n mapper_sizes.append(mappers.get('Ivory Coast', 0))\n elif l_line[0] == \"Congo, The Democratic Republic of\":\n mapper_sizes.append(mappers.get('Congo - Kinshasa', 0))\n elif l_line[0] == \"Myanmar\":\n mapper_sizes.append(mappers.get('Myanmar(Burma)', 0))\n elif mappers.get(l_line[0], 0) == 0:\n print(l_line[0], ' not found')\n mapper_sizes.append(mappers.get(l_line[0], 0))\n else:\n mapper_sizes.append(mappers.get(l_line[0], 0))\n\n l_mapper_members_df = pd.DataFrame(\n {'Country': labels,\n 'OSMF Members': osmf_sizes,\n 'OSM Mappers': mapper_sizes\n })\n\n print(l_mapper_members_df)\n l_mapper_members_df['OSM Mappers'] = l_mapper_members_df['OSM Mappers'].astype(np.int64)\n\n y_pos = np.arange(len(labels))\n\n fig, ax = plt.subplots()\n\n # Create horizontal bars\n l_mapper_members_df.plot(kind='barh', legend=True, fontsize=8, figsize=(28, 12), ax=ax)\n\n plt.title(\"OSMF Membership Statistics\", fontweight='bold', fontsize=14)\n\n plt.figtext(0.01, 0.01, ('OSMF as of ' + date + '- Mapper as of Nov 2018 - ex 
Corporate Members' + ' - Countries with less than {:.1%}'.format(float(label_min)) + ' left blank intentionally'), horizontalalignment='left', fontsize=8)\n\n # plt.subplots_adjust(right=0.97, left=0.08, bottom=0.05)\n\n # Create names on the y-axis\n plt.yticks(y_pos, labels)\n\n fig.tight_layout()\n fig.subplots_adjust(bottom=0.05)\n filename = 'byCountry1.png'\n filename = chart_location + filename\n plt.savefig(filename)\n if not headless:\n plt.show()\n\n return l_mapper_members_df", "title": "" }, { "docid": "9151eafd1ee6962aa92d644dabeb9626", "score": "0.49675223", "text": "def CalculateResidentSize(self, mmap):\n assert(isinstance(mmap, memory_map.Map))\n for alloc in self.allocations:\n # This function loops over all the memory pages that intersect, partially\n # or fully, with each allocation. For each of them, the allocation is\n # attributed a resident size equal to the size of intersecting range iff\n # the page is resident.\n # The tricky part is that, in the general case, an allocation can span\n # over multiple (contiguous) mmaps. See the chart below for a reference:\n #\n # VA space: |0 |4k |8k |12k |16k |20k |24k |28k |32k |\n # Mmaps: [ mm 1 ][ mm2 ] [ map 3 ]\n # Allocs: <a1> < a2 > < a3 >\n #\n # Note: this accounting technique is not fully correct but is generally a\n # good tradeoff between accuracy and speed of profiling. The OS provides\n # resident information with the page granularity (typ. 4k). Finer values\n # would require more fancy techniques based, for instance, on run-time\n # instrumentation tools like Valgrind or *sanitizer.\n cur_start = alloc.start\n mm = None\n while cur_start < alloc.end:\n if not mm or not mm.Contains(cur_start):\n mm = mmap.Lookup(cur_start)\n if mm:\n page, page_off = mm.GetRelativeMMOffset(cur_start)\n if mm.IsPageResident(page):\n page_end = mm.start + page * PAGE_SIZE + PAGE_SIZE - 1\n alloc_memory_in_current_page = PAGE_SIZE - page_off\n if alloc.end < page_end:\n alloc_memory_in_current_page -= page_end - alloc.end\n alloc.resident_size += alloc_memory_in_current_page\n # Move to the next page boundary.\n cur_start = (cur_start + PAGE_SIZE) & ~(PAGE_SIZE - 1)", "title": "" }, { "docid": "649ab53a86b3cdb0d56d07df9ab2dc10", "score": "0.49666697", "text": "def extract_stats(shp_files, rst_files, field,\n stats_of_interest, output_fn):\n all_combinations = itools.product(*[shp_files, rst_files])\n\n try:\n _loop = tqdm(all_combinations)\n except NameError:\n _loop = all_combinations\n for shp, rst in _loop:\n # Gets raster file name\n sheet_name = os.path.basename(rst)\n shp_name = os.path.basename(shp)\n\n try:\n # Set msg for progress bar if available\n _loop.set_description(\"{} {} {}\".format(shp_name, sheet_name, field))\n except AttributeError:\n pass\n\n shp_data = gpd.read_file(shp)\n shp_data['area'] = shp_data.geometry.area\n shp_data = shp_data.sort_values('area')\n\n if field not in shp_data.columns:\n print(\"Error: could not find {} in shapefile\".format(field))\n print(\" Must be one of {}\".format(shp_data.columns))\n continue\n\n with rasterio.open(rst) as src:\n crs_matches, issue = matching_crs(src, shp_data)\n if not crs_matches:\n with pd.ExcelWriter(output_fn, engine='openpyxl')\\\n as writer:\n df = pd.DataFrame()\n df.to_excel(writer, sheet_name, startrow=2)\n del df\n\n cmt = \"Could not process {} with {}, incorrect CRS.\"\\\n .format(sheet_name, shp_name)\n worksheet = writer.sheets[sheet_name]\n worksheet.cell(row=1, column=1).value = cmt\n worksheet.cell(row=2, column=1).value = issue\n # End with\n 
continue\n # End if\n\n nodata = src.nodatavals\n transform = src.transform\n rst_data = src.read(1)\n rst_data = np.ma.masked_array(rst_data, mask=(rst_data == nodata))\n # End with\n\n d_shp = shp_data.dissolve(by=field)\n result = zonal_stats(d_shp, rst_data,\n affine=transform,\n stats=stats_of_interest,\n nodata=nodata,\n geojson_out=True)\n geostats = gpd.GeoDataFrame.from_features(result)\n df = pd.DataFrame(geostats)\n\n # Filter out rows with empty data\n df = df[(df.loc[:, stats_of_interest] > 0.0).any(axis=1)]\n\n # Reset index name so it correctly appears when writing out to file\n df.index.name = field\n\n # Write out dataframe to excel file\n try:\n book = load_workbook(output_fn)\n except FileNotFoundError:\n book = None\n\n # Write out data to excel file\n with pd.ExcelWriter(output_fn, engine='openpyxl') as writer:\n if book:\n writer.book = book\n\n df.to_excel(writer, sheet_name, startrow=2)\n comment = \"# Extracted from {} using {}\"\\\n .format(sheet_name,\n shp_name)\n worksheet = writer.sheets[sheet_name]\n worksheet.cell(row=1, column=1).value = comment\n # End with\n # End combination loop", "title": "" }, { "docid": "b094b3f0eb9020a76fb7f85ece936f6b", "score": "0.4964625", "text": "def test_get_mapping_dump(self):\n pass", "title": "" }, { "docid": "ef35c713dbd0189165243fb35e5e0429", "score": "0.49643943", "text": "def demo_top_n_regions(self, n, year):\n # read demo file\n demo_df = pd.read_csv(demography_file, encoding='latin1', index_col='GEO')\n # clean population data\n demo_df['Value'] = demo_df['Value'].map(lambda val: pd.NaT if val == ':' else float(val.replace(',', '')))\n # filter by any year, as regions don't actually move, right?\n demo_df = demo_df[demo_df['TIME'] == year]\n # filter all regions with an id of length 5 all others are countries etc\n demo_df = demo_df[[len(reg) == 5 for reg in demo_df.index]]\n # sort by population\n demo_df.sort_values('Value', axis=0, ascending=False, inplace=True, kind=\"quicksort\", na_position=\"last\")\n return {self.get_region(region_id).record.NUTS_NAME.strip('\\000'): region['Value']\n for region_id, region in demo_df.head(n).iterrows()}", "title": "" }, { "docid": "da076b5397547fe5c394e121590a7324", "score": "0.49608833", "text": "def mapping(resp):\n map_data = [data for data in resp]\n for data in map_data:\n region_map = Map(\n identifier=\"region\",\n zoom=10,\n style=\"height:600px;width:100%;margin:0;\",\n lat=data['latitude'],\n lng=data['longitude'],\n markers=[(data['latitude'], data['longitude'], data['title']) for\n data in\n map_data],\n fit_markers_to_bounds=True,\n # zooms in/out to fit all markers in box\n )\n return region_map", "title": "" }, { "docid": "ddd21dc53d28fd624e8918d724f584a8", "score": "0.49608007", "text": "def print_stats(self):\n headers = []\n headers.append(\"IMAGES\")\n if self.prediction_type == \"r\":\n lbl_tmp = self.df[self.label_col].unique()\n hist, bin_edges = np.histogram(lbl_tmp, bins='auto')\n p = bin_edges[0]\n for s in bin_edges[1:]:\n headers.append(str(np.round(p, 2)) + \" - \" + str(np.round(s, 2)))\n p = s\n else:\n headers += [cls for cls in self.classes]\n headers.append(\"PATIENTS\")\n if self.prediction_type == \"r\":\n hist, bin_edges = np.histogram(self.df[self.label_col].unique(), bins='auto')\n p = bin_edges[0]\n for s in bin_edges[1:]:\n headers.append(str(np.round(p, 2)) + \" - \" + str(np.round(s, 2)))\n p = s\n else:\n headers += [cls for cls in self.classes]\n\n if self.hold_out_size is not None:\n stats = [\n [\"Train\"] + 
self._get_stats(self.df_trn, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Val\"] + self._get_stats(self.df_val, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Hold\"] + self._get_stats(self.df_ho, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Total\"] + self._get_stats(self.df, self.prediction_type, self.label_col, self.ptid_col, self.classes),\n ]\n else:\n stats = [\n [\"Train\"] + self._get_stats(self.df_trn, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Val\"] + self._get_stats(self.df_val, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Total\"] + self._get_stats(self.df, self.prediction_type, self.label_col, self.ptid_col, self.classes),\n ]\n print(tabulate(stats, headers=headers))\n print()\n print(\"Data shape: {}\".format(self.train_ds.shape))\n if self.z_factor is not None:\n print(\n \"NOTE: data have been downsized by a factor of {}\".format(self.z_factor)\n )", "title": "" }, { "docid": "6247db90f633e546c23b7fe513c2f5af", "score": "0.49579135", "text": "def scan_locations_optogen_illumination():\n count = 0\n \n irrs = [ a*b for a in [0.01, 0.1] for b in [1,2,5]] + list(np.arange(1.,5.1,0.25)) \n \n\n for factor in factors:\n for irr in irrs: \n pp = {}\n # neuron model and params\n pp['cell'] = ['Neuron', 'SHStellate']\n pp['cell_params'] = {}\n \n # opsin\n chrdict = {'exp':5e-4, 'irradiance' :irr, 'pulsewidth': light_dur,'lightdelay':light_on,'interpulse_interval':250, 'n_pulses':1}\n hrdict = {'exp':5e-4, 'irradiance' :factor*irr, 'pulsewidth': light_dur,'lightdelay':light_on,'interpulse_interval':250, 'n_pulses':1}\n \n pp['opsindict'] = {}\n if irr > 0 :\n pp['opsindict']['ChR'] = {'soma': chrdict,\n 'dendrite':chrdict}\n pp['ChR_areas'] = {'whole' : pp['opsindict']['ChR'].keys()}\n else:\n pp['ChR_areas'] = {'none' : [None]}\n \n if irr > 0 and factor > 0:\n pp['opsindict']['NpHR'] = {'soma': hrdict,\n 'dendrite':hrdict}\n pp['NpHR_areas'] = {'whole' : pp['opsindict']['NpHR'].keys()}\n \n else:\n pp['NpHR_areas'] = {'none' : [None]}\n \n # general settings \n pp['experiment_type'] = 'opsinonly'\n pp['savedata'] = True # False #True\n \n pp['tstart'] = 0\n pp['tstop'] = tstop\n \n pp['mark_loc'] = {}\n pp['mark_loc']['names'] = ['mysoma']\n pp['mark_loc']['sections'] = ['soma']\n pp['mark_loc']['ids'] = [(0,0.5)]\n \n pp['record_loc'] = {}\n pp['record_loc']['v'] = ['mysoma']\n \n pp['num_threads'] = 1\n \n pp.update({'expname':expbase,\n 'description':'irr%.3f_factor%.2f_illOnly'%(irr,factor)})\n \n es.run_single_experiment(expbase, 'missing', pp)\n #return\n count += 1\n \n print '%g jobs submitted'%count", "title": "" }, { "docid": "a9c90d512cd9d610063557e99b25b266", "score": "0.49577627", "text": "def tag_regions(stokes_cube, \n regionsfn = \"dE.reg\", \n sigma = 2.3, \n block_size=80, \n hdu_id = 0, \n use_stokes=\"I\", \n global_stat_percentile=30.0,\n min_blocks_in_region = 3,\n min_distance_from_centre = 0,\n exclusion_zones=[],\n max_right_skewness=np.inf,\n max_abs_skewness=np.inf,\n max_positive_to_negative_flux=np.inf):\n fn = stokes_cube\n w, hdr, band_avg = read_stokes_slice(stokes_cube, hdu_id, use_stokes, average_channels=True)\n bin_lower = np.arange(0, band_avg.shape[0], block_size)\n bin_upper = np.clip(bin_lower + block_size, 0, band_avg.shape[0])\n assert bin_lower.shape == bin_upper.shape\n if band_avg.shape[0] != band_avg.shape[1]:\n raise TypeError(\"Image must be square!\")\n print>>log, \"Creating 
regions of {0:d} px\".format(block_size)\n binned_stats = np.zeros((bin_lower.shape[0],\n bin_lower.shape[0]))\n for y, (ly, uy) in enumerate(zip(bin_lower, bin_upper)):\n for x, (lx, ux) in enumerate(zip(bin_lower, bin_upper)):\n wnd = band_avg[ly:uy, lx:ux].flatten()\n binned_stats[y, x] = np.std(wnd)\n percentile_stat = np.nanpercentile(binned_stats, global_stat_percentile)\n segment_cutoff = percentile_stat * sigma\n print>>log, \"Computed regional statistics (global std of {0:.2f} mJy)\".format(percentile_stat * 1.0e3)\n tagged_regions = []\n for (y, x) in np.argwhere(binned_stats > segment_cutoff):\n det = binned_stats[y, x] / float(percentile_stat)\n reg_name = \"reg[{0:d},{1:d}]\".format(x, y)\n tagged_regions.append(BoundingBox(bin_lower[x], bin_upper[x], \n bin_lower[y], bin_upper[y], \n det, reg_name, w, band_avg))\n \n if min_distance_from_centre > 0:\n print>>log, \"Enforsing radial exclusion zone of {0:.2f} px form \" \\\n \"phase tracking centre\".format(min_distance_from_centre)\n crra, crdec = getcrpix(fn, hdu_id, use_stokes)\n exclusion_zones.append((crra,\n crdec,\n float(min_distance_from_centre)))\n\n # enforce all exclusion zones\n print>>log, \"Enforsing exclusion zones:\"\n for (cx, cy, exclrad) in exclusion_zones:\n tagged_regions = filter(notin(filter(within_radius_from(exclrad, cx, cy), \n tagged_regions)), \n tagged_regions)\n if len(exclusion_zones) == 0: \n print>>log, \"\\t - No exclusion zones\"\n print>>log, \"Merging regions:\" \n prev_tagged_regions = copy.deepcopy(tagged_regions)\n tagged_regions = [i for i in merge_regions(tagged_regions, \n exclusion_zones=exclusion_zones)]\n if prev_tagged_regions == tagged_regions: \n print>>log, \"\\t - No mergers\" \n # apply regional filters\n print>>log, \"Culling regions based on filtering criteria:\"\n prev_tagged_regions = copy.deepcopy(tagged_regions)\n min_area=min_blocks_in_region * block_size**2\n tagged_regions = filter(notin(filter(arealess(min_area=min_area), \n tagged_regions)), \n tagged_regions)\n tagged_regions = filter(notin(filter(skewness_more(max_skewness=max_right_skewness,\n absskew=False), \n tagged_regions)),\n tagged_regions)\n tagged_regions = filter(notin(filter(skewness_more(max_skewness=max_abs_skewness,\n absskew=True), \n tagged_regions)),\n tagged_regions)\n tagged_regions = filter(notin(filter(pos2neg_more(max_positive_to_negative_flux), \n tagged_regions)),\n tagged_regions)\n if prev_tagged_regions == tagged_regions: \n print>>log, \"\\t - No cullings\"\n # finally we're done\n with open(regionsfn, \"w+\") as f:\n f.write(\"# Region file format: DS9 version 4.0\\n\")\n f.write(\"global color=red font=\\\"helvetica 6 normal roman\\\" edit=1 move=1 delete=1 highlite=1 include=1 wcs=wcs\\n\")\n for reg in tagged_regions:\n f.write(\"physical; polygon({0:s}) #select=1 text={1:s}\\n\".format(\",\".join(map(str, reg.corners.flatten())),\n \"{mean area deviation %.2fx}\" % reg._sigma))\n print>>log, \"Writing dE regions to DS9 regions file {0:s}\".format(regionsfn)\n print>>log, \"The following regions must be tagged for dEs ({0:.2f}x{1:.2f} mJy)\".format(sigma, percentile_stat * 1.0e3)\n if len(tagged_regions) > 0:\n for r in tagged_regions:\n print>>log, \"\\t - {0:s}\".format(str(r))\n else:\n print>>log, \"\\t - No regions met cutoff criteria. 
No dE tags shall be raised.\"\n return tagged_regions", "title": "" }, { "docid": "32aecd181ed0666f7a0c8d3d79ef22c7", "score": "0.49390844", "text": "def getstats(ds_v, act_v, dis_v):\n # Get some ifno and stats\n var = ds_v[act_v]\n var_m = round(var.mean(), 8)\n var_sd = round(var_m / len(var) ** 0.5, 8)\n var_max = round(np.amax(var), 8)\n var_min = round(np.amin(var), 8)\n var_sm = var.mean(axis=(1, 2)) # get the spatial mean (keeps the time dimension)\n var_tm = var.mean(axis=0) # get of mean over time (keeps lat/lon)\n var_sm_sd = var_sm / len(var_sm) ** 0.5\n var_tm_sd = var_tm / len(var_tm) ** 0.5\n var_units = ds_v[act_v + '_units']\n north = np.around(ds_v['max_lat'], 2)\n south = np.around(ds_v['min_lat'], 2)\n east = np.around(ds_v['max_lon'], 2)\n west = np.around(ds_v['min_lon'], 2)\n lat_units = ds_v['lat_units']\n lon_units = ds_v['lon_units']\n\n sd = {'variable': str(dis_v), 'units': str(var_units), 'overall mean': str(var_m),\n 'stdev': str(var_sd), 'max': str(var_max), 'min': str(var_min), 'max_lat': str(north),\n 'min_lat': str(south), 'max_lon': str(east), 'min_lon': str(west)}\n\n s = ['\\n' + 'Variable: ' + '\\n' +\n str(dis_v) + '\\n\\n' +\n 'Units: ' + str(var_units) + '\\n\\n' +\n 'Overall Mean: ' + str(var_m) + '\\n\\n' +\n 'St.Dev: ' + str(var_sd) + '\\n\\n' +\n 'Region Coordinates: ' + '\\n\\n' +\n str(lat_units) + ': ' + str(south) + ' to ' + str(north) + '\\n\\n' +\n str(lon_units) + ': ' + str(west) + ' to ' + str(east)]\n\n return s[0], sd", "title": "" }, { "docid": "5e3eec2df79b4a8996aee38d20e21a21", "score": "0.49379995", "text": "def get_index_stats(self):\n\n assert self._check_idx, 'No index available'\n idx_stats = []\n for ref in range(self._header.n_refs):\n try:\n mapped = self._index.unmapped[ref].n_mapped\n unmapped = self._index.unmapped[ref].n_unmapped\n idx_stats.append((mapped, unmapped, mapped + unmapped))\n except KeyError:\n idx_stats.append((0, 0, 0))\n return idx_stats", "title": "" }, { "docid": "2ff90093f688237dd3c2cc9fd7023d05", "score": "0.4932914", "text": "def describeRasterCoverage(raster, epsg):\n height=raster.size().ysize\n print(height)\n width=raster.size().xsize\n g=raster.geoReference()\n topleft=g.pixel2Coord(ilw.Pixel(1,1))\n topleftx=topleft.x\n toplefty=topleft.y\n bottomright=g.pixel2Coord(ilw.Pixel(width-1,height-1))\n bottomrightx=bottomright.x\n bottomrighty=bottomright.y\n pixelsizeh=abs(((bottomright.x-topleftx)/(width-1)/2))\n topleftx=topleft.x - pixelsizeh\n toplefty=topleft.y + pixelsizeh #this might not work in the southern emisphere\n bottomrightx=bottomright.x + pixelsizeh\n bottomrighty=bottomright.y - pixelsizeh #this might not work in the southern emisphere\n name=raster.name().split('.')[0]\n georeference=\"code=georef:type=corners,csy=epsg:\"+str(epsg)+\",envelope=\"+str(topleftx)+\" \"+str(toplefty)+\" \"+str(bottomrightx)+\" \"+str(bottomrighty)+\",gridsize=\"+str(width)+\" \"+str(height)+\",cornerofcorners=yes,name=\"+name\n\n return width,height,name,georeference", "title": "" }, { "docid": "2fee33757aa21281a1228ebf7f96a393", "score": "0.49326706", "text": "def compute_coverage_statistics( self ):\n import pandas\n \n def compute_coverages( genes, exons, cds, sequences ):\n # the .reset_index() is used here to turn the analysis column back into a normal column.\n # (otherwise it is an 'index' and behaves differently)\n result = {\n \"genes\": compute_genome_bases_covered( genes, sequences ).reset_index(),\n \"exons\": compute_genome_bases_covered( exons, sequences ).reset_index(),\n \"cds\": 
compute_genome_bases_covered( cds, sequences ).reset_index()\n }\n return result\n \n def build_single_table( coverages ):\n # Now build a single table\n result = coverages['genes'][['analysis', 'sequence_length']]\n for what in [ 'genes', 'exons', 'cds' ]:\n result = pandas.merge(\n result,\n coverages[what][['analysis', 'bases_covered', 'proportion' ]],\n left_on = 'analysis',\n right_on = 'analysis'\n )\n result.rename(\n columns = {\n \"bases_covered\": \"%s:bases_covered\" % what,\n \"proportion\": \"%s:proportion_covered\" % what\n },\n inplace = True\n )\n return result\n\n coverages = compute_coverages( self.m_genes, self.m_exons, self.m_cds, self.m_sequences )\n return build_single_table( coverages )", "title": "" }, { "docid": "ea25746b7511a6605a1d2692a39d1e8c", "score": "0.49295852", "text": "def analyze(self, iou_threshold):\n # if IoU of two regions is less than threshold below - we treat them as non-intersecting\n iou_precision_threshold = 0.05\n gt_to_found = [np.nonzero(self.__iou_table[i] > iou_precision_threshold)[0] for i in\n range(self.__gt_boxes_count)]\n found_to_gt = [np.nonzero(self.__iou_table[:, j] > iou_precision_threshold)[0] for j in\n range(self.__found_boxes_count)]\n\n one_to_ones = []\n one_to_manys = [] # 1 gt <-> many found\n many_to_ones = [] # many gt <-> 1 found\n for gt_index, indices in enumerate(gt_to_found):\n if len(indices) == 1:\n found_area_index = indices[0]\n inverse_indices = found_to_gt[found_area_index]\n if len(inverse_indices) == 1:\n # 1 <-> 1 match\n one_to_ones.append([gt_index, found_area_index])\n elif len(indices) > 1:\n # 1 <-> many\n if all(len(found_to_gt[index]) == 1 for index in indices):\n one_to_manys.append([gt_index, indices])\n\n for found_area_index, inverse_indices in enumerate(found_to_gt):\n if len(inverse_indices) > 1:\n # many <-> 1\n if all(len(gt_to_found[index]) == 1 for index in inverse_indices):\n many_to_ones.append([inverse_indices, found_area_index])\n\n metrics = FtMetrics(all_type_names=self.__all_object_types,\n compute_classification_metrics=self.__are_classification_metrics_computed)\n matched_gt = 0\n matched_found = 0\n iou_summed = 0\n\n # check all 1 <-> 1 matches: they are either TP or FN (if the intersection is too low)\n one_to_ones_iou = [self.__calc_iou(self.__gt_boxes_areas[gt_index], self.__found_boxes_areas[found_index],\n self.__intersections_table[gt_index][found_index])\n for [gt_index, found_index] in one_to_ones]\n match_iou = [(gt_found, iou) for gt_found, iou in zip(one_to_ones, one_to_ones_iou) if iou >= iou_threshold]\n if match_iou:\n one_to_ones, one_to_ones_iou = list(zip(*match_iou))\n else:\n one_to_ones, one_to_ones_iou = [], []\n\n one_to_ones_count = len(one_to_ones_iou)\n metrics.one_to_one = one_to_ones_count\n matched_gt += one_to_ones_count\n matched_found += one_to_ones_count\n iou_summed += sum(one_to_ones_iou)\n metrics.matched_boxes_count += one_to_ones_count\n\n if self.__are_classification_metrics_computed:\n # 1 <-> 1\n confusion_matrix = np.zeros((len(self.__all_object_types), len(self.__all_object_types)), dtype=np.float32)\n for gt_index, found_index in one_to_ones:\n self.__update_confusion_matrix([gt_index], [found_index], confusion_matrix)\n\n # 1 <-> many\n for [gt_index, found_indices] in one_to_manys:\n iou = self.__get_group_to_box_iou(self.__gt_boxes[gt_index], self.__found_boxes[found_indices])\n if iou >= iou_threshold:\n matched_gt += 1\n metrics.one_to_many += 1\n matched_found += len(found_indices)\n iou_summed += iou\n metrics.matched_boxes_count 
+= 1\n if self.__are_classification_metrics_computed:\n self.__update_confusion_matrix([gt_index], found_indices, confusion_matrix)\n\n # many <-> 1\n for [gt_indices, found_index] in many_to_ones:\n iou = self.__get_group_to_box_iou(self.__found_boxes[found_index], self.__gt_boxes[gt_indices])\n if iou >= iou_threshold:\n matched_gt += len(gt_indices)\n metrics.many_to_one += len(gt_indices)\n matched_found += 1\n iou_summed += iou\n metrics.matched_boxes_count += 1\n if self.__are_classification_metrics_computed:\n self.__update_confusion_matrix(gt_indices, [found_index], confusion_matrix)\n\n metrics.tp = matched_gt\n metrics.fn = self.__gt_boxes_count - matched_gt\n metrics.fp = self.__found_boxes_count - matched_found\n\n metrics.average_iou = iou_summed / metrics.matched_boxes_count if metrics.matched_boxes_count > 0 else 0\n metrics.average_precision_by_area, metrics.average_recall_by_area, metrics.average_iou_by_area = \\\n self.__calc_iou_by_area()\n metrics.detection_rate = 1 if metrics.average_iou_by_area > iou_threshold else 0\n metrics.matched_images_count = 1\n\n if self.__are_classification_metrics_computed:\n metrics.confusion_matrix = confusion_matrix\n metrics.all_type_names = self.__all_object_types\n return metrics", "title": "" }, { "docid": "613fc5477b506d71f7b577a41731b4d0", "score": "0.49258015", "text": "def analysis_summary(self):\n try: \n self.aggregated_shares = engine.aggregate_shares(\n self.shared, self.zones)\n except AttributeError: \n pass\n self.lines = analysis.tp_summary(self.loaded_links, self.od_stack)\n self.lines = analysis.analysis_tp_summary(self.lines)\n self.economic_series = analysis.economic_series(self.od_stack, self.lines)", "title": "" }, { "docid": "c3e7b4a72160a8222fb1146363cd1a25", "score": "0.4913883", "text": "def get_visualization_statistics(self):\n healthy_people = 0\n infectious_people = 0\n sick_people = 0\n recovered_people = 0\n dead_people = 0\n\n for i in range(self.persons.shape[0]):\n for j in range(self.persons.shape[1]):\n if self.persons[i, j].state == PersonState.HEALTHY:\n healthy_people += 1\n elif self.persons[i, j].state == PersonState.INFECTIOUS:\n infectious_people += 1\n elif self.persons[i, j].state == PersonState.SICK:\n sick_people += 1\n elif self.persons[i, j].state == PersonState.RECOVERED:\n recovered_people += 1\n elif self.persons[i, j].state == PersonState.DEATH:\n dead_people += 1\n return {\n 'fitness': int(self.fitness),\n 'healthy': healthy_people,\n 'infectious': infectious_people,\n 'sick': sick_people,\n 'recovered': recovered_people,\n 'dead': dead_people,\n 'healthcare': self.healthcare,\n 'hygiene': self.hygiene,\n 'mask': self.mask,\n 'distancing': self.distancing,\n 'curfew': self.curfew,\n 'test_rate': self.test_rate,\n 'quarantine_rules': self.quarantine_rules.value,\n 'isolation_rules': self.isolation_rules.value,\n }", "title": "" }, { "docid": "9172ddb7ef021ba359665a76c1b7bfb5", "score": "0.49116617", "text": "def stats(request):\n avg_provinces_per_zone = Zone.objects \\\n .annotate(num_province=Count('province')) \\\n .aggregate(average=Avg('num_province'))\n avg_provinces_per_zone = avg_provinces_per_zone['average']\n \n avg_municipality_per_zone = Zone.objects \\\n .annotate(num_municipality=Count('province__municipality')) \\\n .aggregate(average=Avg('num_municipality'))\n avg_municipality_per_zone = avg_municipality_per_zone['average']\n \n result = {}\n result['avg_prov_per_zone'] = float(avg_provinces_per_zone)\n result['avg_mun_per_zone'] = float(avg_municipality_per_zone)\n \n 
json_result = json.dumps(result)\n return HttpResponse(json_result, content_type='application/json')", "title": "" }, { "docid": "9d972a400f6a2d50fbe1aa289b8a3356", "score": "0.49031222", "text": "def test_regions(counties):\n res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')\n sam_files = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13_cs.json')\n\n with Resource(res_file) as f:\n meta = f.meta\n\n baseline = meta.loc[meta['county'].isin(counties)].index.values.tolist()\n\n regions = {c: 'county' for c in counties}\n pp = ProjectPoints.regions(regions, res_file, sam_files)\n\n assert sorted(baseline) == pp.sites", "title": "" }, { "docid": "869da9069248a2a954c8a5a6bea3f449", "score": "0.4898461", "text": "def cnt_map(region, table = 'sample', draw = True):\r\n twt_lst = dataset.loadrows(GEOTWEET, ('lat', 'lng'),\r\n ('MBRContains({0}, geo)'.format(dataset.geo_rect(*region)),), table)\r\n lat = list();\r\n lng = list();\r\n for twt in twt_lst:\r\n lat.append(twt['lat'])\r\n lng.append(twt['lng'])\r\n if draw:\r\n x = np.array(lng)\r\n y = np.array(lat)\r\n xmin = x.min()\r\n xmax = x.max()\r\n ymin = y.min()\r\n ymax = y.max()\r\n\r\n plt.hexbin(x,y, gridsize=200, cmap=cm.jet)\r\n plt.axis([xmin, xmax, ymin, ymax])\r\n plt.title(\"Hexagon binning\")\r\n cb = plt.colorbar()\r\n cb.set_label('counts')\r\n\r\n plt.show()\r\n return lat, lng", "title": "" }, { "docid": "cd5da2ba51ccbc9a68f5d39f2643e7ec", "score": "0.489699", "text": "def unmapped(self):\n return self.__unmapped + self.__nocoordinate", "title": "" } ]
5985dba22fcc927115102f2c73371d31
Test equal initialization class methods
[ { "docid": "ad2b04faa22a7acf60ea7cd712f88359", "score": "0.0", "text": "def test_payload_and_api_init_are_equal(self, mock):\n # Camera ID Mocks\n subdomain = self.c_mock_esession['activeBrandSubdomain']\n setup_ee_camera_mock(\n mock, subdomain, 'device_camera.json')\n\n ee_api = self.first_building.eagleeye_api\n camera_id = \"random_id\"\n\n c_api = EagleEyeCamera.from_api(ee_api, camera_id)\n c_payload = EagleEyeCamera.from_list_payload(\n ee_api, self.e_mock_device_list[0])\n\n self.assertEqual(c_api.account_id, c_payload.account_id)\n self.assertEqual(c_api.entity_id, c_payload.entity_id)\n self.assertEqual(c_api.unique_entity_id, c_payload.unique_entity_id)\n self.assertEqual(c_api.name, c_payload.name)\n self.assertEqual(c_api.utc_offset, c_payload.utc_offset)\n self.assertEqual(c_api.timezone, c_payload.timezone)\n self.assertEqual(c_api.permissions, c_payload.permissions)\n self.assertEqual(c_api.guid, c_payload.guid)\n self.assertEqual(c_api.tags, c_payload.tags)\n self.assertEqual(c_api.bridges, c_payload.bridges)", "title": "" } ]
[ { "docid": "0714b55a928f10390dd112e4a71d64e7", "score": "0.76406264", "text": "def test_init(self):\n pass", "title": "" }, { "docid": "e1d913a5d348c714c776c3a28899f32b", "score": "0.7467957", "text": "def test_bad_init(self):", "title": "" }, { "docid": "eaa2604b3e54d4eaa66788fed871de75", "score": "0.74444735", "text": "def testInit(self):", "title": "" }, { "docid": "7f4bd8441b6341f683adf5a15ebd757c", "score": "0.7419418", "text": "def test_constructor(cls, data, expect):\n do_constructor_test2(cls, data, expect)", "title": "" }, { "docid": "7aa901082d35d01ca6714dd5be861065", "score": "0.7300404", "text": "def test_initialization(self):\n\n self.assertEqual(2+2, 4)", "title": "" }, { "docid": "d94c11bc20fd62fe97a55e96bd66eb47", "score": "0.7040414", "text": "def test_sphere_init():\n sphere1 = Sphere()\n sphere2 = Sphere(10)\n\n assert sphere1.radius == 0\n assert sphere1.diameter == 0\n assert sphere2.radius == 10\n assert sphere2.diameter == 20", "title": "" }, { "docid": "25dc78e30f61d756c57b81dc591f13fd", "score": "0.6965814", "text": "def test_baseinit(self):\n b = Base()\n self.assertEqual(b.id, 1)\n b1 = Base()\n self.assertEqual(b1.id, 2)\n b3 = Base(12)\n self.assertEqual(b3.id, 12)", "title": "" }, { "docid": "d4f8eb698b46a2eba74506350d064519", "score": "0.6959611", "text": "def setUpClass(cls):", "title": "" }, { "docid": "d4f8eb698b46a2eba74506350d064519", "score": "0.6959611", "text": "def setUpClass(cls):", "title": "" }, { "docid": "b4eeb4075c1336ca5ef6d47792634eed", "score": "0.6908172", "text": "def test_instantiation(self):\n self.assertIsInstance(self.model1_test, City)\n self.assertIsInstance(self.model2_test, City)\n self.assertTrue(hasattr(self.model1_test, \"state_id\"))\n self.assertTrue(hasattr(self.model1_test, \"name\"))\n self.assertTrue(self.model1_test.id != self.model2_test.id)", "title": "" }, { "docid": "e478a9aa2504d9de78eb631c0d47db6f", "score": "0.6856045", "text": "def test_init(self):\n self.app = Application()\n\n # There are not much we can test except all the objects are under the correct name\n self.assertEqual(NAME, self.app.name)\n self.assertEqual(NAME, self.app.parser.description)\n # The dummy stats client has no awareness of the name. 
Just check the class.\n self.assertIsInstance(self.app.stats, DummyStatsClient)\n self.assertEqual(NAME, self.app.logger.name)\n\n self.assertIsInstance(self.app.boto, Boto)\n self.assertIsInstance(self.app.boto3, Boto3)", "title": "" }, { "docid": "29501a13682790c8d3dce10fe49a1fb1", "score": "0.6838009", "text": "def test_initSetup_1(self):\n\t\tself.assertTrue(True)", "title": "" }, { "docid": "f3b2d6e10065ff6b983c1edd3630c152", "score": "0.68305194", "text": "def classSetUp(self):\r\n assert self.something == True\r\n self.something = False", "title": "" }, { "docid": "4923e25c7a37ce9b6b2beb0505345bed", "score": "0.682388", "text": "def run_test_init():\n\n print()\n print('-----------------------------------------------------------')\n print('Testing the __init__ method of the CoffeePot class.')\n print('-----------------------------------------------------------')\n\n # Test 1\n print('\\nTest 1:')\n pot1 = CoffeePot(12, 7)\n run_test_instance_variables(pot1, 12, 7)\n\n # Test 2\n print('\\nTest 2:')\n pot2 = CoffeePot(10, 0)\n run_test_instance_variables(pot2, 10, 0)\n\n # Test 3\n print('\\nTest 3:')\n pot1 = CoffeePot(8, 8)\n run_test_instance_variables(pot1, 8, 8)", "title": "" }, { "docid": "55f1ac04ec859d9470c6a84455dab8d8", "score": "0.6807664", "text": "def test_init(self):\n c = 3\n bs = BayesianSets(3)\n self.assertEqual(c, bs.c)", "title": "" }, { "docid": "1a9fe0323957aad9aa75d03327bfb448", "score": "0.6797312", "text": "def test_base_init(self):\n baseobj = BaseEagle()\n self.assertEqual(baseobj.input_dict, dict())\n self.assertEqual(baseobj._proj_path, None)\n self.assertEqual(baseobj._run_pattern_match, None)\n self.assertEqual(baseobj._run_similarity, None)\n self.assertEqual(baseobj._run_cloc_metric, None)\n self.assertEqual(baseobj._run_cyclomatic_complexity, None)\n self.assertEqual(baseobj._annotation, None)\n self.assertEqual(baseobj._pattern, None)\n self.assertEqual(baseobj._pattern_seperator, None)\n self.assertEqual(baseobj._delta, None)\n self.assertEqual(baseobj._exclude_extraction, None)\n self.assertEqual(baseobj._cyclo_exclude, None)\n self.assertEqual(baseobj._report_path, None)\n self.assertEqual(baseobj._cloc_args, None)\n self.assertEqual(baseobj._cyclo_args, None)\n self.assertEqual(baseobj._similarity_range, None)\n self.assertEqual(baseobj._report_folder, None)", "title": "" }, { "docid": "747c68ee9479c125964645b887e5a7ff", "score": "0.6782754", "text": "def test___init__(self):\r\n\r\n # Create test set\r\n test_set = []\r\n \r\n # Create expected results test set\r\n res_set = []\r\n \r\n # Run test for all tests in test_set\r\n for i in range(len(test_set)):\r\n \r\n # Test function with inputs and expected outputs\r\n self.assertEqual(\r\n CPP().__init__(*test_set[i]), res_set[i]\r\n )", "title": "" }, { "docid": "869ac22cbdfe6008167fc77f9b9dfcd6", "score": "0.67772764", "text": "def test_init(self):\n b_std = Base()\n b_none = Base(None)\n b_num = Base(99)\n \"\"\"b_float = Base(float('inf'))\n b_str = Base(\"nine\")\n b_list = Base([1, 2.5, \"three\"])\n \"\"\"\n\n self.assertEqual(b_std.id, 1)\n self.assertEqual(b_none.id, 2)\n self.assertEqual(b_num.id, 99)", "title": "" }, { "docid": "bb121acc8c2fdbc5675e78cd9fd472ee", "score": "0.67763835", "text": "def test_init():\n # should work\n version = '1.0.0'\n v = ComparableVersion(version)\n # actually should not happen for proper constructor\n assert v is not None\n\n # both should raise\n with pytest.raises(TypeError):\n _ = ComparableVersion(None)\n assert _ is not None\n _ = 
ComparableVersion(1.0)\n assert _ is not None", "title": "" }, { "docid": "0092eaefbfbc329a328997cbb32b28cc", "score": "0.6741682", "text": "def test_default_init(self):\n c = Connection(network_setup = (\"hurz\", \"bla-if0\"))\n\n # check instance vars\n self.assertEquals(c._login, (\"root\", \"\"))\n self.assertEquals(c._target_if, \"bla-if0\")\n self.assertEquals(c._serial, MockSerialConn.instance)\n self.assertEquals(c.host, \"hurz\")\n\n # check initialisation of serial_conn class\n self.assertEquals(len(MockSerialConn.called[\"__init__\"]), 1)\n self.assertEquals(MockSerialConn.called[\"__init__\"][0], \n (c._login, True, \"HidaV boot on\", None))\n self.assertEquals(len(MockSerialConn.called[\"open\"]), 1)\n self.assertEquals(MockSerialConn.called[\"open\"][0], True)", "title": "" }, { "docid": "9d3ecb8f4c912107cae9a1d31c8a7c03", "score": "0.67136925", "text": "def test_init_with_parameter(self):\n\n r1 = Square(10, 2)\n r2 = Square(2, 10)\n r3 = Square(10, 2)\n self.assertEqual(r3.id, 3)\n Base._Base__nb_objects = 0", "title": "" }, { "docid": "818adbea55c756d3216f2a7f1dbdb34f", "score": "0.6694074", "text": "def test_init():\n a = OdwrotnieClass(name='bartosz')\n assert a._name == 'bartosz'\n assert isinstance(a._name, str)", "title": "" }, { "docid": "6d650645de64f9f447486d3eadba5264", "score": "0.6644136", "text": "def test_init():\n pass", "title": "" }, { "docid": "772608387b361e93a43350625675014d", "score": "0.6643699", "text": "def test_target_initialization():\n\n try:\n Target(name=\"Band gap\", objective=\"asdf\")\n assert False, \"Target class should require that objective be one of Min or Max\"\n except CitrinationClientError:\n pass\n\n # These initializations should not throw an error\n Target(name=\"Band gap\", objective=\"Min\")\n Target(name=\"Band gap\", objective=\"Max\")\n Target(name=\"Band gap\", objective=\"5.0\")", "title": "" }, { "docid": "3153cf8f2f51a6dda156779a9a7e4bba", "score": "0.6618538", "text": "def initializing():\n assert False # pragma: no cover", "title": "" }, { "docid": "bb09c8383147b6a3c00054a1c75d1967", "score": "0.6614038", "text": "def testExistingInitsAreSupported(self, init_type):\n self.assertTrue(self.factory.init_is_supported(init_type))", "title": "" }, { "docid": "d9b250ad1c4004d031b7cfb25b3b1b4e", "score": "0.6601697", "text": "def test_meta_class_init_arg(self):\n k = MyKlass()\n self.assertEqual(MyMeta_Init_Arg['cls'], MyKlass)\n self.assertEqual(MyMeta_Init_Arg['name'], 'MyKlass')\n self.assertEqual(MyMeta_Init_Arg['bases'], (object,))\n self.assertTrue('foo' in MyMeta_Init_Arg['dct'].keys())\n self.assertTrue('barattr' in MyMeta_Init_Arg['dct'].keys())", "title": "" }, { "docid": "f2cafd9a06d9a220310c7bcf741224e0", "score": "0.65965366", "text": "def test_init():\n c = Circle(4)", "title": "" }, { "docid": "e8e5f0664f6d3fbc73a5aaf9e227472c", "score": "0.65912974", "text": "def setUpClass(cls):\n cls.b1 = Base()\n cls.b2 = Base(50)\n cls.b3 = Base(-1)\n cls.b4 = Base(12)\n cls.b5 = Base()", "title": "" }, { "docid": "1335046778d221db2d829eebeeb6cc74", "score": "0.6583081", "text": "def is_initialized(self) -> bool:", "title": "" }, { "docid": "1335046778d221db2d829eebeeb6cc74", "score": "0.6583081", "text": "def is_initialized(self) -> bool:", "title": "" }, { "docid": "d04d25c9b6d61381b999afa556264e5f", "score": "0.6571736", "text": "def test_init(self):\n #Test Rna\n rna_finder = KWordModuleFinder(self.RnaAlignment,RnaAlphabet)\n self.assertEqual(rna_finder.Alignment, self.RnaAlignment)\n 
self.assertEqual(rna_finder.Alphabet, RnaAlphabet)\n self.assertEqual(rna_finder.ModuleDict,{})\n self.assertEqual(rna_finder.ModuleOrder,[])\n #Test Protein\n protein_finder = KWordModuleFinder(self.ProteinAlignment\\\n ,ProteinAlphabet)\n self.assertEqual(protein_finder.Alignment, self.ProteinAlignment)\n self.assertEqual(protein_finder.Alphabet, ProteinAlphabet)\n self.assertEqual(protein_finder.ModuleDict,{})\n self.assertEqual(protein_finder.ModuleOrder,[])", "title": "" }, { "docid": "eb8ae6b3458b4475e3607172d0ca7f74", "score": "0.6562143", "text": "def test_init_operator(self):\n op1 = Operator(self.rand_matrix(4, 4))\n op2 = Operator(op1)\n self.assertEqual(op1, op2)", "title": "" }, { "docid": "ae9beccc99538bdc45c21cb20037623e", "score": "0.6555075", "text": "def test__init__(self):\n self.assertEqual(self.new_acc.acc_username,\"Sydx10\")\n self.assertEqual(self.new_acc.platform_name, \"Snapchat\")\n self.assertEqual(self.new_acc.acc_password, \"syd123\")", "title": "" }, { "docid": "46bd581b69bb80b6be82ceb8ed8f9345", "score": "0.6553592", "text": "def test_constructor(self):\n\n treasure = TreasureGenerator(apl=10)\n self.assertTrue(treasure is not None)", "title": "" }, { "docid": "73b434cc29bb1d516f01aca8807c9925", "score": "0.65489346", "text": "def run_test_init():\n m1t.run_test_init() # This runs OUR tests.\n # -------------------------------------------------------------------------\n # One ADDITIONAL test (or set of tests).\n # -------------------------------------------------------------------------\n p1 = Point(30, 17)\n p2 = Point(50, 80)\n line = Line(p1, p2) # Causes __init__ to run\n\n print(line.start) # Should print Point(30, 17)\n print(line.end) # Should print Point(50, 80)\n print(line.start == p1) # Should print True\n print(line.start is p1) # Should print False\n\n print('The above should print:')\n print(' Point(30, 17)')\n print(' Point(50, 80)')\n print(' True')\n print(' False')", "title": "" }, { "docid": "3b678465826a6873d35d8abf63a1b949", "score": "0.65482336", "text": "def test_constructor(self):\n cls = self.analyzer.get_object(['ClassWithProperties'])\n assert isinstance(cls.constructor, Function)", "title": "" }, { "docid": "c85ca650b4719ca68198ddeeeab62caa", "score": "0.65411264", "text": "def test__init__(self):\n class Klass(Immutable):\n __slots__ = ('a', 'b', 'c')\n def __init__(self):\n super().__init__(**dict(zip(['a', 'b', 'c'], range(3))))\n\n ins = Klass()\n for i, key in enumerate(Klass.__slots__):\n assert getattr(ins, key) == i", "title": "" }, { "docid": "1020f9278cdfaa93e4732c3cd4517798", "score": "0.6530839", "text": "def test_init(self):\n self.assertTrue(hasattr(self.test, \"mouse_is_on\"))\n self.assertTrue(hasattr(self.test, \"hover\"))\n self.assertTrue(hasattr(self.test, \"request_list\"))\n self.assertTrue(hasattr(self.test, \"command_list\"))", "title": "" }, { "docid": "39d1e4b8a393b823fec5ed0de63b0ff3", "score": "0.65272576", "text": "def test(cls):\n pass", "title": "" }, { "docid": "dec901863cec1fbe8df595beb94c78eb", "score": "0.6520808", "text": "def test_init(self) -> None:\r\n f: Fraction = Fraction(3, 4)\r\n # f1: Fraction = Fraction('e', 'w')\r\n self.assertEqual(f.num, 3)\r\n # self.assertNotEqual(f1.num, 3)\r\n self.assertEqual(f.denom, 4)\r\n # self.assertNotEqual(f1.denom, 4)\r", "title": "" }, { "docid": "a266ebd6f36e42ebfef83aff526ba4b7", "score": "0.65105355", "text": "def test_init_arg(self):\n i = City(23)\n self.assertEqual(type(i).__name__, \"City\")\n self.assertFalse(hasattr(i, \"23\"))", "title": "" 
}, { "docid": "e66a5fff93d05d34ca16342b5321b609", "score": "0.65099823", "text": "def test_GarbageTracker_init():\n\n # The code to be tested\n obj = GarbageTracker()\n\n assert obj.enabled is False\n assert obj.ignored is False\n assert obj.leaks_only is False\n assert obj.ignored_type_names == []\n assert obj.garbage == []", "title": "" }, { "docid": "59b4c2dd99e9a27a6c02d5e834eeaeb8", "score": "0.65045655", "text": "def setUpClass(cls):\n pass", "title": "" }, { "docid": "59b4c2dd99e9a27a6c02d5e834eeaeb8", "score": "0.65045655", "text": "def setUpClass(cls):\n pass", "title": "" }, { "docid": "9e421897c5b1ca5df7d1d3d367abaf73", "score": "0.6496038", "text": "def test_constructor(self):\n evaluation = IPETEvaluation()", "title": "" }, { "docid": "355701669fc720efc902d50f9f4a3095", "score": "0.6475686", "text": "def test_init(self):\n\t\tself.assertEqual(self.user1.username, 'us')\n\t\tself.assertEqual(self.user1.login_password, \"jare2000\")", "title": "" }, { "docid": "51f35c96fe367ace0d686a1971bd1021", "score": "0.644975", "text": "def test_instrument_init(self):\n\n assert self.testInst.new_thing\n return", "title": "" }, { "docid": "194b0723090ea0523a38a8555cbf5025", "score": "0.6444901", "text": "def test_initialize_class_no_args(self):\n lwf = LongestWordFinder()\n self.assertEqual(lwf.longest_word, '')\n self.assertEqual(lwf.longest_word_transposed, '')", "title": "" }, { "docid": "e73710781b3866985e72c0ec2be2b92b", "score": "0.64368397", "text": "def test_initial():\n assert True == True", "title": "" }, { "docid": "7f8213504726c0972fce235a1080d255", "score": "0.6429573", "text": "def test_init():\n p = People()\n assert p is not None", "title": "" }, { "docid": "4b85d11fc418ba1c6807c5346a6518b3", "score": "0.6429504", "text": "def test_constructor():\n assert not gc.game_over and \\\n len(gc.board) == 0 and \\\n gc.ROW == 6 and \\\n gc.COL == 7 and \\\n gc.red_turn and \\\n gc.if_red and \\\n gc.circle_size == 90 and \\\n gc.start_point == 50 and \\\n len(gc.falling_circle) == 0 and \\\n gc.blank_size == 100 and \\\n gc.line_size == 20 and \\\n gc.WIN_MOVES == 4 and \\\n not gc.red_win and \\\n not gc.yellow_win and \\\n gc.start_time == 999 and \\\n not gc.scored", "title": "" }, { "docid": "563f2bf6af2a60f1abf4ec6c34e5be63", "score": "0.64257914", "text": "def test_class_creation(self):\n class_directly = create_a_class(True)\n class_indirectly = create_a_class(False)\n self.assertEqual('Bar', class_directly.__name__)\n self.assertTrue('get_i' in class_directly.__dict__.keys())", "title": "" }, { "docid": "51f6ae603162b0c1af50b7e8e69814ee", "score": "0.6424729", "text": "def CheckInitialized(self):\n self.check_initialized()", "title": "" }, { "docid": "51f6ae603162b0c1af50b7e8e69814ee", "score": "0.6424729", "text": "def CheckInitialized(self):\n self.check_initialized()", "title": "" }, { "docid": "933df1938e1939bd46a00cfca1e5b611", "score": "0.64159346", "text": "def test_init(self):\n result = BestForTheCountryStrategy(self.decision, self.member, self.bill)\n\n self.assertEqual(result._name, \"Best for the Country\")\n self.assertEqual(result._decision, self.decision)\n self.assertEqual(result._member, self.member)\n self.assertEqual(result._bill, self.bill)\n self.assertEqual(result._success, False)", "title": "" }, { "docid": "f4da8029cf6b611c9fb608551fd27a31", "score": "0.6414882", "text": "def init_class(self):\n pass", "title": "" }, { "docid": "6d83e08f4713d71d25f4ba67b0c97169", "score": "0.64145124", "text": "def test_init(self):\n # Generates exactly enough 
instances in the population\n for population_size in {1, 2, 10, 253, 7102}:\n GeneticModel.POPULATION_SIZE = population_size\n genetic_model = GeneticModel(game_name=\"SnakeGen-v1\", input_shape=(68,),\n action_space=self.environment.action_space)\n self.assertEqual(len(genetic_model.population), population_size)\n\n # Each instance is a WeightSet\n GeneticModel.POPULATION_SIZE = 10\n genetic_model = GeneticModel(game_name=\"SnakeGen-v1\", input_shape=(21,),\n action_space=self.environment.action_space)\n for instance in genetic_model.population:\n self.assertIs(type(instance), WeightSet)\n\n # Each instance is a unique instance\n for i in range(genetic_model.POPULATION_SIZE):\n for j in range(i + 1, genetic_model.POPULATION_SIZE):\n self.assertIsNot(genetic_model.population[i], genetic_model.population[j])\n\n # Initializes variables correctly\n self.assertEqual(genetic_model.scores, list())\n self.assertEqual(genetic_model.current_weight_set_id, 0)\n self.assertEqual(genetic_model.generation_outcomes, list())\n self.assertEqual(genetic_model.generation_id, 0)", "title": "" }, { "docid": "7fac132982d2afeae2982234af3b85b6", "score": "0.64070696", "text": "def test_cls_and_inst_attr(self):\n b = B()\n self.assertEqual(B.Version, 1.0)\n self.assertEqual(b.Version, 1.0)\n\n b.Version = 2.0\n\n self.assertEqual(B.Version, 1.0)\n self.assertEqual(b.Version, 2.0)", "title": "" }, { "docid": "db3d57cbf37ee951907cbac117eeb09b", "score": "0.64070606", "text": "def test_init(self, hotp):\n hotp.assert_not_called()\n hotp.return_value.verify.assert_not_called()", "title": "" }, { "docid": "cbc6d12b8ea6f06faf3fd8051a658b70", "score": "0.6406841", "text": "def test_PrecisionClass_init(self):\n\n test_case_1 = {'alpha': 0.0, 'cardinality': 'one', 'bias': 'flat'}\n test_case_2 = {'alpha': 0.0, 'cardinality': 'one', 'bias': None}\n test_case_3 = {'alpha': 10.0, 'cardinality': 'one', 'bias': 'flat'}\n\n # test of the normal call\n obj = TimeSeriesPrecision(**test_case_1)\n self.assertEqual(obj.alpha, test_case_1['alpha'])\n self.assertEqual(obj.cardinality, test_case_1['cardinality'])\n self.assertEqual(obj.bias, test_case_1['bias'])\n\n # test of the invalid bias\n with self.assertRaises(Exception):\n obj = TimeSeriesPrecision(**test_case_2)\n\n # test of the invalid alpha\n with self.assertRaises(Exception):\n obj = TimeSeriesPrecision(**test_case_3)", "title": "" }, { "docid": "e21b86ecae4498bcffa5cc860baafe99", "score": "0.6403883", "text": "def test_init():\n universe = make_randomwalk()\n strategy = RandomTrader().run(universe)\n\n assert_result_equal(History(strategy), strategy.history)", "title": "" }, { "docid": "04dc6659cd5281f91cc0857787b33b73", "score": "0.64031184", "text": "def setUpClass(cls):\n\n Base = _Base__nb_objects = 0\n cls.s1 = Square(5)\n cls.s2 = Square(7)\n cls.s3 = Square(2, 2)\n cls.s4 = Square(3, 1, 2)", "title": "" }, { "docid": "94c68c6be8aea24fcca1d0dff5a607e4", "score": "0.63973844", "text": "def setUpClass(cls):\n print(\"setUpClass\")\n print(\"==========\")", "title": "" }, { "docid": "5244f863db8da1b4a4f3f7fc5780173a", "score": "0.6386683", "text": "def test_init_arg(self):\n i = Amenity(23)\n self.assertEqual(type(i).__name__, \"Amenity\")\n self.assertFalse(hasattr(i, \"23\"))", "title": "" }, { "docid": "610e5d8132079795869066aa853516e6", "score": "0.63797104", "text": "def test_init():\n\n # with defaults\n tree = CellTree(nodes, faces)\n\n # with everything specified\n tree = CellTree(nodes, faces, num_buckets=2, cells_per_leaf=1)\n\n # with num_buckets\n tree = 
CellTree(nodes, faces, num_buckets=4)\n\n # with cells_per_leaf\n tree = CellTree(nodes, faces, cells_per_leaf=2)\n\n assert True", "title": "" }, { "docid": "ac09b0ae574ff19d2624fca6a7b53caa", "score": "0.637934", "text": "def test_init(self):\n f = BotFactory('world')\n self.assertEqual(f.world, 'world')", "title": "" }, { "docid": "cb43af066e3ed27bedfc15a0d1193c5c", "score": "0.63784766", "text": "def test_create_initializer_configuration(self):\n pass", "title": "" }, { "docid": "08176561870287d8d7f92c82331233b5", "score": "0.6370063", "text": "def setUpClass(cls):\n print(\"setUpClass\")\n print(\"===========\")", "title": "" }, { "docid": "4078b5cf2b8905f0d25f517f95db1468", "score": "0.634398", "text": "def test_init(self):\n # Ensure that the created_at timestamp is set if not given\n ts = ModuleTimestamp()\n self.assertIsNotNone(ts.created_at)\n self.assertIsNone(ts.started_at)\n self.assertIsNone(ts.finished_at)\n created_at = ts.created_at\n # Ensure created_at and started_at are initialize properly\n ts = ModuleTimestamp(created_at=created_at, started_at=created_at)\n self.assertIsNotNone(ts.created_at)\n self.assertIsNotNone(ts.started_at)\n self.assertIsNone(ts.finished_at)\n self.assertEqual(ts.created_at, created_at)\n self.assertEqual(ts.started_at, created_at)\n # Ensure that ValueError is raised if created_at is None but one of\n # the other two timestamp arguments is not\n with self.assertRaises(ValueError):\n ModuleTimestamp(started_at=created_at)\n with self.assertRaises(ValueError):\n ModuleTimestamp(finished_at=created_at)\n with self.assertRaises(ValueError):\n ModuleTimestamp(started_at=created_at, finished_at=created_at)", "title": "" }, { "docid": "4acc340f00371ec4a826c5450a6971f1", "score": "0.63438714", "text": "def test_init(self, hotp):\n hotp.assert_not_called\n hotp.return_value.verify.assert_not_called()", "title": "" }, { "docid": "3de1818ca511bbc9637af4fc285e1e67", "score": "0.6340431", "text": "def test_dict_init(self):\n self.assertIs(type(self.model3.name), str)\n self.assertEqual(self.model3.name, \"Erwin\")\n self.assertIs(type(self.model3.my_number), int)\n self.assertEqual(self.model3.my_number, 42)\n self.assertIs(type(self.model3.test), str)\n self.assertEqual(self.model3.test, \"test\")\n self.assertIs(type(self.model3.city_id), str)\n self.assertEqual(self.model3.city_id, 'SanFran')\n self.assertIs(type(self.model3.user_id), str)\n self.assertEqual(self.model3.user_id, 'Gove')\n self.assertIs(type(self.model3.description), str)\n self.assertEqual(self.model3.description, 'nice')\n self.assertIs(type(self.model3.number_rooms), int)\n self.assertEqual(self.model3.number_rooms, 7)\n self.assertIs(type(self.model3.number_bathrooms), int)\n self.assertEqual(self.model3.number_bathrooms, 7)\n self.assertIs(type(self.model3.max_guest), int)\n self.assertEqual(self.model3.max_guest, 7)\n self.assertIs(type(self.model3.price_by_night), int)\n self.assertEqual(self.model3.price_by_night, 7)\n self.assertIs(type(self.model3.latitude), float)\n self.assertEqual(self.model3.latitude, 7.7)\n self.assertIs(type(self.model3.longitude), float)\n self.assertEqual(self.model3.longitude, 7.7)\n self.assertIs(type(self.model3.amenity_ids), list)\n self.assertEqual(self.model3.amenity_ids, ['bla', 'bla'])", "title": "" }, { "docid": "ca40e9a2dff9442562a2aaaa005119b1", "score": "0.63285816", "text": "def test_init(self):\n self.assertEqual(self.new_credential.user_name,\"murungi\")\n self.assertEqual(self.new_credential.credential_name,\"my secret\")\n 
self.assertEqual(self.new_credential.credential_password,\"234\")", "title": "" }, { "docid": "dc82627a212bf1bd9e0605efab107e33", "score": "0.6325823", "text": "def setUpClass(cls):\n\t\tprint (\"setUpClass\")\n\t\tprint (\"==========\")", "title": "" }, { "docid": "c1ad0b9a99912da11bf8627393eb9f61", "score": "0.6311597", "text": "def init() -> None:\r\n ...", "title": "" }, { "docid": "460c8b15844b6e11d2cf562aa070100a", "score": "0.6300887", "text": "def test_init(self):\n self.assertEqual(self.new_user.first_name, \"Nicholas\")\n self.assertEqual(self.new_user.last_name, \"Owino\")\n self.assertEqual(self.new_user.phone_number, \"0728671041\")\n self.assertEqual(self.new_user.email, \"[email protected]\")", "title": "" }, { "docid": "1bc969f9971dfd5fd418d221280ae2b9", "score": "0.62954134", "text": "def test_init(mockpatch):\n era5 = fetch.Fetch(\n years=[2008, 2009],\n months=list(range(1, 13)),\n days=list(range(1, 32)),\n hours=list(range(24)),\n variables=[\"total_precipitation\"],\n outputformat=\"netcdf\",\n outputprefix=\"era5\",\n period=\"hourly\",\n ensemble=True,\n statistics=None,\n synoptic=None,\n pressurelevels=\"surface\",\n merge=False,\n threads=2,\n prelimbe=False,\n )\n\n assert era5.months == ALL_MONTHS\n assert era5.days == ALL_DAYS\n assert era5.hours == ALL_HOURS\n\n assert era5.variables == [\"total_precipitation\"]\n assert era5.outputformat == \"netcdf\"\n assert era5.outputprefix == \"era5\"\n assert era5.period == \"hourly\"\n assert era5.ensemble\n assert era5.statistics is None\n assert era5.synoptic is None\n assert era5.pressure_levels == \"surface\"\n assert not era5.merge\n assert era5.threads == 2\n assert not era5.prelimbe\n assert not era5.land\n\n # initializing hourly variable with days=None should result in ValueError\n with pytest.raises(TypeError):\n era5 = initialize(\n variables=[\"temperature\"], period=\"hourly\", days=None, pressurelevels=[1]\n )\n\n # initializing monthly variable with days=None returns fetch.Fetch object\n era5 = initialize(\n variables=[\"temperature\"], period=\"monthly\", days=None, pressurelevels=[1]\n )\n assert isinstance(era5, fetch.Fetch)\n\n with pytest.raises(_request_size.TooLargeRequestError):\n initialize(\n land=True, variables=[\"skin_temperature\"], ensemble=False, splitmonths=False\n )\n\n with pytest.raises(ValueError, match=\"are not compatible\"):\n initialize(merge=True, splitmonths=True)", "title": "" }, { "docid": "09368c5c450c73c84d9cb94b3eed9626", "score": "0.62915295", "text": "def setUpClass(cls):\n Base._Base__nb_objects = 0\n cls.s1 = Square(10)\n cls.s2 = Square(5, 5)\n cls.s3 = Square(1, 2, 3)\n cls.s4 = Square(4, 5, 6, 7)", "title": "" }, { "docid": "91f2443a51be59b2ff15d3eebeb39bfd", "score": "0.6290979", "text": "def test_init(self):\n\t\tself.assertEqual(self.new_user.user_name, \"Newuser\")\n\t\tself.assertEqual(self.new_user.password, \"567\")", "title": "" }, { "docid": "dc5182acee61137d8313fcdf8aae83ca", "score": "0.6285436", "text": "def test_student_init(self):\n\n student1 = Students('10440989', 'Mrunal, S', 'SFEN')\n\n self.assertEqual(student1.name, 'Mrunal, S')\n self.assertEqual(student1.major, 'SFEN')\n self.assertEqual(student1.cwid, '10440989')\n self.assertEqual(type(student1.courses), type(defaultdict()))\n self.assertEqual(type(student1.remaining_required), type(list()))\n self.assertEqual(type(student1.remaining_electives), type(list()))\n \n student2 = Students('10345678', 'Anirudha, P', 'CS')\n self.assertEqual(student2.name, 'Anirudha, P')\n 
self.assertEqual(student2.major, 'CS')\n self.assertEqual(student2.cwid, '10345678')\n self.assertEqual(type(student2.courses), type(defaultdict()))\n self.assertEqual(type(student2.remaining_required), type(list()))\n self.assertEqual(type(student2.remaining_electives), type(list()))", "title": "" }, { "docid": "ac72173d9df6ed7e73a1b9e8adc5e667", "score": "0.6281774", "text": "def setUp(cls):\n pass", "title": "" }, { "docid": "c92637668a066945e6c1624fbc4d1976", "score": "0.6279683", "text": "def initialize():", "title": "" }, { "docid": "17038fb255d1226d6870f62861f379d9", "score": "0.6260956", "text": "def initialize(self) -> bool:\n return True", "title": "" }, { "docid": "dffcca3e66c9e556a35d6ea61a51a3e6", "score": "0.62569934", "text": "def test_is_singleton():\n obj = TestProcessor()\n obj1 = TestProcessor()\n assert obj is obj1, \"check if Objects are same as this is singleton class\"", "title": "" }, { "docid": "a2a7e98aa0ca49cb43901b72474affb8", "score": "0.6254742", "text": "def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = Operator(circuit)\n target = Operator(target)\n self.assertEqual(op, target)", "title": "" }, { "docid": "82815e8f330845c57d8826a78a373780", "score": "0.6252124", "text": "def setUp(self):\n self.obj = SomeClass()", "title": "" }, { "docid": "cca387f17060d0b19e4331721746d669", "score": "0.62493384", "text": "def test_init_values(self):\n loc1 = Location(\"SLO\", 35.3, -120.7)\n loc2 = Location(\"Paris\", 48.9, 2.4)\n\n self.assertEqual(loc1.name, 'SLO')\n self.assertEqual(loc1.lat, 35.3)\n self.assertEqual(loc1.lon, -120.7)\n self.assertEqual(loc2.name, 'Paris')\n self.assertEqual(loc2.lat, 48.9)\n self.assertEqual(loc2.lon, 2.4)", "title": "" }, { "docid": "02334ff38c62890a8f50a3b4ccb6f317", "score": "0.6246215", "text": "def test_init(self):\n self.assertTrue(isinstance(self.base, BaseModel))", "title": "" }, { "docid": "e01c256d461e107811b3f3bd48edf4bb", "score": "0.6238053", "text": "def test_constructor(self):\n self.assertEqual(self.db.client, self.client)\n self.assertEqual(self.db.database_name, self.test_dbname)\n self.assertEqual(self.db.r_session, self.client.r_session)\n self.assertIsInstance(self.db.result, Result)", "title": "" }, { "docid": "6b5809949845b960f5e47516433c8df1", "score": "0.62339413", "text": "def setUpClass(cls):\n cls._data = data.load('KN03')", "title": "" }, { "docid": "fbbf13a3651bdcf263daa7ca60bd1eee", "score": "0.6231784", "text": "def test_init(self, hotp):\n self.secret.assert_not_called()\n self.window.assert_not_called()\n hotp.assert_not_called()\n hotp.return_value.verify.assert_not_called()\n self.counter.assert_not_called()\n self.method.assert_not_called()", "title": "" }, { "docid": "cd4af922f0d6fb7d9b83f68ce3260a85", "score": "0.6228185", "text": "def test_creation(self):", "title": "" }, { "docid": "a07e5d8f5f05b86c5603e05c457dff13", "score": "0.62276417", "text": "def test_init(self):\n\n self.assertEqual(self.new_credential.application_name, \"application\")\n self.assertEqual(self.new_credential.account_username, \"username\")\n self.assertEqual(self.new_credential.pass_code, \"passcode\")", "title": "" }, { "docid": "3e33c6665350d33ff17d7f7fea10025d", "score": "0.622677", "text": "def test_check_methods(self):\n self.assertTrue(hasattr(BaseModel, \"__init__\"))\n self.assertTrue(hasattr(BaseModel, \"__str__\"))\n self.assertTrue(hasattr(BaseModel, \"save\"))\n self.assertTrue(hasattr(BaseModel, \"to_dict\"))", "title": "" }, { "docid": "e96306b4e146be230b8eb731134c9eb5", 
"score": "0.6218456", "text": "def test_init(self):\n # should create an 8x8 board\n std_board = StandardBoard()\n self.assertEqual(std_board._num_rows, 8)\n self.assertEqual(std_board._num_cols, 8)", "title": "" }, { "docid": "60d38f0a7424961a445d2db1e96592cb", "score": "0.62169164", "text": "def test_instantiate():\n # GIVEN a scout-api and mutacc-auto-api\n\n mutacc_auto_api = 1\n scout_api = 1\n\n # When instatiating the mutacc_upload api\n mutacc_upload_api = UploadToMutaccAPI(scout_api, mutacc_auto_api)\n\n # THEN all attributes should have been set\n assert mutacc_upload_api.scout == scout_api\n assert mutacc_upload_api.mutacc_auto == mutacc_auto_api", "title": "" }, { "docid": "75cf56f13aeb9fbc40db29831a7653cd", "score": "0.62164325", "text": "def test_instance_method():\n assert hasattr(ResSAttnGRUBlock, '__init__')\n assert inspect.signature(ResSAttnGRUBlock.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )\n\n assert hasattr(ResSAttnGRUModel, '__init__')\n assert inspect.signature(ResSAttnGRUModel.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_post_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_pre_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='tknzr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=BaseTknzr,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )", "title": "" }, { "docid": "801be9e54b91396497442b025caecff4", "score": "0.62157035", "text": "def __init__(self):\n self.initialized = False", "title": "" }, { "docid": "801be9e54b91396497442b025caecff4", "score": "0.62157035", "text": "def __init__(self):\n self.initialized = False", "title": "" }, { "docid": "801be9e54b91396497442b025caecff4", "score": "0.62157035", "text": "def __init__(self):\n self.initialized = False", "title": "" }, { "docid": "a4fcae6f82d61e5d2435c0a0c1d1fab5", "score": "0.6212427", "text": "def test_init():\n mmb = MaliciousMacroBot()\n assert 1 == 1", "title": "" } ]
62b727fc7e53feddb778032e637fb7ac
place a phase subplot on a figure, given figure handle and axis position
[ { "docid": "c0ed67a6293866971bdb811fe6115b47", "score": "0.6554235", "text": "def phase_sub_plot(self, ax, ttl_str=\"\", axRect=None, pred=None):\n\n phi = self.tf.phi\n # rotate phases so all are positive:\n negative_phi_indices = np.where(phi < 0)[0]\n phi[negative_phi_indices] += 180.0\n\n Tmin, Tmax = self.set_period_limits()\n axis_limits = [Tmin, Tmax, 0, 90]\n # xticks = self.get_xticks()\n\n [xb, yb] = err_log(\n np.transpose(self.tf.periods),\n self.tf.phi[:, 0],\n self.tf.phi_se[:, 0],\n \"XLOG\",\n axis_limits,\n )\n # figure(ax); #need this?\n # set current axes\n # hax = plt.axes('Position', axRect);\n ax.semilogx(xb, yb, \"b-\")\n ax.semilogx(self.tf.periods, phi[:, 0], \"bo\")\n print(\"OK, now ser linewidth and markersize\")\n # set(lines, 'LineWidth', 1, 'MarkerSize', 7);\n # hold on;\n xb, yb = err_log(\n np.transpose(self.tf.periods),\n self.tf.phi[:, 1],\n self.tf.phi_se[:, 1],\n \"XLOG\",\n axis_limits,\n )\n ax.semilogx(xb, yb, \"r-\")\n ax.semilogx(self.tf.periods, phi[:, 1], \"ro\")\n # set(lines, 'LineWidth', 1, 'MarkerSize', 7);\n if pred is not None:\n plt.plot(pred.tf.periods, pred.tf.phi[:, 0], \"b-\", \"linewidth\", 2)\n plt.plot(pred.tf.periods, pred.tf.phi[:, 1], \"r-\", \"linewidth\", 2)\n\n # (lims_ph);\n ax.set_xlim(axis_limits[0], axis_limits[1])\n ax.set_ylim(axis_limits[2], axis_limits[3])\n title_pos_x = np.log(axis_limits[0]) + 0.1 * (\n np.log(axis_limits[1] / axis_limits[0])\n )\n title_pos_x = np.ceil(np.exp(title_pos_x))\n title_pos_y = axis_limits[2] + 0.8 * (axis_limits[3] - axis_limits[2])\n # ttl_str = f\"$\\phi$ : {self.tf.header.local_station_id}\"\\\n # + \\\"PKD\"#self.tf.Header.LocalSite.SiteID\n ax.text(title_pos_x, title_pos_y, ttl_str, fontsize=14, fontweight=\"demi\")\n # set(gca, 'FontWeight', 'bold', 'FontSize', 11, 'Xtick', xticks);\n ax.set_xlabel(\"Period (s)\")\n ax.set_ylabel(\"Degrees\")", "title": "" } ]
[ { "docid": "58cf15113cf22c51d3d0f51fc0925655", "score": "0.7136438", "text": "def phase_plot(self, pred=None):\n axRect = [0.1446, 0.2150, 0.7604, 0.7100]\n # plt.figure(22, figsize = (8.5, 11), dpi=300)\n fig, ax = plt.subplots()\n if pred is not None:\n self.phase_sub_plot(ax, axRect, pred=pred)\n else:\n self.phase_sub_plot(ax, axRect)", "title": "" }, { "docid": "e1d1bb25dbad0495c71687162a9a0668", "score": "0.6272574", "text": "def plot_phase_plane_closed_loop(self , x_axis = 0 , y_axis = 1 ):\n\n pp = phaseanalysis.PhasePlot( self , x_axis , y_axis )\n \n pp.compute_grid()\n pp.plot_init()\n \n # Closed-loop Behavior\n pp.color = 'r'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n # Open-Loop Behavior\n pp.f = self.plant.f\n pp.ubar = self.plant.ubar\n pp.color = 'b'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n pp.plot_finish()\n \n pp.phasefig.show()\n \n return pp", "title": "" }, { "docid": "2eba54eded877a729ded6f4eabc9403b", "score": "0.6196892", "text": "def plot_phase_plane_closed_loop( self , x_axis = 0 , y_axis = 1 ):\n\n pp = phaseanalysis.PhasePlot( self.plant , x_axis , y_axis )\n \n pp.compute_grid()\n pp.plot_init()\n \n # Closed-loop Behavior\n pp.color = 'b'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n # Open-Loop Behavior\n pp.f = self.fzbar # assume default internal states\n pp.ubar = self.ubar\n pp.color = 'r'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n pp.plot_finish()\n \n return pp", "title": "" }, { "docid": "95616bc09e3da5bf1e54947ff715e4f6", "score": "0.61467636", "text": "def plot_phase_plane_trajectory_closed_loop(self, x_axis=0, y_axis=1):\n \n pp = phaseanalysis.PhasePlot( self , x_axis , y_axis )\n \n pp.compute_grid()\n pp.plot_init()\n \n # Closed-loop Behavior\n pp.color = 'r'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n # Open-Loop Behavior\n pp.f = self.plant.f\n pp.ubar = self.plant.ubar\n pp.color = 'b'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n # Check is trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n traj = self.traj\n \n plt.plot(traj.x[:,x_axis], traj.x[:,y_axis], 'b-') # path\n plt.plot([traj.x[0,x_axis]], [traj.x[0,y_axis]], 'ko') # start\n plt.plot([traj.x[-1,x_axis]], [traj.x[-1,y_axis]], 'rx') # end\n \n pp.plot_finish()\n \n pp.phasefig.show()", "title": "" }, { "docid": "5902392918336a8586ff111d563e7655", "score": "0.6067031", "text": "def __init__(self):\n self.fig = pl.figure(1,figsize=(8,6), dpi=80 , frameon = True , facecolor = '0.75' , edgecolor = 'w')\n self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear') #if you want to add axes on particular place: fig.add_axes([0.15, 0.1, 0.7, 0.3]) where -> [begin , bottom to start axes , width , height ]\n self.separated = True #if we have a list and need to plot the plots separated", "title": "" }, { "docid": "c51955c8d9e6994afc5a75dc7e6b10d6", "score": "0.60600406", "text": "def plot_phase_diagram(param, ax=None, title=None):\n if ax is None:\n ax = plt.gca()\n if title is None:\n title = \"Phase space, {}\".format(param)", "title": "" }, { "docid": "c892b4012ae824d92468a0641d38c565", "score": "0.6002124", "text": "def add_figure(self,sig,index,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(sig)", "title": "" }, { "docid": "d6ebbb232f905e939d791ed00e7b655f", "score": "0.59039545", "text": "def create_figure(self) -> 
None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "title": "" }, { "docid": "c5a6f15d1f44cb0fdac503a3b05f39b7", "score": "0.5901827", "text": "def plot_phase_plane(self , x_axis = 0 , y_axis = 1 ):\n\n pp = phaseanalysis.PhasePlot( self , x_axis , y_axis )\n \n pp.plot()", "title": "" }, { "docid": "78e103c1643c56e1b836a237af599196", "score": "0.5901325", "text": "def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n ticks=plot.get_xticks()\n\n lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")", "title": "" }, { "docid": "9d25c1e35afe6331c111ff9a19ee8f5e", "score": "0.5855845", "text": "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "title": "" }, { "docid": "b4c36bb670d3db5340abd310d4d81b9c", "score": "0.5854985", "text": "def plot_phase_plane_trajectory(self, x_axis=0, y_axis=1):\n \n # Check is trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n traj = self.traj\n \n pp = phaseanalysis.PhasePlot( self , x_axis , y_axis )\n pp.plot()\n\n plt.plot(traj.x[:,x_axis], traj.x[:,y_axis], 'b-') # path\n plt.plot([traj.x[0,x_axis]], [traj.x[0,y_axis]], 'ko') # start\n plt.plot([traj.x[-1,x_axis]], [traj.x[-1,y_axis]], 'rx') # end\n \n plt.draw()\n\n pp.phasefig.tight_layout()\n \n plt.draw()\n plt.show()", "title": "" }, { "docid": 
"d8f568aefda8658dd08d156a2c4c0c3e", "score": "0.58458996", "text": "def plot_phase(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,np.angle(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "title": "" }, { "docid": "24bb1e8d08165fb340173518de9a6786", "score": "0.5829915", "text": "def setup_axes3(fig, rect):\n\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(-95, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi/180., 1.)\n\n tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()\n\n grid_locator1 = angle_helper.LocatorHMS(4)\n tick_formatter1 = angle_helper.FormatterHMS()\n\n grid_locator2 = MaxNLocator(3)\n\n ra0, ra1 = 8.*15, 14.*15\n cz0, cz1 = 0, 14000\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(ra0, ra1, cz0, cz1),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # adjust axis\n ax1.axis[\"left\"].set_axis_direction(\"bottom\")\n ax1.axis[\"right\"].set_axis_direction(\"top\")\n\n ax1.axis[\"bottom\"].set_visible(False)\n ax1.axis[\"top\"].set_axis_direction(\"bottom\")\n ax1.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax1.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax1.axis[\"top\"].label.set_axis_direction(\"top\")\n\n ax1.axis[\"left\"].label.set_text(r\"cz [km$^{-1}$]\")\n ax1.axis[\"top\"].label.set_text(r\"$\\alpha_{1950}$\")\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.9 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. 
So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "title": "" }, { "docid": "61474251a5b5f8428c9b1a35596da431", "score": "0.57825744", "text": "def createFigure(self,numSubplots,figWidth,figHeight):\r\n#\t\tif self.makeTB:\r\n#\t\t\tself.createToolbar(self.vbox)\r\n#\t\tself.vbox.pack_start(self.myTB,False,False)\r\n\t\tself.axisList=[]\r\n\t\tself.axis=None\r\n\t\t# define handles to widgets\r\n\r\n\t\t############## FIGURE\r\n\t\tself.figure = Figure(dpi=60)\t\t\r\n\t\tself.figure.set_facecolor(figBgColour)\r\n\t\tself.figure.set_edgecolor(figBgColour)\r\n\r\n\t\t#self.axis.set_title('Graph')\r\n\r\n\t\tself.canvas = FigureCanvas(self.figure)\r\n\t\tself.canvas.set_size_request(figWidth,figHeight)\r\n\t\tself.canvas.show()\r\n\t\tself.buttonCallback=self.canvas.mpl_connect('button_press_event', self.OnPress)\r\n#\t\tself.canvas.mpl_connect('resize_event', onAutoScale, None, self.axis, self.canvas)\r\n\r\n \r\n\t\t############## AXIS\r\n\t\t#self.axis=self.figure.add_axes(plotPosition,axisbg=axisColour)\r\n\t\tsubplotList=[]\r\n\t\tfor m in range(numSubplots[0]*numSubplots[1]):\r\n\t\t\tsubplotList.append(numSubplots[0]*100 + numSubplots[1] * 10 + m+1)\r\n\r\n\t\tif len(subplotList)==1:\r\n\t\t\tself.axisList.append(self.figure.add_subplot(111,axisbg=axisColour,polar=self.plotPolar))\r\n\t\t\tself.axisList[0].set_position(PLOT_POSITION)\r\n\t\telse:\r\n\t\t\tfor x in subplotList:\r\n\t\t\t\tself.axisList.append(self.figure.add_subplot(x,axisbg=axisColour))\r\n\r\n\t\tself.axis=self.axisList[0]\r\n\r\n\t\t# format each axis correctly\r\n\t\tfor axis in self.axisList:\r\n\t#\t\tself.axis.grid(True,which='major')\r\n\t\t\taxis.grid(True)\r\n\t#\t\tself.axis.grid(True,which='minor',color='r', linestyle='-', linewidth=2)\r\n\t#\t\tself.axis.set_position(plotPosition)\r\n\r\n\t\t\txax=axis.get_xticklabels()\r\n\t\t\tyax=axis.get_yticklabels()\r\n\r\n\t\t\tfor tick in xax:\r\n\t\t\t\ttick.set_fontsize(axisTextSize)\r\n\r\n\t\t\tfor tick in yax:\r\n\t\t\t\ttick.set_fontsize(axisTextSize)\t\t\r\n\r\n\t\t\r\n\t\tself.legendStr=[]\r\n\t\tself.gaList=[]\r\n\r\n\t\t## add cursor function to axis when mouse is over it\r\n#\t\tself.cursor = Cursor(self.axis, useblit=True, color='red', linewidth=1)\r\n\r\n\t\tself.canvas.draw()\r\n\r\n\t\t# plot a transparent rectangle just on axis 1\r\n\t\tcurrXlim=self.axis.get_xlim()\r\n\t\tdx=abs(currXlim[1]-currXlim[0])\r\n\t\tx0=currXlim[0]\r\n\t\tcurrYlim=self.axis.get_ylim()\r\n\t\tdy=abs(currYlim[1]-currYlim[0])\r\n\t\ty0=currYlim[0]\r\n\r\n\t\tself.axis.r1=plotRect(self.axis,self.canvas,(x0,y0),dx,dy,showRect=self.showRect)\r\n\r\n\t\t#self.axis.cla()\r\n\r\n\t\t\r\n\t\t############## TOOLBAR\r\n\t\t# use a custom version of the matplotlib toolbar\r\n#\t\ttoolbar = NavigationToolbar2(self.canvas, self.win)\r\n\t\tself.toolbar = PlotToolbar(self.canvas,self.win,self.axis)\r\n\t\tzoomtoolbar = PlotZoomToolbar(self.canvas,self.win,self.axis,)\r\n\r\n\t\t# make a TB menu\r\n\t\tmenuList=['|FFT|','Normalised |FFT|','|FFT| & arg(FFT)','|T| & <T','Re & Im (T)','Re & Im (1/T - 1)','n & alpha']\r\n\t\tmnuBtn = MenuToolButtonWidget(menuList, icon=gtk.STOCK_SELECT_COLOR, label='FFT')\r\n\t\tmnuBtn.btn.connect(\"clicked\",self.newFFTwin2,0)\r\n\t\tfor m in range(len(menuList)):\r\n\t\t\tmnuBtn.menuItems[m].connect(\"activate\",self.newFFTwin,m)\r\n\r\n\t\tmnuBtn.btn.set_tooltip_text('Take windowed FFT of ALL 
lines.')\r\n\t\tself.toolbar.add(mnuBtn.btn)\r\n\r\n\r\n\r\n\t\tsep=gtk.SeparatorToolItem()\r\n\t\tself.toolbar.insert(sep,1)\r\n\r\n\r\n\t\tbtn6=gtk.ToolButton(gtk.STOCK_CLEAR)\r\n\t\tbtn6.connect(\"clicked\",self.OnClear)\r\n\t\tbtn6.set_label('Clear')\r\n\t\tbtn6.set_tooltip_text('Clear the axis.')\r\n\t\tself.toolbar.insert(btn6,1)\r\n\r\n\t\tbtn0=gtk.ToolButton(gtk.STOCK_SAVE_AS)\r\n\t\tbtn0.connect(\"clicked\",self.OnExport)\r\n\t\tbtn0.set_label('Export')\r\n\t\tbtn0.set_tooltip_text('Export data from a curve.')\r\n\t\tself.toolbar.insert(btn0,1)\r\n\r\n\r\n\t\t# make a TB menu\r\n\t\tfitMenuList=['Linear','Polynomial','Exp decay','Subtract exp']\r\n\t\tfitmnuBtn = MenuToolButtonWidget(fitMenuList, icon=gtk.STOCK_ABOUT, label='Fit')\r\n\t\tfitmnuBtn.btn.connect(\"clicked\",self.fitPolynomial,0)\r\n\t\tfor m in range(len(fitMenuList)):\r\n\t\t\tfitmnuBtn.menuItems[m].connect(\"activate\",self.fitPolynomial,m)\r\n\r\n\t\tfitmnuBtn.btn.set_tooltip_text('Fits a polynomial to data (default is a linear fit).')\r\n\t\tself.toolbar.add(fitmnuBtn.btn)\r\n\r\n\r\n\t\tbtn7=gtk.ToolButton(gtk.STOCK_CONVERT)\r\n\t\tbtn7.connect(\"clicked\",self.getBeamWidth)\r\n\t\tbtn7.set_label('Beamwidth')\r\n\t\tbtn7.set_tooltip_text('Get the beamwidth (fits Gaussian to dy/dx).')\r\n\t\tself.toolbar.add(btn7)\r\n\r\n\t\tbtn8=gtk.ToolButton(gtk.STOCK_EDIT)\r\n\t\tbtn8.connect(\"clicked\",self.editPlotParams)\r\n\t\tbtn8.set_label('Axes')\r\n\t\tbtn8.set_tooltip_text('Edit plot parameters.')\r\n\t\tself.toolbar.add(btn8)\r\n\r\n\t\tbtn9=gtk.ToolButton(gtk.STOCK_PROPERTIES)\r\n\t\tbtn9.connect(\"clicked\",self.editLegend)\r\n\t\tbtn9.set_label('Legend')\r\n\t\tbtn9.set_tooltip_text('Edit legend.')\r\n\t\tself.toolbar.add(btn9)\r\n\r\n#\t\tself.toolbar.set_style(gtk.TOOLBAR_BOTH) # make toolbar icons and labels visible\r\n\r\n\t\tif self.makeTB:\r\n\t\t\tself.vbox.pack_start(self.toolbar,False,False)\r\n\r\n\t\tself.vbox.pack_start(self.canvas,True,True)\r\n\t\tself.vbox.pack_start(zoomtoolbar,False,False)\r\n\r\n\t\t####### Line selector/axis alteration toolbar\r\n\t\thbox=gtk.HBox(homogeneous=False, spacing=0)\r\n\r\n\t\tparamNames = ['Line:']\r\n\t\tparamTypes = ['cmb']\r\n\t\tparamDefaultValues = [[]]\r\n\r\n\t\tparamBox = ParamWidget(paramNames,paramTypes,paramDefaultValues)\r\n\t\tself.cmbBox = paramBox.objectList[0]\r\n#\t\tself.cmbBox.connect('changed',self.line_changed)\r\n\r\n\t\tself.hideBtn = gtk.ToggleToolButton(gtk.STOCK_NO)\r\n\t\tself.hideBtn.set_tooltip_text('Hide')\r\n\t\tself.hideBtn.connect('clicked',self.toggle_line)\r\n\t\tparamBox.table.attach(self.hideBtn,0,1,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\t\t\r\n\t\tself.colourBtn = gtk.ToolButton(gtk.STOCK_COLOR_PICKER)\r\n\t\tself.colourBtn.set_tooltip_text('Colour')\r\n\t\tself.colourBtn.connect('clicked',self.change_colour)\r\n\t\tself.color=gtk.gdk.Color(red=0,green=0,blue=1)\r\n\r\n\t\tparamBox.table.attach(self.colourBtn,1,2,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\t\t\r\n\t\tself.cmbStyle = gtk.combo_box_new_text()\r\n\r\n\t\tfor style in STYLES:\r\n\t\t\tself.cmbStyle.append_text(style)\r\n\t\tself.cmbStyle.set_active(0)\r\n#\t\tself.style.set_tooltip_text('Line style')\r\n\t\tself.cmbStyle.connect('changed',self.change_style)\r\n\r\n\t\tparamBox.table.attach(self.cmbStyle,2,3,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\r\n\t\tself.removeBtn = 
gtk.ToolButton(gtk.STOCK_DELETE)\r\n\t\tself.removeBtn.set_tooltip_text('Remove')\r\n\t\tself.removeBtn.connect('clicked',self.remove_line)\r\n\r\n\t\tparamBox.table.attach(self.removeBtn,3,4,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\r\n\r\n\t\thbox.pack_start(paramBox.frame,False,False)\r\n\r\n\t\tparamNames = ['Axis:','Left-click sets:']\r\n\t\tparamTypes = ['lbl','cmb']\r\n\t\tparamDefaultValues = ['',['Nothing','Window left','Window right','Axis left','Axis right','Plots point']]\r\n\r\n\t\tparamBox = ParamWidget(paramNames,paramTypes,paramDefaultValues)\r\n\t\thbox.pack_start(paramBox.frame,False,False)\r\n\t\t\r\n\t\tself.cmbBtn = paramBox.objectList[1]\r\n\t\tself.cmbBtn.set_active(0)\r\n\t\tself.cmbBtn.connect(\"changed\", self.onModeChanged)\r\n\r\n\t\thbox.show_all()\r\n\r\n#\t\tself.canvas.mpl_connect('axes_enter_event', self.enter_axes)\r\n#\t\tself.canvas.mpl_connect('axes_leave_event', self.leave_axes)\r\n\r\n\t\tif self.makeTB:\r\n#\t\t\tself.connectToolbar()\r\n\t\t\tself.vbox.pack_start(hbox,False,False)", "title": "" }, { "docid": "1c8e53066dab6de0e178bc4489fe0cbd", "score": "0.5754971", "text": "def setup_axes2(fig, rect,tmin, tmax,zmin,zmax):\n\n tr =PolarAxes.PolarTransform()\n pi = np.pi\n\n angle_ticks = [(tmin, '%.2f' % tmin), (0,r'$0$'), (tmax, '%.2f' % tmax)]\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n grid_locator2 = MaxNLocator(4)\n\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(tmax, tmin, zmax, zmin),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.95 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. 
So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "title": "" }, { "docid": "7aa6b48822c6dae909ea28e51d6ddce1", "score": "0.5729493", "text": "def init_plot():\n fig = plt.figure(constrained_layout=True, figsize=(7,9), dpi=130)\n gs = fig.add_gridspec(5, 1)\n ax2 = fig.add_subplot(gs[:1, :])\n ax1 = fig.add_subplot(gs[1:, :], projection='3d')\n\n tick_color = (0.2, 0.2, 0.2, 1.0)\n pane_color = (0.12, 0.12, 0.12, 1.0)\n ax1.w_xaxis.set_pane_color(pane_color)\n ax1.w_yaxis.set_pane_color(pane_color)\n ax1.w_zaxis.set_pane_color(pane_color)\n\n ax1.tick_params(axis='x', colors=tick_color)\n ax1.tick_params(axis='y', colors=tick_color)\n ax1.tick_params(axis='z', colors=tick_color)\n ax1.view_init(elev=90, azim=180)\n\n ax1.set_xlim3d(0, 80)\n ax1.set_zlim3d(-2, 5)\n \n return (ax1, ax2)", "title": "" }, { "docid": "54fd915b0cb5ec6049d0d7852e88177b", "score": "0.5721738", "text": "def etio_subplot(df, ax, title, graph_color='skyblue'):\n\n post_dx_histo = histo_dx_includes(df)\n hist_df = pd.DataFrame({\"Dx\": post_dx_histo.index, \"Count\": post_dx_histo.data})\n #hist_df = hist_df.drop(1)\n print(hist_df)\n\n graph_range = range(1,len(hist_df.index)+1)\n ax.hlines(y=graph_range, xmin=0, xmax=hist_df['Count'], color=graph_color)\n ax.plot(hist_df['Count'], graph_range, \"D\", color=graph_color)\n ax.set_yticks(range(1, len(hist_df['Dx'])+1))\n ax.set_yticklabels(hist_df['Dx'], fontsize='10')\n\n ax.set_title(title, fontsize='10')\n return ax", "title": "" }, { "docid": "1d40252125a6eb1e6a0a2a8b9fbcaad1", "score": "0.5705315", "text": "def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):\n\n ps = phase_space[:, (id_begin-1):id_end, :]\n # print(np.shape(ps))\n # ps = ps[numpy.logical_not(numpy.isnan(ps))]\n\n x = ps[0, :, :]\n px = ps[1, :, :]\n y = ps[2, :, :]\n py = ps[3, :, :]\n\n id_on_axis = np.zeros((4, int(id_end-id_begin+1)))\n\n for n in range(int(id_end-id_begin+1)):\n x_this = x[n, :]\n px_this = px[n, :]\n y_this = y[n, :]\n py_this = py[n, :]\n\n # Remove all NAN elements in the phase space array\n x_this = x_this[np.logical_not(np.isnan(x_this))]\n px_this = px_this[np.logical_not(np.isnan(px_this))]\n y_this = y_this[np.logical_not(np.isnan(y_this))]\n py_this = py_this[np.logical_not(np.isnan(py_this))]\n\n ## Plot X\n plt.subplot(2, 2, 1)\n plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)\n plt.ylabel('Position in X/ $\\mu$m', fontsize=10)\n\n ## Plot Y\n plt.subplot(2, 2, 2)\n plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)\n plt.ylabel('Position in Y/ $\\mu$m', fontsize=10)\n\n ## Plot px\n plt.subplot(2, 2, 3)\n plt.plot(zplot[0:len(px_this)]*1e+6, px_this)\n plt.ylabel('Angle in X', fontsize=10)\n\n ## Plot py\n plt.subplot(2, 2, 4)\n plt.plot(zplot[0:len(py_this)]*1e+6, py_this)\n plt.ylabel('Angle in Y', fontsize=10)\n\n\n # plt.xlabel('Longitudianl Direction of the Bunch $s$/ $\\mu$m')\n # plt.title('First Undulator Section')\n # plt.title('Second Undulator Section')\n # plt.title('Third Undulator Section')\n\n id_on_axis[0, n] = np.argmin(np.abs(x_this))\n id_on_axis[1, n] = np.argmin(np.abs(px_this))\n id_on_axis[2, n] = np.argmin(np.abs(y_this))\n id_on_axis[3, n] = np.argmin(np.abs(py_this))\n\n fig = plt.gcf()\n fig.set_size_inches(13.5, 9)\n ax = plt.gca()\n ax.yaxis.get_major_formatter().set_powerlimits((0,1))\n fig.savefig('phase_space_U3_new.png', dpi=100)\n plt.show()\n\n\n s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice\n\n return id_on_axis, s_on_axis", "title": "" }, { "docid": 
"9312648225eee8561b4062b38263c9ed", "score": "0.56240815", "text": "def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax", "title": "" }, { "docid": "2c36323b5701623b493b86c8fcbaed79", "score": "0.561955", "text": "def addPlot(ax, track, contig, nplotted,\n nsubplotted=None,\n nsubplots=None,\n y_min=None,\n y_max=None):\n\n if contig not in track.mData:\n return\n\n # step function\n # datapoint is in window average\n xvals = map(lambda x: (x[1] + x[0]) / 2.0, track.mData[contig])\n yvals = map(lambda x: x[2], track.mData[contig])\n l = len(xvals)\n\n if nsubplots:\n plotnum = nsubplotted\n else:\n plotnum = nplotted\n\n if track.style == \"matrix\":\n\n # unequal window sizes confuse the image. 
Therefore, normalize\n # xvals and yvals to a constant image size\n matrix = pylab.array(yvals)\n matrix.shape = (1, len(yvals))\n\n # make sure that the extent of the image and the plot coincide by\n # using extent\n if nsubplots is not None:\n y_width = float(y_max - y_min) / nsubplots\n extent = (min(xvals), max(xvals), y_min + y_width *\n nsubplotted, y_min + y_width * (nsubplotted + 1))\n else:\n extent = (min(xvals), max(xvals), min(yvals), max(yvals))\n ax.imshow(matrix,\n cmap=track.color_scheme,\n extent=extent,\n interpolation=\"nearest\")\n\n symbol = options.symbols[plotnum % len(options.symbols)]\n plot = ax.plot(xvals, yvals, symbol, lw=2)\n else:\n symbol = options.symbols[plotnum % len(options.symbols)]\n plot = ax.plot(xvals, yvals, symbol)\n\n return plot", "title": "" }, { "docid": "39af4ab187060659943c8bee651831e9", "score": "0.5604717", "text": "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "title": "" }, { "docid": "0ff94d4c8fa60c8550e2b633a9dc619a", "score": "0.5585463", "text": "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "title": "" }, { "docid": "6d94eac6849855a69e53cdb88ca037af", "score": "0.55740106", "text": "def plot_single_spk(spk,subplot = None, spines = ['left', 'bottom'],\n figure_size = (5,5),dpi=600,**kwargs):\n if subplot == None: \n # Creating the figure \n f = plt.figure(figsize=figure_size,dpi=dpi)\n # creating the axes\n ax = f.add_subplot(111)\n else:\n ax = subplot\n #ax.plot(range(-20,44),spk.waveform,**kwargs)\n time_vec = np.linspace(spk.time_edge[0],spk.time_edge[1],spk.waveform.shape[0],endpoint=True)*1000\n ax.plot(time_vec,spk.waveform,**kwargs)\n plt.xlabel('Time (ms)')\n adjust_spines(ax, spines)", "title": "" }, { "docid": "2cedb9f643a132344e9448101e90352e", "score": "0.5564214", "text": "def fig_coh_ph(coh, ph, direc):\n\n colors = plt.cm.cividis(np.linspace(0, 1, coh.shape[0]))\n\n if coh.ndim > 1:\n f, (ax1, ax2) = plt.subplots(1, 2)\n for i, (co, p) in enumerate(zip(coh, ph)):\n ax1.plot(direc, co, c=colors[i])\n ax2.plot(direc, p*180./np.pi, c=colors[i])\n ax1.set_ylabel('Coherence')\n ax1.set_ylim((0, 1.))\n 
ax2.set_ylabel('Phase')\n ax1.set_xlabel('Angle from H1')\n ax2.set_xlabel('Angle from H1')\n plt.tight_layout()\n\n else:\n plt.figure()\n plt.subplot(121)\n plt.plot(direc, coh, c=colors[0])\n plt.ylim((0, 1.))\n plt.subplot(122)\n plt.plot(direc, ph*180./np.pi, c=colors[0])\n plt.tight_layout()\n\n return plt", "title": "" }, { "docid": "1cf6a37835c6c6e98904cf53a5ebeb2c", "score": "0.5533898", "text": "def plot_phase_map(self, phasemap, label, filter=0):\r\n\r\n anglemap = self.mkcmap()\r\n\r\n mpl.title(label)\r\n\r\n# imgp1 = mpl.imshow(scipy.ndimage.gaussian_filter(map, filter, order=0, mode='reflect'), cmap=anglemap)#matplotlib.cm.hsv)\r\n\r\n imgp1 = mpl.imshow(phasemap, cmap=anglemap)#matplotlib.cm.hsv)\r\n\r\n imgp1.set_clim=(-np.pi/2.0, np.pi/2.0)\r\n\r\n mpl.colorbar()", "title": "" }, { "docid": "618a7bddb9b80436b4be807848eb2a46", "score": "0.5529327", "text": "def plot(\n self,\n group_delay=False,\n slce=None,\n flim=None,\n dblim=None,\n tlim=None,\n grpdlim=None,\n dbref=1,\n show=False,\n use_fig=None,\n label=None,\n unwrap_phase=False,\n logf=True,\n third_oct_f=True,\n plot_kw={},\n **fig_kw,\n ):\n if use_fig is None:\n fig_kw = {**{\"figsize\": (10, 10)}, **fig_kw}\n fig, axes = plt.subplots(nrows=3, constrained_layout=True, **fig_kw)\n else:\n fig = use_fig\n axes = fig.axes\n\n self.plot_magnitude(\n use_ax=axes[0],\n slce=slce,\n dblim=dblim,\n flim=flim,\n dbref=dbref,\n label=label,\n plot_kw=plot_kw,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n if group_delay:\n self.plot_group_delay(\n use_ax=axes[1],\n slce=slce,\n flim=flim,\n ylim=grpdlim,\n plot_kw=plot_kw,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n else:\n self.plot_phase(\n use_ax=axes[1],\n slce=slce,\n flim=flim,\n plot_kw=plot_kw,\n unwrap=unwrap_phase,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n self.plot_time(\n use_ax=axes[2], tlim=tlim, slce=slce, plot_kw=plot_kw\n )\n\n if show:\n plt.show()\n\n return fig", "title": "" }, { "docid": "692802c6eda4f57238e2e4c4f4ecd714", "score": "0.5517248", "text": "def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) # last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = 
plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n \n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()", "title": "" }, { "docid": "037d8da5ee388d4b4ae154530abf662a", "score": "0.55165887", "text": "def one_data_figure_sep(obs, fig, subplot_spec=None, **kwargs):\n if subplot_spec is None:\n gs = gridspec.GridSpec(2,1,height_ratios = [3,1], hspace=0)\n else:\n gs = gridspec.GridSpecFromSubplotSpec(2, 1, hspace=0,\n subplot_spec=subplot_spec,\n height_ratios = [3,1])\n \n \n spec = pl.Subplot(fig, gs[0,0])\n spec.plot(obs['wavelength'], obs['spectrum'], **kwargs)\n spec.set_ylabel(r'$f_\\lambda \\times \\, C$')\n pl.setp(spec.get_xticklabels(), visible = False)\n fig.add_subplot(spec)\n unc = pl.Subplot(fig, gs[1,0])\n unc.plot(obs['wavelength'], obs['unc'], **kwargs)\n unc.set_ylabel(r'$\\sigma f_\\lambda$')\n unc.set_xlabel(r'$\\lambda (\\AA)$')\n fig.add_subplot(unc)\n return fig, gs", "title": "" }, { "docid": "0ad2faf485398af7b34362e5d2d5db61", "score": "0.5516036", "text": "def plot_init(bottom_left: Point, top_right: Point):\n global figure\n global axes\n\n plt.ion()\n figure, axes = plt.subplots(1, 1)\n axes.set_xlim(bottom_left[0], top_right[0])\n axes.set_ylim(bottom_left[1], top_right[1])\n axes.set_aspect(\"equal\", adjustable=\"box\")", "title": "" }, { "docid": "5cdc237476a3e46973dcd68ba9e4738e", "score": "0.55145377", "text": "def velocity_curve(self, output='test'):\n self.figure = figure() \n self.gridSpec = GridSpec(2, 1)\n self.axes = subplot(self.gridSpec[0, 0]) \n self.axes.plot(self.xs, [-v for v in self.vs], 'ko', alpha=0.5) \n self.axes.set_aspect('auto')\n self.axes = subplot(self.gridSpec[0, 1]) \n self.axes.plot(self.ys, [-u for u in self.us], 'ko', alpha=0.5) \n self.axes.set_aspect('auto')\n self.figure.savefig(output + '_velocity_curve.pdf')", "title": "" }, { "docid": "1616600cb72854411da2becba40dd14b", "score": "0.5501968", "text": "def plot(frame, clipped, auto, lag, threshold, freq, save):\n fig, axes = plt.subplots(4, constrained_layout=True)\n fig.set_size_inches(8.0, 8.0)\n fig.canvas.set_window_title('Excercise 4')\n\n ax_frame, ax_clipped, ax_auto, ax_freq = axes\n\n time = np.linspace(0, frame.size / SAMPLE_RATE, num=frame.size)\n for ax in axes:\n ax.set_xlabel('time [s]')\n ax.set_ylabel('y')\n\n\n ax_frame.plot(time, frame)\n ax_clipped.plot(time, clipped)\n\n ax_auto.plot(auto)\n ax_auto.axvline(threshold, color='black', label='Threshold')\n ax_auto.stem([lag[0]], [lag[1]], linefmt='r-', basefmt=None, label='Lag')\n\n ax_freq.plot(freq[0], 'g-', label='mask-on')\n ax_freq.plot(freq[1], 'r-', label='mask-off')\n\n ax_auto.legend(loc=1)\n ax_freq.legend(loc=0)\n\n ax_frame.set_title('Maskon frame')\n ax_clipped.set_title('Central clipping with 70%')\n ax_auto.set_title('Autocorrelation')\n ax_freq.set_title('Primary frequencies of 
frames')\n\n ax_auto.set_xlabel('frames')\n ax_freq.set_xlabel('frames')\n\n ax_freq.set_ylabel('f0')\n\n if save:\n save_figure(fig, 'ex4')\n else:\n plt.show()", "title": "" }, { "docid": "aa8a01d9c028f1d586bfe6bc9531b8dc", "score": "0.54967505", "text": "def plotstep(subplot,binends,plot,s='--',c='k',a=1,w=1,d=[(0,(1,0.0001))],l=None,r=False):\n ploth(subplot,binends,plot,s,c,a,w,d,l,r)\n plotv(subplot,binends,plot,s,c,a,w,d,r)", "title": "" }, { "docid": "1fdbebe0bfdaf77360bdaf12bede1352", "score": "0.5486353", "text": "def add_subplot(gridRows, gridCols, plotNo):\n pl.subplot(gridRows, gridCols, plotNo)", "title": "" }, { "docid": "090ce377fefc14771d7aad0e2d1d2cc8", "score": "0.5462015", "text": "def init_plot(self, master):\n b = Figure(figsize=(8, 6), dpi=100)\n ac = b.add_subplot(111)\n ac.plot(10, 10)\n ac.set_title('Current tour plot')\n ac.set_xlabel('X axis coordinates')\n ac.set_ylabel('Y axis coordinates')\n ac.grid(True)\n canvas = FigureCanvasTkAgg(b, master)\n canvas.draw()\n canvas.get_tk_widget().grid(row=1, column=1, sticky=W)", "title": "" }, { "docid": "1579cc91e4006460456fea0d268d135f", "score": "0.5459133", "text": "def create_init_fig(wrapped_signal, freq_arr, xcm_arr):\n \n fig, ax = pyplot.subplots(figsize=(10.0, 5.0))\n pyplot.tight_layout()\n fig.suptitle('Frequency = {:.2f}'.format(freq_arr[0]))\n\n ax1 = pyplot.subplot2grid((1, 3), (0, 0))\n ax2 = pyplot.subplot2grid((1, 3), (0, 1), colspan=2)\n\n circle1 = pyplot.Circle((0, 0), 1, fill=None, lw=2, ls='--', alpha=0.3)\n\n ax1.add_patch(circle1)\n ax1.grid()\n\n ticks= numpy.linspace(-1,1, 5, endpoint=True)\n\n ylabels = [-1, -0.5, None, 0.5, 1]\n\n ax1.set_xticks(ticks)\n ax1.set_yticks(ticks)\n ax1.set_yticklabels(ylabels)\n\n\n wrapped_signal_plot = ax1.plot(wrapped_signal.real, \n wrapped_signal.imag, alpha=0.5,\n label=r'$g(t)e^{2\\pi ift}$')[0]\n\n # Move left y-axis and bottim x-axis to centre, passing through (0,0)\n ax1.spines['left'].set_position('center')\n ax1.spines['bottom'].set_position('center')\n\n # Eliminate upper and right axes\n ax1.spines['right'].set_color('none')\n ax1.spines['top'].set_color('none')\n\n\n ax1.set_adjustable('box')\n ax1.set_aspect('equal')\n ax1.set_xlim(-1.1,1.1)\n ax1.set_ylim(-1.1,1.1)\n ax1.legend(loc='upper left', bbox_to_anchor=(0.48, 1.12))\n\n #f_list = numpy.full_like(freqs, None)\n almost_fourier_plot = ax2.plot(freq_arr[0], xcm_arr[0], '-')[0]\n ax2.spines['right'].set_color('none')\n ax2.spines['top'].set_color('none')\n ax2.set_adjustable('box')\n ax2.set_aspect('equal')\n ax2.set_xlabel('Frequency')\n ax2.set_ylabel('xcm')\n\n ax2.set_xlim(0.9,5.1)\n ax2.set_ylim(-0.3,1.1)\n ax2.grid()\n pyplot.tight_layout()\n pyplot.close()\n \n return {'fig': fig, 'WSP': wrapped_signal_plot, 'AF': almost_fourier_plot}", "title": "" }, { "docid": "68708c24506aac839f1223f19903d019", "score": "0.5455409", "text": "def plot_phase_diagram(param, ax=None, title=None):\n if ax is None:\n ax = plt.gca()\n if title is None:\n title = \"Phase space, {}\".format(param) \n \n ax.set(xlabel='v', ylabel='w', title=title)\n \n # Isocline and flow... 
\n xlimit = (-1.5, 1.5)\n ylimit = (-.6, .9)\n plot_vector_field(ax, param, xlimit, ylimit)\n plot_isocline(ax, **param, vmin=xlimit[0],vmax=xlimit[1])\n \n # Plot the equilibria \n eqnproot = find_roots(**param)\n eqstability = [stability(jacobian_fitznagumo(e[0],e[1], **param)) for e in eqnproot] \n for e,n in zip(eqnproot,eqstability):\n ax.scatter(*e, color=EQUILIBRIUM_COLOR[n])\n \n # Show a small perturbation of the stable equilibria...\n time_span = np.linspace(0, 200, num=1500)\n if n[:6] == 'Stable':\n for perturb in (0.1, 0.6):\n ic = [e[0]+abs(perturb*e[0]),e[1]]\n traj = scipy.integrate.odeint(partial(fitzhugh_nagumo, **param),\n y0=ic,\n t=time_span)\n ax.plot(traj[:,0], traj[:,1])\n\n # Legend\n labels = frozenset(eqstability)\n ax.legend([mpatches.Patch(color=EQUILIBRIUM_COLOR[n]) for n in labels], labels, \n loc='lower right')", "title": "" }, { "docid": "2a89a5e7fa3c0744273092ca4d345a64", "score": "0.5451487", "text": "def construct_plot(self, amprtb):\n self.fig, [[self.ax1, self.ax2], [self.ax3, self.ax4]] = \\\n plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection': self.projection})\n ind1, ind2 = amprtb._get_scan_indices(\n self.scanrange, self.timerange, False)\n\n # 10 GHz plot\n stuff = amprtb.plot_ampr_track(\n var='10'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax1, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange, return_flag=True)\n self.ax1.set_title(self.make_title('10', amprtb, ind1, ind2))\n\n # 19 GHz plot\n amprtb.plot_ampr_track(\n var='19'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax2, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax2.set_title(self.make_title('19', amprtb, ind1, ind2))\n\n # 37 GHz plot\n amprtb.plot_ampr_track(\n var='37'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax3, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax3.set_title(self.make_title('37', amprtb, ind1, ind2))\n\n # 85 GHz plot\n amprtb.plot_ampr_track(\n var='85'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax4, fig=self.fig,\n verbose=self.verbose, 
timerange=self.timerange)\n self.ax4.set_title(self.make_title('85', amprtb, ind1, ind2))\n\n # plt.tight_layout()\n return True", "title": "" }, { "docid": "1ffc8bfc0b298ddac9685b09b742202d", "score": "0.5450606", "text": "def make_plot(solution, t, plot_Ts, plot_T1, plot_T2, xaxis, cc, delta_cc, albedo,delta_albedo\\\n , em1, delta_em1, em2, delta_em2):\n\n plt.close('all')\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n \n if xaxis == 'cloud cover':\n inc_cc = []\n for i in range(len(solution[0,:])):\n inc_cc.append(cc + (i*delta_cc)/calcs_per_timestep)\n\n if plot_Ts == 'On': ax1.plot(inc_cc,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_cc,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_cc,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n\n elif xaxis == 'time':\n \n #for i in range(len(solution[0,:])):\n #t.append(i*(timestep/calcs_per_timestep))\n \n if plot_Ts == 'On': ax1.plot(t,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(t,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(t,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'albedo':\n inc_alb = []\n for i in range(len(solution[0,:])):\n inc_alb.append(albedo+(i*delta_albedo)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_alb,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_alb,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_alb,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon1':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em1+(i*delta_em1)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon2':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em2+(i*delta_em2)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n else: raise ValueError('No x axis selected')\n \n fig.suptitle('Global Average Temperature')\n ax1.set_title(f'Final Surface Temperature = {round(solution[0,-1],2)} K')\n ax1.legend()\n\n if xaxis == 'cloud cover': ax1.set_xlabel('Cloud Cover (%)')\n elif xaxis == 'time': ax1.set_xlabel('Time (years)')\n elif xaxis == 'albedo': ax1.set_xlabel('Albedo')\n elif xaxis == 'epsilon1': ax1.set_xlabel(u'\\u03B5\\u2081')\n elif xaxis == 'epsilon2': ax1.set_xlabel(u'\\u03B5\\u2082')\n plt.ylabel('Temerature (K)')\n return fig", "title": "" }, { "docid": 
"7d3aff69ec3cef6e7c48a34a3e84fe81", "score": "0.54406446", "text": "def figure4():\n\n plot_settings = {'y_limits': [-80, -50],\n 'x_limits': None,\n 'y_ticks': [-80, -70, -60, -50],\n 'locator_size': 5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 20,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_4',\n 'legend': ['control', 'apamin'],\n 'legend_size': 8,\n 'y_on': True}\n line_styles = ['-', 'dotted']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate figure 1 (top)\n for ix, g_sk_bar in enumerate([0.3, 0]):\n t, y = solver(100, g_sk_bar=g_sk_bar)\n plt.plot(t, y[:, 0], c='k', linestyle=line_styles[ix])\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 1, 2)\n t1 = 1200\n t, y = solver(t1, t_start=50, duration=t1, i_bias_on=0.33, g_sk_bar=0.03)\n plt.plot(t, y[:, 0], 'k-')\n\n plot_settings['y_limits'] = [-100, 30]\n plot_settings['x_limits'] = [0, t1]\n plot_settings['y_ticks'] = [-80, -60, -40, -20, 0, 20]\n plot_settings['locator_size'] = 10\n plot_settings['scale_size'] = 100\n plot_settings['legend'] = None\n alter_figure(plot_settings, close=True) # Alter plot for publication", "title": "" }, { "docid": "21f3311391fbd10ef3bd034294e7af26", "score": "0.5437429", "text": "def figure3():\n\n plot_settings = {'y_limits': [-75, -50],\n 'x_limits': None,\n 'y_ticks': [-75, -70, -65, -60, -55, -50],\n 'locator_size': 2.5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 2,\n 'x_label': \"\",\n 'scale_loc': 3,\n 'figure_name': 'figure_3',\n 'legend': ['0.1 $\\mu$S', '0.125 $\\mu$S', '0.15 $\\mu$S'],\n 'legend_size': 8,\n 'legend_location': 4,\n 'y_on': True}\n line_styles = ['-', 'dotted', '-.']\n\n plt.figure(figsize=(5, 3))\n plt.subplot(1, 2, 1) # Generate subplot 1 (left)\n for ix, g_t_bar in enumerate([0.1, 0.125, 0.15]):\n t, y = solver(6, g_t_bar=g_t_bar, t_start=0.5)\n plt.plot(t, y[:, 0], c='k', linestyle=line_styles[ix])\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(1, 2, 2) # Generate subplot 2 (right)\n for ix, i_bias_on in enumerate([0, -0.1, -0.2]):\n \"\"\"\n First step (hyperpolarizing)\n \"\"\"\n t1 = 1000\n duration = t1\n\n t, y_hold = solver(t1, duration=duration, i_bias_on=i_bias_on)\n y_0 = y_hold[-1, :] # Create new initial conditions\n\n \"\"\"\n Second step (current pulse)\n \"\"\"\n t1 = 20\n t, y = solver(t1, t_start=0, y_hold=y_0)\n v = y[:, 0]\n\n \"\"\"\n Append the end of the first step onto the second step (for plotting purposes)\n \"\"\"\n len_pre = 100\n t = np.concatenate((np.linspace(-len_pre * np.diff(t)[0], -np.diff(t)[0], len_pre), t))\n v_bar = np.concatenate((y_hold[-len_pre:, 0], v))\n v_bar = v_bar - v_bar[0] + -7.16300325e+01 # Align solution to initial condition of the \"standard simulation\"\n\n plt.plot(t, v_bar, c='k', linestyle=line_styles[ix])\n\n plot_settings['y_ticks'] = []\n plot_settings['y_label'] = \"\"\n plot_settings['x_limits'] = [-1, 20]\n plot_settings['legend'] = ['-72 mV', '-75 mV', '-78 mV']\n plot_settings['scale_size'] = 5\n plot_settings['legend_location'] = 1\n alter_figure(plot_settings, close=True)", "title": "" }, { "docid": "65567a67dd1e34d49866da6e1c087a6b", "score": "0.5430482", "text": "def setplot(plotdata):\n#-------------------------- \n\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n # Figure for q[0]\n plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 
'subplot(211)'\n \n #plotaxes.xlimits = [0.,150.]\n plotaxes.ylimits = [-1.,1.0]\n plotaxes.title = 'Pressure'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 0\n plotitem.plotstyle = '-o'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':2,'markersize':5}\n \n\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(212)'\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = [-1.,1.]\n plotaxes.title = 'Velocity'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 1\n plotitem.plotstyle = '-'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':3,'markersize':5}\n \n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via visclaw.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html'\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata", "title": "" }, { "docid": "1f287e54f31231fcc6cd80fc3bea2345", "score": "0.54172206", "text": "def setup_axes3(fig, rect, component, fig_title=None, title_size=25, inclined=False, ylim=None):\n\n # Angle in degree\n angle_ticks = [(0., r\"$0^\\circ$\"),\n (15., r\"$15^\\circ$\"),\n (30., r\"$30^\\circ$\"),\n (45., r\"$45^\\circ$\"),\n (60., r\"$60^\\circ$\"),\n (75., r\"$75^\\circ$\"),\n (90., r\"$90^\\circ$\")]\n\n if not inclined:\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(90, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi / 180., 1.)\n\n # ploting zenith angle range\n ra0, ra1 = 0., 100.\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n else:\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(-5, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi / 90., 1.)\n\n # ploting zenith angle range\n ra0, ra1 = 50., 100.\n\n grid_locator1 = None\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()\n\n # Angle in minutes\n # grid_locator1 = angle_helper.LocatorHMS(6)\n # tick_formatter1 = angle_helper.FormatterHMS()\n\n grid_locator2 = MaxNLocator(11)\n\n if ylim is not None:\n cz0, cz1 = ylim\n else:\n cz0, cz1 = 0, 50.\n\n grid_helper = floating_axes.GridHelperCurveLinear(tr,\n extremes=(ra0, ra1, cz0, cz1),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None,\n )\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n if fig_title is not None:\n plt.title(fig_title, fontsize=title_size, loc=\"left\")\n\n # adjust axis\n ax1.axis[\"left\"].set_axis_direction(\"bottom\")\n ax1.axis[\"right\"].set_axis_direction(\"top\")\n\n ax1.axis[\"bottom\"].set_visible(False)\n ax1.axis[\"top\"].set_axis_direction(\"bottom\")\n ax1.axis[\"top\"].toggle(ticklabels=True, label=True)\n 
ax1.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax1.axis[\"top\"].label.set_axis_direction(\"top\")\n\n if component == \"horizontal\" or component == \"hori\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\phi}|$ [m]\")\n elif component == \"meridional\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\theta}|$ [m]\")\n elif component == \"vertical-horizontal\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\theta,hor}|$ [m]\")\n elif component == \"vertical-vertical\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\theta.vert}|$ [m]\")\n elif component == \"vertical\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{v}| = |H_{\\theta}| \\cdot \\sin(\\theta)$ [m]\")\n\n ax1.axis[\"left\"].label.set_fontsize(24)\n ax1.axis[\"left\"].major_ticklabels.set_fontsize(22)\n ax1.axis[\"top\"].label.set_text(r\"$\\Theta$\")\n ax1.axis[\"top\"].label.set_fontsize(24)\n ax1.axis[\"top\"].major_ticklabels.set_fontsize(22)\n\n ax1.grid(True)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.9 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "title": "" }, { "docid": "532158d54f2e37d00d97a3fc92c9cbdc", "score": "0.53819036", "text": "def figure6():\n\n plot_settings = {'y_limits': [-100, 30],\n 'x_limits': None,\n 'y_ticks': [-80, -60, -40, -20, 0, 20],\n 'locator_size': 10,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 50,\n 'x_label': \"\",\n 'scale_loc': 3,\n 'figure_name': 'figure_6',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True}\n\n marker = ['o', 's', '^']\n line_styles = ['-', 'dotted', '--']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate subplot 1 (top)\n t, y = solver(240, i_bias_on=2, g_t_bar=0.1 / 10, duration=250)\n plt.plot(t, y[:, 0], 'k-')\n alter_figure(plot_settings)\n\n plt.subplot(2, 1, 2) # Generate subplot 2 (bottom)\n for ix, i_bias_on in enumerate([2, 1.5, 1]):\n t, y = solver(240, i_bias_on=i_bias_on, g_t_bar=0.1 / 10, duration=250)\n t_spike, f = spike_times(t, y[:, 0])\n plt.plot(t_spike[0:-1], f, c='k', linestyle=line_styles[ix], marker=marker[ix], fillstyle='none')\n\n plot_settings['y_limits'] = [0, 200]\n plot_settings['y_ticks'] = [0, 50, 100, 150, 200]\n plot_settings['locator_size'] = 25\n plot_settings['y_label'] = 'Frequency (Hz)'\n plot_settings['legend'] = ['2.0 nA', '1.5 nA', '1.0 nA']\n plot_settings['scale_size'] = 0\n alter_figure(plot_settings, close=True) # Alter figure for publication", "title": "" }, { "docid": "07e630725572fcb8df0a7e23666fd994", "score": "0.5376909", "text": "def plot(self):\n attr = self.Graph[\"root\"]\n if (self.type == 0 or self.type == 1):\n self.subplot_1(attr, 0)\n else:\n self.subplot_2(attr, 0)", "title": "" }, { "docid": "f0befee0bddb77b4c08b9366126ee2c9", "score": "0.5366182", "text": "def rho_plot2(self, pred=None):\n axRect = [0.1446, 0.2150, 0.7604, 0.7100]\n # plt.figure(22, figsize = (8.5, 11), dpi=300)\n fig, ax = plt.subplots()\n if pred is not None:\n self.rho_sub_plot(ax, axRect, pred=pred)\n else:\n self.rho_sub_plot(ax, axRect)", "title": "" }, { "docid": "bf04ebddb841d99c934535aa5f0643bb", "score": "0.5365794", "text": "def plot_section(file_handle, xq, ax, data1, data2,im=250, eta='e',yvar='yh', rep='pcm', xlim=(0,650), ylim=(-2000,0), cmap=plt.cm.bwr, hidex=False, hidey=False, m1=-2, m2=2):\n 
font = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 18,\n }\n\n e = file_handle.variables[eta][-24::,:,:,im].mean(axis=0) # Vertical grid positions\n y = file_handle.variables[yvar][:]\n x,z,q = m6toolbox.section2quadmesh(xq, e, data1, representation=rep) # This yields three areas at twice the model resolution\n #cs = ax.pcolormesh(x, z, q,norm=LogNorm(vmin=1, vmax=110), cmap=cmap)\n cs = ax.pcolormesh(x, z, q, vmin=m1, vmax=m2, cmap=cmap)\n if (len(data2.shape)>1):\n z = 0.5*(e[0:-1,:]+e[1:,:])\n [Y,TMP] = np.meshgrid(y,z[:,0])\n s = ax.contour(Y,z,data2-1000,[37.1],colors='gray',lw=10)\n ax.clabel(s, inline=1, fontsize=16,fmt='%4.2f', manual=[(500,-500)])\n else:\n print 'data2 will not be plotted!'\n #ax.plot(y,data2,'gray',lw=2);\n\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n if not hidex:\n #ax.axes.get_xaxis().set_ticks([])\n ax.set_xlabel('y [km]')\n\n if not hidey:\n #ax.axes.get_yaxis().set_ticks([])\n ax.set_ylabel('Depth [m]')\n\n return cs", "title": "" }, { "docid": "f387bd4fe1ac8ec8f543255bfcf3da5b", "score": "0.5359861", "text": "def make_plot(d0,t0,d1,t1,d2,t2,d3,t3,suptitle,path_out):\n fig0 = plt.figure(1,(10.,7.))\n grid = ImageGrid(fig0, 111, # similar to subplot(111)\n nrows_ncols = (2, 2), # creates 2x2 grid of axes\n axes_pad=0.4, # pad between axes in inch.\n share_all=True, # share axes\n cbar_mode='each')\n \n im = grid[0].pcolor(d0)\n grid[0].set_title(t0)\n grid.cbar_axes[0].colorbar(im)\n grid[0].axis([0,d0.shape[1],0,d0.shape[0]])\n \n im = grid[1].pcolor(d1)\n grid[1].set_title(t1)\n grid.cbar_axes[1].colorbar(im)\n grid[0].axis([0,d1.shape[1],0,d1.shape[0]])\n \n im = grid[2].pcolor(d2)\n grid[2].set_title(t2)\n grid.cbar_axes[2].colorbar(im)\n grid[0].axis([0,d2.shape[1],0,d2.shape[0]])\n \n im = grid[3].pcolor(d3)\n grid[3].set_title(t3) \n grid.cbar_axes[3].colorbar(im)\n grid[0].axis([0,d3.shape[1],0,d3.shape[0]])\n\n \n fig0.suptitle(suptitle,fontsize=18)\n \n fig0.savefig(path_out, dpi=300)\n fig0.clf()\n return", "title": "" }, { "docid": "8d4feb3eceaf6986c45a384d93208bf0", "score": "0.5357031", "text": "def plot_wav(decomp):\n \n plt.figure(figsize=(10,10))\n gs = GridSpec(4, 4)\n \n ax = plt.subplot(gs[0, 0])\n plt.imshow(decomp[0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[1,0])\n plt.imshow(decomp[1][0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[0, 1])\n plt.imshow(decomp[1][1])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[1, 1])\n plt.imshow(decomp[1][2])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[2:,:2])\n plt.imshow(decomp[2][0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[:2,2:])\n plt.imshow(decomp[2][1])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[2:,2:])\n plt.imshow(decomp[2][2])\n plt.xticks([])\n plt.yticks([])\n \n plt.tight_layout()\n \n return", "title": "" }, { "docid": "d62d9399ac5ae4e96d44eda4e70bad00", "score": "0.5356565", "text": "def add_orbit_plot(self, plane='XY', target=None, timelim=False, loc=111, \n ls='g.', title=False, invertX=True):\n import matplotlib.pyplot as plt\n \n if not plane.upper() in ('XY','XZ','YZ'):\n raise ValueError(\"{0} is not a valid plot plane.\".format(plane))\n\n fig, ax = set_target(target, loc=loc, figsize=(5,5))\n \n # Variables to map plot plane to correct variables:\n plane = plane.upper()\n ijk = {'X':0, 'Y':1, 'Z':2}\n i = ijk[plane[0]]\n j = ijk[plane[1]]\n\n if not timelim: \n # Set default time limit if none given.\n timelim = [self.time[0], self.time[-1]]\n iMin=0\n 
iMax=-1\n else:\n # Use timelim to get indices that bound our plots.\n timediff = abs(self.time - timelim[-1])\n iMax = np.nonzero(timediff == timediff.min())[0][0]\n timediff = abs(self.time - timelim[0])\n iMin = np.nonzero(timediff == timediff.min())[0][0]\n \n # Add orbit:\n ax.plot(self['SM_xyz'][iMin:iMax,i], self['SM_xyz'][iMin:iMax,j],ls)\n # Add body:\n add_body(ax,add_night=(plane!='YZ'))\n\n # Axis details:\n ax.axis('equal')\n if plane.upper() in ('XY','XZ') and invertX:\n xmin, xmax = ax.get_xlim()\n if xmin < xmax:\n ax.invert_xaxis()\n ax.set_xlabel('SM %s'%(plane[0]))\n ax.set_ylabel('SM %s'%(plane[1]))\n if title:\n ax.set_title(title)\n grid_zeros(ax)\n set_orb_ticks(ax)\n\n return fig, ax", "title": "" }, { "docid": "3a6c9ca81db227ed28ebebd0fbb3e085", "score": "0.53545755", "text": "def figure7():\n\n plot_settings = {'y_limits': [-100, 30],\n 'x_limits': None,\n 'y_ticks': [-80, -60, -40, -20, 0, 20],\n 'locator_size': 10,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 50,\n 'x_label': \"\",\n 'scale_loc': 3,\n 'figure_name': 'figure_7',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True}\n\n marker = ['o', 's', '^']\n line_styles = ['-', 'dotted', '--']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate subplot 1 (top)\n t, y = solver(250, i_bias_on=2, duration=260)\n plt.plot(t, y[:, 0], 'k-')\n alter_figure(plot_settings)\n\n plt.subplot(2, 1, 2) # Generate subplot 2 (bottom)\n for ix, i_bias_on in enumerate([2, 1.5, 1]):\n t, y = solver(250, i_bias_on=i_bias_on, duration=260)\n t_spike, f = spike_times(t, y[:, 0])\n plt.plot(t_spike[0:-1], f, c='k', linestyle=line_styles[ix], marker=marker[ix], fillstyle='none')\n\n plot_settings['y_limits'] = [20, 40]\n plot_settings['y_ticks'] = [20, 25, 30, 35, 40]\n plot_settings['locator_size'] = 2.5\n plot_settings['y_label'] = 'Frequency (Hz)'\n plot_settings['legend'] = ['2.0 nA', '1.5 nA', '1.0 nA']\n plot_settings['scale_size'] = 0\n plot_settings['legend_location'] = 4\n alter_figure(plot_settings, close=True)", "title": "" }, { "docid": "aaadafbbaffc784c6fb512fea6132a1a", "score": "0.5353839", "text": "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes", "title": "" }, { "docid": "512761ccb592344ea0baef3234cca887", "score": "0.5338101", "text": "def plot_single_hfo(hfo, envelope = False, xlim =[-1,1], cutoff = None, v = True,\n axes = None, figure_size = (15,10),dpi=600,saveplot = None):\n if axes == None:\n # Creating the figure \n fig = plt.figure(figsize=figure_size,dpi=dpi)\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n else:\n ax1 = axes[0]\n ax2 = axes[1]\n ax3 = axes[2]\n\n # number of points\n npoints = hfo.waveform.shape[0]\n time_v = np.linspace(-1,1,npoints,endpoint=True)\n # creating the axes\n \n ax1.plot(time_v,hfo.waveform[:,0],'b')\n ax1.plot(time_v[hfo.start_idx:hfo.end_idx],hfo.waveform[hfo.start_idx:hfo.end_idx,0],'k')\n \n adjust_spines(ax1, ['left'])\n ax1.set_xlim(xlim)\n \n \n \n filt = hfo.waveform[:,1]\n ax2.plot(time_v,filt) \n 
ax2.plot(time_v[hfo.start_idx:hfo.end_idx],filt[hfo.start_idx:hfo.end_idx],'k')\n if envelope:\n env = hfo.waveform[:,2]\n ax4 = ax2.twinx()\n ax4.plot(time_v,env,'g')\n \n\n \n adjust_spines(ax2, ['left', 'bottom'])\n ax2.set_xlim(xlim)\n \n \n hfo.spectrum.plot(cutoff = cutoff, v = v, ax = ax3)\n ax3.set_title('peak freq = ' + str(hfo.spectrum.peak_freq))\n adjust_spines(ax3, ['left', 'bottom'])\n \n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n raise Exception('saveplot should be a string')\n plt.draw()", "title": "" }, { "docid": "c004d67aced8337bb906198c5c2d7a33", "score": "0.5334133", "text": "def despine(fig=None, ax=None, top=True, right=True,\n left=False, bottom=False):\n if fig is None and ax is None:\n axes = plt.gcf().axes\n elif fig is not None:\n axes = fig.axes\n elif ax is not None:\n axes = [ax]\n\n for ax_i in axes:\n for side in [\"top\", \"right\", \"left\", \"bottom\"]:\n ax_i.spines[side].set_visible(not locals()[side])", "title": "" }, { "docid": "9e5a517193e1e48d138cb3fbec021c3c", "score": "0.5332845", "text": "def plot_phase_advance(self, combined=True):\n raise NotImplementedError('Plotting Phase Advance Shift is not Implemented yet.')\n #TODO: reimplement the phase-advance shift calculations (if needed??)\n LOG.debug(\"Plotting Phase Advance\")\n tw = self.mad_twiss\n pa = self._phase_advance\n dpa = self._dphase_advance\n phase_advx = np.append(pa['X'].iloc[0, -1] + tw.Q1, pa['X'].values.diagonal(offset=-1))\n dphase_advx = np.append(dpa['X'].iloc[0, -1], dpa['X'].values.diagonal(offset=-1))\n phase_advy = np.append(pa['Y'].iloc[0, -1] + tw.Q2, pa['Y'].values.diagonal(offset=-1))\n dphase_advy = np.append(dpa['Y'].iloc[0, -1], dpa['Y'].values.diagonal(offset=-1))\n phase_adv = tw[[\"S\"]].copy()\n phase_adv['MUX'] = np.cumsum(phase_advx + dphase_advx) % 1 - .5\n phase_adv['MUY'] = np.cumsum(phase_advy + dphase_advy) % 1 - .5\n\n title = 'Phase'\n pstyle.set_style(self._plot_options.style, self._plot_options.manual)\n\n if combined:\n ax_dx = phase_adv.plot(x='S')\n ax_dx.set_title(title)\n pstyle.small_title(ax_dx)\n pstyle.set_name(title, ax_dx)\n pstyle.set_yaxis_label('phase', 'x,y', ax_dx, delta=False)\n ax_dy = ax_dx\n else:\n ax_dx = phase_adv.plot(x='S', y='MUX')\n ax_dx.set_title(title)\n pstyle.small_title(ax_dx)\n pstyle.set_name(title, ax_dx)\n pstyle.set_yaxis_label('phase', 'x', ax_dx, delta=False)\n\n ax_dy = phase_adv.plot(x='S', y='MUY')\n ax_dy.set_title(title)\n pstyle.small_title(ax_dy)\n pstyle.set_name(title, ax_dy)\n pstyle.set_yaxis_label('phase', 'y', ax_dy, delta=False)\n\n for ax in (ax_dx, ax_dy):\n self._nice_axes(ax)\n ax.legend()", "title": "" }, { "docid": "0e64bb029a4a8935091c8c7332371109", "score": "0.5332609", "text": "def figure1():\n\n plot_settings = {'y_limits': [-80, -50],\n 'x_limits': None,\n 'y_ticks': [-80, -70, -60, -50],\n 'locator_size': 5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 20,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_1',\n 'legend_size': 8,\n 'legend': None,\n 'y_on': True}\n\n t, y = solver(100) # Integrate solution\n plt.figure(figsize=(5, 2)) # Create figure\n plt.plot(t, y[:, 0], 'k-') # Plot solution\n\n \"\"\"\n Annotate plot with figures\n \"\"\"\n plt.gca().annotate('fAHP', xy=(13.5, -65), xytext=(17, -60),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n plt.gca().annotate('ADP', xy=(15.5, -66), xytext=(25, -65),\n arrowprops=dict(facecolor='black', shrink=0, 
headlength=10, headwidth=5, width=1), )\n plt.gca().annotate('mAHP', xy=(38, -77), xytext=(43, -72),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n alter_figure(plot_settings, close=True) # Alter figure for publication", "title": "" }, { "docid": "99cc1cf2a1546120df3eb60f11948939", "score": "0.5326411", "text": "def plot_control_points(self, fig, ax, linewidth=1.25, linestyle='-.', color='red', markersize=5, markerstyle='o'):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n Px = np.real(self.P)\n u = np.linspace(0, 1, Px.size)\n line, = ax.plot(u, Px[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n Px, Py = np.real(self.P)\n line, = ax.plot(Px, Py)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n Px, Py, Pz = np.real(self.P)\n line, = ax.plot(Px, Py, Pz)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 2 or 3')\n\n return fig, ax", "title": "" }, { "docid": "0e5fdd43ff96ae31463262f84020335a", "score": "0.5320163", "text": "def show_phase_space(phsp, rtype1, rtype2, num=None):\n mr1, mr2 = phsp_edge(phsp, rtype1, rtype2)\n mr1_range = max(mr1) - min(mr1)\n mr2_range = max(mr2) - min(mr2)\n height = int(7. * mr1_range / mr2_range)+1 if mr1_range < mr2_range else\\\n int(7. 
* mr2_range / mr1_range)+1\n plt.figure(num=num, figsize=(7, height))\n plt.plot(mr1, mr2, linestyle='-', color='blue')\n plt.gca().set_xlabel(r'$m^{2}_{\\mathrm{' + rtype1 + r'}}\\ (GeV^{2}/c^{4})$')\n plt.gca().set_ylabel(r'$m^{2}_{\\mathrm{' + rtype2 + r'}}\\ (GeV^{2}/c^{4})$')\n plt.axis('equal')\n plt.tight_layout()\n plt.xlim(0, 1.05*max(mr1))\n plt.ylim(0, 1.05*max(mr2))\n plt.grid()\n if num is None:\n plt.show()", "title": "" }, { "docid": "91a2325b23c73ed3df9c76a9c0f174e4", "score": "0.5319564", "text": "def panel_wrapper(_df, function, name, args=()):\n fig = plt.figure()\n fig.set_figheight(fig.get_figheight()*3)\n fig.set_figwidth(fig.get_figwidth()*3)\n for i, pft in enumerate(pft_order):\n print(pft)\n ax = fig.add_subplot(3, 3, i+1)\n function(_df.loc[(_df.pft==pft), :], ax, *args)\n plt.tight_layout()\n plt.savefig('../../doc/paper/%s' % name)\n return", "title": "" }, { "docid": "b2b534625af6db987445f05c737e42f4", "score": "0.53185564", "text": "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "title": "" }, { "docid": "c26fabeb36d87ca80815f74bddd22860", "score": "0.5306768", "text": "def test_plot_with_axes_or_figure(img_3d_mni):\n figure = plt.figure()\n plot_img(img_3d_mni, figure=figure)\n ax = plt.subplot(111)\n plot_img(img_3d_mni, axes=ax)\n plt.close()", "title": "" }, { "docid": "bee278c80779028a78b2238fcbb85a52", "score": "0.5304335", "text": "def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None", "title": "" }, { "docid": "b720fc33ab52541c63f66c8d4c406ef2", "score": "0.5290945", "text": "def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross 
hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)", "title": "" }, { "docid": "7f7c0b50d6eb2d9be97fc6dc3ff1efaa", "score": "0.52909386", "text": "def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')", "title": "" }, { "docid": "431d62bfb1cf8fe3d6690816a49087e7", "score": "0.52879995", "text": "def plot_channels(dat, chanaxis=-1, otheraxis=-2):\n ax = []\n n_channels = dat.data.shape[chanaxis]\n for i, chan in enumerate(dat.axes[chanaxis]):\n if i == 0:\n a = plt.subplot(10, n_channels / 10 + 1, i + 1)\n else:\n a = plt.subplot(10, n_channels / 10 + 1, i + 1, sharex=ax[0], sharey=ax[0])\n ax.append(a)\n x, y = dat.axes[otheraxis], dat.data.take([i], chanaxis)\n a.plot(dat.axes[otheraxis], dat.data.take([i], chanaxis).squeeze())\n a.set_title(chan)\n plt.axvline(x=0)\n plt.axhline(y=0)", "title": "" }, { "docid": "64fafa1a1103cb5238bac2eeb95117e1", "score": "0.5282262", "text": "def bodePlot(H_s,w_range=(0,8),points=800):\n \n w = logspace(*w_range,points)\n h_s = lambdify(s,H_s,'numpy')\n H_jw = h_s(1j*w)\n \n # find mag and phase\n mag = 20*np.log10(np.abs(H_jw))\n phase = angle(H_jw,deg = True)\n \n eqn = Eq(H,simplify(H_s))\n display(eqn)\n \n fig,axes = plt.subplots(1,2,figsize=(18,6))\n ax1,ax2 = axes[0],axes[1]\n \n # mag plot\n ax1.set_xscale('log')\n ax1.set_ylabel('Magntiude in dB')\n ax1.set_xlabel('$\\omega$ in rad/s')\n ax1.plot(w,mag)\n ax1.grid()\n ax1.set_title(\"Magnitude of $H(j \\omega)$\")\n \n # phase plot\n ax2.set_ylabel('Phase in degrees')\n ax2.set_xlabel('$\\omega$ in rad/s')\n ax2.set_xscale('log')\n ax2.plot(w,phase)\n ax2.grid()\n ax2.set_title(\"Phase of $H(j \\omega)$\")\n \n plt.show()", "title": "" }, { "docid": "50e467b2a6f69aeaf296f5d748bb8963", "score": "0.5282042", "text": "def plot_section(t, syn, obs):\n\n fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(8, 8))\n\n for ir in range(syn.shape[0]):\n\n # Synthetic\n ax[0].plot(t, ir + syn[ir, :], 'k')\n ax[0].set_xlabel('Time in s')\n ax[0].set_ylabel('Amplitude')\n ax[0].set_title('Synthetic')\n\n # Noisy observed data\n ax[1].plot(t, ir + obs[ir, :], 'k')\n ax[1].set_xlabel('Time in s')\n ax[1].set_title('Observed')\n ax[1].set_xlim([np.min(t), np.max(t)])\n ax[1].set_ylim([-1, syn.shape[0]+1.5])\n\n plt.tight_layout()\n\n plt.show(block=False)", "title": "" }, { "docid": "b683487e1492ee8b17a0e0052cb96785", "score": "0.52811694", "text": "def _setup_figure(self):\n\n plt.figure(1)\n plt.clf()\n\n # Two main axes\n self._tsne_window = plt.axes([0.05, 0.05, 0.4, 0.4])\n self._main_window = plt.axes([0.05, 0.55, 0.4, 0.4])\n\n # Nine sub axes\n self._sub_windows = []\n for row in 
range(3):\n for col in range(3):\n tt = plt.axes([0.5+0.17*col, 0.75-0.25*row, 0.15, 0.15])\n tt.set_xticks([])\n tt.set_yticks([])\n self._sub_windows.append(tt)\n\n # Register the button click\n self._cid = plt.figure(1).canvas.mpl_connect('button_press_event', self._onclick)\n\n # Text\n plt.figure(1).text(0.6, 0.2, 'Click with 2nd or 3rd mouse button to select image...')\n plt.figure(1).text(0.05, 0.5, 'Click in main image or tSNE plot to find similar cutouts...')\n plt.figure(1).text(0.6, 0.05, 'The tSNE data reduction calculated from data run through {}'.format(self._model_name), fontsize=8)\n\n # Show\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "title": "" }, { "docid": "4bcdb1c08d034aa7483e8e57c993b3ce", "score": "0.5281105", "text": "def initialize_portrait(ax_pos=[0.1, 0.10, 0.8, 0.60]):\n fig = plt.figure(figsize=(1.5 * 5, 1.5 * 7))\n # axes constructor axes([left, bottom, width, height])\n ax = plt.axes(ax_pos)\n return fig, ax", "title": "" }, { "docid": "880823e07f9130787b8de1180820847f", "score": "0.52770257", "text": "def __init__(self, subplot_class, *args, **kwargs):\n import pylab\n self.fig = pylab.figure(*args, **kwargs)\n self.subplot_class = subplot_class", "title": "" }, { "docid": "340e4b3f337d57b76f05d0a2a5a1e424", "score": "0.5273697", "text": "def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)", "title": "" }, { "docid": "134efe2f935b0a2cc603fac8b2fd3dd6", "score": "0.5270395", "text": "def plot_xy(self, subplot = None, 
center_of_gravity = False, marker = \"b-\"):\n if subplot is None:\n plot = plt.subplot(111)\n\n for mass in self.flatten():\n x,y = mass.geometry.project_xy.plot_coordinates\n subplot.plot(x, y, marker)\n\n if center_of_gravity:\n x, y, _ = mass.center_of_gravity_global.as_tuple()\n subplot.plot(x, y,\"*\")\n\n x, y, _ = self.center_of_gravity_global.as_tuple()\n subplot.plot(x, y, \"o\")\n\n return subplot", "title": "" }, { "docid": "3b3ac1024550338ffce697c10b5a9944", "score": "0.52674943", "text": "def on_click(event):\n ax = event.inaxes\n \n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n axis.set_visible(False)\n \n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n event.canvas.draw()", "title": "" }, { "docid": "cf39383c760cabfbc7304279d7394d78", "score": "0.52605635", "text": "def execute(self, fig):\n info = self._params\n renderer = fig._get_renderer()\n with getattr(renderer, \"_draw_disabled\", nullcontext)():\n kwargs = get_tight_layout_figure(\n fig, fig.axes, get_subplotspec_list(fig.axes), renderer,\n pad=info['pad'], h_pad=info['h_pad'], w_pad=info['w_pad'],\n rect=info['rect'])\n if kwargs:\n fig.subplots_adjust(**kwargs)", "title": "" }, { "docid": "94a2915376e2a4a63a01ec56265d8d1c", "score": "0.5259755", "text": "def plot_hypnogram(eeg, stages, srate):\r\n \r\n fig,ax1 = plt.subplots() #Needed for the multiple y-axes\r\n \r\n #Use the specgram function to draw the spectrogram as usual\r\n y_lim = 40;\r\n plt.specgram(eeg/np.sum(eeg),NFFT=512,Fs=srate)\r\n\r\n #Label your x and y axes and set the y limits for the spectrogram\r\n ax1.set_ylim((0,y_lim))\r\n ax1.set_xlim((0,len(eeg)/srate))\r\n plt.title ('Hypnogram')\r\n ax1.set_xlabel('Time in Seconds')\r\n ax1.set_ylabel('Frequency in Hz')\r\n \r\n ax2 = ax1.twinx() #Necessary for multiple y-axes\r\n 
\r\n #Use ax2.plot to draw the hypnogram. Be sure your x values are in seconds\r\n #HINT: Use drawstyle='steps' to allow step functions in your plot\r\n ax2.plot(np.arange(0,len(stages))*30,stages,drawstyle='steps')\r\n\r\n #Label your right y-axis and change the text color to match your plot\r\n ax2.set_ylabel('NREM Stages',color='b')\r\n\r\n \r\n #Set the limits for the y-axis \r\n ax2.set_ylim(0.5,3.5)\r\n ax2.set_xlim((0,len(eeg)/srate))\r\n #Only display the possible values for the stages\r\n ax2.set_yticks(np.arange(1,4))\r\n \r\n #Change the left axis tick color to match your plot\r\n for t1 in ax2.get_yticklabels():\r\n t1.set_color('b')\r\n \r\n #Title your plot \r", "title": "" }, { "docid": "6826884fbd5a6cc0b7830484aa4c2768", "score": "0.52563757", "text": "def panel_axes(self, side, **kwargs):\n return self.figure._add_axes_panel(self, side, **kwargs)", "title": "" }, { "docid": "3e72f63a208ab005ad804fb21a6c4671", "score": "0.5252924", "text": "def __init__(self, refstd, fig=None, rect=111, label='_', srange=(0, 1.5)):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = NP.concatenate((NP.arange(10)/10., [0.95, 0.99]))\n tlocs = NP.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent (in units of reference stddev)\n self.smin = srange[0]*self.refstd\n self.smax = srange[1]*self.refstd\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, NP.pi/2, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1)\n\n if fig is None:\n fig = PLT.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n #ax.axis[\"right\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n l, = self.ax.plot([0], self.refstd, 'k*',\n ls='', ms=10, label=label)\n t = NP.linspace(0, NP.pi/2)\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. 
legend)\n self.samplePoints = [l]", "title": "" }, { "docid": "da4594fbe8bf8ccd7444d29ebb7232e4", "score": "0.525222", "text": "def _plot_arm(self):\n fig, axs = plt.subplots()\n fig.show()\n axs.cla()\n axs.axis([-1, 2.5, -1, 2.5])\n axs.plot([0], [0], 'o')\n config_plots = []\n for t_step in range(0, int(self._t_sim / self._dt) + 1, 1000):\n axs.plot([0, self._x_1[t_step]], [0, self._y_1[t_step]])\n axs.plot(self._x_1[t_step], self._y_1[t_step], 'o')\n axs.plot(\n [self._x_1[t_step], self._x_2[t_step]],\n [self._y_1[t_step], self._y_2[t_step]]\n )\n axs.plot(self._x_2[t_step], self._y_2[t_step], 'o')\n axs.plot(\n [self._x_2[t_step], self._x_e[t_step]],\n [self._y_2[t_step], self._y_e[t_step]]\n )\n axs.plot(self._x_e[t_step], self._y_e[t_step], 'ro')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 0],\n self._obj_coords_plot[t_step, 1, 0], 'g+')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 1],\n self._obj_coords_plot[t_step, 1, 1], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 2],\n self._obj_coords_plot[t_step, 1, 2], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 3],\n self._obj_coords_plot[t_step, 1, 3], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 4],\n self._obj_coords_plot[t_step, 1, 4], 'g.')\n plt.axis('off')\n plt.pause(1 / self._plot_fps)\n fig.canvas.draw()\n image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n config_plots.append(image.reshape(\n fig.canvas.get_width_height()[::-1] + (3, )))\n\n # Draw and create image\n return config_plots", "title": "" }, { "docid": "aa9f810c010342b7845b761638ea6a9d", "score": "0.5251805", "text": "def draw_hogs(img, hog_img, vect, rescale=True, fname=\"hog_plot.png\"):\n \"\"\"\n fig, (ax1, ax2) = plt.subplots(1,2, figsize=(16, 10), sharex=True, sharey=True)\n \n ax1.axis('off')\n ax1.imshow(img, cmap=plt.cm.gray)\n ax1.set_title('Vstupni obrazek')\n ax1.set_adjustable('box-forced')\n \n hog_img_rescaled = exposure.rescale_intensity(hog_img, in_range=(0, 0.02))\n \n ax2.axis('off')\n ax2.imshow(hog_img_rescaled, cmap=plt.cm.gray)\n ax2.set_title('Histogram Orientovanych Gradientu')\n ax2.set_adjustable('box-forced')\n \n if show_plots: plt.show()\"\"\"\n \n fig = plt.figure(figsize=(18, 12))\n\n gs = GridSpec(2, 2)\n ax1 = plt.subplot(gs[0, :1])\n ax2 = plt.subplot(gs[0, -1])\n ax3 = plt.subplot(gs[1, :])\n \n ax1.axis('off')\n ax1.imshow(img, cmap=plt.cm.gray)\n ax1.set_title('Vstupni obrazek - predzpracovany')\n #ax1.set_adjustable('box-forced')\n \n hog_img_rescaled = exposure.rescale_intensity(hog_img, in_range=(0, 0.02)) if rescale else hog_img\n \n ax2.axis('off')\n ax2.imshow(hog_img_rescaled, cmap=plt.cm.gray)\n ax2.set_title('Histogram Orientovanych Gradientu')\n #ax2.set_adjustable('box-forced')\n \n ax3.plot(vect)\n ax3.grid()\n \n if show_plots: \n plt.show()\n plt.savefig(foldername+\"/hog_plots/\"+fname)\n plt.savefig(parentname+\"/hog_plots/\"+fname+\"/\"+childname+\".png\")\n dr.save_image(hog_img, parentname+\"/hog_images/\"+fname+\"/\"+childname+\".png\")\n \n if close_plots:\n plt.close('all')", "title": "" }, { "docid": "aba631436acfe3a4024d817bc08b0761", "score": "0.524861", "text": "def _handle_setup_axis(self, axis_args):\n axis_name = axis_args['name']\n axes_dict = self.server.axes\n\n if axis_name not in [name for name, _ in axes_dict.items()]:\n print \"Adding a new axis:\", axis_name\n axis_count = len(axes_dict)\n newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)\n axes_dict[axis_name] = newaxis\n axes_dict[axis_name].grid(True)\n 
axes_dict[axis_name].set_xlabel(axis_args['x_label'])\n axes_dict[axis_name].set_ylabel(axis_args['y_label'])\n # TODO: support *.set_title(\"Title\")\n if FLAGS.logy:\n axes_dict[axis_name].set_yscale('log', nonposy='clip')\n\n if axis_count != 0:\n # Resize other axes if the above wasn't the first.\n axis_count = len(axes_dict)\n for row,(name, _) in enumerate(axes_dict.items(), 1):\n print name, axis_count, row\n axes_dict[name].change_geometry(axis_count, 1, row)", "title": "" }, { "docid": "099d5bd927fc1ecfdb545ace42fc7364", "score": "0.52477604", "text": "def plot(self, fig=None, ax=None,\n curve=True, control_points=True, frenet_serret=False, axis_off=False, ticks_off=False):\n\n if fig is None:\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$u$ parameter', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('NURBS curve value', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n fig = mpl.pyplot.figure(figsize=(6, 5))\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(azim=-120, elev=30)\n ax.grid(False)\n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n ax.xaxis.pane.set_edgecolor('k')\n ax.yaxis.pane.set_edgecolor('k')\n ax.zaxis.pane.set_edgecolor('k')\n ax.xaxis.pane._alpha = 0.9\n ax.yaxis.pane._alpha = 0.9\n ax.zaxis.pane._alpha = 0.9\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_zlabel('$z$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.zaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.zaxis.get_major_ticks(): t.label.set_fontsize(8)\n ax.xaxis.set_rotate_label(False)\n ax.yaxis.set_rotate_label(False)\n ax.zaxis.set_rotate_label(False)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n if axis_off:\n ax.axis('off')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n\n # Add objects to the plot\n if curve: self.plot_curve(fig, ax)\n if control_points: self.plot_control_points(fig, ax)\n if frenet_serret: self.plot_frenet_serret(fig, ax)\n\n # Set the scaling of the axes\n 
self.rescale_plot(fig, ax)\n\n return fig, ax", "title": "" }, { "docid": "b633825d537d392df3cdf1b4129d5de4", "score": "0.5246851", "text": "def pylab_setup(figure, stream_data, original_width, runlimits, runflags):\n\n def on_key(event):\n \"\"\"on_key\"\"\"\n print('you pressed', event.key, event.xdata, event.ydata)\n\n #def diag_event(event):\n # \"\"\"diag_event\"\"\"\n # print event.name\n # if hasattr(event, 'height'):\n # print event.height, event.width\n # print event.name, event.canvas, event.guiEvent\n\n def pause_axis(unused_event):\n \"\"\"pause_axis\"\"\"\n # stops update of axis when updating lines\n # allows smooth scrolling by user\n print \"PAUSE pause axis\"\n runflags.update_axis = False\n\n def unpause_axis(event):\n \"\"\"unpause_axis\"\"\"\n # continues updating scrolling\n print \"RESUME axis\"\n runflags.update_axis = True\n if hasattr(event, 'height'):\n print event.height, event.width\n new_ratio = float(event.width)/float(event.height)\n default_ratio = 1.3\n print \"BEFORE: \", FLAGS.width\n FLAGS.width = original_width * new_ratio / default_ratio\n print \"AFTER: \", FLAGS.width\n\n figure.canvas.mpl_connect('key_press_event', on_key)\n figure.canvas.mpl_connect('resize_event', unpause_axis)\n figure.canvas.mpl_connect('scroll_event', pause_axis)\n\n timer = figure.canvas.new_timer(interval=500)\n timer.add_callback(plot_refresh_handler, (stream_data, runlimits, runflags))\n timer.start()\n print \"SHOW\"\n pylab.show()\n print \"AFTER\"", "title": "" }, { "docid": "b42cb5531135c03b8cf1794c86f0e1ce", "score": "0.52427804", "text": "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "title": "" }, { "docid": "b7a2cad88d66743ad0188108c75df37f", "score": "0.52424484", "text": "def __init__(self, refstd, fig=None, rect=111, label='_', srange=(0, 1.5)):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = NP.concatenate((NP.arange(10)/10., [0.95, 0.99]))\n tlocs = NP.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent (in units of reference stddev)\n self.smin = srange[0]*self.refstd\n self.smax = srange[1]*self.refstd\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, NP.pi, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1)\n\n if fig is None:\n fig = PLT.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n 
ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n #ax.axis[\"right\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n l, = self.ax.plot([0], self.refstd, 'k*',\n ls='', ms=10, label=label)\n t = NP.linspace(0, NP.pi/2)\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. legend)\n self.samplePoints = [l]", "title": "" }, { "docid": "8ee5ce10dac7fc7fc94535d816ecc7b6", "score": "0.52359736", "text": "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), 
extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, 
color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", "title": "" }, { "docid": "d847aabb20ee7aa5f459b0ada0d36bde", "score": "0.52352893", "text": "def vplane(self, fig=None):\n #TODO more general multi-axis layout...\n figsize = (9, 6.5) # good for letter paper\n if fig is None: fig = plt.figure(figsize=figsize)\n else: fig.set_size_inches(*figsize)\n axkw = dict(frameon = True)\n left, width = 0.075, 0.6\n bh = 0.11\n pad = 0.04\n depth_ax = fig.add_axes((left, 6*pad+4.5*bh, width, bh*2), **axkw)\n axkw.update(dict(sharex = depth_ax))\n pitch_ax = fig.add_axes((left, 5*pad+3.5*bh, width, bh), **axkw)\n buoyancy_ax = fig.add_axes((left, 4*pad+2.5*bh, width, bh), **axkw)\n mass_ax = fig.add_axes((left, 3*pad + 1.5*bh, width, bh), **axkw)\n control_surface_ax = fig.add_axes((left, 2*pad + bh/2, width, bh), **axkw)\n control_mode_ax = fig.add_axes((left, pad, width, bh/2), **axkw)\n # TODO adjust scale and coverage for each axes\n # TODO do this again now that middle labels are removed\n\n self.plot_timeseries('depth', '-', axes=depth_ax)\n self.plot_timeseries('platform_pitch_angle', axes=pitch_ax)\n self.plot_timeseries('platform_mass_position', axes=mass_ax)\n self.plot_timeseries('platform_buoyancy_position', axes=buoyancy_ax)\n self.plot_timeseries('platform_elevator_angle', axes=control_surface_ax)\n # TODO Include another panel with VerticalControl mode (iff present)\n\n # TODO only if engineering data is requested...\n ### add to depth axes ###\n depth_science = {\n 'Depth_Keller/depth': 'c-',\n 'CTD_NeilBrown/depth': 'k-',\n 'Depth_MSI_US300/depth': 'm-'}\n for k, v in depth_science.items():\n try: self.plot_timeseries(k, v, axes=depth_ax)\n except: print('no {0}'.format(k))\n\n depth_engineering = {\n 'VerticalControl/smoothDepthInternal': 'r-',\n 'VerticalControl/depthCmd': 'g-',\n 'VerticalControl/depthErrorInternal': 'g:'}\n for k, v in depth_engineering.items():\n try: self.plot_timeseries(k, 
v, axes=depth_ax)\n except: print('no {0}'.format(k))\n # TODO only if sw debug flag is set \n depth_rate_engineering = {\n 'VerticalControl/depthRateCmd': 'gray',\n 'VerticalControl/depth_rate': 'gray', # XXX why same color?\n }\n for k, v in depth_rate_engineering.items():\n try: \n self.plot_timeseries(k, vi, axes=depth_ax, \n convert=oalib.make_multiplier(100))\n except: print('no {0}'.format(k))\n ### add to pitch axes ###\n pitch_engineering = {\n 'AHRS_sp3003D/platform_pitch_angle': 'k-', \n 'DVL_micro/platform_pitch_angle': 'm-',\n 'AHRS_3DMGX3/platform_pitch_angle': 'c-',\n 'InternalSim/platform_pitch_angle': ':r',\n }\n for k, v in pitch_engineering.items():\n try: self.plot_timeseries(k, v, axes=pitch_ax)\n except: print('no {0}'.format(k))\n ### add to mass axes ###\n mass_engineering = {\n 'VerticalControl/massPositionAction': 'g-', \n 'VerticalControl/massIntegralInternal': 'c-',\n 'MassServo/platform_mass_position': 'r-',\n #'VerticalControl/massPitchErrorInternal': ':r',\n }\n for k, v in mass_engineering.items():\n try: self.plot_timeseries(k, v, axes=mass_ax)\n except: print('no {0}'.format(k))\n ### add to buoyancy axes ###\n buoyancy_engineering = {\n 'VerticalControl/buoyancyAction': 'm-',\n 'BuoyancyServo/platform_buoyancy_position': 'b-',\n }\n for k, v in buoyancy_engineering.items():\n try: \n self.plot_timeseries(k, v,\n# convert=oalib.make_multiplier(-10), \n axes=buoyancy_ax)\n except: print('no {0}'.format(k))\n ### add to control surface axes ###\n control_surface_engineering = {\n 'VerticalControl/elevatorAngleAction': 'm-', \n 'VerticalControl/elevatorIntegralInternal': 'm:',\n 'ElevatorServo/platform_elevator_angle': 'c-',\n }\n for k, v in control_surface_engineering.items():\n try: \n self.plot_timeseries(k, v, convert = np.rad2deg, \n axes=control_surface_ax)\n except: print('no {0}'.format(k))\n \n\n # TODO only if supporting data is requested\n ### add other supporting data ###\n try: self.plot_timeseries('CTD_NeilBrown/depth', 'k-', axes=depth_ax)\n except: print('no CTD_NeilBrown/depth')\n try: self.plot_timeseries('Depth_MSI_US300', 'm-', axes=depth_ax)\n except: print('no Depth_MSI_US300')\n\n\n ### print additional information ###\n buoyancyNeutral = ('Config/Control/buoyancyNeutral',\n 'Config/Servo/buoyancyNeutral')\n for s in buoyancyNeutral:\n try:\n print('{0} = {1} {2}'.format(s, self[s+'/value'], self[s+'/units']))\n except:\n print('{0} not found'.format(s))\n \n# VertMd(0=N/A,1=Surf,2=Dep,3=DepRt,4=Pit0,5=Pit,6=PitRt,7=M&E,8=Flt),\n# VertHoldMd(0=N/A,1=Ms,2=El,3=Both)\n try:\n v, t = self.timeseries('VerticalControl/verticalMode')\n oalib.plot_date_blocks(t, v, axes=control_mode_ax, colormap=mpl.cm.jet)\n except: print('VerticalControl/verticalMode not found')\n\n depth_ax.invert_yaxis()\n for ax in fig.get_axes():\n ax.grid(True)\n try:\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),\n fontsize='small')\n except:\n print('uncaught exception for legend...')\n for ax in fig.get_axes()[:-1]:\n plt.setp(ax.get_xticklabels(), visible=False)\n\n depth_ax.set_title(os.path.basename(self.filename))\n control_mode_ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M'))\n plt.setp(control_mode_ax.get_xticklabels(), rotation=30,\n fontsize='small')", "title": "" }, { "docid": "05a50b11eee09d93f94ffb579cb16ece", "score": "0.5234844", "text": "def add_figure1(self,x,y,index=1,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n 
ax.set_ylabel(ylabel)\n ax.plot(x,y)", "title": "" }, { "docid": "26c50a25a67da79d5cf2000884bff24f", "score": "0.52332944", "text": "def makeQuadSubplots(df_rad_obs, \n df_dir_obs, \n df_rad_sen, \n df_dir_sen, \n suptitle='Big title',\n eps=3, \n min_samples=50):\n fig, axs = plt.subplots(2, 2, \n figsize=(10,10)\n )\n\n fig.suptitle('Clustering Output', fontsize=20)\n\n populateSubPlot(df=df_rad_obs,\n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=0, title='Obsever Wards Radiant')\n\n\n populateSubPlot(df=df_dir_obs, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=1, title='Obsever Wards Dire')\n\n\n populateSubPlot(df=df_rad_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=0, title='Sentry Wards Radiant')\n\n populateSubPlot(df=df_dir_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=1, title='Sentry Wards Dire')\n \n \n return fig, axs", "title": "" }, { "docid": "ed3a44e1df675e43cc10e3628a51623c", "score": "0.52246606", "text": "def __init__(self):\n super(vanderpol_output,self).__init__()\n\n # add figure object for further use\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n self.ax.set_xlim([-2.5,2.5])\n self.ax.set_ylim([-10.5,10.5])\n plt.ion()\n self.sframe = None", "title": "" }, { "docid": "f0f67282254e0c3f0f5f8a66e5dc5695", "score": "0.5216238", "text": "def plot_phase_diagram(self):\n t_max = np.log(max(self.temperatures))\n d_min = np.log(min(self.distortions))\n y_axis = [np.log(i) - d_min for i in self.distortions]\n x_axis = [t_max - np.log(i) for i in self.temperatures]\n\n plt.figure(figsize=(12, 9))\n plt.plot(x_axis, y_axis)\n\n region = {}\n for i, c in list(enumerate(self.n_eff_clusters)):\n if c not in region:\n region[c] = {}\n region[c]['min'] = x_axis[i]\n region[c]['max'] = x_axis[i]\n for c in region:\n if c == 0:\n continue\n plt.text((region[c]['min'] + region[c]['max']) / 2, 0.2,\n 'K={}'.format(c), rotation=90)\n plt.axvspan(region[c]['min'], region[c]['max'], color='C' + str(c),\n alpha=0.2)\n plt.title('Phases diagram (log)')\n plt.xlabel('Temperature')\n plt.ylabel('Distortion')\n plt.show()", "title": "" }, { "docid": "ebe95a2326392079d1dc38b503751a3e", "score": "0.5208675", "text": "def init_render(self):\n plt.ion() # interactive plot mode, panning, zooming enabled\n self.fig = plt.figure(figsize=(9,7)) # create figure object\n self.ax = self.fig.add_subplot(111, projection=\"3d\") # attach z-axis to plot\n # set axe limits and labels\n self.ax.set_xlim([-self.l1max, self.l1max])\n self.ax.set_ylim([-self.l1max, self.l1max])\n self.ax.set_zlim([-self.l1max, self.l1max])\n self.ax.set_xlabel(\"X\")\n self.ax.set_ylabel(\"Y\")\n self.ax.set_zlabel(\"Z\")\n # add 3 arrows of coordinate base frame\n ax_base = Arrow3D([0.0, self.arrow_len], [0.0, 0.0], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n ay_base = Arrow3D([0.0, 0.0], [0.0, self.arrow_len], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n az_base = Arrow3D([0.0, 0.0], [0.0, 0.0], [0.0, self.arrow_len],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n self.ax.add_artist(ax_base)\n self.ax.add_artist(ay_base)\n self.ax.add_artist(az_base)\n plt.show(block=False) # display figure and bring focus (once) to plotting window\n self.fig.tight_layout() # fits the plot to window size", "title": "" }, { "docid": "c8e2e7840da0a1a70785da72ce2a2b7b", "score": "0.52061725", "text": "def update_plot(axes):\n 
axes.clear()\n\n i = C.i\n C.i += di # globale Zählvariable erhöhen\n if C.i >= len(tt):\n time.sleep(2)\n C.i = 0\n\n t = tt[i]\n q1 = qq1[i]\n q2 = qq2[i]\n q3 = qq3[i]\n CCframe(q1, q2, q3)\n\n # Ausgabe der aktuellen Zeit\n pl.text(0.06, 0.05, \"t = %3.2fs\" % t, transform = axes.transAxes)\n pl.axis([-3, 3, -3, 3])\n axes.figure.canvas.draw()", "title": "" }, { "docid": "0e12ea1a857f2e9875a903e5ce05eaab", "score": "0.52013624", "text": "def plot(self, *, axis=None):\n \n \n self.update_dF_args()\n \n if axis is None:\n axis = self.portrait.ax\n \n _x = np.linspace(*self.xRange, self.density)\n _y = _x.copy()\n \n _xdF = np.zeros([self.density, self.density])\n _ydF = np.zeros([self.density, self.density])\n \n for i,xx in enumerate(_x):\n for j,yy in enumerate(_y):\n _xdF[j,i], _ydF[j,i] = self.funcion(xx,yy, **self.dF_args)\n \n xct = axis.contourf(_x, _y,_xdF, levels=[-self.xprecision + self.offset, self.xprecision + self.offset], colors=[self.xcolor], extend='neither')\n yct = axis.contourf(_x, _y,_ydF, levels=[-self.yprecision + self.offset, self.yprecision + self.offset], colors=[self.ycolor], extend='neither')\n \n xct.cmap.set_over(self.bgcolor, alpha=self.alpha)\n yct.cmap.set_over(self.bgcolor, alpha=self.alpha)\n xct.cmap.set_under(self.bgcolor, alpha=self.alpha)\n yct.cmap.set_under(self.bgcolor, alpha=self.alpha)\n \n return xct, yct", "title": "" }, { "docid": "dc45c12cda592ef30786041dd492cf80", "score": "0.5197354", "text": "def plot_2D_edp(self, xmin=-100, xmax=100, zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n #Y = rho_xz[:,1]\n #Z = rho_xz[:,2]\n X.shape = (N, N)\n Y.shape = (N, N)\n Z.shape = (N, N)\n plt.figure()\n plt.contourf(X, Y, Z)", "title": "" }, { "docid": "768e7485bf8f3cbd8ed0a1155e721440", "score": "0.5197049", "text": "def frame():\n fig = plt.figure(figsize = (6, 3))\n\n plt.subplots_adjust(left=.15, bottom=.2, right=.95, top=.9)\n ax = fig.add_subplot(111)\n \n ax.tick_params(axis=\"x\", labelsize=12)\n ax.tick_params(axis=\"y\", labelsize=12)\n\n return fig, ax", "title": "" }, { "docid": "287ae54265d09f1baaccf600ced94f3e", "score": "0.51900804", "text": "def plots(corpus_parts, corpus):\n \"\"\"\n given the data obtained by the function percentage_hapaxes(dv_corpus, tokenized_corpus),\n the graphic for the percentage of hapaxes per partition is plotted\n \"\"\"\n h_parts = hapaxes_parts(corpus_parts)\n part_size = [x for x in range(len(h_parts))]\n \n percent_h = percentage_hapaxes(corpus_parts, corpus)\n percent_length = [i for i in range(len(percent_h))] \n \n fig, (ax1, ax2) = plt.subplots(1, 2)\n plt.setp(ax1, xticks=np.arange(0, len(part_size), 1))\n plt.setp(ax2, xticks=np.arange(0, len(percent_length), 1))\n fig.suptitle('Number (left) and percentage (right) of hapaxes in each part')\n ax1.bar(part_size, h_parts)\n ax2.bar(percent_length, percent_h) \n return plt.show()", "title": "" }, { "docid": "629099d8a552750c0fe1f79a882f261f", "score": "0.51899725", "text": "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. 
time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "title": "" }, { "docid": "e932638e3c7ab047fbb6398d62267ec4", "score": "0.5187344", "text": "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "title": "" }, { "docid": "0b69474fbd8878544cefc5b72efffbec", "score": "0.51862925", "text": "def plot_2d_topomap_intra(ax):\n\n # plot first Head \n N = 300 # number of points for interpolation\n xy_center = [-0.178,0] # center of the plot\n radius = 0.1 # radius\n\n # draw a circle\n circle = matplotlib.patches.Circle(xy = xy_center, radius = radius, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n # add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [-0.083,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [-0.273,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n xy = [[-0.151,0.091],[-0.205,0.091], [-0.178,0.11]]\n polygon = matplotlib.patches.Polygon(xy = xy, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon) \n \n\n # Plot second Head \n x2y2_center = [0.178,0] # center of the plot\n radius2 = 0.1 # radius\n \n # draw a circle\n circle = matplotlib.patches.Circle(xy = x2y2_center, radius = radius2, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n ## add some body parts. 
Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [0.083,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [0.273,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n x2y2 = [[0.151,0.091],[0.205,0.091], [0.178,0.11]]\n polygon = matplotlib.patches.Polygon(xy = x2y2, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon)", "title": "" }, { "docid": "fd998d9e5fc2a54ce56165b5dedd78a7", "score": "0.5180341", "text": "def setup_figure(self):\n \n # connect ui widgets to measurement/hardware settings or functions\n self.ui.start_pushButton.clicked.connect(self.start)\n self.ui.interrupt_pushButton.clicked.connect(self.interrupt)\n self.settings.save_h5.connect_to_widget(self.ui.save_h5_checkBox)\n self.settings.save_movie.connect_to_widget(self.ui.save_movie_checkBox)\n \n # Set up pyqtgraph graph_layout in the UI\n self.graph_layout=pg.GraphicsLayoutWidget()\n self.ui.plot_groupBox.layout().addWidget(self.graph_layout)\n \n self.aux_graph_layout=pg.GraphicsLayoutWidget()\n self.ui.aux_plot_groupBox.layout().addWidget(self.aux_graph_layout)\n \n self.camera_layout=pg.GraphicsLayoutWidget()\n self.ui.camera_groupBox.layout().addWidget(self.camera_layout)\n\n # Create PlotItem object (a set of axes) \n \n self.plot1 = self.graph_layout.addPlot(row=1,col=1,title=\"Lick\")\n self.plot2 = self.graph_layout.addPlot(row=2,col=1,title=\"breathing\")\n\n # Create PlotDataItem object ( a scatter plot on the axes )\n self.breathing_plot = self.plot2.plot([0])\n self.lick_plot_0 = self.plot1.plot([0])\n self.lick_plot_1 = self.plot1.plot([1]) \n \n self.lick_plot_0.setPen('y')\n self.lick_plot_1.setPen('g')\n \n self.T=np.linspace(0,10,10000)\n self.k=0\n \n self.camera_view=pg.ViewBox()\n self.camera_layout.addItem(self.camera_view)\n self.camera_image=pg.ImageItem()\n self.camera_view.addItem(self.camera_image)", "title": "" } ]
edebb6923f67de52c31b16487d8a82c9
Sets the id of this BackupDestinationDetails. The `OCID`__ of the backup destination.
[ { "docid": "505f131f3c7d30d913060d9471641e05", "score": "0.0", "text": "def id(self, id):\n self._id = id", "title": "" } ]
[ { "docid": "02ade94f2aa174c9825ecc16daee95c3", "score": "0.68016785", "text": "def set_id(self, id):\n self._id = id", "title": "" }, { "docid": "ee1f68ce7dc94a233e968c37748ab47b", "score": "0.6691179", "text": "def set_id(self, id):\n self.id = id", "title": "" }, { "docid": "6269a0886f4313a771d77c130300a5e4", "score": "0.66422004", "text": "def SetId(self, id):\n self._id = id", "title": "" }, { "docid": "a123c45ca4e33fa910bf5d02188f50de", "score": "0.6583005", "text": "def setId(self, id):\n self.__id = id", "title": "" }, { "docid": "bc3481162e1b61f1674c1a3ccfe65e5c", "score": "0.64434814", "text": "def id(self, id):\n \n self._id = id", "title": "" }, { "docid": "bc3481162e1b61f1674c1a3ccfe65e5c", "score": "0.64434814", "text": "def id(self, id):\n \n self._id = id", "title": "" }, { "docid": "bc3481162e1b61f1674c1a3ccfe65e5c", "score": "0.64434814", "text": "def id(self, id):\n \n self._id = id", "title": "" }, { "docid": "248d63535959d6e9d76d5369cd951885", "score": "0.6410301", "text": "def id(self, id: str):\n \n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": 
"" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": 
"b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": 
"b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" }, { "docid": "b0af6f89fd8dbe87ad2e41bc8a5cbdad", "score": "0.6374854", "text": "def id(self, id):\n\n self._id = id", "title": "" } ]
509bc6bda7806d989647518f1dc299e5
Move a tile of the right color to the image.
[ { "docid": "241d5a5fe336a77a4759122d4c88cc13", "score": "0.6386266", "text": "def make_move_color(result, tile, scale, yx, bgr):\n patt = tile.copy()\n patt += bgr\n patt[ma.masked_where(patt > 255, patt).mask] = 255.\n\n y, x = yx[0]*scale, yx[1]*scale\n result[y:y+scale, x:x+scale] = patt", "title": "" } ]
[ { "docid": "e4076fcb157d174c58557f0ab492b0e2", "score": "0.73865634", "text": "def move_tiles_right(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on rows\r\n\r\n for _row in range(self.rows):\r\n\r\n # pass 1: fusions\r\n\r\n for _column in range(self.columns - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _col in range(_column - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row, _col)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _column in range(self.columns - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _empty[1] - 1)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r", "title": "" }, { "docid": "3b25c035d65a76e48cbdd69e4926df84", "score": "0.7341642", "text": "def move_tiles_right(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on rows\r\n\r\n for _row in range(self.rows):\r\n\r\n # pass 1: fusions\r\n\r\n for _column in range(self.columns - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _col in range(_column - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row, _col)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _column in range(self.columns - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _empty[1] - 1)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r\n\r\n self.next_tile(acted=_acted)", "title": "" }, { "docid": "9e616198c4e5db2f18ca1ae7a5236315", "score": "0.68396986", "text": "def move_pixel_right(x, y, n):\n L = L4 if n == 4 else L8\n while x < (n - 1) and L[x + 1][y] == 0:\n L[x + 1][y] = L[x][y]\n L[x][y] = 0\n x = x + 1\n if x < (n - 1) and L[x + 1][y] == L[x][y]:\n L[x + 1][y] = L[x + 1][y] + 1\n L[x][y] = 0", "title": "" }, { "docid": "88bdab6ad2c0b7ce27995461bdec6468", "score": "0.66761214", "text": "def move_row_right(row) :\n taille = len(row)\n has_been_modified = [0] * taille \n #This list tells us if a tile has 
already merged during this move (and thus must not be moved again)\n for position in range(taille-1,-1,-1) :\n rightmost_tile_value, rightmost_tile_position = row_check_tile_direction(row,\"right\",position) #We retrieve the value of the first encountered tile\n if rightmost_tile_value == row[position] and has_been_modified[rightmost_tile_position] == 0 :\n row[position] = \" \"\n row[rightmost_tile_position] = str(2*int(rightmost_tile_value)) #We fuse the two tiles\n has_been_modified[rightmost_tile_position] = 1\n else :\n tempValue = row[position]\n row[position] = \" \"\n row[rightmost_tile_position - 1] = tempValue\n return row", "title": "" }, { "docid": "2f6cbaf58d804b69fbc451cbedf8766a", "score": "0.66593266", "text": "def __position_tile_right_down(self, target_row, target_col, to_move_tile):\n move_string = \"\"\n section_string = \"\"\n# to_move_tile = self.current_position(target_row,target_col)\n section_string += \"u\"\n move_string += \"u\"\n self.update_puzzle(section_string)\n# print self\n\n# print \"D2\"\n section_string=\"\"\n index_i = target_col\n while index_i != to_move_tile[1]:\n index_i += 1\n move_string += \"r\"\n section_string += \"r\"\n self.update_puzzle(section_string)\n# print self\n\n# print \"D3\"\n section_string=\"\"\n index_i = target_col\n while index_i != to_move_tile[1]-1:\n index_i += 1\n move_string += \"ulldr\"\n section_string += \"ulldr\"\n self.update_puzzle(section_string)\n# print self\n\n# print \"D4\"\n# move_string += \"ullddrlruld\"\n move_string += \"dluld\"\n section_string = \"\"\n# section_string += \"ullddrlruld\"\n section_string += \"dluld\"\n self.update_puzzle(section_string)\n# print self\n# self.update_puzzle(move_string)\n return move_string", "title": "" }, { "docid": "2d63fd5147658ee3a72a303730e4ed17", "score": "0.6622936", "text": "def move_tile(self, direction):\n if self.state == \"playing\":\n x, y = self.blank_position\n if direction == \"up\":\n tile = self.get_tile(x, y + 1)\n elif direction == \"down\":\n tile = self.get_tile(x, y - 1)\n if direction == \"left\":\n tile = self.get_tile(x + 1, y)\n if direction == \"right\":\n tile = self.get_tile(x - 1, y)\n if tile:\n tile.position, self.blank_position = self.blank_position, tile.position\n tile.animate()", "title": "" }, { "docid": "bd8ee8e49f0aaf19a25a46768c6bd3a4", "score": "0.65964484", "text": "def moved_right(n):\n L = L4 if n == 4 else L8\n for y in range(n):\n for z in range(n - 1):\n x = n - 2 - z\n if L[x][y] > 0 and x < (n - 1):\n move_pixel_right(x, y, n)\n set_pixels(n)\n new_block(n)", "title": "" }, { "docid": "1ca31227150f8ad64c9f2931db9cf9f2", "score": "0.65747625", "text": "def move_right(self):\n pass", "title": "" }, { "docid": "c39091b2699ee4d34956a020c0410d3a", "score": "0.6559017", "text": "def move_right():\n moved = False\n for i, row in enumerate(running_game.game_matrix):\n print \"row = {0}\".format(row)\n row.reverse()\n cells = move_cells(row)\n moved |= row != cells\n cells.reverse()\n running_game.game_matrix[i] = cells\n if moved:\n add_random_number()", "title": "" }, { "docid": "56f9e5d427840cbf7e2557d86b9f19b2", "score": "0.6524618", "text": "def move(self, direction):\n print 'new run'\n legal_move = False\n if direction == LEFT or direction == RIGHT:\n max_col = self.get_grid_height()\n max_row = self.get_grid_width()\n else:\n max_col = self.get_grid_width()\n max_row = self.get_grid_height()\n \n \n for dummy_col in range(max_col): \n lst = list()\n line = list()\n for dummy_row in range(max_row):\n print 'baseline:' + 
str(self._base_line[direction])\n print 'dummy_col %d' % dummy_col\n print 'dummy_row %d' % dummy_row\n print 'offsets: %s' % str(OFFSETS[direction])\n print self.get_grid_width(), self.get_grid_height()\n col = self._base_line[direction][dummy_col][0]+OFFSETS[direction][0]*dummy_row\n row = self._base_line[direction][dummy_col][1]+OFFSETS[direction][1]*dummy_row\n print 'col, row:'\n lst.append([col, row])\n print ' move: %d;%d' % (col,row)\n line.append(self.get_tile(col,row))\n merged = merge(line)\n for dummy_row in range(len(lst)):\n if self.get_tile(lst[dummy_row][0],lst[dummy_row][1]) != merged[dummy_row]:\n legal_move = True \n self.set_tile(lst[dummy_row][0],lst[dummy_row][1],merged[dummy_row])\n if legal_move: \n self.new_tile()", "title": "" }, { "docid": "51c80979a84959fd08f5aaf6d27e8ac9", "score": "0.6501977", "text": "def move_tile(self, tile, row, column):\r\n\r\n # param controls\r\n\r\n if tile:\r\n # move into matrix\r\n\r\n self.matrix.move_object(tile.row_column, (row, column))\r\n\r\n # end if\r", "title": "" }, { "docid": "2b291549a79a7c66a24a63588d1e1f38", "score": "0.6482308", "text": "def move_to_tile(self, current_location, destination):\n diff = tuple(x - y for x, y in zip(current_location, destination))\n if diff == (0, 1):\n action = self.DOWN\n elif diff == (1, 0):\n action = self.LEFT\n elif diff == (0, -1):\n action = self.UP\n elif diff == (-1, 0):\n action = self.RIGHT\n else:\n if current_location != destination:\n print(\"Failed to move to tile (tiles provided are not neighbours)\")\n self.path = []\n action = self.DO_NOTHING\n if destination in self.path:\n self.path = []\n\n return action", "title": "" }, { "docid": "4962c3663eb8af6b64daea1ed896acb5", "score": "0.64723796", "text": "def move(self, direction):\n dif_x, dif_y = direction\n\n # Make sure we move the last tiles first, so they don't block the\n # previous ones.\n x_range = range(4) if dif_x < 1 else range(3, -1, -1)\n y_range = range(4) if dif_y < 1 else range(3, -1, -1)\n\n new = Board(self)\n\n for x in x_range:\n for y in y_range:\n # Ignore empty tiles.\n if not self[x, y]:\n continue\n \n new_x, new_y = x, y\n while True:\n old_x, old_y = new_x, new_y\n new_x += dif_x\n new_y += dif_y\n\n if new[new_x, new_y] == new[old_x, old_y]:\n # Same pieces, merge.\n new[new_x, new_y] *= 2\n new[old_x, old_y] = 0\n break\n elif new[new_x, new_y]:\n # Hit a different tile (or border, which is -1), stop.\n break\n else:\n # Move piece one tile and leave an empty space behind.\n new[new_x, new_y] = new[old_x, old_y]\n new[old_x, old_y] = 0\n\n return new", "title": "" }, { "docid": "f43f18d3af408f68c9d1586afdee7025", "score": "0.6438465", "text": "def move_right(self):\n self.setx(self.xcor() + 30) # Move spaceship right 60 paces", "title": "" }, { "docid": "d35cf9bdac36add5baad9508c5b29481", "score": "0.6336771", "text": "def move_right(self):\n (dir_x, dir_y) = (self.snakebody[0].snakehead[0], self.snakebody[0].snakehead[1])\n self.turn()\n self.snakebody[0].snakehead = (dir_x + self.blocksize[0], dir_y)", "title": "" }, { "docid": "3dca6464302fc7ae2342735f393c3c1d", "score": "0.6297795", "text": "def __position_tile_right_up(self, target_row, target_col, to_move_tile):\n move_string = \"\"\n# section_string = \"\"\n# to_move_tile = self.current_position(target_row,target_col)\n# print \"B1\"\n index_i = to_move_tile[0]\n while index_i != target_row - 1:\n index_i += 1\n move_string += \"u\"\n# section_string += \"u\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"B2\"\n# 
section_string = \"\"\n index_i = to_move_tile[1]\n while index_i != target_col:\n index_i -= 1\n move_string += \"r\"\n# section_string += \"r\"\n move_string += \"uldr\"\n# self.update_puzzle(section_string)\n# section_string = \"\"\n# section_string += \"uldr\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"B3\"\n# section_string = \"\"\n index_i = to_move_tile[1]\n while index_i != target_col+1:\n index_i -= 1\n move_string += \"ulldr\"\n# section_string += \"ulldr\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"B4\"\n move_string += \"ul\"\n# section_string = \"ul\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"B5\"\n# section_string = \"\"\n index_i = to_move_tile[0]\n while index_i != target_row-1:\n index_i +=1\n move_string += \"lddru\"\n# section_string += \"lddru\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"B6\"\n move_string += \"ld\"\n# section_string = \"ld\"\n# self.update_puzzle(section_string)\n# print self\n self.update_puzzle(move_string)\n return move_string", "title": "" }, { "docid": "9dbe521d2d396b7e7e790aee58e4aad7", "score": "0.6276438", "text": "def move_right(self):\n tx, ty = self.x + 1, self.y\n if (tx, ty) in self.space.cells.keys() and self.space.cells[tx, ty].walkable:\n self.x += 1\n self.space.cells[(self.x - 1, self.y)].walkable = True", "title": "" }, { "docid": "dfd0754dc4e659a1648a1ee11959dd57", "score": "0.6251816", "text": "def move_right(self):\n if self.rect.centerx <= 730:\n self.rect.centerx += 20", "title": "" }, { "docid": "a795c734826fd1a711b82a8bca05026f", "score": "0.623336", "text": "def move(self, direction):\n offset = OFFSETS[direction]\n isMoved = False\n init_tiles = self.init_rows[direction]\n for tiles in range(len(init_tiles)):\n tile = init_tiles[tiles]\n temp_tiles = list()\n row = list()\n create_rows(temp_tiles,tile,offset,steps)\n for val in range(len(temp_tiles)):\n indices = temp_tiles[val]\n row.append(self.grid[indices[0]][indices[1]])\n row = merge(row)\n for val in range(len(temp_tiles)):\n indices = temp_tiles[val]\n if row[val] != self.grid[indices[0]][indices[1]]:\n self.set_tile(indices[0],indices[1],row[val])\n isMoved = True\n if isMoved == True:\n self.new_tile()\n isMoved = false", "title": "" }, { "docid": "eac96c22321c0a3998b87b0a4d73f3c5", "score": "0.6229151", "text": "def _flip_tile(self,pos):\n if pos in self.black_tiles:\n self.black_tiles.remove(pos)\n else:\n self.black_tiles.add(pos)", "title": "" }, { "docid": "20c0c19bd3d6448d66f4be767e1c7498", "score": "0.62264913", "text": "def _move_red_right(self, h):\n self._flip_colors(h)\n if self._is_red(h.left.left):\n h = self._rotate_right(h)\n self._flip_colors(h)\n return h", "title": "" }, { "docid": "da747a24de96669b4cf7152ffdbf3a8b", "score": "0.6208568", "text": "def reverse(self,initx,inity,tile,oppositetile): \n for xdirection, ydirection in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]]:# for the new tile, search in eight directions for its oppositetile\n x = initx\n y = inity\n x += xdirection\n y += ydirection\n \n while self.onboard(x,y) and self.board[y][x] == oppositetile:\n x += xdirection\n y += ydirection\n \n if self.onboard(x,y) and self.board[y][x] == tile:\n x -= xdirection\n y -= ydirection\n \n while self.onboard(x,y) and self.board[y][x] == oppositetile: # going backwards and reverse all the opposite tiles in between.\n print x,y\n self.board[y][x] = tile\n x -= xdirection\n y -= ydirection\n \n \n return self.board", "title": "" }, { 
"docid": "c7d0511ef69c498e31992cd54a31c16a", "score": "0.61955196", "text": "def move_tile(self, tile, row, column):\r\n\r\n # param controls\r\n\r\n if tile:\r\n # move into matrix\r\n\r\n self.matrix.move_object(tile.row_column, (row, column))\r\n\r\n # make some animation and updates\r\n\r\n tile.animate_move_to(row, column)\r\n\r\n # end if\r", "title": "" }, { "docid": "c8131708e527c3150dfbb4872ccd347b", "score": "0.6189553", "text": "def move_right(self):\n # If paddle is not touching right border\n if self.x <= WINDOW['width'] - self.width:\n self.x += 15", "title": "" }, { "docid": "2a020aeb3fb48ebb6ee5acc98daace31", "score": "0.6185716", "text": "def move(self, direction):\n new_d = {1: 3, 2: 1, 3: 0, 4: 2}\n for dummy_kdx in range(new_d.get(direction)):\n self.transpose()\n for idx in range(self._height):\n merge(self._grid[idx])\n for dummy_jdx in range(4 - new_d.get(direction)):\n self.transpose()\n self.new_tile()", "title": "" }, { "docid": "d6e1f332a9aee11038225963a01e6417", "score": "0.61785114", "text": "def turn_right(image):\n return np.rot90(image, 3)", "title": "" }, { "docid": "97c457e37f7c989f8031b1a7f81fd8f9", "score": "0.6165533", "text": "def move_tiles_down(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on columns\r\n\r\n for _column in range(self.columns):\r\n\r\n # pass 1: fusions\r\n\r\n for _row in range(self.rows - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _row2 in range(_row - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row2, _column)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _row in range(self.rows - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_empty[0] - 1, _column)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r", "title": "" }, { "docid": "4f73f5657c79a1a88899b8fbbbe85529", "score": "0.61528367", "text": "def move_right(self):\n self.at(at.at_pcmd, True, self.speed, 0, 0, 0)", "title": "" }, { "docid": "afd6700b543adf7ef12c8151a3855952", "score": "0.61299914", "text": "def move_right(self):\n \n if self.blank_col == self.n - 1:\n\n return None\n\n else:\n\n blank_index = self.blank_row * self.n + self.blank_col\n\n target = blank_index + 1\n\n new_config = list(self.config)\n\n new_config[blank_index], new_config[target] = new_config[target], new_config[blank_index]\n\n return PuzzleState(tuple(new_config), self.n, parent=self, action=\"Right\", cost=self.cost + 1)", "title": "" }, { "docid": "be682415bcea50f5bb6594c8555913e9", "score": "0.6123108", "text": "def move(self, direction):\n moved = False\n \n for ids in self._init_tiles[direction]:\n line = []\n for step in range(self._num_steps[direction]):\n row = ids[0] + step * OFFSETS[direction][0]\n col = ids[1] + 
step * OFFSETS[direction][1]\n line.append(self.get_tile(row, col))\n output = merge(line)\n if output != line:\n moved = True\n for step in range(self._num_steps[direction]):\n row = ids[0] + step * OFFSETS[direction][0]\n col = ids[1] + step * OFFSETS[direction][1]\n self.set_tile(row, col, output[step])\n \n if moved:\n self.new_tile()", "title": "" }, { "docid": "01aeacc38acc662f0fed7a9945e3a45c", "score": "0.61087584", "text": "def __move_cell(self, c, destination):\n destination.x = (destination.x + self.width) % self.width\n destination.y = (destination.y + self.height) % self.height\n self.set(c.get_position(), None)\n c.set_position(destination)\n self.set(destination, c)", "title": "" }, { "docid": "7bc2d6e1b7ef19fea13a118b4e64ca79", "score": "0.6102588", "text": "def move(self, direction):\n # 1- Up 2- Down 3- Left 4- Right\n merge_titls_indices = self._indices.get(direction) ;\n merge_titls_value = [] ;\n \n # 得到需要合并的坐标 以及对应的值\n for dummy_i in range(len(merge_titls_indices)):\n tt_index = merge_titls_indices[dummy_i];\n temp_list = [] ; \n for dummy_j in range(len(tt_index) ): \n #get location (tuple)\n temp_location = tt_index[dummy_j];\n temp_list.append(self._cells[temp_location[0]][temp_location[1]])\n \n merge_titls_value.append(merge(temp_list));\n \n # 对cells 进行重新赋值\n for dummy_i in range(len(merge_titls_indices)):\n temp_list = [] ;\n for dummy_j in range(len(merge_titls_indices[dummy_i])):\n #get location (tuple)\n temp_location = merge_titls_indices[dummy_i][dummy_j];\n self.set_tile(temp_location[0], temp_location[1], merge_titls_value[dummy_i][dummy_j]) \n # 需要考虑不产生新的tile的情况\n self.new_tile();", "title": "" }, { "docid": "5da0090de5fd27eabb476786c1f0b83e", "score": "0.6099761", "text": "def move_tiles_down(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on columns\r\n\r\n for _column in range(self.columns):\r\n\r\n # pass 1: fusions\r\n\r\n for _row in range(self.rows - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _row2 in range(_row - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row2, _column)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _row in range(self.rows - 1, -1, -1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_empty[0] - 1, _column)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r\n\r\n self.next_tile(acted=_acted)", "title": "" }, { "docid": "9736919c92d2d67f18f22d1f49ce3861", "score": "0.60973907", "text": "def try_move_right(self, move_distance, pieces_to_move):\n old_loc = self.location\n new_loc = (self.location[0]+move_distance, self.location[1])\n\n # Make sure it is a valid tile to move to\n if new_loc[0] > BOARD_SIZE_MAX or \\\n self.board.is_occupied_by_black(new_loc) or \\\n new_loc 
in self.visited_nodes:\n return False\n\n new_stack = self.board.get_stack_size(new_loc) + pieces_to_move\n new_board = self.try_move(new_loc, pieces_to_move)\n\n # Return the Node to represent this move\n updated_visited_nodes = deepcopy(self.visited_nodes)\n updated_visited_nodes.append(new_loc)\n updated_actions = deepcopy(self.actions)\n updated_actions.append((\"move\", pieces_to_move, old_loc, new_loc))\n return MyNode(board=new_board,\n location=new_loc,\n stack_size=new_stack,\n visited_nodes=updated_visited_nodes,\n actions=updated_actions)", "title": "" }, { "docid": "520132255c2c9f6e0e4f255d0a615e53", "score": "0.60905707", "text": "def moveright(self):\n if not self.right is None:\n return\n self.right = State(self.gamegrid, self.score)\n self.right.gamegrid = self.reverse(self.gamegrid)\n self.right.gamegrid, tempflagstack = self.stack(self.right.gamegrid)\n self.right.gamegrid, self.right.score, tempflagcombine = self.combine(\n self.right.gamegrid, self.score\n )\n self.right.gamegrid, _ = self.stack(self.right.gamegrid)\n self.right.gamegrid = self.reverse(self.right.gamegrid)\n if tempflagstack or tempflagcombine:\n self.right.gamegrid = self.add_new_tile(self.right.gamegrid)\n else:\n self.right = None\n return\n if self.game_over_look(self.right.gamegrid) == 1:\n self.right = None", "title": "" }, { "docid": "6ec3582531f94f4398cb9f826cc78c2e", "score": "0.60846", "text": "def swap_tiles_on_image(image, tiles):\n new_image = image.copy()\n\n for tile in tiles:\n new_image[tile[0] : tile[0] + tile[4], tile[1] : tile[1] + tile[5]] = image[\n tile[2] : tile[2] + tile[4], tile[3] : tile[3] + tile[5]\n ]\n\n return new_image", "title": "" }, { "docid": "dc7f365a68a516233e6632c0f218d9e0", "score": "0.60831296", "text": "def _move_node_right(self, node):\n pass", "title": "" }, { "docid": "f6b18a353f7bd6b5e82e17d895f89054", "score": "0.6078923", "text": "def move_tiles_left(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on rows\r\n\r\n for _row in range(self.rows):\r\n\r\n # pass 1: fusions\r\n\r\n for _column in range(self.columns - 1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _col in range(_column + 1, self.columns):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row, _col)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _column in range(self.columns):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is just near last one\r\n\r\n _empty = (_row, _empty[1] + 1)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r", "title": "" }, { "docid": "c7db44d71895b2f07a3c3b905f6a3e00", "score": "0.6069774", "text": "def make_move(board):\n\n\t\tpass", "title": "" }, { "docid": "b9f5f1ae0deb9f3bcc61ab97ee7f965a", "score": "0.606026", "text": "def move(self, direction):\n # all check the map\n 
start_cells = {UP:[[0,col] for col in range(self._grid_width)],\n DOWN:[[self._grid_height - 1,col] for col in range(self._grid_width)],\n LEFT:[[row,0] for row in range(self._grid_height)],\n RIGHT:[[row , self._grid_width - 1] for row in range(self._grid_height)]}\n steps = {UP:self._grid_height,\n DOWN:self._grid_height,\n RIGHT:self._grid_width,\n LEFT:self._grid_width}\n \n for cell in start_cells[direction]:\n temp_list = []\n for step in range(steps[direction]):\n row = cell[0] + step * OFFSETS[direction][0]\n col = cell[1] + step * OFFSETS[direction][1]\n temp_list.append(self.get_tile(row,col))\n temp_list = merge(temp_list)\n for step in range(steps[direction]):\n row = cell[0] + step * OFFSETS[direction][0]\n col = cell[1] + step * OFFSETS[direction][1]\n self.set_tile(row,col,temp_list[step])\n #self.new_tile()", "title": "" }, { "docid": "6d4e5875b09082b1ba4dfb993df5e8ad", "score": "0.60601324", "text": "def turn_right(self, tak):\n newdir = tak.direction\n newdir += 1\n if newdir > 3:\n newdir = 0\n tak.direction = newdir\n return None", "title": "" }, { "docid": "674ba3424b3b81c78794afc17952ce42", "score": "0.60543424", "text": "def turn_right(self) -> None:\n self.direction = (self.direction << 1) % 15", "title": "" }, { "docid": "c5d8e9e2da4767706124e2bf9dce5a0d", "score": "0.60424316", "text": "def move(self, new_location):\n # You don't need to erase the food because the snake draws over it for you, creating a smoother eat process\n self.rect.left, self.rect.top = new_location\n self.screen.blit(self.image, self.rect)", "title": "" }, { "docid": "fa9c04d88432c804f23039973836add2", "score": "0.6040958", "text": "def do_move(move, board):\n reset_merged_markers(board)\n if move == 0: # Left\n did_swap = True\n while did_swap:\n did_swap = False\n for i in range(len(board)):\n for j in range(len(board[0])):\n if j == 0:\n pass\n elif board[i][j - 1].number == 0 and board[i][j].number != 0:\n board[i][j - 1] = board[i][j]\n board[i][j] = Tile(0, False)\n did_swap = True\n elif board[i][j - 1].number == board[i][j].number and board[i][j - 1].merged is False and board[i][\n j].merged is False:\n board[i][j - 1] = Tile(board[i][j - 1].number + board[i][j].number, True)\n board[i][j] = Tile(0, False)\n did_swap = True\n elif move == 1: # Up\n did_swap = True\n while did_swap:\n did_swap = False\n for j in range(len(board[0])):\n for i in range(len(board)):\n if i == 0:\n pass\n elif board[i - 1][j].number == 0 and board[i][j].number != 0:\n board[i - 1][j] = board[i][j]\n board[i][j] = Tile(0, False)\n did_swap = True\n elif board[i - 1][j].number == board[i][j].number and board[i - 1][j].merged is False and board[i][\n j].merged is False:\n board[i - 1][j] = Tile(board[i - 1][j].number + board[i][j].number, True)\n board[i][j] = Tile(0, False)\n did_swap = True\n elif move == 2: # Right\n did_swap = True\n while did_swap:\n did_swap = False\n for i in range(len(board)):\n for j in range(len(board[0]) - 1, -1, -1):\n if j == len(board[0]) - 1:\n pass\n elif board[i][j + 1].number == 0 and board[i][j].number != 0:\n board[i][j + 1] = board[i][j]\n board[i][j] = Tile(0, False)\n did_swap = True\n elif board[i][j + 1].number == board[i][j].number and board[i][j + 1].merged is False and board[i][\n j].merged is False:\n board[i][j + 1] = Tile(board[i][j + 1].number + board[i][j].number, True)\n board[i][j] = Tile(0, False)\n did_swap = True\n elif move == 3: # Down\n did_swap = True\n while did_swap:\n did_swap = False\n for j in range(len(board[0])):\n for i in range(len(board) - 
1, -1, -1):\n if i == len(board) - 1:\n pass\n elif board[i + 1][j].number == 0 and board[i][j].number != 0:\n board[i + 1][j] = board[i][j]\n board[i][j] = Tile(0, False)\n did_swap = True\n elif board[i + 1][j].number == board[i][j].number and board[i + 1][j].merged is False and board[i][\n j].merged is False:\n board[i + 1][j] = Tile(board[i + 1][j].number + board[i][j].number, True)\n board[i][j] = Tile(0, False)\n did_swap = True\n else:\n print(\"Invalid Input.\")", "title": "" }, { "docid": "ffc1c1ec56316462b18d231ecad5365d", "score": "0.6040388", "text": "def move_right(self, x):\n return self.move(\"right\", x)", "title": "" }, { "docid": "8a8731f7ec536b0a944a1ae82f87b031", "score": "0.60286564", "text": "def f_move_tile_to(self, ifrom, ito):\n if self.data is None:\n return\n if ifrom == ito:\n return\n if ifrom < 0 or ifrom >= len(self.data.texlist):\n return\n if ito < 0 or ito >= len(self.data.texlist):\n return\n self.data.texlist[ifrom], self.data.texlist[ito] =\\\n self.data.texlist[ito], self.data.texlist[ifrom]\n self.select_index = ito\n self.updated = True\n self.refresh_data_panel()", "title": "" }, { "docid": "dd5419b20b0d3aa5202b99dbd0b260e3", "score": "0.6026747", "text": "def move_tiles_left(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on rows\r\n\r\n for _row in range(self.rows):\r\n\r\n # pass 1: fusions\r\n\r\n for _column in range(self.columns - 1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _col in range(_column + 1, self.columns):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row, _col)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _column in range(self.columns):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is just near last one\r\n\r\n _empty = (_row, _empty[1] + 1)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r\n\r\n self.next_tile(acted=_acted)", "title": "" }, { "docid": "05cb0b9a55fb360860550dc7d8959ce0", "score": "0.6022873", "text": "def move(self, row, col, direction) -> None:\r\n old_val = self.boardRows[row][col]\r\n old_status = self.pieces[row][col]\r\n\r\n self.boardRows[row][col] = EMPTY\r\n self.pieces[row][col] = EMPTY_CELL\r\n\r\n if direction == DOWN:\r\n targetRow = row + 1\r\n self.boardRows[targetRow][col] = old_val\r\n self.pieces[targetRow][col] = old_status\r\n else:\r\n targetCol = col + direction\r\n self.boardRows[row][targetCol] = old_val\r\n self.pieces[row][targetCol] = old_status", "title": "" }, { "docid": "1ee1af775ac3c3074a34be5ef1cf9258", "score": "0.60196334", "text": "def move_right( state ):\n\t# Performs an object copy. 
Python passes by reference.\n\tnew_state = state[:]\n\tindex = new_state.index( 0 )\n\t# Sanity check\n\tif index not in [6, 7, 8]:\n\t\t# Swap the values.\n\t\ttemp = new_state[index + 3]\n\t\tnew_state[index + 3] = new_state[index]\n\t\tnew_state[index] = temp\n\t\treturn new_state\n\telse:\n\t\t# Can't move, return None\n\t\treturn None", "title": "" }, { "docid": "35eeabc48cb61937ed47706b69973364", "score": "0.6009746", "text": "def move(self, new_position):\n rook = Rook(self.position.copy(), self.color, self.board_state)\n rook.first_move = self.first_move\n self.board_state.moves_history.append(Move(self.position.copy(), new_position.copy(), rook,\n self.board_state.board[new_position[0]][new_position[1]]))\n self.board_state.move_counter += 1\n\n self.board_state.board[self.position[0]][self.position[1]] = None\n self.board_state.board[new_position[0]][new_position[1]] = self\n self.position = new_position\n\n if self.first_move:\n self.first_move = False\n\n if self.board_state.white_to_move:\n self.board_state.white_to_move = False\n else:\n self.board_state.white_to_move = True", "title": "" }, { "docid": "b1982c97e06b68435338493bbd9b2f6d", "score": "0.60097414", "text": "def __position_tile_left_down(self, target_row, target_col, to_move_tile):\n move_string = \"\"\n# section_string = \"\"\n# to_move_tile = self.current_position(target_row,target_col)\n# print \"E1\"\n move_string += \"u\"\n# section_string = \"\"\n# section_string += \"u\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"E2\"\n# section_string = \"\"\n index_i = to_move_tile[1]\n while index_i != target_col:\n index_i += 1\n move_string += \"l\"\n# section_string += \"l\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"E3\"\n# section_string = \"\"\n index_i = to_move_tile[1]\n while index_i != target_col-1:\n index_i += 1\n move_string += \"urrdl\"\n# section_string += \"urrdl\"\n# self.update_puzzle(section_string)\n# print self\n\n# print \"E4\"\n move_string += \"druld\"\n# section_string = \"\"\n# section_string += \"druld\"\n# self.update_puzzle(section_string)\n# print self\n\n self.update_puzzle(move_string)\n return move_string", "title": "" }, { "docid": "2af7cdadca358bf34bfac395ab613176", "score": "0.59997493", "text": "def move_right(self,right_unit):\r\n self.xposition = self.xposition + right_unit", "title": "" }, { "docid": "fc7c593b2e89e682dde6fc55fe0544ad", "score": "0.5975832", "text": "def move_right(self, coordinate):\n column = coordinate[1]\n row = coordinate[0]\n working_row = self.board[row]\n temp = None\n\n # ko rule comparison save\n self.previous_row.update({self.current_player: working_row})\n\n # scenario for moving an edge square with no blanks in the column\n if column == 0 and 'X' not in working_row:\n temp = working_row.pop()\n working_row.insert(0, 'X')\n\n # scenario for any other square in a column with blank spaces within\n elif (column != 6 and working_row[column - 1] == 'X') or (column == 0 and 'X' in working_row):\n location = column\n if 'X' in working_row[column:]:\n for item in working_row[column:]:\n if item == 'X':\n working_row.pop(location)\n working_row.insert(column, 'X')\n break\n location += 1\n else:\n working_row.insert(column, 'X')\n temp = working_row.pop()\n\n # scenario if the square to move does not have a blank square behind it\n elif column != 6 and working_row[column - 1] != 'X':\n return False\n\n # ko rule check\n self.new_row.update({self.current_player: working_row})\n if self.current_player is not None:\n 
current = self.current_player\n if self.current_player == self.p1:\n other = self.p2\n else:\n other = self.p1\n if self.new_row.get(current) == self.previous_row.get(other):\n return False\n\n #inserts working row into game board\n self.board[row] = working_row\n\n # updates board column with working column values\n if temp == 'R':\n self.get_current_turn().update_captured()\n if self.current_player.captured == 7:\n self.set_game_winner(self.current_player)\n return True\n self.change_current_player()\n else:\n self.change_current_player()\n\n return True", "title": "" }, { "docid": "3a91beb2352a82716c7d12db7f9e320d", "score": "0.59696233", "text": "def move(self, direction):\n # create list for each index\n initial_tiles = self._initial_index_[direction]\n index_len = self._initial_index_len_[direction]\n offsets = OFFSETS[direction]\n tiles_moved = 0\n\n print(\"\\n MOVE\", direction, initial_tiles)\n print(self)\n for initial_tile in initial_tiles:\n print(\"initial_tile is\", initial_tile)\n temp_list = []\n tile_index = [initial_tile[0], initial_tile[1]]\n for temp_list_index in range(index_len):\n tile = self.get_tile(tile_index[0], tile_index[1])\n temp_list.append(tile)\n tile_index[0] += offsets[0]\n tile_index[1] += offsets[1]\n print(\"unmerged\", temp_list)\n merge_output = merge(temp_list)\n self._score_ += merge_output[1]\n print(\"score to add\", self._score_)\n temp_list_merged = merge_output[0]\n print(\"merged\", temp_list_merged)\n print(merge_output[1])\n\n\n # setting merged list back as tiles\n tile_index = [initial_tile[0], initial_tile[1]]\n for temp_list_index in range(index_len):\n # add tiles moved counter\n if self.get_tile(tile_index[0], tile_index[1]) != temp_list_merged[temp_list_index]:\n tiles_moved += 1\n self.set_tile(tile_index[0], tile_index[1], temp_list_merged[temp_list_index])\n tile_index[0] += offsets[0]\n tile_index[1] += offsets[1]\n print(self)\n print(\"\")\n\n print(\"tiles_moved\", tiles_moved)\n if tiles_moved > 0:\n print(\"tiles moved - NEW TILE\")\n self.new_tile()", "title": "" }, { "docid": "2360cae620e7f20d2c4a4003ba0b8eb6", "score": "0.5953542", "text": "def go_right(self):\n self.change_x = 6\n self.direction = \"R\"", "title": "" }, { "docid": "3bac32ec179f7787bede4deae7de2380", "score": "0.5948535", "text": "def moveRight(self):\n\n self.rect.x += self.speed\n if self.game.check_collision(self, self.game.all_walls):\n self.rect.x -= self.speed", "title": "" }, { "docid": "3a86ae003f7d8c22f79cbbea86781aba", "score": "0.5947922", "text": "def move_right(self):\n if max(self.x[0:self.length]) == self.width - 1:\n return\n for x in range(self.length):\n self.x[x] += 1", "title": "" }, { "docid": "e302b3bf734d82a139aae6d376fd0ccc", "score": "0.5944606", "text": "def rightDownMove():\r\n\r\n var.ball.x+=5\r\n #increase x 5 pixels to go right\r\n\r\n var.ball.y+=5\r\n #increase y 5 pixels to go down\r", "title": "" }, { "docid": "5d73470df8cad8d1003409fadd34fb21", "score": "0.5939654", "text": "def move_right(self):\n if self.head.heading()!= LEFT:\n self.head.seth(RIGHT)", "title": "" }, { "docid": "6ebb4c789276f06a7578ede52d4e131a", "score": "0.5933959", "text": "def move_right(self):\n to_swap_row = self.empty_row\n to_swap_col = self.empty_col + 1\n if (self.is_valid(to_swap_row, to_swap_col)):\n return self.move_empty_to(to_swap_row, to_swap_col)\n else:\n return None", "title": "" }, { "docid": "0b0697667d5b966fb9d223ee1c753932", "score": "0.5919219", "text": "def picosubmove(self, direction): \n d = {'N':[0,'north'], 
'E':[1,'east'], 'W':[2,'west'], 'S':[3,'south']}\n if direction in 'xX':\n self.pmap[self.i, self.j] = 2\n pass\n else:\n index = d[direction][0]\n #check if there's a wall, refuse if appropriate\n if self.surround[index] == '0':\n self.stop = True\n self.pmap[self.i, self.j] = 2\n self.message = \"can't move %s \\nstopping...\" % d[direction][1]\n self.create_labels()\n else: #move in specified direction\n if direction == 'N':\n self.i -= 1\n elif direction == 'E':\n self.j += 1\n elif direction == 'W':\n self.j -= 1\n else:\n self.i += 1\n #update color of picobot's location\n self.pmap[self.i, self.j] = 2", "title": "" }, { "docid": "e6aae407e23cae0137d54b1dab0c0939", "score": "0.59175146", "text": "def move_pixel_down(x, y, n):\n L = L4 if n == 4 else L8\n while y <= (n - 2) and L[x][y + 1] == 0:# Si la case est vide\n L[x][y + 1] = L[x][y]\n L[x][y] = 0\n y = y + 1\n if y < (n - 1) and L[x][y + 1] == L[x][y]:\n L[x][y + 1] = L[x][y + 1] + 1\n L[x][y] = 0", "title": "" }, { "docid": "6d0e848cb9ac72f834da9db9afa5033a", "score": "0.5911904", "text": "def random_move(self, tile_size=128, map_width=10, map_height=10, border_offset=[500, 100]):\n\n\n rot_x = 0\n rot_y = 0\n\n if self.dest_complete:\n self.dest_x = randint(0, map_width * tile_size // 2)\n self.dest_y = randint(0, map_height * tile_size // 2)\n self.dest_complete = False\n\n if (self.x, self.y) == (self.dest_x, self.dest_y):\n self.dest_complete = True\n else:\n if self.speed > abs(self.x - self.dest_x):\n self.x = self.dest_x\n elif self.x != self.dest_x:\n if self.x > self.dest_x:\n self.x -= self.speed\n rot_x = -1\n else:\n self.x += self.speed\n rot_x = 1\n if self.speed > abs(self.y - self.dest_y):\n self.y = self.dest_y\n elif self.y != self.dest_y:\n if self.y > self.dest_y:\n self.y -= self.speed\n rot_y = -1\n else:\n self.y += self.speed\n rot_y = 1\n if self.entity_manager.is_collision(self):\n self.x -= self.speed * rot_x\n self.y -= self.speed * rot_y\n\n self.dest_x = randint(0, map_width * tile_size // 2)\n self.dest_y = randint(0, map_height * tile_size // 2)\n else:\n \"\"\"changing texture rotation\"\"\"\n\n if rot_x == rot_y == 1:\n self.rotation = 0\n elif rot_x == rot_y == -1:\n self.rotation = 2\n elif rot_x == 1 and rot_y == -1:\n self.rotation = 1\n elif rot_x == -1 and rot_y == 1:\n self.rotation = 3\n elif rot_x == -1 and rot_y == 0:\n self.rotation = 2\n elif rot_x == 1 and rot_y == 0:\n self.rotation = 0\n elif rot_y == 1 and rot_x == 0:\n self.rotation = 3\n else:\n self.rotation = 1\n\n if self.image and len(self.image) >= self.rotation + 1:\n self.texture = self.image[self.rotation]", "title": "" }, { "docid": "11cead4e6028fe4cc07fdbfd25410493", "score": "0.59039557", "text": "def move(self, direction):\n tiles_moved = 0\n for each_index in self.dir_dict[direction]:\n init_index1 = list(each_index)\n init_index2 = list(each_index)\n temp_list = []\n for next_item in range(self.dir_moves[direction]):\n temp_list.append(self.grid[each_index[0]][each_index[1]])\n init_index1[0] += OFFSETS[direction][0]\n init_index1[1] += OFFSETS[direction][1]\n\n temp_list = merge(temp_list)\n\n for next_item in range(self.dir_moves[direction]):\n if temp_list[next_item] != self.grid[init_index2[0]][init_index2[1]]:\n tiles_moved = 1\n self.grid[init_index2[0]][init_index2[1]] = temp_list[next_item]\n init_index2[0] += OFFSETS[direction][0]\n init_index2[1] += OFFSETS[direction][1]\n\n if tiles_moved == 1:\n self.new_tile()", "title": "" }, { "docid": "ce11603f91af52e13bf9725caf1ebf60", "score": "0.5903891", 
"text": "def __position_tile_left_up(self, target_row, target_col, to_move_tile):\n move_string = \"\"\n# section_string = \"\"\n# to_move_tile = self.current_position(target_row,target_col)\n print \"A1\"\n# section_string = \"\"\n index_i = to_move_tile[0]\n while index_i != target_row:\n index_i += 1\n move_string += \"u\"\n# section_string += \"u\"\n# self.update_puzzle(section_string)\n print self\n\n print \"A2\"\n# section_string = \"\"\n index_i = to_move_tile[1]\n while index_i < target_col:\n index_i += 1\n move_string += \"l\"\n# section_string += \"l\"\n# self.update_puzzle(section_string)\n print self\n\n print \"A3\"\n# section_string = \"\"\n index_i = to_move_tile[1]\n while index_i < target_col-1:\n index_i += 1\n move_string += \"drrul\"\n# section_string += \"drrul\"\n# self.update_puzzle(section_string)\n print self\n\n\n if to_move_tile[1] < target_col:\n print \"A4\"\n move_string += \"dru\"\n# section_string = \"\"\n# section_string += \"dru\"\n# self.update_puzzle(section_string)\n print self\n\n print \"A5\"\n# section_string = \"\"\n index_i = to_move_tile[0]\n while index_i != target_row-1:\n index_i += 1\n move_string += \"lddru\"\n# section_string += \"lddru\"\n# self.update_puzzle(section_string)\n print self\n\n print \"A6\"\n move_string += \"ld\"\n# section_string = \"ld\"\n# self.update_puzzle(section_string)\n print self\n self.update_puzzle(move_string)\n return move_string", "title": "" }, { "docid": "bf5c0dc04cf26936109bf0a7d07c1bf8", "score": "0.5903153", "text": "def go_right(self):\r\n self.change_x = 8\r\n self.direction = 1", "title": "" }, { "docid": "91e3840b2a9de9668fc71d51506e0574", "score": "0.59027994", "text": "def move(self, direction):\n initial_tiles = self._initial_tiles[direction]\n target_direction = OFFSETS[direction]\n if len(initial_tiles) == self._grid_height:\n num_steps = self._grid_width\n else:\n num_steps = self._grid_height\n change = False\n for tile in initial_tiles:\n values = list()\n path = list()\n for step in xrange(num_steps):\n row = tile[0] + step * target_direction[0]\n col = tile[1] + step * target_direction[1]\n values.append(self._grid[row][col])\n path.append((row, col))\n new_line = merge(values)\n for idx in xrange(len(new_line)):\n cell = path[idx]\n self._grid[cell[0]][cell[1]] = new_line[idx]\n if new_line[idx] != values[idx]:\n change = True\n if change:\n self.new_tile()", "title": "" }, { "docid": "b10da0468ae7a15cea805dc8a35a8b2d", "score": "0.5901016", "text": "def try_move(self, tile):\n clicked_position = tile.position\n if self.state == \"playing\":\n x, y = self.blank_position\n if tile.position[0] == x or tile.position[1] == y:\n while self.blank_position != clicked_position:\n if tile.position[0] < x:\n self.move_tile(\"right\")\n elif tile.position[0] > x:\n self.move_tile(\"left\")\n elif tile.position[1] < y:\n self.move_tile(\"down\")\n elif tile.position[1] > y:\n self.move_tile(\"up\")", "title": "" }, { "docid": "ead38cab1ae56308d3f566eae79f8bbd", "score": "0.5897227", "text": "def move_board(board, direction: int, tile_to_add: int, np_rand) -> int:\n total_score_delta, nexttile_possible_rows = _move_board(board, direction)\n\n # Add a new tile in one of the moved rows.\n row_to_add = np_rand.choice(len(nexttile_possible_rows))\n nexttile_possible_rows[row_to_add][-1] = tile_to_add\n\n return total_score_delta", "title": "" }, { "docid": "46ddffaba407928f70a2e6efdd9d3142", "score": "0.5891423", "text": "def move_right(state):\r\n # Performs an object copy. 
Python passes by reference.\r\n new_state = state[:]\r\n index = new_state.index(0)\r\n # Sanity check\r\n if index not in [6, 7, 8]:\r\n # Swap the values.\r\n temp = new_state[index + 3]\r\n new_state[index + 3] = new_state[index]\r\n new_state[index] = temp\r\n return new_state\r\n else:\r\n # Can't move, return None\r\n return None", "title": "" }, { "docid": "3098c20830d9ad79e29cfc952b6e9fea", "score": "0.58827233", "text": "def turn_right(self):\n self.at(at.at_pcmd, True, 0, 0, 0, self.speed)", "title": "" }, { "docid": "9b26b6b595a47ceedcbc5a2bdbee7bba", "score": "0.5880971", "text": "def position_tile(self, target_row, target_col, to_move_tile=(-1, -1)):\n\n if to_move_tile[0]==-1 and to_move_tile[1]==-1:\n to_move_tile = self.current_position(target_row,target_col)\n print \"to_move_tile=\",to_move_tile,\"target_row, target_col =\", target_row, target_col\n\n if to_move_tile[0] < target_row and to_move_tile[0] +1 == target_row and to_move_tile[1] > target_col and to_move_tile[0] >= 1:\n print \"position_tile_right_down\"\n return self.__position_tile_right_down(target_row, target_col, to_move_tile)\n elif to_move_tile[0] < target_row and to_move_tile[1] > target_col:\n print \"position_tile_right_up\"\n return self.__position_tile_right_up(target_row, target_col, to_move_tile)\n elif to_move_tile[0] < target_row and to_move_tile[1] < target_col and to_move_tile[0] +1 == target_row and to_move_tile[0] >= 1:\n print \"position_tile_left_down\"\n return self.__position_tile_left_down(target_row, target_col, to_move_tile)\n elif to_move_tile[0] < target_row and to_move_tile[1] <=target_col :\n print \"position_tile_left_up\"\n return self.__position_tile_left_up(target_row, target_col, to_move_tile)\n else:\n print \"position_tile_same_row\"\n return self.__position_tile_same_row(target_row, target_col, to_move_tile)", "title": "" }, { "docid": "a7d4a5a69e66cbb45cc9a00899ebb1d5", "score": "0.5869197", "text": "def rightUpMove():\r\n\r\n var.ball.x+=5\r\n #increase x 5 pixels to go right\r\n\r\n var.ball.y-=5\r\n #decrease y 5 pixels to go up\r", "title": "" }, { "docid": "7ddc38141a17137619018d12a666f40a", "score": "0.58500814", "text": "def move(self, direction):\n if(direction == UP or direction == DOWN):\n heightOrWidth = self.get_grid_height();\n elif(direction == LEFT or direction == RIGHT):\n heightOrWidth = self.get_grid_width();\n \n newValuesOfLine = [];\n if(direction == UP or direction == DOWN or direction == LEFT or direction == RIGHT):\n for index in range(0,len(self.initialTiles[int(direction)])):\n singleLineValues = [];\n row = 0;\n col = 0;\n for index2 in range(0,heightOrWidth):\n row = self.initialTiles[int(direction)][index][0] + index2 * OFFSETS[int(direction)][0];\n col = self.initialTiles[int(direction)][index][1] + index2 * OFFSETS[int(direction)][1];\n singleLineValues.append(self.get_tile(row,col));\n #print \"row : \",row,\"col \",col;\n print \"singleLineValues\",singleLineValues;\n newValuesOfLine = merge(singleLineValues);\n print \"merged new values : \",newValuesOfLine;\n for index3 in range(len(newValuesOfLine)):\n row = self.initialTiles[int(direction)][index][0] + index3 * OFFSETS[int(direction)][0];\n col = self.initialTiles[int(direction)][index][1] + index3 * OFFSETS[int(direction)][1];\n self.set_tile(row,col,newValuesOfLine[index3]);\n \n self.new_tile();", "title": "" }, { "docid": "86732ac52214f98235fc8aaa8a5b41b2", "score": "0.5849125", "text": "def push_right (grid):\r\n for i in range(4):\r\n row = []\r\n for j in range(4):\r\n 
row.append(grid[i][3-j])\r\n row = merge(row)\r\n for j in range(4):\r\n grid[i][j] = row[3-j]", "title": "" }, { "docid": "e22d1893135f3ad1902bd57625d358f9", "score": "0.58333486", "text": "def move_tiles_up(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on columns\r\n\r\n for _column in range(self.columns):\r\n\r\n # pass 1: fusions\r\n\r\n for _row in range(self.rows - 1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _row2 in range(_row + 1, self.rows):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row2, _column)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _row in range(self.rows):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_empty[0] + 1, _column)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r", "title": "" }, { "docid": "cf92511412746a2538d37d8b31e536b6", "score": "0.5831214", "text": "def go_right(self):\n self.change_x = 6", "title": "" }, { "docid": "cc4a148c2003b08a91bcd463fefa94fe", "score": "0.58302635", "text": "def animate_move_to(self, row, column):\r\n\r\n # FIXME: implement true animation by here?\r\n\r\n _x0, _y0 = self.xy_origin\r\n\r\n _x1, _y1 = self.cell_size.xy_left_top(row, column)\r\n\r\n # move tile on canvas\r\n\r\n self.owner.move(self.tag, (_x1 - _x0), (_y1 - _y0))\r\n\r\n # update data\r\n\r\n self.row, self.column = row, column", "title": "" }, { "docid": "cc4a148c2003b08a91bcd463fefa94fe", "score": "0.58302635", "text": "def animate_move_to(self, row, column):\r\n\r\n # FIXME: implement true animation by here?\r\n\r\n _x0, _y0 = self.xy_origin\r\n\r\n _x1, _y1 = self.cell_size.xy_left_top(row, column)\r\n\r\n # move tile on canvas\r\n\r\n self.owner.move(self.tag, (_x1 - _x0), (_y1 - _y0))\r\n\r\n # update data\r\n\r\n self.row, self.column = row, column", "title": "" }, { "docid": "10725f5b9985fa6928e1808e10ecdec5", "score": "0.58122116", "text": "def _move_right(self) -> list:\n rotated = self._rotate_grid(self._grid, 180)\n rotated = self._slide_left(rotated)\n final = self._rotate_grid(rotated, -180)\n return final if final != self._grid else None", "title": "" }, { "docid": "dc2149a60e1066c54a7e057745850771", "score": "0.58112234", "text": "def move(r):\n\n if r['d'] == 'N':\n r['p']['y'] += 1\n elif r['d'] == 'S':\n r['p']['y'] -= 1\n elif r['d'] == 'E':\n r['p']['x'] += 1\n elif r['d'] == 'W':\n r['p']['x'] -= 1", "title": "" }, { "docid": "83766737743f9967fb3ea3321db2bc1e", "score": "0.58108103", "text": "def move_to_row(self, index, reset_left=True):\n # Reorient the rectangle to be horizontal \n if self.is_vertical():\n self.rotate90()\n # Set the rectangel to the proper position\n self.y = index * self.length\n if reset_left:\n self.x = 0", "title": "" }, { "docid": "1c3cf20f95391a6889dc396e2ace5864", 
"score": "0.5807582", "text": "def turn_right(self, small: bool = False) -> None:\n self.__send('action_turn' + ('_small' if small else ''), 'right')", "title": "" }, { "docid": "dccacd58559e5295d6d1088f4667c75c", "score": "0.57992417", "text": "def move_pixel_left(x, y, n):\n L = L4 if n == 4 else L8\n while x > 0 and L[x - 1][y] == 0:# Si la case est vide \n L[x - 1][y] = L[x][y]\n L[x][y] = 0\n x = x - 1\n if L[x - 1][y] == L[x][y]:\n L[x - 1][y] = L[x - 1][y] + 1\n L[x][y] = 0", "title": "" }, { "docid": "d5254447fb532b616e2b5521db36fc9b", "score": "0.5798656", "text": "def move(self, state, mv):\n tile = self.__validMove(state, mv)\n if not tile:\n raise Exception(\"Invalid Move! [%r]\" % (mv,))\n # neighboring positions\n npos = self.__getNeighbors(state, mv)\n # free the target tile and jokers\n for n in npos:\n if state[n] in [tile, self.__symbols(\"joker\")]:\n state[n] = '.'\n # check if colorswitcher\n if mv in state.getSpecials():\n tiles = self.__symbols(\"colors\")\n # assumes 2 tiles max\n state[mv] = tiles[(tiles.find(tile) + 1) % len(tiles)]\n else:\n state[mv] = tile\n state.addMove(mv)", "title": "" }, { "docid": "94ccd0e9057333dddc5e0c73d8634bea", "score": "0.5776771", "text": "def _rotate_right(self, h):\n x = h.left\n h.left = x.right\n x.right = h\n x.color = h.color\n h.color = self.RED\n x.size = h.size\n h.size = self._size(h.left) + self._size(h.right) + 1\n return x", "title": "" }, { "docid": "2895e201eedfad9cb51f879a86c86f57", "score": "0.5776173", "text": "def move_right(self):\n for row in self.board.board:\n for i in range(len(row) - 1, -1, -1):\n square = row[i]\n if square.value != \"\": # if the square is empty it need not be moved\n\n if i == 2: # if the square is in the third column\n if row[i + 1].value == \"\":\n row[i + 1].value, square.value = square.value, \"\"\n else:\n self.try_merge(square, row[i + 1])\n\n if i == 1: # if the square is in the second column\n if row[i + 1].value == \"\":\n row[i + 1].value, square.value = square.value, \"\"\n\n if row[i + 2].value == \"\":\n row[i + 2].value, row[i + 1].value = row[i + 1].value, \"\"\n else:\n self.try_merge(row[i + 1], row[i + 2])\n else:\n self.try_merge(square, row[i + 1])\n\n if i == 0: # if the square is in the first column\n if row[i + 1].value == \"\":\n row[i + 1].value, square.value = square.value, \"\"\n\n if row[i + 2].value == \"\":\n row[i + 2].value, row[i + 1].value = row[i + 1].value, \"\"\n\n if row[i + 3].value == \"\":\n row[i + 3].value, row[i + 2].value = row[i + 2].value, \"\"\n else:\n self.try_merge(row[i + 2], row[i + 3])\n\n else:\n self.try_merge(row[i + 1], row[i + 2])\n\n else:\n self.try_merge(square, row[i + 1])", "title": "" }, { "docid": "0a0f4772863b8db8b8c0a681adae0b14", "score": "0.5776005", "text": "def flip_tiles(self, tile_list, color):\n for tile in tile_list:\n x = tile[0]\n y = tile[1]\n self.board.tile.change_color(x, y, color)", "title": "" }, { "docid": "fdc1f969a7836cfd2345f061487f336b", "score": "0.5767737", "text": "def right(self):\n self.c = self.c + 1\n self.y = (self.c+4)//5\n self.update_block(chars)", "title": "" }, { "docid": "d78305dd44506fbb901e04f9d7bcb2bb", "score": "0.57643867", "text": "def move():\n global row\n global column\n global direction\n \n if direction == \"R\":\n column = column + 1\n elif direction == \"L\":\n column = column - 1\n elif direction == \"U\":\n row = row - 1\n elif direction == \"D\":\n row = row + 1", "title": "" }, { "docid": "9c1938dce771c569eb6ad419fa3bb981", "score": "0.5759251", "text": "def 
move_tiles_up(self):\r\n\r\n # inits\r\n\r\n _at = self.matrix.get_object_at\r\n\r\n _acted = False\r\n\r\n # loop on columns\r\n\r\n for _column in range(self.columns):\r\n\r\n # pass 1: fusions\r\n\r\n for _row in range(self.rows - 1):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # got a tile?\r\n\r\n if _tile1:\r\n\r\n # get next tile\r\n\r\n for _row2 in range(_row + 1, self.rows):\r\n\r\n # get tile\r\n\r\n _tile2 = _at(_row2, _column)\r\n\r\n # matching values?\r\n\r\n if self.fuse_tiles(_tile1, _tile2):\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n if _tile2: break\r\n\r\n # end for - next tile\r\n\r\n # end if - tile\r\n\r\n # end for - fusions\r\n\r\n # empty location inits\r\n\r\n _empty = None\r\n\r\n # pass 2: scrollings\r\n\r\n for _row in range(self.rows):\r\n\r\n # get tile\r\n\r\n _tile1 = _at(_row, _column)\r\n\r\n # new empty location?\r\n\r\n if not _tile1 and not _empty:\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_row, _column)\r\n\r\n # got to move?\r\n\r\n elif _tile1 and _empty:\r\n\r\n self.move_tile(_tile1, *_empty)\r\n\r\n # empty location is at least here now\r\n\r\n _empty = (_empty[0] + 1, _column)\r\n\r\n # we did something\r\n\r\n _acted = True\r\n\r\n # end if\r\n\r\n # end for - scrollings\r\n\r\n # end for - row\r\n\r\n # pop-up next tile or game over\r\n\r\n self.next_tile(acted=_acted)", "title": "" }, { "docid": "8a1f9ef0e1d0844fffee160f759b9615", "score": "0.57588696", "text": "def move_knight(self, row, col):\n try:\n assert self.moves.length() < Tour.board_size\n except AssertionError as err:\n print (\"list is full\")\n else:\n self.moves.add_last( (row, col) )", "title": "" }, { "docid": "9b5fe8d60a2d0b1577e008502fb576be", "score": "0.5753623", "text": "def add_right_pan(backgrd):\n a, b = backgrd.size\n main_img = Image.new('RGB', size=((a+50)*2, b+50 ))\n main_img.paste(backgrd, (25,25))\n return main_img", "title": "" }, { "docid": "54bda51e59c9d7ff4fa3a01afdeaf132", "score": "0.5738772", "text": "def move_to(self, x, y):\n self.x = x // const.CELL_WIDTH\n self.y = y // const.CELL_HEIGHT", "title": "" }, { "docid": "a1a0865a214b55010b60f5033df305de", "score": "0.573842", "text": "def OnOffsetRight(self,e):\n app.debug_frame.WriteLine(\"Offset Right\")\n offset_x = self.active.frame[self.active.activeimage[0]].direction[self.active.activeimage[1]].image[self.active.activeimage[2]].offset_x\n # Any bounds checks should go here...\n if self.options.offset_fine == 0:\n offset_x += self.active.info.paksize / 2\n else:\n offset_x += 1\n self.active.frame[self.active.activeimage[0]].direction[self.active.activeimage[1]].image[self.active.activeimage[2]].offset_x = offset_x\n self.ActivateProject()\n self.DrawImage()", "title": "" }, { "docid": "ccdc32863a640f8b2ca521ed272800bb", "score": "0.57303", "text": "def move(self):\n throw = random.randint(1, 6)\n self.position += throw\n self.position += self.board.position_adjustment(self.position)", "title": "" }, { "docid": "e0114ef013d49b1ea7b60f27ea25f438", "score": "0.5725016", "text": "def test_move_right(self):\n # make sure we can move right a column\n self.keypad.move_right()\n self.assertTrue(self.keypad.column == 1)\n\n # make sure that we can't go out of bounds from 2,4\n self.keypad.column = 4\n self.keypad.move_right()\n self.assertTrue(self.keypad.column == 4)\n\n # make sure that, when we can't move right, we stay at 2\n self.keypad.column = 3\n self.keypad.row = 1\n self.keypad.move_right()\n self.assertTrue(self.keypad.column == 
3)", "title": "" }, { "docid": "90aff880be7b76150c5c8d088b024aa0", "score": "0.57199466", "text": "def move(self, direction):\n bool_move = False\n\n for dummy_cord in self.initial[direction]:\n line = [dummy_cord]\n # print line\n temp_loc = dummy_cord\n if (direction == UP or direction == DOWN):\n for dummy_add in range(self.height - 1):\n temp_loc = (temp_loc[0] + OFFSETS[direction][0], temp_loc[1] + OFFSETS[direction][1])\n line.append(temp_loc)\n else:\n for dummy_add in range(self.width - 1):\n temp_loc = (temp_loc[0] + OFFSETS[direction][0], temp_loc[1] + OFFSETS[direction][1])\n line.append(temp_loc)\n # print line\n values = []\n for dummy_each in line:\n values.append(self.grid[dummy_each[0]][dummy_each[1]])\n new_values = merge(values)\n if (values != new_values):\n bool_move = True\n\n index = 0\n for dummy_each in line:\n self.grid[dummy_each[0]][dummy_each[1]] = new_values[index]\n index += 1\n # print values\n if (bool_move):\n self.new_tile()", "title": "" }, { "docid": "79765eedf0fde6a11fab17a122108643", "score": "0.57192", "text": "def move(self, direction):\n self.has_change = False\n if direction <=2:\n for i in range(self.grid_width):\n arr = traverse_grid(self.grid,[start_index[direction],i],OFFSETS[direction],self.grid_height)\n self.update_line(arr,[start_index[direction],i],OFFSETS[direction])\n else:\n for i in range(self.grid_width):\n arr = traverse_grid(self.grid,[i,start_index[direction]],OFFSETS[direction],self.grid_height)\n self.update_line(arr,[i,start_index[direction]],OFFSETS[direction])\n if self.has_change:\n self.new_tile()", "title": "" }, { "docid": "d0d0305d5a02e92c7d51bddc9756ba9a", "score": "0.57173723", "text": "def move(t, x, y):\n t.pu()\n t.setpos(x, y)\n t.pd()", "title": "" } ]
1d066022498a74c3b6eadd3778b8f510
A func implementation of
[ { "docid": "f436231d28e816d9acda70fe43627bf9", "score": "0.0", "text": "def new_func(*args, **kwargs):\n new_func.__name__ = 'func_' + func.__name__\n source_code = inspect.getsource(new_func)\n\n new_func.__doc__ = new_func.__doc__.replace('<name>', new_func.__name__)\n new_func.__doc__ = new_func.__doc__.replace('<fixated_args>', str(fixated_args))\n new_func.__doc__ = new_func.__doc__.replace('<fixated_kwargs>', str(fixated_kwargs))\n new_func.__doc__ = new_func.__doc__.replace('<source_code>', source_code)\n\n fr = inspect.currentframe()\n all_func_args = inspect.getargvalues(fr)\n varargs = all_func_args.varargs\n keywords = all_func_args.keywords\n func_args = all_func_args.locals[varargs]\n func_kwargs = all_func_args.locals[keywords]\n\n new_args = list(func_args)\n new_kwargs = func_kwargs\n new_args.extend(fixated_args)\n new_kwargs.update(fixated_kwargs)\n base(*new_args, **new_kwargs)", "title": "" } ]
[ { "docid": "78e33815d207e4aa34d4e52320435483", "score": "0.80307794", "text": "def func():", "title": "" }, { "docid": "e4d7e7c9fc12a76c3bb65941c64fde7f", "score": "0.77042663", "text": "def fn():", "title": "" }, { "docid": "bd8d0afbc87dd05c9fb56bf3f08e2dcf", "score": "0.68322843", "text": "def fptrunc(self, typ):", "title": "" }, { "docid": "93bfc68d3a6f92a6a4c7fb57299a4fd0", "score": "0.67931634", "text": "def function(self):\n raise NotImplementedError", "title": "" }, { "docid": "b51776647a08575c5d7ff27c6377bd00", "score": "0.6763278", "text": "def __call__(fun_name):", "title": "" }, { "docid": "58b7db98db28b3099229a5d3d6e4b4e6", "score": "0.66292554", "text": "def fun(self, x):\n\n raise NotImplementedError", "title": "" }, { "docid": "a15a15f8cd608b7dd519e752daef6adb", "score": "0.662474", "text": "def fn(*args, **kwargs):\n pass", "title": "" }, { "docid": "00e321a981368f1cd767d807c78a59f0", "score": "0.65876484", "text": "def f():", "title": "" }, { "docid": "00e321a981368f1cd767d807c78a59f0", "score": "0.65876484", "text": "def f():", "title": "" }, { "docid": "21906e18756bf4c25dffade8231f9811", "score": "0.6503722", "text": "def func ( self ) :\n return self.__func", "title": "" }, { "docid": "7f878879a11c961e0cf1b408aa60ed92", "score": "0.64297724", "text": "def g():", "title": "" }, { "docid": "08a741f8da573b6290242380ba76e739", "score": "0.6422988", "text": "def func ( self ) :\n return self.__func", "title": "" }, { "docid": "0263f0698aacc48376c741f55aeb0b1e", "score": "0.63891315", "text": "def fun_a(self):\n pass", "title": "" }, { "docid": "ada581c45515d6dabe2252a72fc76494", "score": "0.62631476", "text": "def firstFunction(self):", "title": "" }, { "docid": "eb3b530ab4abe627936f43e80b9b308f", "score": "0.6227012", "text": "def func2():", "title": "" }, { "docid": "606b0bf4ddf9b1e1c8cf3ced2e002faf", "score": "0.6192423", "text": "def somefunc():", "title": "" }, { "docid": "1e30590409967c04550a4137644d3e3c", "score": "0.6153303", "text": "def apply(cls, func):\n raise NotImplementedError", "title": "" }, { "docid": "cc6777565b9da60954279267c8d9a5b5", "score": "0.6144183", "text": "def __call__(value):", "title": "" }, { "docid": "85dbb7cbd77ae5b4ce1c1a4d2e318780", "score": "0.6136125", "text": "def simple():", "title": "" }, { "docid": "85dbb7cbd77ae5b4ce1c1a4d2e318780", "score": "0.6136125", "text": "def simple():", "title": "" }, { "docid": "d431c60ff4d8a66fce8f4148bf63d3fc", "score": "0.6114804", "text": "def apply(self, func, *args):\n pass", "title": "" }, { "docid": "e2ad02d0edcaf6514aec1b68bdffabf2", "score": "0.6107871", "text": "def func1():", "title": "" }, { "docid": "ade1eb15df5ff1191ed768ca9e393084", "score": "0.61059564", "text": "def liftn(cls, func):\n raise NotImplementedError", "title": "" }, { "docid": "1bdfb1ecf039c40bd6dbb5a98ecb079c", "score": "0.6081727", "text": "def dummy_fn(self, *args, **kwargs):", "title": "" }, { "docid": "41aed38eb5f00734121f5e251fbe8fb4", "score": "0.60743254", "text": "def func():\n pass", "title": "" }, { "docid": "8bfd0a85a6611c14ecd3e0653ff67198", "score": "0.6073813", "text": "def my_func(a, b):", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.60275567", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.60275567", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.60275567", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.60275567", 
"text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.60275567", "text": "def __call__():", "title": "" }, { "docid": "03bc8c3583e926265d05df4be267293f", "score": "0.6019222", "text": "def function(self, *args):\n raise NotImplemented", "title": "" }, { "docid": "35ae811da44ebf915cc56d74d00527b2", "score": "0.6014902", "text": "def apply(self):", "title": "" }, { "docid": "2e6f0fc96c3c6a3b7e0cc071e3657864", "score": "0.6007719", "text": "def psi(a):", "title": "" }, { "docid": "a5424fb1495a64ecc56bc980e3ac0d8c", "score": "0.6003841", "text": "def func1(self):\n pass", "title": "" }, { "docid": "ac6245bd47de8148c8a9fc0d6b535e3e", "score": "0.5972585", "text": "def retfun():\r\n return fn(*args, **kwargs)", "title": "" }, { "docid": "3ab65b5f0f658db40f6c538ebcf1ab93", "score": "0.5952772", "text": "def func_wrapper():\n set_interval_sequence(functions[1:] + functions[:1], sec)\n functions[0]()", "title": "" }, { "docid": "f513df3969e78469fb9c34de5035062a", "score": "0.59514254", "text": "def apply(self, func, *args, **kwargs):\n pass", "title": "" }, { "docid": "bcbe8a58439b924335d03ab2b20049ca", "score": "0.5946669", "text": "def f(self,x,*params):\n raise NotImplementedError", "title": "" }, { "docid": "d92c86541cf21d9e7c5296c76f0d8b82", "score": "0.5941911", "text": "def f(x):\n raise NotImplementedError", "title": "" }, { "docid": "f63065bdc5146553399c80b56918d838", "score": "0.59323376", "text": "def __getitem__(self,cle):\n return self.F(*cle)", "title": "" }, { "docid": "1ef1724c332e350d0dbad951a558d034", "score": "0.5921791", "text": "def function(args):\n pass", "title": "" }, { "docid": "126ab95e479241da00ca31ae7216fc31", "score": "0.5921549", "text": "def dummy_fn(self):\n\t\tpass", "title": "" }, { "docid": "89310e026b673d199ee0231ed6f94357", "score": "0.58839756", "text": "def my_function(arg1):\n\n\treturn arg1", "title": "" }, { "docid": "daf9ee45f0a3c1d78fb0984e978d013a", "score": "0.58707124", "text": "def f(a, b):", "title": "" }, { "docid": "7b2a843344314282be40d56164e8ce81", "score": "0.58621067", "text": "def example_function():", "title": "" }, { "docid": "7b12491ca96f509a66194f18e995bffb", "score": "0.586035", "text": "def fA(self):\n pass", "title": "" }, { "docid": "f8b52cc043a9dfd557fc3651f98a60c5", "score": "0.5847364", "text": "def identify(func):\n def identified(arg):\n func(arg)\n return arg\n return identified", "title": "" }, { "docid": "b3cbe03d6d0f64435fbbc4b21ced8f2b", "score": "0.58389074", "text": "def apply(self) -> None:", "title": "" }, { "docid": "b3cbe03d6d0f64435fbbc4b21ced8f2b", "score": "0.58389074", "text": "def apply(self) -> None:", "title": "" }, { "docid": "80710c9251aae0f061e03023583526f0", "score": "0.58329237", "text": "def lift(cls, func):\n raise NotImplementedError", "title": "" }, { "docid": "875c3fe7e0caa932e702a6ab8d21d124", "score": "0.58308995", "text": "def f(self):\n return 1", "title": "" }, { "docid": "a97e1c7d001a72df4ba214941674f05b", "score": "0.581363", "text": "def remove(func):", "title": "" }, { "docid": "27e8a82cc5415423fd355063dba35b76", "score": "0.57925475", "text": "def map(self, function):\n pass", "title": "" }, { "docid": "002354ef996f3e6e0d01ad16970855cb", "score": "0.5772546", "text": "def apply(self, *args: _Data) -> _Data:", "title": "" }, { "docid": "58f595367d781ef9437b46325791b860", "score": "0.5759823", "text": "def f_noarg(self) :\n pass", "title": "" }, { "docid": "5a1c4498275fef6cf0e9d3911c78c648", "score": "0.57464564", "text": "def base():", "title": "" 
}, { "docid": "efe3d6cc93de0ccc73c6ceaed833849f", "score": "0.5742143", "text": "def funcInFunc(func, num = 519):\n \n return func(num)", "title": "" }, { "docid": "80077249ec187088c36439fa79bae198", "score": "0.57419884", "text": "def func(f,c):\n return(f**2+c)", "title": "" }, { "docid": "fdab92540e58180c6696ae14e14c19cf", "score": "0.5730729", "text": "def regular(self):", "title": "" }, { "docid": "ebc8161fbf7ea242074f461454a51542", "score": "0.5730201", "text": "def preprocess_func(cls, func):\n pass", "title": "" }, { "docid": "dd184af5886b065b5d3402d609c077be", "score": "0.57206017", "text": "def func(arg1, arg2):\n\n return arg", "title": "" }, { "docid": "fabfc550723529c0272cec91475d5523", "score": "0.57070714", "text": "def callable(func, message):\n return func, message", "title": "" }, { "docid": "d5d1eb6e237a7ec8ca58c2d8cd62a21d", "score": "0.5697854", "text": "def real(self, ???):", "title": "" }, { "docid": "749906e4bfe1b5874f8b44a0fa5c7f73", "score": "0.5693582", "text": "def __call__(self, f):\n raise NotImplementedError()", "title": "" }, { "docid": "1d61ae86be7f40124c85a33dcab081e0", "score": "0.56691873", "text": "def cpp_function(self):", "title": "" }, { "docid": "3fca44d66230d0dcb2512c10a4be84d7", "score": "0.5664092", "text": "def __call__(self, x):\n pass", "title": "" }, { "docid": "84fd84b8dea540d62a179e1d8413cf02", "score": "0.5658924", "text": "def __call__(self, *arg, **kwargs):\n return self._fun(*arg, **kwargs)", "title": "" }, { "docid": "60116a0a05379b79a3a77b3a8ae859f1", "score": "0.5658681", "text": "def mlift(func):\n return compose(unit, func)", "title": "" }, { "docid": "1e44aae327b64a15608417ad9ff5740e", "score": "0.56540775", "text": "def f(inicio,obj):\n return g(inicio,obj)+h(inicio,obj)", "title": "" }, { "docid": "66d78d67f4a6717642925945e6ecdbbf", "score": "0.56310344", "text": "def funcs(self) -> {str:TFunc}:\n raise NotImplementedError()", "title": "" }, { "docid": "b7ec0e7fe49908fd1fb5946721b084d4", "score": "0.56278604", "text": "def callback(self, fun: Callable[[], None] | None) -> None:", "title": "" }, { "docid": "c9be189a48e6dbf799fc847f48849861", "score": "0.56275576", "text": "def getCallable():", "title": "" }, { "docid": "1e954c5dc5d5acd78157e05c3f674162", "score": "0.56244445", "text": "def __init__(self, func): \n self.func = func", "title": "" }, { "docid": "5cef5b43195130dd2b67d1ef1a42694c", "score": "0.562122", "text": "def apply(self, function, *args, **kwargs):\n pass", "title": "" }, { "docid": "44eb268529fb496b7e0a8e264df06487", "score": "0.5618857", "text": "def __call__ ( self , *x ) :\n return partial ( self.__index ,\n self.func ,\n x ,\n self.step ,\n self.order ,\n self.err )", "title": "" }, { "docid": "d81a6276ca0d34e069e0c066aa06b4ed", "score": "0.56162167", "text": "def func9():\n pass", "title": "" }, { "docid": "e93b4c9fa6f8f30997b6a6b40b1872ab", "score": "0.5613542", "text": "def f_onearg(self, arg1) :\n pass", "title": "" }, { "docid": "18ee310edc66e9f6be8e46e2c879b9b8", "score": "0.5607121", "text": "def my_function():\n\tpass", "title": "" }, { "docid": "8695238d11bf28f5221248de254b0066", "score": "0.56063974", "text": "def __call__(self):\n return self.fn()", "title": "" }, { "docid": "ca3e3097a237b5ee8152c396d31666ef", "score": "0.5604807", "text": "def func_doc():", "title": "" }, { "docid": "0e9a830d0272bb594bad367cd698321d", "score": "0.5601163", "text": "def func(f):\n return func_custom(f.func_name)(f)", "title": "" }, { "docid": "601070a374339944099192201b545e8b", "score": "0.5598898", "text": "def 
wrapper(*args):", "title": "" }, { "docid": "19a00f76a02d22e6a4ad0637be188d49", "score": "0.5597802", "text": "def my_func(a,b,c):\n \n return a + b + c", "title": "" }, { "docid": "878817b8d06f057054d5cfa25768dff8", "score": "0.5596642", "text": "def falcon():", "title": "" }, { "docid": "c4b6a9f49fa7778d36eb47141edde172", "score": "0.55931693", "text": "def fu<the_ref>nc():\n pass", "title": "" }, { "docid": "d2e233d978ac62b0950b869aeea2237d", "score": "0.558741", "text": "def __call__(self):\n # apply(self.func, self.args)\n self.func(*self.args)", "title": "" }, { "docid": "0dc7482d29ba99453cc852a2e16016bb", "score": "0.55810183", "text": "def preprocess_func(cls, func):\n return func", "title": "" }, { "docid": "27981df9bff8cf834e1357d70a9b1388", "score": "0.5576927", "text": "def lin_o_func(self):\n return self.hx", "title": "" }, { "docid": "24f05c092041b7f9d4aefb2e17099e12", "score": "0.5563016", "text": "def call(self, data):\n\t\treturn self.fnc(data)", "title": "" }, { "docid": "187c32a82213bf667642349724e4c59e", "score": "0.5562093", "text": "def call(self):", "title": "" }, { "docid": "7f005a3291f2e965aa65b525f26f3b1e", "score": "0.55592525", "text": "def _wrapper(func, args):\n return func(*args)", "title": "" }, { "docid": "6a3e390b26cec51f43b3e7986ffe1c0b", "score": "0.55524635", "text": "def view_function(self,v):\n return v", "title": "" }, { "docid": "a035d4680914ce1d3e5ba650a89bf51f", "score": "0.5537023", "text": "def A(lr):\n pass", "title": "" }, { "docid": "715b551f83719dedfbacb774d6ff6599", "score": "0.5534904", "text": "def mapper(fun: Callable[[str], Pin], /) -> None:", "title": "" }, { "docid": "938afdaef6d3cae1424d6ff6392b2136", "score": "0.55337137", "text": "def my_function():\n\n\treturn None", "title": "" }, { "docid": "8463d94c5bae5714b8fc923f34091dd7", "score": "0.550929", "text": "def _func(self):\n return self._get_flint_func(self.domain)", "title": "" }, { "docid": "6d1e495c74b9b6d25f4818c83222e5e5", "score": "0.5506867", "text": "def __call__(object):", "title": "" }, { "docid": "14dc24608c139f460782118eec8df3db", "score": "0.5503599", "text": "def __call__(self, x):", "title": "" }, { "docid": "c920f558ae1651968df70f1427cbe135", "score": "0.54999715", "text": "def __call__(self) -> None:", "title": "" }, { "docid": "9d04b1eaa7109bc81615dafea7e5a04d", "score": "0.5495791", "text": "def multichannel(fcn):\n return lambda args: (fcn(*args), )", "title": "" } ]
ce483546bd51b522d94fd650cefc14df
Return the batch indicator/group and file data to be sent to the ThreatConnect API.
[ { "docid": "0390df010960df7d2deeb30283b87181", "score": "0.5622479", "text": "def data(self) -> dict:\n data = {'file': {}, 'group': [], 'indicator': []}\n tracker = {'count': 0, 'bytes': 0}\n\n # process group from memory, returning if max values have been reached\n if self.data_groups(data, self.groups, tracker) is True:\n return data\n\n # process group from shelf file, returning if max values have been reached\n if self.data_groups(data, self.groups_shelf, tracker) is True:\n return data\n\n # process indicator from memory, returning if max values have been reached\n if self.data_indicators(data, self.indicators, tracker) is True:\n return data\n\n # process indicator from shelf file, returning if max values have been reached\n if self.data_indicators(data, self.indicators_shelf, tracker) is True:\n return data\n\n return data", "title": "" } ]
[ { "docid": "ced6a377b7eb95e3bb703e69b7b02b11", "score": "0.60656387", "text": "def get_batch(self):\n raise NotImplementedError", "title": "" }, { "docid": "21a6509f723e3d726cb18ae306a5f136", "score": "0.58504844", "text": "def _process_indicators_batch(self, owner):\n batch_job_body = {\n 'action': self._batch_action,\n 'attributeWriteType': self._batch_write_type,\n 'haltOnError': self._batch_halt_on_error,\n 'owner': owner\n }\n\n halt = False\n for chunk in self._chunk_indicators():\n self._tcex.log.info(u'Batch Chunk Size: {}'.format(len(chunk)))\n\n # new batch resource for each chunk\n resource = self._tcex.resource('Batch')\n resource.http_method = 'POST'\n resource.url = self._tcex.default_args.tc_api_path\n resource.content_type = 'application/json'\n resource.body = json.dumps(batch_job_body)\n results = resource.request()\n\n if results['status'] == 'Success':\n batch_id = results.get('data')\n self._indicator_batch_ids.append(batch_id)\n\n resource.content_type = 'application/octet-stream'\n resource.batch_id(batch_id)\n resource.body = json.dumps(chunk)\n # results = resource.request()\n data_results = resource.request()\n\n if data_results['status'] == 'Success':\n # bcs - add a small delay before first status check then normal delay in loop\n time.sleep(3)\n\n poll_time = 0\n while True:\n self._tcex.log.debug(u'poll_time: {}'.format(poll_time))\n if poll_time >= self._batch_poll_interval_max:\n msg = u'Status check exceeded max poll time.'\n self._tcex.log.error(msg)\n self._tcex.message_tc(msg)\n self._tcex.exit(1)\n\n status = self.batch_status(batch_id)\n if status.get('completed'):\n if status.get('errors'):\n if self._batch_halt_on_error:\n self._tcex.exit_code = 1\n halt = True\n # all indicator in chunk will be not_saved\n self._indicator_results['not_saved'].extend(\n [i.get('summary') for i in chunk])\n break\n else:\n # all indicators were saved minus failed; not_save == failed\n self._indicator_results['not_saved'] = self._indicator_results.get(\n 'failed', [])\n self._indicator_results['saved'].extend(\n [i.get('summary') for i in chunk\n if i.get('summary') not in self._indicator_results.get(\n 'failed', [])])\n self._indicators_response.extend(\n [i for i in chunk\n if i.get('summary') not in self._indicator_results.get(\n 'failed', [])])\n else:\n # all indicators were saved\n self._indicator_results['saved'].extend(\n [i.get('summary') for i in chunk])\n self._indicators_response.extend(chunk)\n break # no need to check status anymore\n\n time.sleep(self._batch_poll_interval)\n poll_time += self._batch_poll_interval\n else:\n self._tcex.log.warning('API request failed ({}).'.format(\n results.get('response').text))\n # TODO: move this and above duplicate code to \"if halt\" below after validating logic\n self._tcex.exit_code = 1\n # all indicator in chunk will be not_saved\n self._indicator_results['not_saved'].extend([i.get('summary') for i in chunk])\n halt = True\n\n if halt:\n self._tcex.log.info(u'Halting on error.')\n break", "title": "" }, { "docid": "e4ca80f03482a99a49512d0fff4d5687", "score": "0.57285285", "text": "def get_batch(self):\n batch = f\"/stock/market/batch?symbols={self.symbols}\" +\\\n \"&types=quote,news,chart&range=1m&last=5\"\n url = self.prefix + batch\n return json.loads(self.get_data(url))", "title": "" }, { "docid": "329cbf5e243fff1a47a3a7fef51fa150", "score": "0.5698765", "text": "def data(self):\n data = {'file': {}, 'group': [], 'indicator': []}\n tracker = {'count': 0, 'bytes': 0}\n\n # process group from memory, returning 
if max values have been reached\n if self.data_groups(data, self.groups, tracker) is True:\n return data\n\n # process group from shelf file, returning if max values have been reached\n if self.data_groups(data, self.groups_shelf, tracker) is True:\n return data\n\n # process indicator from memory, returning if max values have been reached\n if self.data_indicators(data, self.indicators, tracker) is True:\n return data\n\n # process indicator from shelf file, returning if max values have been reached\n if self.data_indicators(data, self.indicators_shelf, tracker) is True:\n return data\n\n return data", "title": "" }, { "docid": "3a6d9465b46ad7f56c0ab344888b7f46", "score": "0.56938726", "text": "def get_batch(self, index):", "title": "" }, { "docid": "ee7839b50bb6089e9bc1f9c08423eb87", "score": "0.5647298", "text": "def return_integratedData():\n return send_from_directory(directory='data/integrated_data',filename=\"graph_dataset.csv\", as_attachment=True)", "title": "" }, { "docid": "8289f62b98af89f664eca1acea149d95", "score": "0.5608169", "text": "def get_batch(self):\n batch = rpc_record_batch([], self.num_records_)\n for time_block in self.batch_:\n data = \"\".join(self.batch_[time_block])\n num_records = len(self.batch_[time_block])\n batch.blocks.append(rpc_record_block(time_block, data, num_records))\n self.clear()\n return batch", "title": "" }, { "docid": "4e32a04151bd073ffcc4eeacac1f4ee1", "score": "0.5510832", "text": "def get_batch(self):\n iter_ = iter(self.data_loader)\n batch = list(next(iter_))\n\n for i, item in enumerate(batch):\n batch[i] = item.asnumpy()\n\n return tuple(batch)", "title": "" }, { "docid": "be9b61396519d2dea373697118c8461f", "score": "0.54530805", "text": "def next_batch(self):\n if not self.file_is_exist:\n print('You need write file before load it.')\n return np.array([]), np.array([])\n if self.batch_count > self.write_count:\n print(\"Warning: batch count is over batch number, auto reset batch count.\")\n self.reset_batch_counter()\n loaded = np.load(os.path.join(self.url, \"data_%d.npz\"%self.batch_count))\n frames = loaded['frames']\n labels = loaded['labels']\n self.batch_count += 1\n return frames, labels", "title": "" }, { "docid": "e3fd8c2d1e8d13e44e20b5937f664d5d", "score": "0.5449724", "text": "def __call__(self, **context) -> str:\n\n headlines_df, key = context[\"task_instance\"].xcom_pull(task_ids=\"extract_data\")\n csv_buffer = StringIO()\n\n return csv_buffer, key, headlines_df", "title": "" }, { "docid": "7cc1ae4a360b54fd247a9bfca6a5ea10", "score": "0.5428535", "text": "def _batch_from_group_batch(self):\n start = self.batch_size * self._group_batch_idx\n end = start + self.batch_size\n return self._group_batch[start:end]", "title": "" }, { "docid": "7580abcd3204fccf3c4f80ce5b662b67", "score": "0.54122806", "text": "def batch_info():\n return BatchInfo(\"Visual AI Suite\")", "title": "" }, { "docid": "bd6f3539c63a94540d61cf1c2720b2d3", "score": "0.5367424", "text": "def process_all(self, process_files: Optional[bool] = True) -> None:\n while True:\n content = self.data\n file_data = content.pop('file', {})\n if not content.get('group') and not content.get('indicator'):\n break\n\n # special code for debugging App using batchV2.\n self.write_batch_json(content)\n\n # store the length of the batch data to use for poll interval calculations\n self.tcex.log.info(\n '''feature=batch, event=process-all, type=group, '''\n f'''count={len(content.get('group')):,}'''\n )\n self.tcex.log.info(\n '''feature=batch, event=process-all, 
type=indicator, '''\n f'''count={len(content.get('indicator')):,}'''\n )\n\n if process_files:\n self.process_files(file_data)", "title": "" }, { "docid": "2c87de349a5630ed399191f57580552a", "score": "0.53652567", "text": "def _get_batch(self):\n batch_data = []\n batch_label = []\n for i in range(self.batch_size):\n if (self._current + i) >= self._size:\n # use padding from middle in each epoch\n idx = (self._current + i + self._size / 2) % self._size\n index = self._index[idx]\n else:\n index = self._index[self._current + i]\n # index = self.debug_index\n im_path = self._imdb[index]\n img = cv2.imread(im_path)\n gt_name = im_path[0:im_path.rfind('.')] + '.pts'\n gt = FFDio.load_gt(gt_name)\n data, label = self._data_augmentation(img, gt, self._aug, self._norm_width, self._norm_width)\n\n # swap channel\n data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)\n\n # normalize\n data = data.astype(np.float32)\n data -= [128, 128, 128]\n # data = data / 128\n label[0, :] = label[0, :] / data.shape[1]\n label[1, :] = label[1, :] / data.shape[0]\n\n label = label.flatten()\n data = data.transpose((2, 0, 1))\n # data = np.swapaxes(data, 0, 2)\n\n\n batch_data.append(data)\n batch_label.append(label)\n # pad data if not fully occupied\n for i in range(self.batch_size - len(batch_data)):\n assert len(batch_data) > 0\n batch_data.append(batch_data[0] * 0)\n self._data = {'data': mx.nd.array(np.array(batch_data))}\n self._label = {'label': mx.nd.array(np.array(batch_label))}", "title": "" }, { "docid": "6c89f0dcf8613ac1a0f055a65b3ec658", "score": "0.5362566", "text": "def batch(self, data, **kw):\n return self._client.send(\n 'post', self.get_path(method='batch'),\n data=data\n )", "title": "" }, { "docid": "8af812ef3580713b1c1393d26818a9e0", "score": "0.53259414", "text": "def process_all(self, process_files: bool = True):\n while True:\n content = self.data\n file_data = content.pop('file', {})\n if not content.get('group') and not content.get('indicator'):\n break\n\n # special code for debugging App using batchV2.\n self.write_batch_json(content)\n\n # store the length of the batch data to use for poll interval calculations\n self.log.info(\n '''feature=batch, event=process-all, type=group, '''\n f'''count={len(content['group']):,}'''\n )\n self.log.info(\n '''feature=batch, event=process-all, type=indicator, '''\n f'''count={len(content['indicator']):,}'''\n )\n\n if process_files:\n self.process_files(file_data)", "title": "" }, { "docid": "757a26affa1b0988ce895ffc63f3471a", "score": "0.53031266", "text": "def get_batch_record(self,fetch_batches=[-1],percentage=True,sum_batches=False):\n # if these results are already stored it is easy to sum multiple batches\n\n if hasattr(fetch_batches,\"__len__\"):\n nfetch = len(fetch_batches)\n else:\n nfetch=1\n fetch_batches = [fetch_batches]\n\n stats = np.zeros([nfetch,5])\n for i,bat in enumerate(fetch_batches):\n indx = 0\n if np.abs(bat)>self.nbatches+1:\n print('Error: cannot get performance stats for batch ',bat,'.. 
[ max =',self.nbatches,']')\n return stats\n elif bat<0: indx = self.nbatches+bat\n else: indx = bat\n\n stats[i,:] = self.batch_record[indx,:]\n\n if sum_batches:\n stats = np.sum(stats,axis=0)\n\n tot,won,lost,drew,step = [],[],[],[],[]\n if percentage:\n if sum_batches:\n stats[1:]*=100.0/stats[0]\n tot,won,lost,drew,step = stats\n else:\n for j in range(nfetch):\n stats[j,1:]*=100.0/stats[j,0]\n tot = stats[:,0]\n won = stats[:,1]\n lost = stats[:,2]\n drew = stats[:,3]\n step = stats[:,4]\n\n return tot,won,lost,drew,step", "title": "" }, { "docid": "9b9622c4f85d56dddbf31ba3c93d31cf", "score": "0.52989423", "text": "def _get_batch(self):\n batch_data = []\n batch_label = []\n for i in range(self.batch_size):\n if (self._current + i) >= self._size:\n if not self.is_train:\n continue\n # use padding from middle in each epoch\n idx = (self._current + i + self._size / 2) % self._size\n index = self._index[idx]\n else:\n index = self._index[self._current + i]\n # index = self.debug_index\n im_path = self._imdb.image_path_from_index(index)\n img = cv2.imread(im_path)\n gt = self._imdb.label_from_index(index).copy() if self.is_train else None\n data, label = self._data_augmentation(img, gt)\n batch_data.append(data)\n if self.is_train:\n batch_label.append(label)\n # pad data if not fully occupied\n for i in range(self.batch_size - len(batch_data)):\n assert len(batch_data) > 0\n batch_data.append(batch_data[0] * 0)\n self._data = {'data': mx.nd.array(np.array(batch_data))}\n if self.is_train:\n self._label = {'label': mx.nd.array(np.array(batch_label))}\n else:\n self._label = {'label': None}", "title": "" }, { "docid": "e6c4d413f56e06b5c17cbe998e5f7e97", "score": "0.52958834", "text": "def __data_generation(self):\n [X, y] = self.batch_gen.get_batch_list()\n\n return X, y", "title": "" }, { "docid": "fab7e59f0deefc675d8eb8a3ed5649f1", "score": "0.5291742", "text": "def _record_dataset_information(self):\n data_info = collections.OrderedDict()\n data_info['header'] = ['Split', 'Dataset Type', 'Number of Clients']\n\n num_train_clients = len(self._train_data.client_ids)\n data_info['train'] = ['Train', 'Federated', num_train_clients]\n\n if isinstance(self._test_data, client_data.ClientData):\n test_type = 'Federated'\n num_test_clients = len(self._test_data.client_ids)\n else:\n test_type = 'Centralized'\n num_test_clients = 'N/A'\n data_info['test'] = ['Test', test_type, num_test_clients]\n\n if self._validation_data is not None:\n if isinstance(self._validation_data, client_data.ClientData):\n validation_type = 'Federated'\n num_validation_clients = len(self._validation_data.client_ids)\n else:\n validation_type = 'Centralized'\n num_validation_clients = 'N/A'\n data_info['validation'] = [\n 'Validation',\n validation_type,\n num_validation_clients,\n ]\n\n return data_info", "title": "" }, { "docid": "89338a162acf0f4845a86239603283a4", "score": "0.52721596", "text": "def __data_generation( self ):\n\n #Input array\n X = np.zeros( ( self.batch_size, self.dim[0], self.dim[1], 1) , dtype=np.float32)\n\n #Output arrays (NB: dimensions are hardcoded because part of the model )\n EmTrkNone = np.zeros((self.batch_size, 3), dtype=np.int32)\n Michel = np.zeros((self.batch_size, 1), dtype=np.int32)\n\n for i in range( 0, self.batch_size ):\n\n #get random numbers and read all the files associated to it\n view, num, id = self.__get_random()\n\n fname = \"db_view_%d_%d.hdf5\" % (view, num)\n\n db = h5py.File( self.path+'/'+self.dirname+'/'+fname , 'r')\n input_dataset_name = 'data/data_%d' % id\n 
label_dataset_name = 'labels/label_%d' % id\n\n #inport input data\n dataX = db.get( input_dataset_name )\n X[i] = np.asarray( dataX ).reshape( self.dim[0], self.dim[1], 1 )\n\n #inport output label\n dataY = db.get( label_dataset_name )\n EmTrkNone[i] = [dataY[0], dataY[1], dataY[3]]\n Michel[i] = [dataY[2]]\n\n db.close()\n\n #TODO: data augmentation?\n\n return {'main_input': X}, {'em_trk_none_netout': EmTrkNone, 'michel_netout': Michel}", "title": "" }, { "docid": "f1e395511a4a6e0249a76bb6659cc6f1", "score": "0.52691114", "text": "def __collect_ct_info(self) -> list:\n num_chunks = (self.__num_trials_in_graphstore // 10000) + 1\n data_generator = self.__collect_ct_info_from_graphstore()\n\n to_import = []\n for data_chunk in tqdm(data_generator, total=num_chunks, desc=\"Collecting all CT data\"):\n if data_chunk:\n for r in data_chunk:\n # Parse graphstore data\n status = r['overall_status']\n trial_id = r['trial_id']\n primary_condition = r['condition'] if r['condition'] is not None else []\n mesh = r['mesh_conditions'] if r['mesh_conditions'] is not None else []\n conditions = \";\".join(set(primary_condition + mesh))\n trial_drugs = r['drugs_in_trial'] if 'drugs_in_trial' in r else None\n\n # Structure data\n metadata = {'trial_id': trial_id, 'trial_status': status,\n 'conditions': conditions, 'drugs_in_trial': trial_drugs}\n\n to_import.append(metadata)\n\n return to_import", "title": "" }, { "docid": "55026a80984a3b7ae4bc6159b411d447", "score": "0.5260068", "text": "def get_batch(self):\n iter_ = iter(self.data_loader)\n batch = list(next(iter_))\n\n for i, item in enumerate(batch):\n batch[i] = item.data.cpu().numpy()\n\n return tuple(batch)", "title": "" }, { "docid": "d1dd00ae5f06afa11cc1110e5eefdbe0", "score": "0.5259154", "text": "def get_batchOHLC(self):\n batchOHLC = \"/stock/market/ohlc\"\n url = self.prefix + batchOHLC\n return json.loads(self.get_data(url))", "title": "" }, { "docid": "7fd52a7e88b17da67eb4f5da993544fc", "score": "0.5241918", "text": "def get_single_batch(self):\n self.invalid_args()\n batch = f\"/stock/{self.symbols}\" +\\\n \"/batch?types=quote,news,chart&range=1m&last=1\"\n url = self.prefix + batch\n return json.loads(self.get_data(url))", "title": "" }, { "docid": "84f365036c33c602cd5e007cbf6d39fc", "score": "0.52354556", "text": "def setup_batch(request):\r\n data = {}\r\n signer = get_signer()\r\n expected_trxn_ids = []\r\n expected_batch_ids = []\r\n initial_state_length = len(get_state_list())\r\n\r\n LOGGER.info(\"Creating intkey transactions with set operations\")\r\n \r\n txns = [\r\n create_intkey_transaction(\"set\", 'a', 0, [], signer),\r\n ]\r\n\r\n for txn in txns:\r\n data = MessageToDict(\r\n txn,\r\n including_default_value_fields=True,\r\n preserving_proto_field_name=True)\r\n\r\n trxn_id = data['header_signature']\r\n expected_trxn_ids.append(trxn_id)\r\n \r\n data['expected_trxn_ids'] = expected_trxn_ids\r\n\r\n LOGGER.info(\"Creating batches for transactions 1trn/batch\")\r\n\r\n batches = [create_batch([txn], signer) for txn in txns]\r\n\r\n for batch in batches:\r\n data = MessageToDict(\r\n batch,\r\n including_default_value_fields=True,\r\n preserving_proto_field_name=True)\r\n\r\n batch_id = data['header_signature']\r\n expected_batch_ids.append(batch_id)\r\n \r\n data['expected_batch_ids'] = expected_batch_ids\r\n data['signer_key'] = signer.get_public_key().as_hex()\r\n\r\n post_batch_list = [BatchList(batches=[batch]).SerializeToString() for batch in batches]\r\n \r\n LOGGER.info(\"Submitting batches to the handlers\")\r\n 
\r\n for batch in post_batch_list:\r\n try:\r\n response = post_batch(batch)\r\n except urllib.error.HTTPError as error:\r\n LOGGER.info(\"Rest Api is not reachable\")\r\n data = json.loads(error.fp.read().decode('utf-8'))\r\n LOGGER.info(data['error']['title'])\r\n LOGGER.info(data['error']['message'])\r\n \r\n block_list = get_blocks()\r\n data['block_list'] = block_list\r\n block_ids = [block['header_signature'] for block in block_list]\r\n data['block_ids'] = block_ids\r\n batch_ids = [block['header']['batch_ids'][0] for block in block_list]\r\n data['batch_ids'] = batch_ids\r\n expected_head_id = block_ids[0]\r\n data['expected_head_id'] = expected_head_id\r\n yield data", "title": "" }, { "docid": "8e78dc8aef4b6d41eb6ee8e7eb98370d", "score": "0.5218347", "text": "def create_data(batch):\n src_ids = to_variable(batch[0], \"src_ids\")\n position_ids = to_variable(batch[1], \"position_ids\")\n sentence_ids = to_variable(batch[2], \"sentence_ids\")\n input_mask = to_variable(batch[3], \"input_mask\")\n mask_label = to_variable(batch[4], \"mask_label\")\n mask_pos = to_variable(batch[5], \"mask_pos\")\n labels = to_variable(batch[6], \"labels\")\n labels.stop_gradient = True\n return src_ids, position_ids, sentence_ids, input_mask, mask_label, mask_pos, labels", "title": "" }, { "docid": "2c19bac3f506aa9eb9f2fa4626c521d2", "score": "0.5213639", "text": "def __get_one_batch(self):\n batch_size = self.config['batch_size']\n data_shape = self.config['data_shape']\n label_shape = self.config['label_shape']\n down_sample_rate = self.config.get('down_sample_rate', 1.0)\n data_slice_number = data_shape[0]\n label_slice_number = label_shape[0]\n batch_sample_model = self.config.get('batch_sample_model', ('full', 'valid', 'valid'))\n batch_slice_direction= self.config.get('batch_slice_direction', 'axial') # axial, sagittal, coronal or random\n train_with_roi_patch = self.config.get('train_with_roi_patch', False)\n keep_roi_outside = self.config.get('keep_roi_outside', False)\n if(train_with_roi_patch):\n label_roi_mask = self.config['label_roi_mask']\n roi_patch_margin = self.config['roi_patch_margin']\n\n # return batch size: [batch_size, slice_num, slice_h, slice_w, moda_chnl]\n data_batch = []\n weight_batch = []\n label_batch = []\n #original\n orig_label_batch = []\n slice_direction = batch_slice_direction\n if(slice_direction == 'random'):\n directions = ['axial', 'sagittal', 'coronal']\n idx = random.randint(0,2)\n slice_direction = directions[idx]\n for i in range(batch_size):\n if(self.with_flip):\n flip = random.random() > 0.5\n else:\n flip = False\n self.patient_id = random.randint(0, len(self.data)-1)\n data_volumes = [x for x in self.data[self.patient_id]]\n weight_volumes = [self.weight[self.patient_id]]\n boundingbox = None\n if(self.with_ground_truth):\n label_volumes = [self.label[self.patient_id]]\n #original\n orig_label_volumes = [self.label[self.patient_id]]\n if(train_with_roi_patch):\n mask_volume = np.zeros_like(label_volumes[0])\n for mask_label in label_roi_mask:\n mask_volume = mask_volume + (label_volumes[0] == mask_label)\n [d_idxes, h_idxes, w_idxes] = np.nonzero(mask_volume)\n [D, H, W] = label_volumes[0].shape\n mind = max(d_idxes.min() - roi_patch_margin, 0)\n maxd = min(d_idxes.max() + roi_patch_margin, D)\n minh = max(h_idxes.min() - roi_patch_margin, 0)\n maxh = min(h_idxes.max() + roi_patch_margin, H)\n minw = max(w_idxes.min() - roi_patch_margin, 0)\n maxw = min(w_idxes.max() + roi_patch_margin, W)\n if(keep_roi_outside):\n boundingbox = [mind, maxd, minh, 
maxh, minw, maxw]\n else:\n for idx in range(len(data_volumes)):\n data_volumes[idx] = data_volumes[idx][np.ix_(range(mind, maxd), \n range(minh, maxh), \n range(minw, maxw))]\n for idx in range(len(weight_volumes)):\n weight_volumes[idx] = weight_volumes[idx][np.ix_(range(mind, maxd), \n range(minh, maxh), \n range(minw, maxw))]\n for idx in range(len(label_volumes)):\n label_volumes[idx] = label_volumes[idx][np.ix_(range(mind, maxd), \n range(minh, maxh), \n range(minw, maxw))]\n\n if(self.label_convert_source and self.label_convert_target):\n label_volumes[0] = convert_label(label_volumes[0], self.label_convert_source, self.label_convert_target)\n \n transposed_volumes = transpose_volumes(data_volumes, slice_direction)\n volume_shape = transposed_volumes[0].shape\n sub_data_shape = [data_slice_number, data_shape[1], data_shape[2]]\n sub_label_shape =[label_slice_number, label_shape[1], label_shape[2]]\n\n center_point = get_random_roi_sampling_center(volume_shape, sub_label_shape, batch_sample_model, boundingbox)\n sub_data = []\n for moda in range(len(transposed_volumes)):\n sub_data_moda = extract_roi_from_volume(transposed_volumes[moda],center_point,sub_data_shape)\n if(flip):\n sub_data_moda = np.flip(sub_data_moda, -1)\n if(down_sample_rate != 1.0):\n sub_data_moda = ndimage.interpolation.zoom(sub_data_moda, 1.0/down_sample_rate, order = 1) \n sub_data.append(sub_data_moda)\n sub_data = np.asarray(sub_data)\n data_batch.append(sub_data)\n transposed_weight = transpose_volumes(weight_volumes, slice_direction)\n sub_weight = extract_roi_from_volume(transposed_weight[0],\n center_point,\n sub_label_shape,\n fill = 'zero')\n \n if(flip):\n sub_weight = np.flip(sub_weight, -1)\n if(down_sample_rate != 1.0):\n sub_weight = ndimage.interpolation.zoom(sub_weight, 1.0/down_sample_rate, order = 1) \n weight_batch.append([sub_weight])\n if(self.with_ground_truth):\n tranposed_label = transpose_volumes(label_volumes, slice_direction)\n sub_label = extract_roi_from_volume(tranposed_label[0],\n center_point,\n sub_label_shape,\n fill = 'zero')\n if(flip):\n sub_label = np.flip(sub_label, -1)\n if(down_sample_rate != 1.0):\n sub_label = ndimage.interpolation.zoom(sub_label, 1.0/down_sample_rate, order = 0) \n label_batch.append([sub_label])\n\n #original\n orig_tranposed_label = transpose_volumes(orig_label_volumes, slice_direction)\n orig_sub_label = extract_roi_from_volume(orig_tranposed_label[0],\n center_point,\n sub_label_shape,\n fill = 'zero')\n if(flip):\n orig_sub_label = np.flip(orig_sub_label, -1)\n if(down_sample_rate != 1.0):\n orig_sub_label = ndimage.interpolation.zoom(orig_sub_label, 1.0/down_sample_rate, order = 0) \n orig_label_batch.append([orig_sub_label])\n\n\n \n data_batch = np.asarray(data_batch, np.float32)\n weight_batch = np.asarray(weight_batch, np.float32)\n label_batch = np.asarray(label_batch, np.int64)\n #original\n orig_label_batch = np.asarray(orig_label_batch, np.int64)\n batch = {}\n batch['images'] = np.transpose(data_batch, [0, 2, 3, 4, 1])\n batch['weights'] = np.transpose(weight_batch, [0, 2, 3, 4, 1])\n batch['labels'] = np.transpose(label_batch, [0, 2, 3, 4, 1])\n #original\n batch['orig_labels'] = np.transpose(orig_label_batch, [0, 2, 3, 4, 1])\n \n return batch", "title": "" }, { "docid": "f519bc1ef932e3d5721272bde06db688", "score": "0.51871634", "text": "def data(self):\n data = {\n 'status': self.status,\n 'begin_time': self.begin_time,\n 'real_times': self.real_times,\n 'cpu_times': self.cpu_times,\n 'error_info': self.error_info,\n 'agg': 
self.agg,\n 'scores': self.scores,\n 'scoring': self.task.scoring_name(),\n 'validator': self.task.validator_name(),\n 'params': dict(**self.task.params),\n 'data': self.task.data_id,\n 'njobs': self.njobs,\n }\n return data", "title": "" }, { "docid": "7c2e9d7af30c52a7477516512b9559ad", "score": "0.51663995", "text": "def _get_batch_data(self, batch_indices):\n X = self.data[0][batch_indices]\n Y = self.data[1][batch_indices]\n X = self._pad_data(X, self.seq_len)\n Y = self._pad_data(Y, self.seq_len)\n # transpose the dimension from (batch_size, timestep) to (timestep, batch_size) for feeding into model\n return X.transpose([1, 0]), Y.transpose([1, 0])", "title": "" }, { "docid": "f9c98a7e62d38941dd0691e3663e17b5", "score": "0.5155749", "text": "def extData(self):\n if self.status_type is None:\n return bytearray()\n\n writer = Writer()\n writer.add(self.status_type, 1)\n writer2 = Writer()\n for i in self.responder_id_list:\n writer2.add(len(i), 2)\n writer2.bytes += i\n writer.add(len(writer2.bytes), 2)\n writer.bytes += writer2.bytes\n writer.add(len(self.request_extensions), 2)\n writer.bytes += self.request_extensions\n\n return writer.bytes", "title": "" }, { "docid": "f6dfe69c6221719ad41c5150881eec6b", "score": "0.51550317", "text": "def get_data_batch(self, which_set):\n if which_set == 'train':\n x, y = self.data_generator_train(self.chunk_size)\n elif which_set == 'valid':\n x, y = self.data_generator_valid(self.chunk_size)\n else:\n x, y = self.data_generator_test(self.chunk_size)\n\n if self.filter_method == 'A':\n x = x.astype('float32')\n y = numpy.argmax(y, axis=1).astype('int32')\n else:\n raise NotImplementedError()\n return x, y", "title": "" }, { "docid": "485e2ab2c3d3dd099f01f1cdb7c112e0", "score": "0.5139624", "text": "def submit_data(\n self, batch_id: int, content: dict, halt_on_error: Optional[bool] = True\n ) -> dict:\n # check global setting for override\n if self.halt_on_batch_error is not None:\n halt_on_error = self.halt_on_batch_error\n\n # store the length of the batch data to use for poll interval calculations\n self._batch_data_count = len(content.get('group')) + len(content.get('indicator'))\n self.tcex.log.info(\n f'feature=batch, action=submit-data, batch-size={self._batch_data_count:,}'\n )\n\n headers = {'Content-Type': 'application/octet-stream'}\n try:\n r = self.tcex.session.post(f'/v2/batch/{batch_id}', headers=headers, json=content)\n if not r.ok or 'application/json' not in r.headers.get('content-type', ''):\n self.tcex.handle_error(10525, [r.status_code, r.text], halt_on_error)\n return r.json()\n except Exception as e:\n self.tcex.handle_error(10520, [e], halt_on_error)\n\n return None", "title": "" }, { "docid": "6dea9e7d33247ab19f077c0752da7eff", "score": "0.51349604", "text": "def LOAD_DATA(path, batch_num):\r\n def unpickle(file):\r\n import pickle\r\n with open(file, 'rb') as fo:\r\n dict = pickle.load(fo, encoding='bytes')\r\n return dict\r\n if batch_num == 1:\r\n N = \"data_batch_1\"\r\n elif batch_num == 2:\r\n N = \"data_batch_2\"\r\n elif batch_num == 3:\r\n N = \"data_batch_3\"\r\n elif batch_num == 4:\r\n N = \"data_batch_4\"\r\n elif batch_num == 5:\r\n N = \"data_batch_5\"\r\n elif batch_num == 0:\r\n N = \"test_batch\"\r\n \r\n\r\n batch_path = os.path.join(path,N)\r\n\r\n # Data_loaded = unpickle(file=r\"C:\\Users\\Ahmed Husnain Johar\\Downloads\\Compressed\\New folder (2)\\cifar-10-batches-py\\data_batch_1\")\r\n\r\n Data_loaded = unpickle(file=batch_path)\r\n\r\n\r\n\r\n KEYS = []\r\n for i in Data_loaded:\r\n 
KEYS.append(i)\r\n\r\n LABELS = Data_loaded[KEYS[1]]\r\n IMAGES = Data_loaded[KEYS[2]]\r\n IMAGE_NAMES = Data_loaded[KEYS[3]]\r\n\r\n\r\n\r\n return LABELS, IMAGES, IMAGE_NAMES", "title": "" }, { "docid": "4a02a3c4a02892a4dcc769731bb7c4c8", "score": "0.5113509", "text": "def __getitem__(self, index):\n # get the indexs of each batch\n batch_indexs = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n # using batch_indexs to get path of current batch\n batch_path = [self.X_path[k] for k in batch_indexs]\n # get batch data\n batch_x, batch_y = self.data_generation(batch_path)\n return batch_x, batch_y", "title": "" }, { "docid": "d6c353e1aee0028075f3034a35414318", "score": "0.5099706", "text": "def get_chunks():\n Log.debug(\"Received request: /get-chunk, threads: {}\".format(get_tids()))\n global chain_id\n # data = json.loads(request.data)\n chunk = load_chunk()\n\n if chunk is None:\n Log.debug(\"Returning data: Failed, threads: {}\".format(get_tids()))\n return '{\"status\": \"fail\"}'\n else:\n Log.debug(\"Returning data: Succeeded, threads: {}\".format(get_tids()))\n return json.dumps(chunk)", "title": "" }, { "docid": "fb891e2cca3365300478c772ae25425c", "score": "0.50965077", "text": "def getIntegrationDataset():\n dataset = request.args.getlist('data[]')\n data_integration.addData(dataset)\n return jsonify(result = data_integration.getDataset())", "title": "" }, { "docid": "2f5bd64396aee94d566526155742e7aa", "score": "0.5095739", "text": "def view_batch(request, bh_name):\n batch_name = bh_name\n batch_name = Batch.objects.get(name=batch_name)\n images = ImageBank.objects.filter(batch=batch_name)\n for image in images:\n #img_row = ImageBank.objects.get(request_id=image.request_id)\n #image.URL = \"data/\" + str(request.user.id) + \"/\" + bh_name + \"/\" + \"output/\" + image.file_name\n image.sample_details = json.loads(image.sample_details)\n # list_of_images = []\n # file_path = os.path.join('static/data', str(request.user.id), bh_name, \"output\")\n # for root, dirs, files in os.walk(file_path):\n # for file in files:\n # if file.endswith('.jpeg') or file.endswith('.JPG') or file.endswith('.jpg') or file.endswith('.png'):\n # dir_string = \"data/\" + str(request.user.id) + \"/\" + bh_name + \"/\" + \"output/\" + file\n # list_of_images.append(dir_string)\n\n return render(request, 'batch/batchView.html', {'images': images, 'batch_name': batch_name})", "title": "" }, { "docid": "23add1932ca4411aed4fbc0b7b0d8797", "score": "0.5082774", "text": "def submit_all(\n self,\n poll: Optional[bool] = True,\n errors: Optional[bool] = True,\n process_files: Optional[bool] = True,\n halt_on_error: Optional[bool] = True,\n ) -> dict:\n batch_data_array = []\n file_data = {}\n while True:\n batch_data = {}\n batch_id = None\n\n # get file, group, and indicator data\n content = self.data\n\n # break loop when end of data is reached\n if not content.get('group') and not content.get('indicator'):\n break\n\n if self.action.lower() == 'delete':\n # no need to process files on a delete batch job\n process_files = False\n\n # while waiting of FR for delete support in createAndUpload submit delete request\n # the old way (submit job + submit data), still using V2.\n if len(content) > 0: # pylint: disable=len-as-condition\n batch_id = self.submit_job(halt_on_error)\n if batch_id is not None:\n batch_data = self.submit_data(\n batch_id=batch_id, content=content, halt_on_error=halt_on_error\n )\n else:\n batch_data = {}\n else:\n # pop any file content to pass to submit_files\n file_data = 
content.pop('file', {})\n batch_data = (\n self.submit_create_and_upload(content=content, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus', {})\n )\n batch_id = batch_data.get('id')\n\n if batch_id is not None:\n self.tcex.log.info(f'feature=batch, event=status, batch-id={batch_id}')\n # job hit queue\n if poll:\n # poll for status\n batch_data = (\n self.poll(batch_id, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus')\n )\n if errors:\n # retrieve errors\n error_count = batch_data.get('errorCount', 0)\n error_groups = batch_data.get('errorGroupCount', 0)\n error_indicators = batch_data.get('errorIndicatorCount', 0)\n if error_count > 0 or error_groups > 0 or error_indicators > 0:\n batch_data['errors'] = self.errors(batch_id)\n else:\n # can't process files if status is unknown (polling must be enabled)\n process_files = False\n\n if process_files:\n # submit file data after batch job is complete\n self._file_threads.append(\n self.submit_thread(\n name='submit-files',\n target=self.submit_files,\n args=(file_data, halt_on_error,),\n )\n )\n batch_data_array.append(batch_data)\n\n # write errors for debugging\n self.write_error_json(batch_data.get('errors'))\n\n return batch_data_array", "title": "" }, { "docid": "9850dde44601884a81c150231f8c3cf0", "score": "0.50759274", "text": "def next_batch(self):\n if (self.batch_counter+1) * self.batch_size <= len(self.raw_frames):\n batch_frames = self.raw_frames[self.batch_counter * self.batch_size:\n (self.batch_counter+1) * self.batch_size]\n batch_labels = self.raw_labels[self.batch_counter * self.batch_size:\n (self.batch_counter+1) * self.batch_size]\n self.batch_counter += 1\n return batch_frames, batch_labels\n else:\n return self.raw_frames[self.batch_counter * self.batch_size:], \\\n self.raw_labels[self.batch_counter * self.batch_size:]", "title": "" }, { "docid": "9130f0e7607abe1772ed6338336abd8c", "score": "0.5069145", "text": "def run(self) -> None:\n self.batch: object = self.tcex.batch(self.args.tc_owner)\n\n # using tcex requests to get built-in features (e.g., proxy, logging, retries)\n with self.tcex.session_external as s:\n r: object = s.get(self.url)\n\n if r.ok:\n decoded_content: str = r.content.decode('utf-8').splitlines()\n\n reader: object = csv.reader(decoded_content, delimiter=',', quotechar='\"')\n for row in reader:\n # CSV headers\n # Firstseen,MD5hash,Malware\n\n # skip comments\n if row[0].startswith('#'):\n continue\n\n # create batch entry\n file_hash: object = self.batch.file(row[1], rating='4.0', confidence='100')\n file_hash.tag(row[2])\n occurrence: object = file_hash.occurrence()\n occurrence.date = row[0]\n self.batch.save(file_hash) # optionally save object to disk\n else:\n self.tcex.exit(1, 'Failed to download CSV data.')\n\n # submit batch job(s)\n batch_status: list = self.batch.submit_all()\n self.tcex.log.debug(batch_status)\n\n # self.exit_message = f'Downloaded and created {self.batch.indicator_len} file hashes.'", "title": "" }, { "docid": "478ac4400542cc4a003bcb09e398a88d", "score": "0.5069071", "text": "def _batch_generater(self):\n pass", "title": "" }, { "docid": "2968810c66724a01dd17f0c9182b0e17", "score": "0.50608885", "text": "def submit_2_dataset(label) -> Response:\n check_filesystem()\n\n data = np.fromstring(request.data, np.uint8)\n\n processed_image = preprocess(data)\n\n append_to_dataset(processed_image, label)\n\n response = jsonpickle.encode({'message': 'image received. 
size={}x{}, label={}'.format(processed_image.shape[1], processed_image.shape[0], label)})\n\n\n return Response(response=response, status=200, mimetype=\"application/json\")", "title": "" }, { "docid": "a797ff0fd437e25a010f8dccf53fbee5", "score": "0.5055904", "text": "def nfb_export_data(self) -> dict:\n data = {}\n\n # General ------------------------------------------------------------------------------------------------------\n data[\"sExperimentName\"] = self.name\n data[\"sStreamName\"] = self.lsl_stream_name\n data[\"sPrefilterBand\"] = str(self.prefilter_band[0]) + \" \" + str(self.prefilter_band[1])\n data[\"bDC\"] = self.dc\n data[\"sInletType\"] = self.inlet\n data[\"sRawDataFilePath\"] = self.raw_data_path\n data[\"sFTHostnamePort\"] = self.hostname_port\n data[\"bPlotRaw\"] = self.plot_raw\n data[\"bPlotSignals\"] = self.plot_signals\n data[\"bPlotSourceSpace\"] = 0\n data[\"fRewardPeriodS\"] = self.reward_refractory_period\n data[\"sReference\"] = self.discard_channels\n data[\"sReferenceSub\"] = self.reference_sub\n data[\"bUseExpyriment\"] = 0\n data[\"bShowPhotoRectangle\"] = self.show_photo_rectangle\n data[\"sVizNotchFilters\"] = self.show_notch_filters\n\n # Blocks -------------------------------------------------------------------------------------------------------\n data[\"vProtocols\"] = {\n \"FeedbackProtocol\": []\n }\n\n for name in self.blocks:\n block = self.blocks[name]\n\n data[\"vProtocols\"][\"FeedbackProtocol\"].append(block.nfb_export_data()) # Add other information\n data[\"vProtocols\"][\"FeedbackProtocol\"][-1][\"sProtocolName\"] = name # Add name\n\n # Groups -------------------------------------------------------------------------------------------------------\n data[\"vPGroups\"] = {\n \"PGroup\": []\n }\n\n for name in self.groups:\n group = self.groups[name]\n\n data[\"vPGroups\"][\"PGroup\"].append(group.nfb_export_data()) # Add other information\n data[\"vPGroups\"][\"PGroup\"][-1][\"sName\"] = name # Add name\n \n if len(self.groups) == 0:\n # Append a null group as a nfb bug workaround\n data[\"vPGroups\"][\"PGroup\"].append(None)\n\n # Derived Signals ----------------------------------------------------------------------------------------------\n signals = []\n\n # Build a list of lists of nodes (e.g. 
list of sequences)\n for node in self.signal_scheme.graph.nodes:\n if isinstance(node, DerivedSignalExport):\n signal = []\n n = node\n\n while True:\n signal.append(n)\n\n if len(n.inputs) == 0:\n break\n \n n = list(n.inputs[0].edges)[0].sourceNode()\n \n signals.append(signal)\n \n # Convert list of lists of nodes to a list of serialized signals\n for i in range(len(signals)):\n signal = {}\n\n for node in signals[i]:\n node.add_nfb_export_data(signal)\n \n if isinstance(signals[i][1], EnvelopeDetector):\n signal[\"sTemporalType\"] = \"envdetector\"\n elif isinstance(signals[i][1], BandpassFilter):\n signal[\"sTemporalType\"] = \"filter\"\n else:\n signal[\"sTemporalType\"] = \"identity\"\n \n signals[i] = signal\n\n data[\"vSignals\"] = {\n \"DerivedSignal\": signals\n }\n\n # Composite signals --------------------------------------------------------------------------------------------\n signals = []\n\n for node in self.signal_scheme.graph.nodes:\n if isinstance(node, CompositeSignalExport):\n signal = {}\n node.add_nfb_export_data(signal)\n signals.append(signal)\n \n data[\"vSignals\"][\"CompositeSignal\"] = signals\n\n # Experiment sequence ------------------------------------------------------------------------------------------\n data[\"vPSequence\"] = {\n \"s\": self.sequence\n }\n\n return data", "title": "" }, { "docid": "6a786eff9f09892ddabe56756d858236", "score": "0.5055638", "text": "def _saveddata(self):\n savedData = super(Group, self)._saveddata()\n savedData.update({'nbUnits': self.nbUnits,\n 'shape': self.shape,\n 'weightsRange': self.weightsRange,\n 'object_type': 'group',\n 'incoming_Cs': self.get_incoming_cs_names()})\n \n return savedData", "title": "" }, { "docid": "51d70121bc3a6e26bc1ea0f024735f12", "score": "0.50352013", "text": "def _get_data(self):\n # Download and extract data\n _physionet_download(PHYSIONET_2019_DATASETS, self.path, self.overwrite_cache)\n # Prepare data\n print(\"Processing data...\")\n data_directories = [self.path / dataset for dataset in PHYSIONET_2019_DATASETS]\n data_files = _get_file_list(data_directories)\n length, channels = self._get_lengths_channels(data_files)\n all_X = [None for _ in PHYSIONET_2019_DATASETS]\n all_y = [None for _ in PHYSIONET_2019_DATASETS]\n for i, files in enumerate(data_files):\n all_X[i], all_y[i] = self._process_files(files, max(length), channels)\n # Form tensors\n X = torch.cat(all_X)\n y = torch.cat(all_y)\n length = torch.tensor(length)\n return X, y, length", "title": "" }, { "docid": "fb9a5efb185d114fc8387fed6c3e8281", "score": "0.50329673", "text": "def fetch_data(self, sender: PyPersiaBatchDataSender):\n raise NotImplementedError(\"implement this function to fetch data\")", "title": "" }, { "docid": "018eb00d835972d8730a6807890a9ac7", "score": "0.5027485", "text": "def data_loader(self, image_list):\n batch_data = []\n HH, WW = self.im_shape\n batch = np.zeros((self.batch_size, HH, WW, 3), dtype=\"float16\")\n img_paths, img_shapes, img_scales, raw_images = [], [], [], []\n num_samples = len(image_list)\n max_iter = (\n (num_samples + self.batch_size - 1) // self.batch_size * self.batch_size\n )\n datasets = itertools.cycle(image_list)\n for idx in range(max_iter):\n im_path = next(datasets)\n input_data, raw_input, im_shape, im_scale = self.preprocess(im_path)\n im_name = im_path.split(\"/\")[-1]\n img_paths.append(im_name)\n img_shapes.append(im_shape)\n img_scales.append(im_scale)\n raw_images.append(raw_input)\n batch[idx % self.batch_size, :, :, :] = input_data\n if (idx + 1) % self.batch_size 
== 0:\n batch_data.append(\n {\n \"data\": batch.astype(\"float16\"),\n \"image_shape\": img_shapes,\n \"image_scale\": img_scales,\n \"path\": img_paths,\n \"image\": raw_images,\n }\n )\n img_paths, img_shapes, img_scales, raw_images = [], [], [], []\n return batch_data", "title": "" }, { "docid": "87fe9ed0b66ef3a7cc2fc208edb3c5a5", "score": "0.50263345", "text": "def get_batch(self, batch_idx):\n in_file_name = str(batch_idx) + '.in.npy'\n inputs = np.load(os.path.join(self.tmp_dir, in_file_name))\n out_file_name = str(batch_idx) + '.out.npy'\n outputs = np.load(os.path.join(self.tmp_dir, out_file_name))\n\n return inputs, outputs", "title": "" }, { "docid": "aa24a791a453c3cb8a8db2b7deaab8b4", "score": "0.50222963", "text": "def GET(self, *args):\n res=[]\n bdb = database('batches')\n if not len(args):\n return dumps({'results': False, 'message': 'No batch ids given'})\n bids = args[0]\n for bid in bids.split(','):\n mcm_b = batch(bdb.get(bid))\n if mcm_b.get_attribute('status')=='new':\n mcm_b.set_attribute('status', 'hold')\n mcm_b.update_history({'action':'set status','step':'hold'})\n elif mcm_b.get_attribute('status')=='hold':\n mcm_b.set_attribute('status', 'new')\n mcm_b.update_history({'action':'set status','step':'new'})\n else:\n res.append({'prepid':bid, 'results': False, 'message': 'Only status hold or new allowed'})\n continue\n\n bdb.update( mcm_b.json())\n res.append({'prepid':bid, 'results': True})\n return dumps(res)", "title": "" }, { "docid": "a6b457561b40373b47db716c03373f1b", "score": "0.5010838", "text": "def batchdata(X, y, btype, bsize):\n if btype == \"minibatch\" or btype == \"stochastic\":\n if btype == \"stochastic\":\n mask = rnd.sample(range(y.size), 1)\n else:\n mask = rnd.sample(range(y.size), bsize)\n Xbatch = X[:, mask]\n ybatch = y[mask]\n elif btype == \"full\":\n Xbatch = X\n ybatch = y\n else:\n logging.error(\"Incorrect batch type, using full\")\n Xbatch = X\n ybatch = y\n return Xbatch, ybatch", "title": "" }, { "docid": "8f45e5a897f36508adaace0ea5668fb5", "score": "0.5005797", "text": "def submit(\n self,\n poll: Optional[bool] = True,\n errors: Optional[bool] = True,\n process_files: Optional[bool] = True,\n halt_on_error: Optional[bool] = True,\n ) -> dict:\n # get file, group, and indicator data\n content = self.data\n\n # pop any file content to pass to submit_files\n file_data = content.pop('file', {})\n batch_data = (\n self.submit_create_and_upload(content=content, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus', {})\n )\n batch_id = batch_data.get('id')\n if batch_id is not None:\n self.tcex.log.info(f'feature=batch, event=submit, batch-id={batch_id}')\n # job hit queue\n if poll:\n # poll for status\n batch_data = (\n self.poll(batch_id=batch_id, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus')\n )\n if errors:\n # retrieve errors\n error_groups = batch_data.get('errorGroupCount', 0)\n error_indicators = batch_data.get('errorIndicatorCount', 0)\n if error_groups > 0 or error_indicators > 0:\n batch_data['errors'] = self.errors(batch_id)\n else:\n # can't process files if status is unknown (polling must be enabled)\n process_files = False\n\n if process_files:\n # submit file data after batch job is complete\n self._file_threads.append(\n self.submit_thread(\n name='submit-files', target=self.submit_files, args=(file_data, halt_on_error,),\n )\n )\n return batch_data", "title": "" }, { "docid": "90c6170d451aa888e7e09c09c12cd81c", "score": "0.49987602", "text": "def process_data(request, results, 
file_name, data):\n results.append(\"\")\n results.append(\"-------\")\n results.append(str(file_name))\n results.append(\"-------\")\n\n server_name = file_name\n for t_matchobj in re.finditer(filename_re, file_name):\n server_name = t_matchobj.groupdict()[\"server_name\"]\n server_name = server_name.replace(\"-\", \":\")\n results.append(\"Server: \" + server_name)\n\n cur_server = GetCreateEntity(request, results, \"server\", {\"name\": server_name})\n print('SERVER: '+str(cur_server.attributes))\n\n cur_server.attributes['name'] = server_name\n sequence_number = 0\n for matchobj in re.finditer(blk_row_re, data):\n if (my_blk_row_processor(request, results, matchobj, cur_server, sequence_number)):\n sequence_number += 1\n\n return results", "title": "" }, { "docid": "9c5a5a38f850a4bae4963b3c3acd33bc", "score": "0.4995515", "text": "def get(self, prepid):\n db = database('batches')\n return {'results': db.get(prepid=prepid)}", "title": "" }, { "docid": "5a0c19001fcddf960f277c69401aea27", "score": "0.4986502", "text": "def _concatenate_batch(self, data_batch: Dict[str, List[str]]) -> List[str]:\n concatenated_batch = []\n\n emotion_batch = data_batch[\"emotion\"]\n target_utterance_batch = data_batch[\"target_utterance\"]\n evidence_utterance_batch = data_batch[\"evidence_utterance\"]\n conversation_history_batch = data_batch[\"conversation_history\"]\n\n for emotion, target_utterance, evidence_utterance, conversation_history in zip(\n emotion_batch,\n target_utterance_batch,\n evidence_utterance_batch,\n conversation_history_batch,\n ):\n concatenated_instance = self._concatenate_instance(\n emotion, target_utterance, evidence_utterance, conversation_history\n )\n concatenated_batch.append(concatenated_instance)\n return concatenated_batch", "title": "" }, { "docid": "72c605cb3ed9c6af3b2a963ddcb476d1", "score": "0.49819386", "text": "def create_raw_data(self, trial):\n\n import os\n import datasets\n wd = os.getcwd() + '\\data'\n\n print(self.dataset + ' Selected')\n print('Attempting to load data from subject ' + str(self.subject) + ' and trial ' + str(trial))\n\n subject = self.subject\n if self.dataset == 'Motor Imagination':\n raw = datasets.read_imagination(subject, trial)\n \n elif self.dataset == 'Motor Execution':\n raw = datasets.read_execution(subject, trial)\n\n elif self.dataset == 'SCI Offline':\n raw = datasets.read_sci_offline(subject, trial)\n\n elif self.dataset == 'SCI Online':\n #file_location = r'C:/Users/ergo/Datasets/SCI/data/P09 Online Session %d %s/P09 Run %d.gdf' % (self.session, self.type_of_trial, trial)\n #file_location = self.wd + r'P09 Online Session %d Train/P09 Run %d.gdf' % (self.session, trial)\n #raw, event_id = read_execution_imagination(file_location, 'SCI Online', self.type_of_trial)\n raw, event_id = datasets.read_sci_online(subject, session, type_of_trial)\n print('The ' + self.dataset + ' dataset has been loaded. Observing the session ' + str(session) + ' for ' + type_of_trial)\n\n else:\n print('Not a known dataset. 
Check the dataset string.')\n \n print('The ' + self.dataset + ' dataset has been loaded.')\n self.raw = raw\n return raw", "title": "" }, { "docid": "62301ba662d8150d041f434b514a7feb", "score": "0.49812973", "text": "def extract_data(datatype):\n batches = []\n if datatype == 'train':\n str2search = 'batch_'\n elif datatype == 'test':\n str2search = 'test'\n elif datatype == 'validation':\n str2search = 'test'\n\n for file in os.listdir(self.cifar10_dir):\n file_path = os.path.join(self.cifar10_dir, file)\n if os.path.isfile(file_path) and str2search in file:\n batches.append(unpickle(file_path))\n data = np.concatenate(tuple(a['data'] for a in batches))\n labels = np.concatenate(tuple(a['labels'] for a in batches))\n return data, labels", "title": "" }, { "docid": "edccf0d057e59ebec6c5b32bd9e97b8e", "score": "0.49783206", "text": "def data(self):\n\n # Get user request\n tool, name = self._get_tool()\n\n # Get desired format\n data_format = self.request.GET.get('format', 'csv')\n\n # Get the results of the tool\n res = tool.run(save_results=True)\n\n # Render into desired format\n data_artifact = res['data']\n output_data = data_artifact.render_output(data_format)\n extension = data_artifact.available_formats()[data_format]['extension']\n\n # Send out the data in CSV format\n return Response(\n content_type=\"application/force-download\",\n content_disposition='attachment; filename=%s.%s' % (tool.name, extension),\n body=output_data\n )", "title": "" }, { "docid": "dc319287dbe473d6fb4628bfa6cb4a45", "score": "0.49699363", "text": "def get_batch(self, batch_idx):\n in_file_name = str(batch_idx) + '.in.npy'\n inputs = np.load(os.path.join(self.tmp_dir, in_file_name))\n outputs = []\n for i in range(self.num_outputs):\n out_file_name = str(batch_idx) + '.out' + str(i) + '.npy'\n outputs.append(np.load(os.path.join(self.tmp_dir, out_file_name)))\n\n return inputs, outputs", "title": "" }, { "docid": "70f6964a12d6b2134bde7ee0e9c1883f", "score": "0.49673083", "text": "def get_thinpool_data():\n config = parse_config.parse_serviced_config()\n data = get_serviced_settings(config)\n data.update(get_tpool_stats(config))\n data.update(get_tenant_stats())\n return data", "title": "" }, { "docid": "4db5c4d0b970e058c8ec4fe331017c28", "score": "0.49642786", "text": "def _getdata(self, variables: list, years: list, outputfile: str):\n name, request = self._build_request(variables, years)\n if self.dryrun:\n print(name, request, outputfile)\n else:\n queueing_message = (\n os.linesep,\n \"Download request is being queued at Copernicus.\",\n os.linesep,\n \"It can take some time before downloading starts, \",\n \"please do not kill this process in the meantime.\",\n os.linesep\n )\n connection = cdsapi.Client()\n print(\"\".join(queueing_message)) # print queueing message\n connection.retrieve(name, request, outputfile)\n era5cli.utils._append_history(name, request, outputfile)", "title": "" }, { "docid": "6a8addd177ce1b33c834c4edb2a2f9a5", "score": "0.49600196", "text": "async def run_extraction_all(self):\n \n max_user_id, max_message_id = self.get_prev_index()\n \n async with self.client:\n users = await self.client.get_participants(credentials.GROUP_NAME, limit = 50000000)\n \n self.users_df = self.get_users_df(users)\n \n async with self.client:\n messages = await self.client.get_messages(credentials.GROUP_NAME, limit = 50000000, min_id=max_message_id)\n\n \n self.messages_df = self.get_messages_df(messages)\n \n self.push_to_db()\n \n #messages_df.to_csv(self.MESSAGES_FILE, index=False)\n \n return 
self.messages_df, self.users_df", "title": "" }, { "docid": "96837452e190253eef224aa396171301", "score": "0.49569976", "text": "def get_processed_image_data(setSize):\n\n # get status images\n cae = Storage.get_cae()\n status_images = cae.get_current_status_images(setSize)\n\n # generate ProcessedImageData object from status images\n processed_image_data = generate_status_image_object_from_status_images(status_images)\n\n # return image subset\n return processed_image_data, 200", "title": "" }, { "docid": "9afa0b522d1e3d06d89d07b3eda8296f", "score": "0.49489015", "text": "def get_batch_details(batch_id):\n query = db.session.query(\n BatchClass.id,\n BatchClass.growapp_id,\n CropTypeClass.name,\n BatchClass.tray_size,\n BatchClass.number_of_trays,\n ).filter(\n and_(BatchClass.id == batch_id, CropTypeClass.id == BatchClass.crop_type_id)\n )\n execute_result = db.session.execute(query).fetchall()\n result = jsonify_query_result(execute_result)\n return result", "title": "" }, { "docid": "161dab2ce80d82c0435178e33f715492", "score": "0.49483272", "text": "def load_batch(self):\n raise NotImplementedError", "title": "" }, { "docid": "40d230e5796ab76724f078427e309837", "score": "0.4943145", "text": "def _get_batch(self, dataset_name):\n if self.use_cache:\n support_set_x, support_set_y, target_x, target_y = self._get_batch_from_cache(dataset_name)\n else:\n print(\"use_cache = false not yet implemented\")\n return\n support_set_x = np.array(support_set_x)\n support_set_y = np.array(support_set_y)\n target_x = np.array(target_x)\n target_y = np.array(target_y)\n support_set_x = support_set_x.reshape((support_set_x.shape[0], support_set_x.shape[1] * support_set_x.shape[2]))\n #support_set_y = support_set_y.reshape(support_set_y.shape[0], support_set_y.shape[1] * support_set_y.shape[2])\n return support_set_x, support_set_y, target_x, target_y", "title": "" }, { "docid": "449812e4d6a3833a570e59835c2aa787", "score": "0.4935525", "text": "def __data_generation(self, batch_idx_list):\n image_list = []\n label_list = []\n\n for index, (image, label) in enumerate( zip(self.images, self.labels) ):\n # if index is part of this batch\n if index in batch_idx_list:\n # augment\n if self.augment:\n # apply elastic transformation\n if self.elastic and np.random.uniform() < 0.5:\n elastic = Elastic(image, label)\n image, label = elastic.run()\n # affine transformations + crop\n image, label = self._affine_transform_arr(image, label)\n image, label = self._crop_arr(image, label)\n # noise\n image = self._pepper_arr(image)\n if self.blur_label:\n label = self._blur_arr(label)\n # if no augmentation, just crop\n else:\n image, label = self._crop_arr(image, label)\n\n label = self._ensure_binary_label(label)\n\n # if index == 12:\n # siko = np.random.randint(0, 100)\n #\n # writer = sitk.ImageFileWriter()\n # writer.SetFileName(\"/output/test1/sik-image-{}.nrrd\".format(siko))\n # writer.SetUseCompression(True)\n # writer.Execute(sitk.GetImageFromArray(image))\n #\n # writer = sitk.ImageFileWriter()\n # writer.SetFileName(\"/output/test1/sik-label-{}.nrrd\".format(siko))\n # writer.SetUseCompression(True)\n # writer.Execute(sitk.GetImageFromArray(label))\n\n # print (image.min(), image.max(), image.shape, image.dtype)\n # print (label.min(), label.max(), label.shape, label.dtype)\n\n if label.min()!=0 or label.max()!=1 or len(np.unique(label))!=2:\n print(\"WARNING! 
CHECK YOUR LABELS at index {}\".format(index))\n\n # reshape\n image = image.reshape(1, *image.shape)\n label = label.reshape(1, *label.shape)\n # append\n image_list.append(image)\n label_list.append(label)\n\n\n return np.array(image_list,'float32'), np.array(label_list,'float32')", "title": "" }, { "docid": "75eeb6c343efbb052d64ac42537e0b30", "score": "0.49250656", "text": "def get(self):\n\n self.response.headers.add_header('Access-Control-Allow-Origin', '*')\n\n accept_header = get_best_mime_match_or_default(\n self.request.headers.get('Accept'),\n SUPPORTED_OUTPUT_APPENDR_MIME_TYPES,\n DEFAULT_OUTPUT_APPENDR_MIME_TYPE)\n\n validate_input_param(self.request.params, 'api_token', True,\n validate_non_empty_string,\n False)\n\n validate_input_param(self.request.params, 'storage_backend', True,\n validate_non_empty_string,\n False)\n\n api_token = self.request.params.get('api_token')\n storage_backend = self.request.params.get('storage_backend')\n\n user_id = Bin.get_user_id_for_token(storage_backend, api_token)\n bins = Bin.all().filter('storage_backend =', storage_backend)\n bins = bins.filter('storage_user_id =', user_id)\n bins = bins.order('-date_created').fetch(None)\n\n self.response.headers['Content-Type'] = accept_header\n self.response.out.write(Bin.serialize(bins, accept_header))", "title": "" }, { "docid": "0ea9cadfc4963acec0fd3bad1969dfda", "score": "0.49236694", "text": "def process_batch(self, batch):\n works = []\n results = []\n for identifier in batch:\n work = self.work(identifier)\n if not isinstance(work, CoverageFailure):\n works.append(work)\n results.append(identifier)\n else:\n results.append(work)\n feed = AcquisitionFeed(self._db, \"Metadata Upload Feed\", \"\", works, None)\n self.lookup_client.add_with_metadata(feed)\n \n # We grant coverage for all identifiers if the upload doesn't raise an exception.\n return results", "title": "" }, { "docid": "f4fc9d94b69befb5c0899bd4dd00b23f", "score": "0.49228", "text": "def _split_to_batch_gt(data, record_size, valid_action_ids):\n\n assert data.size % record_size == 0, 'incorrect record_size'\n records = data.reshape([-1, record_size])\n\n if records.shape[0] == 0:\n return {}\n\n batch_size = int(np.max(records[:, 0])) + 1\n batch_data = {i: [] for i in range(batch_size)}\n\n for record in records:\n item_id = int(record[0])\n if item_id < 0:\n continue\n\n action_id = int(record[1])\n if action_id not in valid_action_ids:\n continue\n\n detection = InputDetection(item_id=item_id,\n track_id=int(record[2]),\n real_action=action_id,\n det_conf=1.0,\n anchor_id=-1,\n xmin=float(record[3]),\n ymin=float(record[4]),\n xmax=float(record[5]),\n ymax=float(record[6]),\n x_pos=-1,\n y_pos=-1)\n batch_data[detection.item_id].append(detection)\n\n return batch_data", "title": "" }, { "docid": "365462dacf4225152c280b697b6e94f3", "score": "0.49140784", "text": "def json_to_dataset(self, info_group):\n dataset = []\n id_generator = 0\n for img in info_group:\n id_generator += 1\n single_img = {}\n # print(os.path.join(self.data_path, img['imagePath']))\n single_img['file_name'] = os.path.join(self.data_path, img['imagePath'])\n single_img['image_id'] = id_generator\n single_img['height'] = img['imageHeight']\n single_img['width'] = img['imageWidth']\n single_img['annotations'] = []\n if len(img['shapes']) != 0:\n for obj in img['shapes']:\n box = {}\n box['bbox'] = self._get_abbox(obj['points'])\n box['bbox_mode'] = BoxMode.XYWHA_ABS\n # print(CLS_NUM[obj['label']])\n box['category_id'] = obj['label']\n 
single_img['annotations'].append(box)\n dataset.append(single_img)\n\n return dataset", "title": "" }, { "docid": "38ba7560b33b670d0ccde74c4915c009", "score": "0.49134806", "text": "def get_batch():\r\n with tf.device('/cpu:0'):\r\n # Load data\r\n wavs, qts, speakers = load_data() # list\r\n\r\n\r\n # Calc total batch count\r\n num_batch = len(wavs) // hp.batch_size\r\n\r\n # to tensor\r\n wavs = tf.convert_to_tensor(wavs, tf.string)\r\n qts = tf.convert_to_tensor(qts, tf.string)\r\n speakers = tf.convert_to_tensor(speakers, tf.int32)\r\n\r\n # Create Queues\r\n wav, qt, speaker = tf.train.slice_input_producer([wavs, qts, speakers], shuffle=True)\r\n\r\n # Parse\r\n wav, = tf.py_func(lambda x: np.load(x), [wav], [tf.float32]) # (None, 1)\r\n qt, = tf.py_func(lambda x: np.load(x), [qt], [tf.int32]) # (None, 1)\r\n\r\n # Cut off\r\n qt = tf.pad(qt, ([0, hp.T], [0, 0]), mode=\"CONSTANT\")[:hp.T, :]\r\n\r\n # Add shape information\r\n wav.set_shape((None,))\r\n qt.set_shape((hp.T, 1))\r\n speaker.set_shape(())\r\n\r\n # Batching\r\n qts, wavs, speakers = tf.train.batch(tensors=[qt, wav, speaker],\r\n batch_size=hp.batch_size,\r\n shapes=([hp.T, 1], [None,], []),\r\n num_threads=32,\r\n dynamic_pad=True)\r\n return qts, wavs, speakers, num_batch", "title": "" }, { "docid": "b3edf351236d4f76ef1fb837234064c2", "score": "0.49126536", "text": "def data_response(self):\n d = []\n for i in range(self._num_bands):\n d_i = self._imageModel_list[i].data_response\n if i == 0:\n d = d_i\n else:\n d = np.append(d, d_i)\n return d", "title": "" }, { "docid": "49011c67f77effcbb93e8f16fca0a4b4", "score": "0.4906653", "text": "def call_and_output_all(self):\n\n args_dict = self.get_args_dict()\n inc = 0\n\n # only makes API call if there is a valid set of args\n if self.get_valid_args():\n\n # open the output file handle if applicable\n # first overwrite all the contents of the file, then close and\n # open the file again in append mode\n if self.get_output_mode() == ENAClient.OUTPUT_MODE_FILE:\n output_file_path = args_dict[\"output_file\"]\n output_file_write = open(output_file_path, \"w\")\n output_file_write.write(\"\")\n output_file_write.close()\n self.__set_output_file(open(output_file_path, \"a\"))\n\n # write/print prefix for array of metadata objects\n self.__output_batch_prefix()\n\n # if input file was provided, then the program will iterate over\n # each sequence id in the file, making the API request for each\n # id. 
Formatted responses are printed/written as they are processed.\n if self.get_input_mode() == ENAClient.INPUT_MODE_BATCH:\n input_file = open(args_dict[\"input_file\"], \"r\")\n input_line = input_file.readline()\n # while next input file line is not empty\n while (input_line):\n # get the sequence id and get the formatted response\n sequence_id = input_line.rstrip()\n response_string = self.call_refget_api(sequence_id, inc)\n\n input_line = input_file.readline()\n # if there is another sequence id on the next line,\n # output the formatted response and the separator char\n # between returned objects\n if input_line:\n self.__output(response_string)\n self.__output_batch_separator()\n # if there are no more sequence ids (at end of file), then\n # do not output the separator char\n else:\n self.__output(response_string + \"\\n\")\n inc += 1\n\n # sequence id was provided, program executed in single mode\n else:\n # get sequence id and send it to api call method, writing/\n # printing output\n sequence_id = args_dict[\"sequence_id\"]\n response_string = self.call_refget_api(sequence_id, inc)\n self.__output(response_string + \"\\n\")\n\n # write/print suffix for array of metadata objects\n self.__output_batch_suffix()\n\n if self.get_output_mode() == ENAClient.OUTPUT_MODE_FILE:\n self.get_output_file().close()", "title": "" }, { "docid": "70f6b32fd87f1ce95293b375fc1ce0de", "score": "0.49046743", "text": "def submit_all(\n self,\n poll: bool = True,\n errors: bool = True,\n process_files: bool = True,\n halt_on_error: bool = True,\n ) -> list[dict]:\n batch_data_array = []\n file_data = {}\n while True:\n batch_data: dict[str, int | list | str] | None = {}\n batch_id: int | None = None\n\n # get file, group, and indicator data\n content = self.data\n\n # break loop when end of data is reached\n if not content.get('group') and not content.get('indicator'):\n break\n\n if self.action.lower() == 'delete':\n # no need to process files on a delete batch job\n process_files = False\n\n # while waiting of FR for delete support in createAndUpload submit delete request\n # the old way (submit job + submit data), still using V2.\n if len(content) > 0: # pylint: disable=len-as-condition\n batch_id = self.submit_job(halt_on_error)\n if batch_id is not None:\n batch_data = self.submit_data(\n batch_id=batch_id, content=content, halt_on_error=halt_on_error\n )\n else:\n batch_data = {}\n else:\n # pop any file content to pass to submit_files\n file_data = content.pop('file', {})\n batch_data = (\n self.submit_create_and_upload(content=content, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus', {})\n )\n batch_id = batch_data.get('id') # type: ignore\n\n if batch_id is not None:\n self.log.info(f'feature=batch, event=status, batch-id={batch_id}')\n # job hit queue\n if poll:\n # poll for status\n batch_data = (\n self.poll(batch_id, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus', {})\n )\n if errors and batch_data is not None:\n # retrieve errors\n error_count = batch_data.get('errorCount', 0)\n error_groups = batch_data.get('errorGroupCount', 0)\n error_indicators = batch_data.get('errorIndicatorCount', 0)\n if (\n isinstance(error_count, int)\n and isinstance(error_groups, int)\n and isinstance(error_indicators, int)\n ):\n if error_count > 0 or error_groups > 0 or error_indicators > 0:\n batch_data['errors'] = self.errors(batch_id)\n else:\n # can't process files if status is unknown (polling must be enabled)\n process_files = False\n\n if process_files:\n # submit 
file data after batch job is complete\n self._file_threads.append(\n self.submit_thread(\n name='submit-files',\n target=self.submit_files,\n args=(\n file_data,\n halt_on_error,\n ),\n )\n )\n batch_data_array.append(batch_data)\n\n # write errors for debugging\n if isinstance(batch_data, dict):\n batch_errors = batch_data.get('errors', [])\n if isinstance(batch_errors, list) and len(batch_errors) > 0:\n self.write_error_json(batch_errors)\n\n return batch_data_array", "title": "" }, { "docid": "d30d89b9c3d8c0a5688c041108b9f949", "score": "0.49018767", "text": "def _discover(self):\n self._ptr = 0\n for leadin in self._get_leadins():\n raw_data_size = leadin.segment_len - leadin.metadata_len\n raw_data_start = self._ptr + leadin.metadata_len\n ch_data_start = raw_data_start\n interleaved = leadin.toc & KToC.InterleavedData\n byte_order = \">\" if leadin.toc & KToC.BigEndian else \"<\"\n n_objects = self._unpack(TdsDataType.U32, byte_order)\n for object_i in range(n_objects):\n obj_path = self._unpack(TdsDataType.String, byte_order)\n path_parts = self._get_pathparts(obj_path)\n n = len(path_parts)\n if n == 1:\n self._ptr += 4 # raw data index\n for name, value in self._get_properties(byte_order):\n self._create(name, value)\n if name not in self._info[\"file_properties\"]:\n self._info[\"file_properties\"].append(name)\n elif n == 2:\n group = path_parts[1]\n if not hasattr(self, group):\n self._create(group, Group(group))\n self._info[\"groups\"].append(group)\n self._info[\"group_properties\"][group] = []\n self._info[\"group_channels\"][group] = []\n self._info[\"group_channel_properties\"][group] = {}\n self._ptr += 4\n for name, value in self._get_properties(byte_order):\n self._get_attr(group)._create(name, value)\n if name not in self._info[\"group_properties\"][group]:\n self._info[\"group_properties\"][group].append(name)\n else:\n gr = path_parts[1]\n ch = path_parts[2]\n if not hasattr(self.__dict__[gr], ch):\n self._get_attr(gr)._create(ch, Channel(ch))\n self._get_attr(gr, ch)._buffer = self._buffer\n self._info[\"group_channels\"][gr].append(ch)\n self._info[\"group_channel_properties\"][gr][ch] = []\n index_type = self._unpack(TdsDataType.U32, byte_order)\n if index_type == 0xFFFFFFFF:\n pass\n elif index_type in [0x00001269, 0x00001369]:\n self._ptr += 8\n self._unpack(TdsDataType.U64, byte_order)\n fcs_vector_size = self._unpack(TdsDataType.U32, byte_order)\n self._ptr += fcs_vector_size * 20\n rdw_vector_size = self._unpack(TdsDataType.U32, byte_order)\n rdw = self._unpack(TdsDataType.U32, byte_order)\n count = raw_data_size // rdw\n self._ptr += (rdw_vector_size - 1) * 4\n rd_sz = rdw // n_objects\n rd_fmt = {1: \"b\", 2: \"h\", 4: \"i\"}[rd_sz]\n dtype = TdsDataType.DAQmxRawData\n if count > 0:\n seg = Segment(\n raw_data_start,\n raw_data_size,\n ch_data_start,\n (rd_sz, rd_fmt),\n dtype,\n None,\n count,\n byte_order,\n interleaved,\n )\n self._get_attr(gr, ch)._segments.append(seg)\n ch_data_start += rd_sz\n elif index_type == 0x0000126A:\n # digital data\n self._ptr += 8\n count = self._unpack(TdsDataType.U64, byte_order)\n fcs_vector_size = self._unpack(TdsDataType.U32, byte_order)\n self._ptr += fcs_vector_size * 17\n rdw_vector_size = self._unpack(TdsDataType.U32, byte_order)\n rdw = self._unpack(TdsDataType.U32, byte_order)\n self._ptr += (rdw_vector_size - 1) * 4\n rd_sz = rdw\n rd_fmt = {1: \"B\", 2: \"H\", 4: \"I\"}[rd_sz]\n dtype = TdsDataType.DAQmxRawData\n if count > 0:\n seg = Segment(\n raw_data_start,\n raw_data_size,\n ch_data_start,\n (rd_sz, 
rd_fmt),\n dtype,\n object_i,\n count * raw_data_size // rd_sz,\n byte_order,\n interleaved,\n )\n self._get_attr(gr, ch)._segments.append(seg)\n elif index_type == 0:\n last_seg = self._get_attr(gr, ch)._segments[-1]\n count = (raw_data_size // last_seg.raw_size) * last_seg.count\n dtype_size, _ = STRUCT_FORMAT[last_seg.dtype]\n seg = Segment(\n raw_data_start,\n raw_data_size,\n ch_data_start,\n last_seg.rd_szfmt,\n last_seg.dtype,\n last_seg.offsets,\n count,\n byte_order,\n interleaved,\n )\n if interleaved:\n ch_data_start += dtype_size\n else:\n ch_data_start += count * dtype_size\n self._get_attr(gr, ch)._segments.append(seg)\n elif index_type == 0x00000014:\n dtype = self._unpack(TdsDataType.U32, byte_order)\n try:\n dtype_size, _ = STRUCT_FORMAT[TdsDataType(dtype)]\n except KeyError:\n raise DataTypeNotSupportedError(\n f\"{TdsDataType(dtype)}\"\n ) from None\n dim = self._unpack(TdsDataType.U32, byte_order)\n if dim != 1:\n raise InvalidDimensionError\n count = self._unpack(TdsDataType.U64, byte_order)\n seg = Segment(\n raw_data_start,\n raw_data_size,\n ch_data_start,\n STRUCT_FORMAT[TdsDataType(dtype)],\n TdsDataType(dtype),\n # None,\n n_objects,\n count,\n byte_order,\n interleaved,\n )\n self._get_attr(gr, ch)._segments.append(seg)\n if interleaved:\n ch_data_start += dtype_size\n else:\n ch_data_start += count * dtype_size\n elif index_type == 0x0000001C:\n dtype = self._unpack(TdsDataType.U32, byte_order)\n dim = self._unpack(TdsDataType.U32, byte_order)\n count = self._unpack(TdsDataType.U64, byte_order)\n raw_chunk_size = self._unpack(TdsDataType.U64, byte_order)\n self._ptr += 4\n raw_size = raw_chunk_size - 4 * count\n reps = raw_data_size // raw_chunk_size\n fmt = f\"{byte_order}{count}I\"\n st = self._ptr\n en = st + 4 * count\n buf = self._buffer[st:en]\n offsets = (0,) + struct.unpack(fmt, buf)\n raw_start = [\n raw_data_start + i * raw_chunk_size + 4 * count\n for i in range(reps)\n ]\n seg = Segment(\n raw_start,\n raw_size,\n raw_start,\n STRUCT_FORMAT[TdsDataType(dtype)],\n TdsDataType(dtype),\n offsets,\n count,\n byte_order,\n interleaved,\n )\n self._get_attr(gr, ch)._segments.append(seg)\n self._ptr -= 4\n else:\n raise ValueError(\"Unknown raw data index type\")\n for n, v in self._get_properties(byte_order):\n if index_type in [0x00001269, 0x00001369, 0x0000126A]:\n if n.startswith(\"NI_\"):\n self._get_attr(gr, ch)._scales[n] = v\n else:\n self._get_attr(gr, ch)._create(n, v)\n if n not in self._info[\"group_channel_properties\"][gr][ch]:\n self._info[\"group_channel_properties\"][gr][ch].append(n)", "title": "" }, { "docid": "fd32360c3cda821d804acc1bd7e0a169", "score": "0.48985487", "text": "def get_data(self):\n return []", "title": "" }, { "docid": "d078d14fa883ace79bd830cc8b7c55ee", "score": "0.48982915", "text": "def act(self):\n self._epoch_done = True\n train_conll = [open(os.path.join(self.train_path, file), 'r').readlines() for file in self.train_documents]\n valid_conll = [open(os.path.join(self.valid_path, file), 'r').readlines() for file in self.valid_documents]\n return {'id': self.id, 'conll': train_conll, 'valid_conll': valid_conll}", "title": "" }, { "docid": "35f2cd304e640c7c7c1d7b3557328a02", "score": "0.48947442", "text": "def _get_data(self):", "title": "" }, { "docid": "321243738caabbac61de1b81e206e4c0", "score": "0.48911253", "text": "def batches(self) -> Dict[str, AnyBatch]:\n return self.batch_cache", "title": "" }, { "docid": "a8324193e8e97ac25115b5dd108e5f8c", "score": "0.4889983", "text": "def _add_batch(self, job_id, data):\n 
url = self._session.construct_url(self._get_api_uri() + \"/job/{0}/batch\".format(job_id))\n res = self._session.post(url, headers=self._get_headers('text/csv'), data=data.encode('utf-8'))\n\n if res.status_code != 201:\n raise Exception(\n \"Request failed with %d code and error [%s]\" %\n (res.status_code, res.text))\n\n return ET.fromstring(res.text).find('asyncapi:id', self._XML_NAMESPACES).text", "title": "" }, { "docid": "f100cbf04c016198bbd3e549b48bfe47", "score": "0.48878032", "text": "def read_data(self):\n gs = self.gather()\n ds = []\n for g in gs:\n f = self.fetch(g)\n i = self.import_data(f)\n ds.append(i)\n return ds", "title": "" }, { "docid": "2351ce6afb0453fcd91afc85ccda63e8", "score": "0.48773158", "text": "def extData(self):\n if self.selected_group is None:\n return bytearray(0)\n\n w = Writer()\n w.add(self.selected_group, 2)\n return w.bytes", "title": "" }, { "docid": "3a8b22cf7fef700201175b74922464ee", "score": "0.48740107", "text": "def export_data(self, db_client: arvet.database.client.DatabaseClient):\n # Save trajectory files for import into unreal\n for trajectory_group in self._trajectory_groups.values():\n data_helpers.dump_ue4_trajectory(\n name=trajectory_group.name,\n trajectory=traj_help.get_trajectory_for_image_source(db_client, trajectory_group.reference_dataset)\n )\n\n # Group and print the trajectories for graphing\n systems = du.defaults({'LIBVISO 2': self._libviso_system}, self._orbslam_systems)\n for trajectory_group in self._trajectory_groups.values():\n\n # Collect the trial results for each image source in this group\n trial_results = {}\n for system_name, system_id in systems.items():\n for dataset_name, dataset_id in trajectory_group.datasets.items():\n trial_result_list = self.get_trial_result(system_id, dataset_id)\n for idx, trial_result_id in enumerate(trial_result_list):\n label = \"{0} on {1} repeat {2}\".format(system_name, dataset_name, idx)\n trial_results[label] = trial_result_id\n data_helpers.export_trajectory_as_json(trial_results, \"Generated Data \" + trajectory_group.name, db_client)", "title": "" }, { "docid": "3affbf49af440fbb12fb831cdfa6a4f3", "score": "0.48636663", "text": "def get_batch(batch):\n batch_data = []\n batch_idx_data = [[], [], [], []]\n max_summary_length = 0\n for d in batch:\n idx_data = [[], [], []] # for each triplet\n batch_data.append(d[:2]) # keep the original data/ not indexed version\n for triplets in d[2][0]:\n for idt, t in enumerate(triplets):\n idx_data[idt].append(t)\n\n for idb, b in enumerate(idx_data):\n batch_idx_data[idb].append(b)\n\n # Calculate maximum length of the summary\n max_summary_length = max(max_summary_length, len(d[2][1]))\n\n batch_idx_data[3].append(d[2][1])\n\n return batch_data, batch_idx_data", "title": "" }, { "docid": "46d83b9a7a098f6d587800fbc82f824a", "score": "0.48627865", "text": "def bufdata(self):\n debug('GCSMessages.bufdata: %d datasets', self._databuffer['index'])\n return self._databuffer['data']", "title": "" }, { "docid": "20e673b1a41e687971482fb6fb953467", "score": "0.48617274", "text": "def getDataSplitBySession(self, datapath, debug = True):\n\n # # Get a list of all the files\n datapath = Path(datapath)\n\n # Create container\n dataContainer = defaultdict(lambda: defaultdict(list))\n\n # Read all data files and concatenate them into a single dataframe.\n for f in datapath.rglob(\"*.pickle\"):\n # Get Session\n sessId = re.findall(\"S[0-9]_T[0-9]\", str(f.name))[0][-4]\n\n # Get data frame\n print(f) if debug else None\n\n with open(f,'rb') as fHandler:\n 
data = pickle.load(fHandler)\n assert isinstance(data, dict), \"Unpickled data file is not a dict\"\n assert 'X' in data.keys() and 'y' in data.keys(), \"Unpickled eeg data not in the right format. Missing 'X' or 'y' key\"\n\n\n # Append trial to dataset\n dataContainer[sessId]['X'].append(data['X'])\n dataContainer[sessId]['y'].append(data['y'])\n\n for key, value in dataContainer.items():\n dataContainer[key]['X'] = np.concatenate(dataContainer[key]['X'])\n dataContainer[key]['y'] = np.concatenate(dataContainer[key]['y'])\n\n return dataContainer", "title": "" }, { "docid": "fc2d5a296e155816b66d1ae2f39ed801", "score": "0.48613268", "text": "def batch_img_summary():\n # Check to make sure we received a valid JSON with the request:\n if not request.json:\n return jsonify({\"error\": \"no request received\"})\n # Get incoming request with the image data:\n batch_images_jsons = request.get_json(force=True)['images'] # add checks with request.is_json() (and try + the second one?)\n\n image_summaries = []\n for image_json in batch_images_jsons:\n image_summaries.append(get_summary(image_json))\n\n # Get image summaries for all images in the batch from\n # our ML model, and return them in JSON form:\n return jsonify({\"images\": image_summaries})", "title": "" }, { "docid": "969657f66a0154b5f85b455e1eca036c", "score": "0.48588097", "text": "def testReturnsAllDataByDefault(self):\n client_metrics.GRR_CLIENT_RECEIVED_BYTES.Increment(1566)\n client_metrics.GRR_CLIENT_SENT_BYTES.Increment(2000)\n\n results = self.RunAction(\n admin.GetClientStats,\n grr_worker=MockClientWorker(),\n arg=rdf_client_action.GetClientStatsRequest())\n\n response = results[0]\n self.assertEqual(response.bytes_received, 1566)\n self.assertEqual(response.bytes_sent, 2000)\n\n self.assertLen(response.cpu_samples, 3)\n for i in range(3):\n self.assertEqual(response.cpu_samples[i].timestamp,\n rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100 + i * 10))\n self.assertAlmostEqual(response.cpu_samples[i].user_cpu_time, 0.1)\n self.assertAlmostEqual(response.cpu_samples[i].system_cpu_time,\n 0.1 * (i + 1))\n self.assertAlmostEqual(response.cpu_samples[i].cpu_percent, 10.0 + 5 * i)\n\n self.assertLen(response.io_samples, 3)\n for i in range(3):\n self.assertEqual(response.io_samples[i].timestamp,\n rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100 + i * 10))\n self.assertEqual(response.io_samples[i].read_bytes, 100 * (i + 1))\n self.assertEqual(response.io_samples[i].write_bytes, 100 * (i + 1))\n\n self.assertEqual(response.boot_time, 100 * 1e6)", "title": "" }, { "docid": "5c062c537de5d0c0ae176d0d16b25ed2", "score": "0.48580033", "text": "def main(self):\r\n\r\n # path = get_TFRecord(sub_add=sub_add, tar_dir=tar_dir,action_rate=action_rate, expand_rate=2)\r\n\r\n print(self.get_next_batch().shape)", "title": "" }, { "docid": "f6164b7269bbc379b8753d73886f3643", "score": "0.4853357", "text": "def __output_batch_prefix(self):\n\n # write prefix that opens the array of metadata objects\n prefixes_dict = {\n ENAClient.OUTPUT_FORMAT_JSON: \"[\\n\",\n ENAClient.OUTPUT_FORMAT_XML: '<?xml version=\"1.0\" ?>\\n'\n + '<sequence_group>\\n',\n ENAClient.OUTPUT_FORMAT_YAML: \"sequence_group:\\n\"\n }\n self.__output(prefixes_dict[self.get_output_format()])", "title": "" }, { "docid": "6928ac6b33bcf4121c66586d14252d9e", "score": "0.48470628", "text": "async def test_batch_get(self):\n self.stream.preset_response(batch=Mocks.make_batches('1')[0])\n\n response = await self.get_assert_200('/batches/1')\n 
self.stream.assert_valid_request_sent(batch_id='1')\n\n self.assertNotIn('head', response)\n self.assert_has_valid_link(response, '/batches/1')\n self.assertIn('data', response)\n self.assert_batches_well_formed(response['data'], '1')", "title": "" }, { "docid": "b71314d0bc11e106466bb01d571ba563", "score": "0.48468357", "text": "def _files_and_annotations(self):\n\n if self.subset == \"train\":\n data_type = \"train\"\n\n if self.subset == \"validation\":\n data_type = \"val\"\n\n if self.subset == \"train_validation\":\n data_type = \"trainval\"\n\n image_ids = self._image_ids(data_type)\n files = [self._image_file_from_image_id(image_id) for image_id in image_ids]\n gt_boxes_list = [self._gt_boxes_from_image_id(image_id) for image_id in image_ids]\n\n print(\"{} {} files and annotations are ready\".format(self.__class__.__name__, self.subset))\n\n return files, gt_boxes_list", "title": "" }, { "docid": "3dc888f5f06409f6f2105499a272c010", "score": "0.48466775", "text": "def info():\n number_of_datasets = Datasets.query.filter_by(session=g.session['uuid'], deleted=False).count()\n dataset = db_dataset_extended_info(g.session['uuid'], uuid=g.session['active_instance'])\n\n return make_response({'lastRequest': g.session['last_request'], 'numberOfDatasets': number_of_datasets, 'activeDataset': dataset}, 200)", "title": "" }, { "docid": "ae72d72b9122e7c8ed71b0ee675ad935", "score": "0.48425776", "text": "def getBatch(batchId):\n url = \"{}/batches/{}\".format(LIVY_SERVER, batchId)\n result = get(url)\n if result.ok:\n print(result.text)", "title": "" }, { "docid": "7bcefbb214e85bfea7cae3ea25843148", "score": "0.48385295", "text": "def data(self):\n return {\n \"frames\": str(self._chunk),\n \"command\": self._command\n }", "title": "" }, { "docid": "d700f70a302d7349564565306e7eb05b", "score": "0.48382726", "text": "def _collect_data_by_station_group(\n self, road_weather_station_group: DwdRoadStationGroup, parameters: List[str]\n ) -> pl.DataFrame:\n remote_files = self._create_file_index_for_dwd_road_weather_station(road_weather_station_group)\n if self.sr.start_date:\n remote_files = remote_files.filter(\n pl.col(Columns.DATE.value).is_between(self.sr.start_date, self.sr.end_date)\n )\n remote_files = remote_files.get_column(Columns.FILENAME.value).to_list()\n filenames_and_files = self._download_road_weather_observations(remote_files, self.sr.settings)\n return self._parse_dwd_road_weather_data(filenames_and_files, parameters)", "title": "" }, { "docid": "75fcf9a8929323fbcfb4383b3533b0c2", "score": "0.48378283", "text": "def getOutputData(self,dir=None,names=None):\n j = self.getJobObject()\n if not names: names = ''\n if not dir: dir = j.getOutputWorkspace().getPath()\n\n global dirac_ganga_server\n cmd = 'result = DiracCommands.getOutputData(\"%s\",\"%s\",%d)' \\\n % (names,dir,self.id)\n result = dirac_ganga_server.execute(cmd)\n\n downloaded_files = []\n if not result_ok(result):\n logger.error('Output download failed: %s' % str(result))\n else: downloaded_files = result.get('Value',[])\n ds = BesDataset()\n for f in downloaded_files: ds.files.append(LogicalFile(f))\n return GPIProxyObjectFactory(ds)", "title": "" }, { "docid": "c85a6b30adbb87d57e7e0eb149114525", "score": "0.48375154", "text": "def test_batch(ctx):\n wother = random_name()\n bnames = [random_name() for _ in range(2)]\n\n # Create worksheet and bundles\n wuuid = _run_command([cl, 'new', wother])\n ctx.collect_worksheet(wuuid)\n buuids = [\n _run_command([cl, 'upload', test_path('a.txt'), '-n', bnames[0]]),\n 
_run_command([cl, 'upload', test_path('a.txt'), '-n', bnames[1]]),\n _run_command([cl, 'upload', test_path('a.txt'), '-n', bnames[0], '-w', wother]),\n _run_command([cl, 'upload', test_path('a.txt'), '-n', bnames[1], '-w', wother]),\n ]\n\n # Test batch info call\n output = _run_command(\n [\n cl,\n 'info',\n '-f',\n 'uuid',\n bnames[0],\n bnames[1],\n '%s/%s' % (wother, bnames[0]),\n '%s/%s' % (wother, bnames[1]),\n ]\n )\n check_equals('\\n'.join(buuids), output)\n\n # Test batch info call with combination of uuids and names\n output = _run_command([cl, 'info', '-f', 'uuid', buuids[0], bnames[0], bnames[0], buuids[0]])\n check_equals('\\n'.join([buuids[0]] * 4), output)", "title": "" }, { "docid": "8cb5e305e016f954e0395edebb5e5cc6", "score": "0.48303476", "text": "def _execute(self, http, order, requests):\r\n message = MIMEMultipart('mixed')\r\n # Message should not write out it's own headers.\r\n setattr(message, '_write_headers', lambda self: None)\r\n\r\n # Add all the individual requests.\r\n for request_id in order:\r\n request = requests[request_id]\r\n\r\n msg = MIMENonMultipart('application', 'http')\r\n msg['Content-Transfer-Encoding'] = 'binary'\r\n msg['Content-ID'] = self._id_to_header(request_id)\r\n\r\n body = self._serialize_request(request)\r\n msg.set_payload(body)\r\n message.attach(msg)\r\n\r\n body = message.as_string()\r\n\r\n headers = {}\r\n headers['content-type'] = ('multipart/mixed; '\r\n 'boundary=\"%s\"') % message.get_boundary()\r\n\r\n resp, content = http.request(self._batch_uri, 'POST', body=body,\r\n headers=headers)\r\n\r\n if resp.status >= 300:\r\n raise HttpError(resp, content, uri=self._batch_uri)\r\n\r\n # Now break out the individual responses and store each one.\r\n boundary, _ = content.split(None, 1)\r\n\r\n # Prepend with a content-type header so FeedParser can handle it.\r\n header = 'content-type: %s\\r\\n\\r\\n' % resp['content-type']\r\n for_parser = header + content\r\n\r\n parser = FeedParser()\r\n parser.feed(for_parser)\r\n mime_response = parser.close()\r\n\r\n if not mime_response.is_multipart():\r\n raise BatchError(\"Response not in multipart/mixed format.\", resp=resp,\r\n content=content)\r\n\r\n for part in mime_response.get_payload():\r\n request_id = self._header_to_id(part['Content-ID'])\r\n response, content = self._deserialize_response(part.get_payload())\r\n self._responses[request_id] = (response, content)", "title": "" } ]
b9094e1a5259b445aa8ed3a37cca49db
If selected, the controller sends a Role Request message after the connection is established to change its role according to the selected Role Request option.
[ { "docid": "0fcce8ed0edd5cff1d5750eadf85482d", "score": "0.6079378", "text": "def SendRoleRequest(self):\r\n\t\treturn self._get_attribute('sendRoleRequest')", "title": "" } ]
[ { "docid": "2ddc31c9901295f34a525afe424dc013", "score": "0.6534953", "text": "def change_role():", "title": "" }, { "docid": "496b8d6a6fdd9ab0593bd8fbbd972a75", "score": "0.6400035", "text": "def userRequestedRole(self):\n return self.user_role == ROLE", "title": "" }, { "docid": "f397d19aba42f1a9973f3e18a0e76115", "score": "0.62912786", "text": "def change_role(self):\n response = JSONResponse(self.request)\n role = self.request.form.get('role', None)\n member = self.get_member()\n if role is None or member is None:\n return response.error(MSG_SAVE_FAILURE).remain().dump()\n\n if self.meeting.presidency == member:\n self.meeting.presidency = None\n if self.meeting.secretary == member:\n self.meeting.secretary = None\n\n if role == 'presidency':\n self.meeting.presidency = member\n elif role == 'secretary':\n self.meeting.secretary = member\n return response.proceed().dump()", "title": "" }, { "docid": "9eb23a43cfb71e412971792e4402e39c", "score": "0.6022177", "text": "async def _voice_rolereqmsg(self, ctx: commands.Context, *, message: str):\n await self.config.guild(ctx.guild).voice.role_req_msg.set(message)\n return await ctx.tick()", "title": "" }, { "docid": "5eef801a4330d87c873da05b9ffaa061", "score": "0.5953621", "text": "def request(test, role, gen=None, con=None):\n if con is None:\n con = test.controller\n request = ofp.message.role_request(role=role, generation_id=gen)\n response, _ = con.transact(request)\n test.assertTrue(isinstance(response, ofp.message.role_reply),\n \"Expected a role reply\")\n if role != ofp.OFPCR_ROLE_NOCHANGE:\n test.assertEquals(response.role, role)\n if gen is not None:\n test.assertEquals(response.generation_id, gen)\n return response.role, response.generation_id", "title": "" }, { "docid": "922a5c9beb846b6aa5003d8cddd110fe", "score": "0.5932203", "text": "async def role_set(self, ctx):\n if ctx.invoked_subcommand is None:\n raise commands.CommandNotFound()", "title": "" }, { "docid": "f6602db0efe999792a24bd73bbfddd87", "score": "0.5931783", "text": "async def mmset_role(self, ctx):\n if ctx.invoked_subcommand is None or \\\n isinstance(ctx.invoked_subcommand, commands.Group):\n await self.bot.send_cmd_help(ctx)", "title": "" }, { "docid": "4e7f50c4584e2e3bc8ba01bb1b66ac25", "score": "0.59263873", "text": "async def _text_rolereqmsg(self, ctx: commands.Context, *, message: str):\n await self.config.guild(ctx.guild).text.role_req_msg.set(message)\n return await ctx.tick()", "title": "" }, { "docid": "6ff762ef6c05f2b9d334def6528f5ef4", "score": "0.5864869", "text": "async def mmset_role_add(self, ctx, role_name):\n server = ctx.message.server\n role = discord.utils.get(server.roles, name=role_name)\n if role is None:\n await self.bot.say(\"Cannot find that role on this server.\")\n return\n role_settings = self.settings[server.id][\"roles\"]\n role_settings[role.id] = role_name\n roles = [discord.utils.get(server.roles, id=role_id) for role_id in role_settings]\n role_names = [r.name for r in roles]\n await self.bot.say(\"List of permitted roles updated: {}\".format(', '.join(role_names)))\n dataIO.save_json(JSON, self.settings)", "title": "" }, { "docid": "aee22e033ca5a06aae3d3d836455e076", "score": "0.5771831", "text": "async def role(self, ctx: commands.Context, action: str, *target_role_list: str):\n\n role_query = ' '.join(target_role_list)\n action = action.lower()\n\n if ctx.guild is None:\n message = 'This command must be used from a guild. 
Please go to the guild you wish to use the command on' \\\n 'and try again.'\n else:\n if action == RequestAction.ADD.value:\n # find role\n role = self.find_role_in_guild(role_query, ctx.guild)\n if not role:\n await ctx.send(f'No role by the name of `{role_query}` exists in this guild. '\n f'Please check your spelling and try again.')\n return\n\n # make sure it's allowed to be manipulated\n if not self._validate_role_against_whitelist(role):\n await ctx.send(\"You are not allowed to interact with this role.\")\n return\n\n # add role to user\n if self.member_contains_role(role.name, ctx.author):\n message = f'You already have that role.'\n else:\n await ctx.author.add_roles(role, reason='Role added via Manageable bot instance.')\n message = f'You now have the `{role.name}` role.'\n elif action == RequestAction.REMOVE.value:\n # find role\n role = self.find_role_in_guild(role_query, ctx.guild)\n if not role:\n await ctx.send(f'No role by the name of `{role_query}` exists in this guild. '\n f'Please check your spelling and try again.')\n return\n\n # make sure it's allowed to be manipulated\n if not self._validate_role_against_whitelist(role):\n await ctx.send(\"You are not allowed to interact with this role.\")\n return\n\n # remove role from user\n if self.member_contains_role(role.name, ctx.author):\n await ctx.author.remove_roles(role, reason='Role removed via Manageable bot instance.')\n message = f'You no longer have the `{role.name}` role.'\n else:\n message = f'You do not have that role.'\n elif action == RequestAction.LIST.value:\n # list all available roles\n message = \"__**Available roles to add/remove:**__\"\n for role_name in self.config[\"content\"][\"role_whitelist\"]:\n if self.find_role_in_guild(role_name, ctx.guild):\n message += f\"\\n{role_name}\"\n else:\n message = f'Unknown role command `{action}`, please re-enter your command and try again.'\n\n await ctx.send(message)", "title": "" }, { "docid": "f309a348dab0b3cd1101bb7c00e15a7c", "score": "0.576981", "text": "async def setrole(self, ctx, *, role = None):\r\n\t\tblock_list = self.settings.getServerStat(ctx.guild, \"UserRoleBlock\")\r\n\t\tif ctx.author.id in block_list:\r\n\t\t\tawait ctx.send(\"You are currently blocked from using this command.\")\r\n\t\t\treturn\r\n\t\t\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tif not self.settings.getServerStat(server, \"OnlyOneUserRole\"):\r\n\t\t\tawait ctx.invoke(self.addrole, role=role)\r\n\t\t\treturn\r\n\r\n\t\t# Check if we're suppressing @here and @everyone mentions\r\n\t\tif self.settings.getServerStat(server, \"SuppressMentions\"):\r\n\t\t\tsuppress = True\r\n\t\telse:\r\n\t\t\tsuppress = False\r\n\t\t\r\n\t\t# Get the array\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\tif role == None:\r\n\t\t\t# Remove us from all roles\r\n\t\t\tremRole = []\r\n\t\t\tfor arole in promoArray:\r\n\t\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\t\tif not roleTest:\r\n\t\t\t\t\t# Not a real role - skip\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif roleTest in ctx.message.author.roles:\r\n\t\t\t\t\t# We have this in our roles - remove it\r\n\t\t\t\t\tremRole.append(roleTest)\r\n\t\t\tif len(remRole):\r\n\t\t\t\tself.settings.role.rem_roles(ctx.author, remRole)\r\n\t\t\t# Give a quick status\r\n\t\t\tmsg = '*{}* has been moved out of all roles in the 
list!'.format(DisplayName.name(ctx.message.author))\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\t# Check if role is real\r\n\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\tif not roleCheck:\r\n\t\t\t# No luck...\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\t\t\r\n\t\t# Got a role - set it\r\n\t\trole = roleCheck\r\n\r\n\t\taddRole = []\r\n\t\tremRole = []\r\n\t\tfor arole in promoArray:\r\n\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\tif not roleTest:\r\n\t\t\t\t# Not a real role - skip\r\n\t\t\t\tcontinue\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it!\r\n\t\t\t\taddRole.append(roleTest)\r\n\t\t\telif roleTest in ctx.message.author.roles:\r\n\t\t\t\t# Not our intended role and we have this in our roles - remove it\r\n\t\t\t\tremRole.append(roleTest)\r\n\r\n\t\tif not len(addRole):\r\n\t\t\t# We didn't find that role\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role.name), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif len(remRole) or len(addRole):\r\n\t\t\tself.settings.role.change_roles(ctx.author, add_roles=addRole, rem_roles=remRole)\r\n\r\n\t\tmsg = '*{}* has been moved to **{}!**'.format(DisplayName.name(ctx.message.author), Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "80272f28d45cc18e3ca690e835199306", "score": "0.57362795", "text": "async def giveRole(args : List[Union[Guild, Role, int]], reactingUser : Union[User, Member] = None) -> bool:\n dcGuild = args[0]\n dcMember = dcGuild.get_member(reactingUser.id)\n role = args[1]\n msgID = args[2]\n\n if role not in dcMember.roles:\n await dcMember.add_roles(role, reason=\"User requested role toggle via BB reaction menu \" + str(msgID))\n return True", "title": "" }, { "docid": "f5972b886982bdff9aecba32d12aae77", "score": "0.57336706", "text": "async def eventset_role(self, ctx, *, role: str=None):\n\t\tserver = ctx.message.server\n\t\tif role is not None:\n\t\t\trole_obj = [r for r in server.roles if r.name == role][0]\n\t\t\tself.settings[server.id][\"role\"] = role_obj.id\n\t\t\tdataIO.save_json(\n\t\t\t\tos.path.join(\"data\", \"eventmaker\", \"settings.json\"),\n\t\t\t\tself.settings)\n\t\t\tawait self.bot.say(\"Role set to {}\".format(role))\n\t\telse:\n\t\t\tself.settings[server.id][\"role\"] = None\n\t\t\tdataIO.save_json(\n\t\t\t\tos.path.join(\"data\", \"eventmaker\", \"settings.json\"),\n\t\t\t\tself.settings)\n\t\t\tawait self.bot.say(\"Role unset!\")", "title": "" }, { "docid": "613b85d30c5be4430af895ee9551aa4c", "score": "0.56756246", "text": "async def role(self, ctx, *, role: typing.Union[discord.Role, str]):\n public_roles = ctx.bot.roles.get(ctx.guild.id, \"public_roles\", {})\n if isinstance(role, discord.Role):\n roles_by_id = {r[\"id\"]: r for k, r in public_roles.items()}\n # switch public_roles over to a dict with the role id as the key\n public_roles = roles_by_id\n role_key = role.id\n else:\n role_str = role.lower()\n if not role_str or role_str == \"list\":\n lines = [\"I'm aware of the following roles:\", \"\"]\n lines += [\n # \"`{0}`: <@&{1}> -- {2}\".format(k, r[\"id\"], r[\"description\"])\n \"`{0}`: {1} -- {2}\".format(k, r[\"name\"], r[\"description\"])\n for k, r in sorted(public_roles.items())\n ]\n await ctx.send(\n 
\"\\n\".join(lines),\n delete_after=ROLES_MESSAGE_DELETE_DELAY,\n # allowed_mentions=discord.AllowedMentions(\n # everyone=False, users=False, roles=False\n # ),\n )\n return\n role_key = role_str\n try:\n role_dict = public_roles[role_key]\n except KeyError:\n missingstr = (\n f\"I can't find that role. Try `{ctx.prefix}{ctx.command} list` for a\"\n f\" list of self-assignable roles.\"\n )\n await ctx.send(missingstr, delete_after=ROLES_MESSAGE_DELETE_DELAY)\n return\n member = ctx.message.author\n discord_role = ctx.guild.get_role(role_dict[\"id\"])\n if discord_role in member.roles:\n await member.remove_roles(discord_role)\n else:\n await member.add_roles(discord_role)", "title": "" }, { "docid": "8a149ef8744f63d30d02dd3605e03b9b", "score": "0.5608182", "text": "async def _role(ctx, role: discord.Role):\n\n guild = Guild.select(graph, ctx.guild.id).first()\n guild.support_role = role.id\n graph.push(guild)\n\n await ctx.send(f\"Okay, I'll now notify `{role.name}` role on ticket events :white_check_mark:\")", "title": "" }, { "docid": "d1d3515fd5238a133c09968581ed6086", "score": "0.5599685", "text": "async def reactroles(self, ctx: Context) -> None:\n if ctx.guild.id not in self.settings:\n msg = _(\"There are no bound roles in this server.\")\n await ctx.send(msg)\n return\n msg = _(\"Reaction Roles in {guild}\\n\").format(guild=ctx.guild.name)\n for key, role_id in self.settings[ctx.guild.id][\"reaction_roles\"].items():\n channel_id, msg_id, emoji = key.split(\"-\")\n if emoji.isdigit():\n emoji = self.bot.get_emoji(int(emoji))\n if not emoji:\n emoji = _(\"Emoji from another server\")\n role = ctx.guild.get_role(role_id)\n channel = ctx.guild.get_channel(int(channel_id))\n if channel:\n # This can be potentially a very expensive operation\n # so instead we fake the message link unless the channel is missing\n # this way they can check themselves without rate limitng\n # the bot trying to fetch something constantly that is broken.\n message = f\"https://discord.com/channels/{ctx.guild.id}/{channel_id}/{msg_id}\"\n else:\n message = None\n msg += _(\"{emoji} - {role} [Reaction Message]({message})\\n\").format(\n role=role.mention if role else _(\"None\"),\n emoji=emoji,\n message=message if message else _(\"None\"),\n )\n pages = list(pagify(msg))\n await BaseMenu(\n source=ReactRolePages(\n pages=pages,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)", "title": "" }, { "docid": "b661e21a281725a5e22625ae2bf5b10f", "score": "0.559757", "text": "async def role(self, ctx):\n if ctx.invoked_subcommand is None:\n raise commands.CommandNotFound()", "title": "" }, { "docid": "bc9e002c4406fe4ffde1da84314acf2f", "score": "0.5567132", "text": "def roles(ctx):\n resource = 'roles'\n ctx.obj = ControllerClient(resource)", "title": "" }, { "docid": "12c17a4bc63b2363405752c42623e293", "score": "0.5540901", "text": "def control_role(cls):\n return cls._namespace_SIO('SIO_010431')", "title": "" }, { "docid": "eb64b69de93e01e3527836c69b36747c", "score": "0.5515354", "text": "def automate_role_set(request):\n from cfme.configure import configuration\n roles = configuration.get_server_roles()\n old_roles = dict(roles)\n roles[\"automate\"] = True\n configuration.set_server_roles(**roles)\n yield\n configuration.set_server_roles(**old_roles)", "title": "" }, { "docid": "342a1f92fef2616db6fe2d177cb823cb", "score": "0.5505005", "text": "def role_changed(self,user,old_role,new_role,stanza):\n pass", "title": "" }, { "docid": 
"89f5c5861aaf30978c1640e92a8fb05e", "score": "0.5503178", "text": "def role(self, role):\n\n self._role = role", "title": "" }, { "docid": "89f5c5861aaf30978c1640e92a8fb05e", "score": "0.5503178", "text": "def role(self, role):\n\n self._role = role", "title": "" }, { "docid": "89f5c5861aaf30978c1640e92a8fb05e", "score": "0.5503178", "text": "def role(self, role):\n\n self._role = role", "title": "" }, { "docid": "eb1deb46c380ae86c6f616f46d67b84e", "score": "0.54813087", "text": "async def default_role(self, ctx, *, name):\n await self.profiles.data.guild(ctx.author.guild).defaultrole.set(name)\n await ctx.send(_(f\"Default role name set to {name}\"))", "title": "" }, { "docid": "049b752a04bff7c3a833a36ad510cf5c", "score": "0.54716796", "text": "def set_role(self, role):\n assert role in [ROLE_STRIKER, ROLE_DEFENDER, ROLE_SUPPORTER, ROLE_GOALIE]\n self.data[DATA_KEY_ROLE] = role", "title": "" }, { "docid": "f3133f451843e7337621802d508511a5", "score": "0.5464486", "text": "def InvokeSendRoleRequest(self, Arg2):\r\n\t\tArg1 = self.href\r\n\t\treturn self._execute('InvokeSendRoleRequest', payload=locals(), response_object=None)", "title": "" }, { "docid": "b08911dad9bb27e86977ec027219fa0b", "score": "0.54489005", "text": "async def roles(self, ctx):\n\n available = self.get_available_roles(ctx)\n blue_dia = \":small_blue_diamond:\"\n content = \"Here are the roles you can `!join`/`!leave`:\\n\"\n content += \"\\n\".join([f\"{blue_dia} {r}\" for r in available])\n content += f\"\\nFor example: `!join {np.random.choice(available)}`\"\n\n await ctx.send(content)", "title": "" }, { "docid": "07d114c0c1f33a3ea8232d47669efe87", "score": "0.54414266", "text": "async def accept(self, ctx: commands.Context, target: discord.Member):\n bot = self.bot\n guild = ctx.guild\n applicant = get(guild.roles, name=\"Staff Applicant\")\n role = MessagePredicate.valid_role(ctx)\n if applicant in target.roles:\n await ctx.send(\n \"What role do you want to accept {0} as?\".format(target.name)\n )\n try:\n await bot.wait_for(\"message\", timeout=30, check=role)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n role_add = role.result\n await target.add_roles(role_add)\n await target.remove_roles(applicant)\n await ctx.send(\"Accepted {0} as {1}.\".format(target.mention, role_add))\n await target.send(\n \"You have been accepted as {0} in {1}.\".format(role_add, guild.name)\n )\n else:\n await ctx.send(\n \"Uh oh. Looks like {0} hasn't applied for anything.\".format(\n target.mention\n )\n )", "title": "" }, { "docid": "8c611a2833f2690e94c18e670c72e851", "score": "0.543437", "text": "def set_role(self, role):\n self.__role.set(role)", "title": "" }, { "docid": "9740bf3cc8323236a123a073ed0a672e", "score": "0.5420189", "text": "def role(self):\n if isinstance(self, Initiator):\n return \"Initiator\"\n if isinstance(self, Target):\n return \"Target\"", "title": "" }, { "docid": "58b0454b515c15f34fb7177ad7546f52", "score": "0.5408303", "text": "async def join(self, ctx, *, role: discord.Role = None):\n if role is None:\n content = \"Please enter a role from `!roles`.\"\n\n elif role not in self.get_available_roles(ctx):\n content = \"It is not a role you can join... :dizzy_face:\"\n\n else:\n await ctx.author.add_roles(role)\n content = f\"You joined the {role.name} role! 
:smile:\"\n\n bot_msg = await ctx.send(content)\n await asyncio.sleep(30)\n await ctx.channel.delete_messages([ctx.message, bot_msg])", "title": "" }, { "docid": "8e21a9b0a0f5bf65f0671ac4ef42e51b", "score": "0.5406968", "text": "async def role(self, ctx: commands.Context, *, role_name: discord.Role):\n await self.config.guild(ctx.guild).dj_role.set(role_name.id)\n self._dj_role_cache[ctx.guild.id] = role_name.id\n dj_role = self._dj_role_cache.setdefault(\n ctx.guild.id, await self.config.guild(ctx.guild).dj_role()\n )\n dj_role_obj = ctx.guild.get_role(dj_role)\n await self._embed_msg(\n ctx,\n title=_(\"Settings Changed\"),\n description=_(\"DJ role set to: {role.name}.\").format(role=dj_role_obj),\n )", "title": "" }, { "docid": "57c21204170948676b2cf9d30832814b", "score": "0.5394516", "text": "async def reaction_copy_role_set(\n event,\n role: (Role, 'select a role.'),\n):\n check_user_permissions(event)\n \n automation_configuration = get_automation_configuration_for(event.guild_id)\n automation_configuration.set('reaction_copy_role_id', role.id)\n \n return f'Reaction-copy can be used by users with role {role.name} as well.'", "title": "" }, { "docid": "3b88a6942e16503b82faec03b09a8bfb", "score": "0.5339836", "text": "async def addrole(self, ctx, *, role = None):\r\n\t\tblock_list = self.settings.getServerStat(ctx.guild, \"UserRoleBlock\")\r\n\t\tif ctx.author.id in block_list:\r\n\t\t\tawait ctx.send(\"You are currently blocked from using this command.\")\r\n\t\t\treturn\r\n\t\t\r\n\t\tif role == None:\r\n\t\t\tawait ctx.send(\"Usage: `{}addrole [role name]`\".format(ctx.prefix))\r\n\t\t\treturn\r\n\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tif self.settings.getServerStat(server, \"OnlyOneUserRole\"):\r\n\t\t\tawait ctx.invoke(self.setrole, role=role)\r\n\t\t\treturn\r\n\r\n\t\t# Check if we're suppressing @here and @everyone mentions\r\n\t\tif self.settings.getServerStat(server, \"SuppressMentions\"):\r\n\t\t\tsuppress = True\r\n\t\telse:\r\n\t\t\tsuppress = False\r\n\t\t\r\n\t\t# Get the array\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\t# Check if role is real\r\n\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\tif not roleCheck:\r\n\t\t\t# No luck...\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\t\t\r\n\t\t# Got a role - set it\r\n\t\trole = roleCheck\r\n\r\n\t\taddRole = []\r\n\t\tfor arole in promoArray:\r\n\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\tif not roleTest:\r\n\t\t\t\t# Not a real role - skip\r\n\t\t\t\tcontinue\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it!\r\n\t\t\t\tif roleTest in ctx.author.roles:\r\n\t\t\t\t\t# We already have it\r\n\t\t\t\t\tawait ctx.send(\"You already have that role.\")\r\n\t\t\t\t\treturn\r\n\t\t\t\taddRole.append(roleTest)\r\n\t\t\t\tbreak\r\n\r\n\t\tif not len(addRole):\r\n\t\t\t# We didn't find that role\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role.name), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif len(addRole):\r\n\t\t\tself.settings.role.add_roles(ctx.author, addRole)\r\n\r\n\t\tmsg = '*{}* has acquired 
**{}!**'.format(DisplayName.name(ctx.message.author), Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "c0534320fb3e5d2f8f4591fc22bf763c", "score": "0.5332731", "text": "async def roles(self, ctx):\n pass", "title": "" }, { "docid": "c1f84551cedee3d14f5ad0b2c07245bc", "score": "0.53199774", "text": "def role(role):\r\n env.host_string = lib.get_env_host_string()\r\n lib.print_header(\r\n \"Applying role '{0}' to {1}\".format(role, env.host_string))\r\n\r\n # Now create configuration and sync node\r\n data = lib.get_node(env.host_string)\r\n data[\"run_list\"] = [\"role[{0}]\".format(role)]\r\n if not __testing__:\r\n chef.sync_node(data)", "title": "" }, { "docid": "ea5d39ca5625de948417a1f4e6b510a1", "score": "0.52898014", "text": "def role_view(self, role_view):\n\n self._role_view = role_view", "title": "" }, { "docid": "fb45ef1892908fbd655f1f8bcd1e7b71", "score": "0.52673113", "text": "def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()", "title": "" }, { "docid": "c621bed018c9dfa06f0f1fbecb9341ed", "score": "0.5262306", "text": "def __init__(self, request=None, *args, **kwargs):\n super(RoleForm, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "d56d48f14c496fc670d07d70973f2e71", "score": "0.526105", "text": "async def remrole(self, ctx, *, role = None):\r\n\t\tblock_list = self.settings.getServerStat(ctx.guild, \"UserRoleBlock\")\r\n\t\tif ctx.author.id in block_list:\r\n\t\t\tawait ctx.send(\"You are currently blocked from using this command.\")\r\n\t\t\treturn\r\n\r\n\t\tif role == None:\r\n\t\t\tawait ctx.send(\"Usage: `{}remrole [role name]`\".format(ctx.prefix))\r\n\t\t\treturn\r\n\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tif self.settings.getServerStat(server, \"OnlyOneUserRole\"):\r\n\t\t\tawait ctx.invoke(self.setrole, role=None)\r\n\t\t\treturn\r\n\r\n\t\t# Check if we're suppressing @here and @everyone mentions\r\n\t\tif self.settings.getServerStat(server, \"SuppressMentions\"):\r\n\t\t\tsuppress = True\r\n\t\telse:\r\n\t\t\tsuppress = False\r\n\t\t\r\n\t\t# Get the array\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\t# Check if role is real\r\n\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\tif not roleCheck:\r\n\t\t\t# No luck...\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\t\t\r\n\t\t# Got a role - set it\r\n\t\trole = roleCheck\r\n\r\n\t\tremRole = []\r\n\t\tfor arole in promoArray:\r\n\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\tif not roleTest:\r\n\t\t\t\t# Not a real role - skip\r\n\t\t\t\tcontinue\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it!\r\n\t\t\t\tif roleTest in ctx.author.roles:\r\n\t\t\t\t\t# We have it\r\n\t\t\t\t\tremRole.append(roleTest)\r\n\t\t\t\telse:\r\n\t\t\t\t\t# We don't have it...\r\n\t\t\t\t\tawait ctx.send(\"You don't currently have that role.\")\r\n\t\t\t\t\treturn\r\n\t\t\t\tbreak\r\n\r\n\t\tif not len(remRole):\r\n\t\t\t# We didn't find that role\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role.name), ctx.prefix)\r\n\t\t\tawait 
channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif len(remRole):\r\n\t\t\tself.settings.role.rem_roles(ctx.author, remRole)\r\n\r\n\t\tmsg = '*{}* has been removed from **{}!**'.format(DisplayName.name(ctx.message.author), Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "1691248911f2a3b81085072f24febc61", "score": "0.5259887", "text": "def role_edit(self, role_edit):\n\n self._role_edit = role_edit", "title": "" }, { "docid": "f8c6eaeb484d888de2e236e192716265", "score": "0.5248897", "text": "async def punishset_setup(self, ctx):\n server = ctx.message.guild\n default_name = DEFAULT_ROLE_NAME\n # role_id = self.data.get(server.id, {}).get('ROLE_ID')\n role_id = await self.config.guild(server).role_id()\n\n if role_id:\n role = discord.utils.get(server.roles, id=role_id)\n else:\n role = discord.utils.get(server.roles, name=default_name)\n\n perms = server.me.guild_permissions\n if not perms.manage_roles and perms.manage_channels:\n await ctx.send(\n \"I need the Manage Roles and Manage Channels permissions for that command to work.\"\n )\n return\n\n if not role:\n msg = \"The %s role doesn't exist; Creating it now... \" % default_name\n\n msgobj = await ctx.send(msg)\n\n perms = discord.Permissions.none()\n role = await server.create_role(\n server, name=default_name, permissions=perms\n )\n else:\n msgobj = await ctx.send(\"%s role exists... \" % role.name)\n\n if role.position != (server.me.top_role.position - 1):\n if role < server.me.top_role:\n msgobj = msgobj.edit(\n msgobj.content + \"moving role to higher position... \"\n )\n await role.edit(position=server.me.top_role.position - 1)\n else:\n await msgobj.edit(\n msgobj.content + \"role is too high to manage.\"\n \" Please move it to below my highest role.\"\n )\n return\n\n msgobj = await msgobj.edit(msgobj.content + \"(re)configuring channels... 
\")\n\n for channel in server.channels:\n await self.setup_channel(channel, role)\n\n await msgobj.edit(msgobj.content + \"done.\")\n\n if role and role.id != role_id:\n await self.config.guild(server).role_id.set(role.id)", "title": "" }, { "docid": "eea6bdadc289b7ce4080c5c91374d323", "score": "0.5235317", "text": "async def rrlist(self, ctx):\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\turl = await self._get_message_url(ctx)\r\n\t\tif not url: return await ctx.send(\"Reaction role message is not currently set.\")\r\n\t\ttoggle = self.settings.getServerStat(ctx.guild,\"ReactionMessageToggle\",True)\r\n\t\tonly_one = self.settings.getServerStat(ctx.guild,\"OnlyOneUserRole\",True)\r\n\t\tdesc = \"Currently watching [this message]({}) for reactions.\\nReacting will **{}** the target role.\\nUsers can select {}\".format(\r\n\t\t\turl,\r\n\t\t\t\"add or remove\" if toggle else \"only add\",\r\n\t\t\t\"**only one** role at a time.\" if only_one else \"**multiple** roles at a time.\"\r\n\t\t)\r\n\t\t# Gather the roles/reactions\r\n\t\trr_list = self.settings.getServerStat(ctx.guild, \"ReactionMessageList\", [])\r\n\t\tif not rr_list: return await ctx.send(\"There are no reaction roles setup currently.\")\r\n\t\trole_list = []\r\n\t\tfor x in rr_list:\r\n\t\t\trole = ctx.guild.get_role(x[\"role_id\"])\r\n\t\t\tif not role: continue # Doesn't exist, ignore it\r\n\t\t\tname = \"{} ({})\".format(role.name,role.id)\r\n\t\t\t# Check if it's a custom emoji - and give the name, id, and a ping\r\n\t\t\tif x[\"emoji_id\"]:\r\n\t\t\t\temoji = self.bot.get_emoji(x[\"emoji_id\"])\r\n\t\t\t\tif emoji: value = \"{} - `{}`\".format(self._get_emoji_mention(x),self._get_emoji_mention(x))\r\n\t\t\t\telse: value = \"`{}` - Not from a shared server\".format(self._get_emoji_mention(x))\r\n\t\t\telse: value = x[\"emoji_name\"]\r\n\t\t\trole_list.append({\"name\":name,\"value\":value})\r\n\t\treturn await PickList.PagePicker(title=\"Current Reaction Roles\",list=role_list,description=desc,ctx=ctx).pick()", "title": "" }, { "docid": "84d87b461790d06053bf3e9206e41317", "score": "0.52324736", "text": "def role(self) -> Dict[str, str]:\n if self.__role is None:\n self.query_attributes(self.resource_url(), params={'expand': 'contact'})\n\n self.__role = self._context['role']\n\n return self.__role", "title": "" }, { "docid": "2d5baef94541b07266382a310c933df9", "score": "0.5222383", "text": "def orgOfferedMentorRole(self):\n return self.org_role == MENTOR_ROLE", "title": "" }, { "docid": "c5ff3080f1ce2fe72aaf33b56ffba4c5", "score": "0.52202547", "text": "def patch(self, request, id):\n api.keystone.role_update(request, id, request.DATA['name'])", "title": "" }, { "docid": "c5ff3080f1ce2fe72aaf33b56ffba4c5", "score": "0.52202547", "text": "def patch(self, request, id):\n api.keystone.role_update(request, id, request.DATA['name'])", "title": "" }, { "docid": "e1b280540990c3691ec0b7053d03c9cb", "score": "0.5218946", "text": "def automate_role_set():\n from cfme.configure import configuration\n roles = configuration.get_server_roles()\n roles[\"automate\"] = True\n configuration.set_server_roles(**roles)", "title": "" }, { "docid": "8a453ce93bd7111016fb7743b2d776a9", "score": "0.5216102", "text": "async def oneuserrole(self, ctx, *, yes_no = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tawait ctx.send(Utils.yes_no_setting(ctx,\"One user role at a time\",\"OnlyOneUserRole\",yes_no,True))", "title": "" }, { "docid": "0547efbff30ef6596e5ecc2d06360955", "score": "0.5200401", "text": "def 
assign_role(self, valid_role) -> None: # todo: use enums\n bss_api.assign_role(self.environment, self.email, valid_role)", "title": "" }, { "docid": "c01dd1a365ec0bba81f1b607a883c135", "score": "0.5189569", "text": "def type():\n return 'role'", "title": "" }, { "docid": "c01dd1a365ec0bba81f1b607a883c135", "score": "0.5189569", "text": "def type():\n return 'role'", "title": "" }, { "docid": "7b22972a4d4b454c20ab2d031ea29cf9", "score": "0.5181134", "text": "async def post_reaction_for_role_message(self, ctx):\n reaction_for_role_message = (\n self.bot.session.query(ReactionForRoleMessage)\n .where(ReactionForRoleMessage.guild_id == ctx.guild.id)\n .where(ReactionForRoleMessage.author_id == ctx.author.id)\n .first()\n )\n if not reaction_for_role_message:\n await self.send_reply(ctx, ctx.command.name, \"error\")\n return\n if not reaction_for_role_message.emojis or not reaction_for_role_message.roles:\n await self.send_reply(ctx, ctx.command.name, \"no_emoji\")\n return\n\n await self.delete_setup_messages(reaction_for_role_message)\n reaction_for_role_message.guild_id = \"\"\n reaction_for_role_message.author_id = \"\"\n reaction_for_role_message.setup_channel_id = 0\n reaction_for_role_message.setup_message_id = 0\n reaction_for_role_message.preview_message_id = 0\n\n channel = self.bot.get_channel(reaction_for_role_message.channel_id)\n if not channel:\n await self.send_reply(ctx, ctx.command.name, \"channel_error\")\n self.bot.session.delete(reaction_for_role_message)\n return\n message = await channel.send(reaction_for_role_message.text)\n reaction_for_role_message.message_id = message.id\n self.bot.session.update(reaction_for_role_message)\n\n for emoji in filter(None, reaction_for_role_message.emojis.split(\"\\n\")):\n try:\n await message.add_reaction(emoji)\n except Exception:\n continue", "title": "" }, { "docid": "a11d7cec60c565f3e43aef0c331d8496", "score": "0.51700413", "text": "def default_role_granted_via_role(self, node=None):\n\n user_name = f\"user_{getuid()}\"\n role_name = f\"role_{getuid()}\"\n\n if node is None:\n node = self.context.node\n\n with user(node, f\"{user_name}\"), role(node, f\"{role_name}\"):\n\n with When(\"I grant the role to the user\"):\n node.query(f\"GRANT {role_name} TO {user_name}\")\n\n Suite(test=default_role)(grant_target_name=role_name, user_name=user_name)", "title": "" }, { "docid": "abb3f0fc8a22c60730f7291915730ba4", "score": "0.5167347", "text": "def reagent_role(cls):\n return cls._namespace_SIO('SIO_000893')", "title": "" }, { "docid": "387c20d5cce56e777d492d90da13dbd6", "score": "0.51672894", "text": "def role_add(cls, interaction: discord.Interaction) -> ui.View:\n roles = [r for r in allowed_roles(interaction.guild) if r not in interaction.user.roles]\n if not roles:\n raise ValueError(\"I couldn't find any roles to add\")\n options = [discord.SelectOption(label=role.name, value=role.id) for role in roles]\n select = RoleAddSelect(options=options)\n view = cls()\n view.add_item(select)\n return view", "title": "" }, { "docid": "bc6bdab6836d671dacbb12e6b5ad6013", "score": "0.51552355", "text": "def set_user_role(x,idi,setrole):\n logging.info('%s executing Actions.%s',x,Actions.set_user_role.__name__)\n status = 0 \n queryrole = x.role\n role_names = list(roles['role_priviledges'].keys())\n if(setrole not in role_names):\n logging.error('Execution Error: Role %s not present (executing Actions.%s)',setrole,Actions.set_user_role.__name__)\n raise RoleError('Role not present')\n #check_privelege('set_user_role',queryrole)\n 
if(queryrole=='moderator' and setrole == 'admin'):\n logging.error('Execution Error: Moderator %s trying to change user with ID %s to admin OR attempting to change admin privileges(executing Actions.%s)',x,str(idi),Actions.set_user_role.__name__)\n raise RuntimeError('Moderator trying to change someone to admin OR trying to change admin priveleges')\n flag =0\n q_obj = File_Handler.get_file_handler().get_user_obj(idi)\n if(queryrole=='moderator' and q_obj.role=='admin'):\n logging.error('Execution Error: Moderator %s trying to change user with ID %s to admin OR attempting to change admin privileges(executing Actions.%s)',x,str(idi),Actions.set_user_role.__name__)\n raise RuntimeError('Moderator trying to change someone to admin OR trying to change admin priveleges')\n if(q_obj!=-1):\n flag = 1\n status = 1\n logging.warning('Role of user with ID %s has been changed to %s by %s (executing Actions.%s)',idi,setrole,x, Actions.set_user_role.__name__)\n q_obj.role = setrole\n File_Handler.get_file_handler().overwrite(q_obj)\n if(flag==0):\n logging.error('User with ID %s not found (executing Actions.%s)',idi,Actions.set_user_role.__name__)\n raise IndexError(\"ID not found\")\n return status", "title": "" }, { "docid": "8452ee1d54e779760c7e77559a936d42", "score": "0.5149626", "text": "def request_set_control_mode(self, req, mode):\n try:\n self.set_control_mode(mode)\n except Exception as error:\n return (\"fail\", str(error))\n else:\n return (\"ok\",)", "title": "" }, { "docid": "229d39865fda22ef183098783b96e16c", "score": "0.51354194", "text": "def role(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "229d39865fda22ef183098783b96e16c", "score": "0.51354194", "text": "def role(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "510c5a7a6291222d9a69f74259125476", "score": "0.51341546", "text": "def change_state():\n if not current_user.admin:\n disconnect()\n sec = SecurityController.get()\n message = \"\"\n if sec.state == 'disabled':\n # Set to armed\n sec.arm()\n message = get_action('arm').run()\n elif sec.state == 'armed':\n # Set to disabled\n sec.disable()\n message = get_action('disable').run()\n elif sec.state == 'alert':\n # Restore to armed\n sec.arm()\n emit('state change', {'state': sec.state, 'message': message}, broadcast=True)", "title": "" }, { "docid": "4ef8a8082f5beceaa77ac6ff6b1899e7", "score": "0.51261884", "text": "def changelist_view(self, request, *args, **kwargs):\n self.__request__ = request\n return super(RolesAdmin, self).changelist_view(request, *args, **kwargs)", "title": "" }, { "docid": "4dee1f32736679e6c791b4f04f22b055", "score": "0.51180595", "text": "def create_role(self, **kwargs):\n role = super().create_role(**kwargs)\n self.mark_changed(id(self.db.session), rid=role.id)\n return role", "title": "" }, { "docid": "90da00f152a022199d06d5cbc3eb821c", "score": "0.5101593", "text": "def reassign_role(self, role):\n if self.role != role:\n self.role = role\n self.save(update_fields=[\"role\"])\n return None", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "687d82e60969a4d9eb27549b1762cad9", "score": "0.50979733", "text": "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "title": "" }, { "docid": "88b44833a299a0efcd01bc903f7f9c7e", "score": "0.5085387", "text": "def update_role(self, user_role=None):\n\n if not is_basic_identifier(user_role.name):\n raise BadRequest(\"The role name '%s' can only contain alphanumeric and underscore characters\" % user_role.name)\n\n self.clients.resource_registry.update(user_role)", "title": "" }, { "docid": "7c1ce61a9b0d6634f8e31a306410b700", "score": "0.50726414", "text": "async def _voice_roles(self, ctx: commands.Context, *roles: discord.Role):\n await self.config.guild(ctx.guild).voice.roles.set([r.id for r in roles])\n return await ctx.tick()", "title": "" }, { "docid": "e8a2fefb0bf5f01a401061af21757ac1", "score": "0.5070277", "text": "def role_from_first_message( # pylint: disable=unused-argument\n message: Message, receiver_address: Address\n ) -> BaseDialogue.Role:\n return OefSearchDialogue.Role.AGENT", "title": "" }, { "docid": "e248aeb24ffd712a6abee2212cfa5707", "score": "0.506649", "text": "def reactant_role(cls):\n return cls._namespace_SIO('SIO_000879')", "title": "" }, { "docid": "e5427e4bbb1176dba9a7f27f1d45b036", "score": "0.5058849", "text": "async def set_verified_role(self, ctx, *, role: discord.Role):\n await self.set_guild_values(ctx, {\"verified_role_id\": str(role.id)})", "title": "" }, { "docid": "421e589d346d39326e7b3fa5c381eec9", "score": "0.50539213", "text": "def _get_role(self):\n return self.__role", "title": "" }, { "docid": "46c2429ab391c9a8d5ca1e9b1a3da0ed", "score": "0.5051523", "text": "async def rradd(self, ctx, *, role_name_or_id = None):\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\tm = await self._get_message(ctx)\r\n\t\tif not m: return await ctx.send(\"Reaction role message is not currently set. Please set that up first with `{}rrmessage [message_url]`.\".format(ctx.prefix))\r\n\t\t# We should meet prerequisites - let's resolve the role.\r\n\t\tif not role_name_or_id: return await ctx.send(\"No role passed.\")\r\n\t\trole = DisplayName.roleForName(role_name_or_id,ctx.guild)\r\n\t\tif not role: return await ctx.send(\"I couldn't find that role.\")\r\n\t\t# We have the role - make sure it's in the user roles list\r\n\t\tur_list = self.settings.getServerStat(ctx.guild, \"UserRoles\", [])\r\n\t\t# Make sure it's in the user role list\r\n\t\tif not next((x for x in ur_list if int(x[\"ID\"]) == role.id),None): return await ctx.send(\"That role is not in the user role list. 
Please add it first with `{}adduserrole [role]`.\".format(ctx.prefix))\r\n\t\tmessage = await ctx.send(\"Please react to this message with the desired emoji.\")\r\n\t\t# Now we would wait...\r\n\t\tdef check(reaction, user): return reaction.message.id == message.id and user == ctx.author\r\n\t\ttry: reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)\r\n\t\texcept:\r\n\t\t\t# Didn't get a reaction\r\n\t\t\treturn await message.edit(content=\"Looks like we ran out of time - run `{}rradd [role_name_or_id]` to try again.\".format(ctx.prefix))\r\n\t\t# Let's walk through the reaction list and verify what we have\r\n\t\trr_list = self.settings.getServerStat(ctx.guild, \"ReactionMessageList\", [])\r\n\t\temoji_a,emoji_id,emoji_name = (False,None,str(reaction.emoji)) if isinstance(reaction.emoji,str) else (reaction.emoji.animated,reaction.emoji.id,reaction.emoji.name)\r\n\t\t# Check if we are already using that reaction for a different role\r\n\t\tusing_that_emoji = [x for x in rr_list if x[\"emoji_a\"] == emoji_a and x[\"emoji_id\"] == emoji_id and x[\"emoji_name\"] == emoji_name and x[\"role_id\"] != role.id]\r\n\t\tusing_that_role = [x for x in rr_list if x[\"role_id\"] == role.id]\r\n\t\tif using_that_emoji:\r\n\t\t\t# Evaluate the role id - and ensure it exists\r\n\t\t\tusing_role = DisplayName.roleForName(using_that_emoji[0][\"role_id\"],ctx.guild)\r\n\t\t\tif using_role: return await message.edit(content=\"That reaction is already being used for \\\"{}\\\".\".format(Nullify.escape_all(using_role.name)))\r\n\t\t\t# If we got here - it doesn't exist - pop it from that list\r\n\t\t\trr_list.remove(using_that_emoji[0])\r\n\t\tif using_that_role:\r\n\t\t\t# Pop the role from the list so we can re-add it with the new emoji\r\n\t\t\trr_list.remove(using_that_role[0])\r\n\t\t# Add the emoji name/id and role id to the list\r\n\t\trr_list.append({\"emoji_a\":emoji_a,\"emoji_id\":emoji_id,\"emoji_name\":emoji_name,\"role_id\":role.id})\r\n\t\tself.settings.setServerStat(ctx.guild, \"ReactionMessageList\", rr_list)\r\n\t\tawait message.edit(content=\"Reaction for \\\"{}\\\" set to {}\".format(\r\n\t\t\tNullify.escape_all(role.name),\r\n\t\t\tstr(reaction.emoji)\r\n\t\t))", "title": "" }, { "docid": "50d3df74b900fcc51ffaa0603ab1b089", "score": "0.5049692", "text": "async def removeuserrole(self, ctx, *, role = None):\r\n\t\t\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tusage = 'Usage: `{}removeuserrole [role]`'.format(ctx.prefix)\r\n\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\r\n\t\tif role == None:\r\n\t\t\tawait channel.send(usage)\r\n\t\t\treturn\r\n\r\n\t\trr_list = self.settings.getServerStat(ctx.guild, \"ReactionMessageList\", [])\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\t# It' a string - the hope continues\r\n\t\t\t# Let's clear out by name first - then by role id\r\n\t\t\ttry:\r\n\t\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\t\texcept Exception:\r\n\t\t\t\tpromoArray = []\r\n\t\t\tif promoArray == None:\r\n\t\t\t\tpromoArray = []\r\n\r\n\t\t\tfor aRole in promoArray:\r\n\t\t\t\t# Get the role that corresponds to the name\r\n\t\t\t\tif aRole['Name'].lower() == role.lower():\r\n\t\t\t\t\t# We found it - let's remove it\r\n\t\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\t\t# Also remove it from the rr_list\r\n\t\t\t\t\trr_list = [x for x in rr_list if x[\"role_id\"] != 
int(aRole[\"ID\"])]\r\n\t\t\t\t\tself.settings.setServerStat(server, \"ReactionMessageList\", rr_list)\r\n\t\t\t\t\tself.settings.setServerStat(server, \"UserRoles\", promoArray)\r\n\t\t\t\t\tmsg = '**{}** removed successfully.'.format(Nullify.escape_all(aRole['Name']))\r\n\t\t\t\t\tawait channel.send(msg)\r\n\t\t\t\t\treturn\r\n\t\t\t# At this point - no name\r\n\t\t\t# Let's see if it's a role that's had a name change\r\n\r\n\r\n\t\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\t\tif roleCheck:\r\n\t\t\t\t# We got a role\r\n\t\t\t\t# If we're here - then the role is an actual role\r\n\t\t\t\ttry:\r\n\t\t\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\t\t\texcept Exception:\r\n\t\t\t\t\tpromoArray = []\r\n\t\t\t\tif promoArray == None:\r\n\t\t\t\t\tpromoArray = []\r\n\r\n\t\t\t\tfor aRole in promoArray:\r\n\t\t\t\t\t# Get the role that corresponds to the id\r\n\t\t\t\t\tif str(aRole['ID']) == str(roleCheck.id):\r\n\t\t\t\t\t\t# We found it - let's remove it\r\n\t\t\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\t\t\t# Also remove it from the rr_list\r\n\t\t\t\t\t\trr_list = [x for x in rr_list if x[\"role_id\"] != roleCheck.id]\r\n\t\t\t\t\t\tself.settings.setServerStat(server, \"ReactionMessageList\", rr_list)\r\n\t\t\t\t\t\tself.settings.setServerStat(server, \"UserRoles\", promoArray)\r\n\t\t\t\t\t\tmsg = '**{}** removed successfully.'.format(Nullify.escape_all(aRole['Name']))\r\n\t\t\t\t\t\tawait channel.send(msg)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\r\n\t\t\t# If we made it this far - then we didn't find it\r\n\t\t\tmsg = '*{}* not found in list.'.format(Nullify.escape_all(roleCheck.name))\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\t# If we're here - then the role is an actual role - I think?\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\tfor aRole in promoArray:\r\n\t\t\t# Get the role that corresponds to the id\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it - let's remove it\r\n\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\t# Also remove it from the rr_list\r\n\t\t\t\trr_list = [x for x in rr_list if x[\"role_id\"] != role.id]\r\n\t\t\t\tself.settings.setServerStat(server, \"ReactionMessageList\", rr_list)\r\n\t\t\t\tself.settings.setServerStat(server, \"UserRoles\", promoArray)\r\n\t\t\t\tmsg = '**{}** removed successfully.'.format(Nullify.escape_all(aRole['Name']))\r\n\t\t\t\tawait channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we didn't find it\r\n\t\tmsg = '*{}* not found in list.'.format(Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "c077175b51f49fa3aa45ea585e85397f", "score": "0.50399864", "text": "def test_update_role_user(self):\n other_role = \"Visitor\"\n self.user_test.set_role(other_role)\n self.assertEqual(other_role, self.user_test.get_role())", "title": "" }, { "docid": "11b41ec283c997249da4c9b091e9a813", "score": "0.5036335", "text": "def post(self, request):\n new_role = api.keystone.role_create(request, request.DATA['name'])\n return rest_utils.CreatedResponse(\n '/api/keystone/roles/%s' % new_role.id, new_role.to_dict())", "title": "" }, { "docid": "9047c4a1f3e433d25f7e93789daa280e", "score": "0.5035206", "text": "def __str__(self):\n return \"WAMP Role-Change Message (op = '%s', role = '%s')\" % (RoleChange.ROLE_CHANGE_OP_DESC.get(self.op), self.role)", "title": "" }, 
{ "docid": "4de379343a119bae9b6a73b3f64879f0", "score": "0.5025088", "text": "def handle_lecturer_send(message):\n if not current_user.is_authenticated:\n return\n course_id = message['course_id']\n if course_id not in rooms():\n return\n l_session = get_lecture_session(course_id)\n new_state = message['session_control']\n if new_state == 'start' and not l_session.active:\n old_feedbacks = models.SessionFeedback.query.filter_by(session_id=l_session.session_id)\n models.Questions.query.filter_by(session_id=l_session.session_id).delete()\n emit('student_recv', {'command': \"deleteQuestions\"}, room=course_id)\n emit('lecturer_recv', {'command': \"deleteQuestions\"}, room=course_id)\n for feedback in old_feedbacks.all():\n emit('lecturer_recv', {'action': [feedback.action_name, 0]}, room=course_id)\n db.session.delete(feedback)\n db.session.commit()\n l_session.active = True\n elif new_state == 'stop':\n l_session.active = False\n emit('lecturer_recv', {'active': l_session.active}, room=course_id)\n emit('student_recv', {'active': l_session.active}, room=course_id)\n db.session.add(l_session)\n db.session.commit()", "title": "" }, { "docid": "31c640c76a396a4bd85364e4dd1d6b8b", "score": "0.5021508", "text": "async def applysetup(self, ctx: commands.Context):\n bot = self.bot\n guild = ctx.guild\n pred = MessagePredicate.yes_or_no(ctx)\n applicant = get(guild.roles, name=\"Staff Applicant\")\n channel = get(guild.text_channels, name=\"applications\")\n\n await ctx.send(\n \"This will create required channel and role. Do you wish to continue? (yes/no)\"\n )\n try:\n await bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result is False:\n return await ctx.send(\"Setup cancelled.\")\n\n if applicant is None:\n try:\n await guild.create_role(\n name=\"Staff Applicant\", reason=\"Application cog setup\"\n )\n except discord.Forbidden:\n return await ctx.send(\n \"Uh oh. Looks like I don't have permissions to manage roles.\"\n )\n\n if channel is None:\n await ctx.send(\n \"Do you want everyone to see the applications channel? (yes/no)\"\n )\n try:\n await bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result is True:\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(\n send_messages=False\n ),\n guild.me: discord.PermissionOverwrite(send_messages=True),\n }\n else:\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n ),\n guild.me: discord.PermissionOverwrite(read_messages=True),\n }\n try:\n await guild.create_text_channel(\n \"applications\",\n overwrites=overwrites,\n reason=\"Application cog setup\",\n )\n except discord.Forbidden:\n return await ctx.send(\n \"Uh oh. Looks like I don't have permissions to manage channels.\"\n )\n\n await ctx.send(\n \"You have finished the setup! 
Please, move your new channel to the category you want it in.\"\n )", "title": "" }, { "docid": "1def111b05647fe4980e25811d085cda", "score": "0.5018208", "text": "def execute(self, role):\n menu = self.menu[role]()\n menu.execute()", "title": "" }, { "docid": "d02df37a33b427b3ee21a17b316b423f", "score": "0.501728", "text": "def post(self):\n data = request.json\n return save_new_role(data=data)", "title": "" }, { "docid": "2e2be3ed68251293ec74530d02561f86", "score": "0.5006952", "text": "def stepCheckConfigureRolesForm(self, sequence=None, sequence_list=None, **kw):\n response_dict = sequence.get(\"response_dict\")\n if 'command' in response_dict:\n self.assertEquals('show', response_dict['command'])\n self.assertEquals('Configure Roles', response_dict['next'])\n self.assertEquals('Previous', response_dict['previous'])\n self.assertCurrentStep('Your roles settings', response_dict)", "title": "" }, { "docid": "b60fdf071abd1eae33e90910c7c86ae5", "score": "0.5001284", "text": "async def adduserrole(self, ctx, *, role = None):\r\n\t\t\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tusage = 'Usage: `{}adduserrole [role]`'.format(ctx.prefix)\r\n\t\t\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\t\r\n\t\tif role == None:\r\n\t\t\tawait ctx.send(usage)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\t# It' a string - the hope continues\r\n\t\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\t\tif not roleCheck:\r\n\t\t\t\tmsg = \"I couldn't find **{}**...\".format(Nullify.escape_all(role))\r\n\t\t\t\tawait ctx.send(msg)\r\n\t\t\t\treturn\r\n\t\t\trole = roleCheck\r\n\r\n\t\t# Now we see if we already have that role in our list\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\tfor aRole in promoArray:\r\n\t\t\t# Get the role that corresponds to the id\r\n\t\t\tif str(aRole['ID']) == str(role.id):\r\n\t\t\t\t# We found it - throw an error message and return\r\n\t\t\t\tmsg = '**{}** is already in the list.'.format(Nullify.escape_all(role.name))\r\n\t\t\t\tawait channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tpromoArray.append({ 'ID' : role.id, 'Name' : role.name })\r\n\t\tself.settings.setServerStat(server, \"UserRoles\", promoArray)\r\n\r\n\t\tmsg = '**{}** added to list.'.format(Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)\r\n\t\treturn", "title": "" }, { "docid": "fba7045634e0207b212a364e2cacd878", "score": "0.49971524", "text": "async def add(self, ctx, level: int, role: discord.Role):\n await self.profiles._add_guild_role(ctx.guild, level, role.id)\n await ctx.send(_(\"Role configured\"))", "title": "" }, { "docid": "7c46ff82cf156e5d117e2cba3524835d", "score": "0.4989747", "text": "def activator_role(cls):\n return cls._namespace_SIO('SIO_000804')", "title": "" }, { "docid": "bae4a6248bb4a2bd3e07532546efc36c", "score": "0.49882704", "text": "def become(self, state: Union[Follower, Leader, Candidate]):\n logging.info(f\"Server {self.addr} became a {state} from {self.role.__class__}\")\n self.stop()\n self.role = state(self)\n self.role.start()", "title": "" }, { "docid": "8d3315a4f3bbe796a9f653b50946aa63", "score": "0.49862748", "text": "def post(self, request):\n new_role = api.keystone.role_create(request, 
request.DATA['name'])\n return rest_utils.CreatedResponse(\n '/api/keystone/roles/%s' % new_role.id,\n new_role.to_dict()\n )", "title": "" }, { "docid": "17659951560ab3743256dbd646f578f5", "score": "0.4981102", "text": "def target_role(self) -> Optional[pulumi.Input['DbProxyEndpointTargetRole']]:\n return pulumi.get(self, \"target_role\")", "title": "" }, { "docid": "1fad0358e0cdc92ed40cbced1e5f9f4e", "score": "0.49795496", "text": "def change_stakeholder_role(self, role_property, stakeholder):\n user_id = session[\"user_id\"]\n user_token = session[\"user_token\"]\n case_id = session[\"case_id\"]\n \n try:\n values_list = dict(request.form)[\"select_\" + role_property + \"_\" + stakeholder]\n except KeyError:\n values_list = []\n \n self.case_db_proxy.change_stakeholder(user_id=user_id, user_token=user_token, case_id=case_id, role_property=role_property,\n stakeholder=stakeholder, values_list=values_list)\n return self.add_stakeholder_dialogue_transition()", "title": "" }, { "docid": "7a488c3735a0b020c59ce1a4e0669510", "score": "0.49766368", "text": "async def rolesetprotectedmember(self, ctx):\n manager = MessageManager(ctx)\n guild_db = await Guild.get(guild_id=ctx.guild.id)\n\n roles = []\n cont = True\n while cont:\n name = await manager.send_and_get_response(\n \"Enter the name of one or more roles that denote(s) a 'protected member', one per line. \"\n \"(enter `stop` when done, or enter `cancel` to cancel command entirely)\"\n )\n if name.lower() == \"cancel\":\n return await manager.send_and_clean(\"Canceling command\")\n elif name.lower() == \"stop\":\n cont = False\n else:\n role_obj = discord.utils.get(ctx.guild.roles, name=name)\n if role_obj:\n roles.append(\n Role(\n guild=guild_db,\n role_id=role_obj.id,\n is_protected_clanmember=True,\n )\n )\n else:\n return await manager.send_and_clean(\n f\"Could not find a role with name `{name}`\"\n )\n\n if roles:\n await Role.bulk_create(roles)\n\n return await manager.send_and_clean(\"Protected member roles have been set\")", "title": "" }, { "docid": "340511e19e2dff156bc704771435a67a", "score": "0.49710506", "text": "def handle_lecturer_course_new(message):\n if not current_user.is_authenticated:\n return\n code = message['code']\n name = message['name']\n # TODO: fix unique constraint in new migration\n if models.Course.query.filter_by(code=code).count() > 0:\n return\n new_course = user_datastore.create_role(code=code, name=name)\n user_datastore.add_role_to_user(current_user, new_course)\n db.session.commit()\n\n emit('lecturer_course_new_recv', {\n 'code': code,\n 'name': name\n })", "title": "" }, { "docid": "40a461330d44361a504638787fd207f9", "score": "0.49704587", "text": "def _handle_choose(self, msg):\n\n actions = self._choose_actions(*msg)\n send_msg(self._serialize_actions(actions), self.sock)", "title": "" }, { "docid": "e8c9957889d97fac2079233ad9303a18", "score": "0.4963049", "text": "def get_context_data(self, *args, **kwargs):\n context = super().get_context_data()\n context['name'] = 'role'\n return context", "title": "" } ]
07b2fb0da537e30f7339871c8dec4c79
r""" Default train function takes a torch.utils.data.Dataset and train the model on the dataset
[ { "docid": "29a7a7381c73f8b1eda040c948d86d6c", "score": "0.0", "text": "def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn):\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n train_loss, correct, total = 0, 0, 0\n for i in range(10):\n model.train()\n for data, target in train_data:\n optimizer.zero_grad()\n output = model(data)\n loss = loss_fn(output, target)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n _, predicted = torch.max(output, 1)\n total += target.size(0)\n correct += (predicted == target).sum().item()\n return train_loss, correct, total", "title": "" } ]
[ { "docid": "8d17761d82e3f4144fbbd8ca43b5cb03", "score": "0.81942797", "text": "def train(self, dataset):\n raise NotImplementedError()", "title": "" }, { "docid": "8d17761d82e3f4144fbbd8ca43b5cb03", "score": "0.81942797", "text": "def train(self, dataset):\n raise NotImplementedError()", "title": "" }, { "docid": "82f7cc453ff9f9c8d0d6d12d85e14834", "score": "0.7412993", "text": "def train_model(dataset, labels):", "title": "" }, { "docid": "49830a699e0de9a5d173ef6c9289a0f7", "score": "0.72289234", "text": "def train(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "647d823dfee9c5e3ee890ab294d41b8b", "score": "0.7202319", "text": "def train(self, dataset: ds.Dataset, batch_size, epochs, optimizer) -> None:\n\n # compile model\n self.model.compile(optimizer=optimizer,\n loss=keras.losses.binary_crossentropy,\n metrics=['accuracy'])\n\n # train model\n self.training_history = self.model.fit(x=[dataset.train_features, dataset.train_images], y=dataset.train_labels,\n validation_data=(\n [dataset.test_features, dataset.test_images], dataset.test_labels),\n batch_size=batch_size, epochs=epochs)", "title": "" }, { "docid": "40eb2081c315fd788861f45e1ee94603", "score": "0.7059714", "text": "def train(self, data_set: ModelDataSet, log=False):\n pass", "title": "" }, { "docid": "8aa0c9b486f27a0ebc87c28bc402d5c5", "score": "0.6997953", "text": "def train(self, *args, **kwargs):\n return self._train(*args, **kwargs)", "title": "" }, { "docid": "3a44d37e029308c50cc9efdf13458f1b", "score": "0.695977", "text": "def _do_training(self, train_data):\r\n pass", "title": "" }, { "docid": "e28402175dbcc4bc69ce1c8c734eb70a", "score": "0.68820435", "text": "def train(dataset, data_loader, model, criterion, optimizer):\n # put the model in train mode\n model.train()\n\n # calculate the number of batches\n num_batches = int(len(dataset) / data_loader.batch_size)\n\n # init tqdm to track progress\n tk0 = tqdm(data_loader, total=num_batches)\n\n # loop over all batches\n for data in tk0:\n # fetch input images and masks\n # from dataset batch\n inputs = data[\"images\"]\n targets = data[\"mask\"]\n\n # move images and masks to cpu/gpu device\n inputs = inputs.to(config.DEVICE, dtype=torch.float)\n targets = targets.to(config.DEVICE, dtype=torch.float)\n\n #zero grad the optimzer\n optimizer.zero_grad()\n\n # forward step of model\n outputs = model(inputs)\n\n # calculate loss\n loss = criterion(outputs, targets)\n\n # backward loss is calculated on a scaled loss\n # context since we are using mixed precision training\n # if you are not using mixed precision training,\n # you can use loss.backward() and delete the following\n # two lines of code\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n # step the optimizer\n optimizer.step()\n\n # close tqdm\n tk0.close()", "title": "" }, { "docid": "50bcd4fec6d4a6fd492aa90eb57feadb", "score": "0.68596995", "text": "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n update = True\n dataset_size = dataset.x.shape[0]\n batch_num = 1\n D = dataset.iterate_forever(dataset_size)\n while update == True:\n update = False\n batch_num += 1\n X, Y = D.__next__()\n for x_array, y_scalar in zip(X.data, Y.data):\n x = nn.Constant(numpy.array([x_array]))\n y = nn.Constant(y_scalar)\n if self.get_prediction(x) != y_scalar:\n update = True\n self.w.update(x, nn.as_scalar(y))", "title": "" }, { "docid": "7870a9f238bcccdbbbcb61f270353f2e", "score": "0.68422854", "text": "def train_model(self, model, dataset):\n #convert DataFrame to CSV and read 
in as deepchem.Dataset via deepchem.CSVLoader\n \n dataset.to_csv(\"training_dataset.csv\")\n \n featurizer = dc.feat.ConvMolFeaturizer()\n loader = dc.data.CSVLoader(tasks=[\"bace\", \"esol\", \"logD\"], smiles_field=\"SMILES\", featurizer=featurizer)\n\n dataset_feat = loader.featurize(\"training_dataset.csv\")\n \n transformer = dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset_feat)\n dataset_feat = transformer.transform(dataset_feat)\n\n model.fit(dataset_feat, nb_epoch=self.epochs, deterministic=True, restore=False)", "title": "" }, { "docid": "8fdc239b39d2a9388929bdf41961e2de", "score": "0.68143445", "text": "def train(model, dataset, model_file, epoch):\n try:\n trainer = Trainer(model, dataset, model_file)\n trainer.train(epoch)\n except Exception as e:\n logging.error(e, exc_info=True)", "title": "" }, { "docid": "9e2befa8471510844e562a005ca51c87", "score": "0.67871904", "text": "def train(self, training_set): # override me on your classifier class!\n pass", "title": "" }, { "docid": "52ad041890508bb7d114fadc736c71f7", "score": "0.6786181", "text": "def train(self, training_data, cfg, **kwargs):\n pass", "title": "" }, { "docid": "73abece305b0dec3efdb5e80d5e2b783", "score": "0.67619497", "text": "def train(self, data_from_labeled_set: tf.data.Dataset,\n data_from_unlabeled_set: List[tf.Tensor]) -> None:\n raise NotImplementedError(\"Pure Abstract Method\")", "title": "" }, { "docid": "eafb3d9a7c47fe544a4651ca20ac85dd", "score": "0.6757469", "text": "def train(self,data_set):\n\t\tlogging.debug ('Training on dataset ')\n\t\t# logging.debug (data_set.get_items())\n\t\t'''\n\t\t`train' Usage\n\t\t=============\n\n\t\tUsage: train [options] training_set_file [model_file]\n\t\toptions:\n\t\t-s type : set type of solver (default 1)\n\t\t 0 -- L2-regularized logistic regression (primal)\n\t\t 1 -- L2-regularized L2-loss support vector classification (dual)\n\t\t 2 -- L2-regularized L2-loss support vector classification (primal)\n\t\t 3 -- L2-regularized L1-loss support vector classification (dual)\n\t\t 4 -- multi-class support vector classification by Crammer and Singer\n\t\t 5 -- L1-regularized L2-loss support vector classification\n\t\t 6 -- L1-regularized logistic regression\n\t\t 7 -- L2-regularized logistic regression (dual)\n\t\t-c cost : set the parameter C (default 1)\n\t\t-e epsilon : set tolerance of termination criterion\n\t\t -s 0 and 2\n\t\t |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n\t\t where f is the primal function and pos/neg are # of\n\t\t positive/negative data (default 0.01)\n\t\t -s 1, 3, 4 and 7\n\t\t Dual maximal violation <= eps; similar to libsvm (default 0.1)\n\t\t -s 5 and 6\n\t\t |f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,\n\t\t where f is the primal function (default 0.01)\n\t\t-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n\t\t-wi weight: weights adjust the parameter C of different classes (see README for details)\n\t\t-v n: n-fold cross validation mode\n\t\t-q : quiet mode (no outputs)\n\n\t\tOption -v randomly splits the data into n parts and calculates cross\n\t\tvalidation accuracy on them.\n\n\n\n\n\n\t\t'''\n\t\t#\n\t\ttrainer_package_parameters=self.trainer_settings['trainer_package_parameters']\n\t\tif(self.trainer_settings['perform_grid_search']==True):\n\t\t\t# perform grid search to find the best 
parameters\n\t\t\t\n\t\t\t(c_opt,g_opt)=self.grid_search(data_set)\n\t\t\ttrainer_package_parameters['-c']=str(c_opt)\n\n\t\t\ttrainer_package_parameters['-g']=str(g_opt)\n\t\t\t\n\t\targs=[]\n\t\t\n\t\t\n\t\targs.append(self.trainer_settings['training_program_path'])\n\t\tfor param in trainer_package_parameters:\n\t\t\targs.append(param)\n\t\t\targs.append(str(trainer_package_parameters[param]))\n\t\tself.create_training_set_file_from_dataset(data_set)\n\t\ttraining_file_name=os.path.join(self.trainer_settings['temporary_folder_location'],self.trainer_settings['training_set_filename'])\n\t\t\n\t\targs.append(training_file_name)\n\t\t\n\t\tmodel_file=os.path.join(self.trainer_settings['temporary_folder_location'],self.trainer_settings['model_filename'])\n\t\targs.append(model_file)\n\t\tlogging.debug (args)\n\t\t#logging.debug 'args for train'\n\t\t#logging.debug args\n\t\t# call train \n\t\t#p=subprocess.Popen(args,stdout=subprocess.PIPE)\n\t\tfnull=open(os.devnull,'w')\n\n\t\t# p=subprocess.call(args,stdout=fnull)\n\t\t# fnull.close()\n\t\t\n\t\t# TODO the persistance of models\n\t\tlearned_model_object=LibSVMModel(self.trainer_settings['temporary_folder_location'],self.trainer_settings['model_filename'])\n\t\tlogging.debug(learned_model_object)\n\t\t#LibSVMTrainer._file_suffix+=1\n\t\t\n\t\treturn(learned_model_object)", "title": "" }, { "docid": "d383ec34be542d6b1dab8828ac60d994", "score": "0.67458767", "text": "def train(self, data, target, **kwargs):\n self.data = self.predict(data)", "title": "" }, { "docid": "d383ec34be542d6b1dab8828ac60d994", "score": "0.67458767", "text": "def train(self, data, target, **kwargs):\n self.data = self.predict(data)", "title": "" }, { "docid": "d383ec34be542d6b1dab8828ac60d994", "score": "0.67458767", "text": "def train(self, data, target, **kwargs):\n self.data = self.predict(data)", "title": "" }, { "docid": "f58d6ab461bbb950d1c2c0be915a8d97", "score": "0.6729826", "text": "def train_step(self, *args, **kwargs):\n raise NotImplementedError('Implement to run training')", "title": "" }, { "docid": "4277bf6eef67f67e04a4fc43ab0bb483", "score": "0.67241436", "text": "def train(args):\n # === choose which dataset to build:\n if args.dataset == 'blobs':\n dataloader_fn = load_blobs\n input_dim = 2\n if args.dataset == 'moons':\n dataloader_fn = load_moons\n input_dim = 2\n if args.dataset == 'gausses':\n dataloader_fn = load_gausses\n input_dim = 2\n if args.dataset == 'mnist':\n dataloader_fn = load_mnist\n input_dim = 28*28\n if args.dataset == 'svhn':\n dataloader_fn = load_svhn\n input_dim = 32*32*3\n if args.dataset == 'cifar10':\n dataloader_fn = load_cifar10\n input_dim = 32*32*3\n if args.dataset == 'tfd':\n raise NotImplementedError(\"[train] Toronto Faces Dataset unsupported right now. 
Sorry!\")\n dataloader_fn = load_tfd\n input_dim = None\n\n # === build model & optimizer:\n model = NICEModel(input_dim, args.nhidden, args.nlayers)\n if (args.model_path is not None):\n assert(os.path.exists(args.model_path)), \"[train] model does not exist at specified location\"\n model.load_state_dict(torch.load(args.model_path, map_location='cpu'))\n model.to(DEVICE)\n opt = optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1,args.beta2), eps=args.eps)\n\n # === choose which loss function to build:\n if args.prior == 'logistic':\n nice_loss_fn = LogisticPriorNICELoss(size_average=True)\n elif args.prior == 'binomial':\n nice_loss_fn = BinomialPriorNICELoss(size_average=True)\n else:\n nice_loss_fn = GaussianPriorNICELoss(size_average=True)\n\n def loss_fn(fx, DEVICE):\n \"\"\"Compute NICE loss w/r/t a prior and optional L1 regularization.\"\"\"\n if args.lmbda == 0.0:\n return nice_loss_fn(fx, model.scaling_diag, DEVICE, args.alpha)\n else:\n return nice_loss_fn(fx, model.scaling_diag, DEVICE, args.alpha) + args.lmbda*l1_norm(model, include_bias=True)\n\n # === train over a number of epochs; perform validation after each:\n path = date.today().strftime('%m_%d_')+\\\n '_dataset={}_alpha={}_prior={}_batch_size={}_nlayers={}_nhidden={}_epochs={}_'.format(args.dataset, args.alpha, args.prior, args.batch_size, args.nlayers, args.nhidden, args.num_epochs)+get_random_string()\n path_plots = 'runs/'+str(args.dataset)+'/'+path\n path_tensorboard = 'logs/'+str(args.dataset)+'/'+path\n if os.path.isdir(path_plots):\n shutil.rmtree(path_plots)\n os.makedirs(path_plots)\n\n writer = SummaryWriter(log_dir=path_tensorboard)\n\n for t in range(args.num_epochs):\n print(\"* Epoch {0}:\".format(t))\n dataloader = dataloader_fn(train=True, batch_size=args.batch_size, alpha=args.alpha)\n losses = []\n last_loss = 0.0\n for inputs, _ in tqdm(dataloader):\n opt.zero_grad()\n loss = loss_fn(model(inputs.to(DEVICE)), DEVICE)\n a = loss\n a = a.cpu().detach().numpy()\n loss.backward()\n opt.step()\n last_loss = a\n losses.append(a)\n writer.add_scalar('Loss/train_mean', np.mean(np.array(losses)), t+1)\n writer.add_scalar('Loss/train', last_loss, t+1)\n \n # save model to disk and delete dataloader to save memory:\n if t % args.save_epoch == 0 and args.save:\n _dev = 'cuda' if CUDA else 'cpu'\n _fn = \"nice.{0}.l_{1}.h_{2}.p_{3}.e_{4}.{5}.pt\".format(args.dataset, args.nlayers, args.nhidden, args.prior, t, _dev)\n torch.save(model.state_dict(), os.path.join(args.savedir, _fn))\n print(\">>> Saved file: {0}\".format(_fn))\n del dataloader\n \n # perform validation loop:\n vmin, vmed, vmean, vmax = validate(model, dataloader_fn, nice_loss_fn, args.alpha)\n print(\">>> Validation Loss Statistics: min={0}, med={1}, mean={2}, max={3}\".format(vmin,vmed,vmean,vmax))\n if args.dataset in ['blobs', 'moons', 'gausses']:\n validate_outliers(model, dataloader_fn, t+1, path_plots, args.alpha)\n writer.add_scalar('Validation/vmin', vmin, t+1)\n writer.add_scalar('Validation/vmed', vmed, t+1)\n writer.add_scalar('Validation/vmean', vmean, t+1)\n writer.add_scalar('Validation/vmax', vmax, t+1)\n writer.close()", "title": "" }, { "docid": "12c0bfe8a4d8e7b6bee426e4ef8b95ff", "score": "0.6708278", "text": "def forward_train(self, data, **kwargs):\n raise NotImplementedError(\n 'In MMGeneration, we do NOT recommend users to call'\n 'this function, because the train_step function is designed for '\n 'the training process.')", "title": "" }, { "docid": "71c1eaa7621a03db9797e2d7f26c9672", "score": "0.67068344", "text": 
"def train_datasets(self, dataset_def, dataset_cln, test_cln):\n # training rnn\n self.train_clean(dataset_cln, test_cln)\n # training classifier\n self.train_pred(dataset_def, dataset_cln)", "title": "" }, { "docid": "4535c37ac9ab83df5fcf7c11ef3f1946", "score": "0.6699649", "text": "def train(model, data, optimizer, device):\n model.train()\n for samples, labels in data:\n samples, labels = samples.to(device), labels.to(device)\n optimizer.zero_grad()\n results = model(samples)\n loss = nn.functional.nll_loss(results, labels)\n loss.backward()\n\n optimizer.step()", "title": "" }, { "docid": "83e6e72a6bc3d381d3a64026688fdaec", "score": "0.6692118", "text": "def train_fn(data_loader, model, loss_fn, optimizer, device, verbose, epoch):\n model.train()\n tr_loss = 0\n counter = 0\n if verbose:\n losses = AverageMeter()\n tk0 = tqdm(enumerate(data_loader), total=len(data_loader))\n else:\n tk0 = enumerate(data_loader)\n for bi, d in tk0:\n targets = d[\"target\"].to(device, dtype=torch.float)\n images = d[\"image\"].to(device, dtype=torch.float)\n optimizer.zero_grad()\n outputs = model(images)\n loss = loss_fn(outputs, targets)\n tr_loss += loss.item()\n counter += 1\n loss.backward()\n optimizer.step()\n if verbose:\n losses.update(loss.item(), targets.size(0))\n tk0.set_postfix(loss=losses.avg)\n return tr_loss / counter", "title": "" }, { "docid": "944c9196c398272f9bbae3da1fa51e59", "score": "0.6691064", "text": "def train(self):\n x, y = self.get_data()\n if x and y:\n self.model, self.scaler = self.classifier.create_model(x, y)", "title": "" }, { "docid": "76f978d8226e75afbde71bff1e01b23d", "score": "0.66828406", "text": "def train(self, profile_data, validate = True):\n\n\t\tsamples, labels = self.__build_feats(profile_data)\n\n\t\t# Samples are transformed into features in order to train\n\t\tfeats = self.vectorizer.fit_transform(samples)\n\t\tfeats = self.selector.fit_transform(feats, labels)\n\t\tself.model.fit(feats, labels)\n\n\t\t# Validation process\n\t\tif validate: self.__validate(\n\t\t\tsamples = samples,\n\t\t\tlabels = labels\n\t\t)", "title": "" }, { "docid": "7b028054723e51a3da8da689729ac664", "score": "0.66741", "text": "def train_dataloader():\n dataset.switchmode('train')\n if args.sanity_check != 1:\n dataset.resample()\n return DataLoader(dataset, args.train_batch_size, shuffle=True,\n num_workers=args.num_workers)", "title": "" }, { "docid": "3c77bcc6ad08a3d702afd48158c9a363", "score": "0.6667745", "text": "def train(self, x_train, y_train):\n self.algorithm.fit(x_train, y_train)", "title": "" }, { "docid": "9b8d273b419be1da5bf29e1411a79f38", "score": "0.6664037", "text": "def train_model(self, train_data: DataLoader) -> None:\n # define optimizer to be used - this updates the parameters of the model\n # once the wieghts have been computed\n # two optimizers are supported\n\n # todo: which parameters of the optimizers are to be tunable?\n if self.optimizer == \"adam\":\n optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)\n elif self.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)\n else:\n raise NotImplementedError(\"Invalid optimization strategy specified\")\n\n # set the model to training mode - all layers to be trained, parameters to be learned\n self.model.train()\n\n # overall training start time (tick)\n train_start_time = datetime.datetime.now()\n\n # use the entire training dataset to train the model for the number of epochs\n for epoch in range(self.num_epochs):\n start = 
datetime.datetime.now().strftime(\"%H:%M:%S\") # epoch start time\n print(\"\\nTraining Epoch: {epoch} at {start_time}\".format(epoch=epoch, start_time=start))\n\n total_loss_per_epoch = 0.0 # used to accumulate the loss for each epoch\n\n # zero the gradients so that they don't accumulate\n optimizer.zero_grad()\n\n # define loss function that will be used to compute the loss\n # CrossEntropyLoss is used as the output is modelled as categorical\n criterion = torch.nn.CrossEntropyLoss()\n\n # load the data iterable through a progress bar\n data_progress = tqdm(train_data, desc=\"\\n Training Progress:\")\n\n # loop through the batches of input training dataset\n for idx, (batch_logs, batch_labels) in enumerate(data_progress):\n output = self.model(input_data=batch_logs, device=self.device) # use model to predict output\n loss = criterion(\n output, batch_labels.to(self.device)\n ) # compute loss between predicted output and labels\n total_loss_per_epoch += float(loss) # accumulate loss\n loss.backward() # perform back-propagation and compute gradients\n optimizer.step() # use gradients to update weights\n optimizer.zero_grad() # clear existing gradients to prevent accumulation\n print(\n \"Epoch: {epoch} Train Loss: {train_loss:.5f}\".format(\n epoch=epoch, train_loss=total_loss_per_epoch / (idx + 1)\n )\n )\n\n # overall model training time (tock)\n train_end_time = datetime.datetime.now()\n self.training_time = train_end_time - train_start_time\n\n # save the trained model parameters and the optimizer state dict\n data_to_save = {\"state_dict\": self.model.state_dict(), \"optimizer_state_dict\": optimizer.state_dict()}\n model_path = self.path + self.ie_name + \"_model.pth\"\n torch.save(data_to_save, model_path)\n\n if self.verbose:\n print(\"\\nModel training complete. 
Total training time: {train_time}\".format(train_time=self.training_time))\n print(\"\\nModel parameters saved at: {model_path}\".format(model_path=model_path))", "title": "" }, { "docid": "eed56109dc185f47e77113edd1b09c02", "score": "0.66556233", "text": "def train(self,dataset):\n #data = []\n #self._clusters=[]\n self._sample_shape=dataset.samples_original.shape[1:]\n #TODO: assert intervals in sample shape\n\n ##print \"# \", dataset.samples.shape\n #for i_l, lab in enumerate(dataset.uniquelabels):\n # d_tmp = dataset.samples_original[dataset.labels==lab]\n # data.append(self.calculate_power(d_tmp))\n self._is_trained = True", "title": "" }, { "docid": "192a556de9c4ae32bdc6e00dbd44cbdd", "score": "0.6655501", "text": "def train(self, train_data, train_labels, strategy, val_set=None, \n epochs=10, batch_size=32, verbose=0): \n try:\n assert(isinstance(strategy, str))\n assert(strategy == \"MT\" or strategy == \"CL\" or \n strategy == \"Full\" or strategy == \"SPL\")\n\n except AssertionError:\n print(\"Error in function train of CustomModel: the strategy must \"\n \"be a string, either Full, MT, CL or SPL\")\n exit(1)\n\n try:\n assert(len(train_labels.shape) != 1)\n assert(train_labels.shape[1] == 10 or train_labels.shape[1] == 2)\n\n except AssertionError:\n # If the labels are not one hot\n self.loss_function = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True)\n\n # Initialize arrays\n self.train_acc = np.zeros(epochs, dtype=np.float32)\n self.train_loss = np.zeros(epochs, dtype=np.float32)\n self.val_acc = np.zeros(epochs, dtype=np.float32)\n self.val_loss = np.zeros(epochs, dtype=np.float32)\n\n if strategy == \"MT\" or strategy == \"Full\":\n self.simple_train(train_data, train_labels, val_set, epochs,\n batch_size, verbose)\n\n elif strategy == \"CL\":\n self.CL_train(train_data, train_labels, val_set, epochs, batch_size,\n verbose)\n\n elif strategy == \"SPL\":\n self.SPL_train(train_data, train_labels, val_set, epochs,\n batch_size, verbose)", "title": "" }, { "docid": "4d216f9f58d91ec2cd14b95310cae5f2", "score": "0.66512674", "text": "def train_input_fn(dataset, batch_size):\n\t# Convert the inputs to a Dataset.\n\tdataset = tf.data.Dataset.from_tensor_slices(({\"x\":dataset[0]},dataset[1]))\n\n\t# Shuffle, repeat, and batch the examples.\n\tdataset = dataset.shuffle(100).repeat().batch(batch_size)\n\n\tprint( dataset)\n\n\t# Return the dataset.\n\treturn dataset", "title": "" }, { "docid": "4423927a3bdc164b9f5f69ccc3f4db3a", "score": "0.6634435", "text": "def train(self,training_data):\n\n print('train method of simple KNN classifier')", "title": "" }, { "docid": "13745723d2549f5602cd0e0d4ebb3149", "score": "0.6630888", "text": "def train(self):\n super().train()\n X = np.array([d[0] for d in self.trainData])\n Y = np.array([d[1] for d in self.trainData])\n # training\n try:\n self.clf.fit(X,Y)\n except ValueError:\n for i in X:\n pass\n raise", "title": "" }, { "docid": "1097db1bc687e85d1292091dd93f31c1", "score": "0.6628072", "text": "def train(model, data, settings):\n print(\"-- RUNNING TRAINING --\")\n\n x_train, y_train = read_data(data)\n model.fit(x_train, y_train, epochs=settings['epochs'], batch_size=settings['batch_size'], verbose=True)\n\n print(\"-- TRAINING COMPLETED --\")\n return model", "title": "" }, { "docid": "2bf480ba516ca9b9785790394ce3f265", "score": "0.66077876", "text": "def train(self,dataset):\n\n data = []\n self._sample_shape=dataset.samples_original.shape[1:]\n self._clusters=[] \n self.search_clusters(data)\n self._is_trained = 
True", "title": "" }, { "docid": "9c14c5c2c6bb9fccef3cf7a3d4281b67", "score": "0.66051596", "text": "def train(self,testing_data):\n\n print('train method')", "title": "" }, { "docid": "5daf8494112c9d60baae3e684008b08c", "score": "0.66014826", "text": "def train(train_data=None, train_target=None, do_log=False):\n torch.manual_seed(1234)\n model = Net()\n optimizer = optim.SGD(model.parameters(),\n lr=0.01, momentum=0.5)\n\n num_batches = train_data.shape[1]\n\n if (do_log):\n print(\"Started Training\")\n total_data = len(train_data)\n epochs = 5\n total_steps = epochs * total_data\n\n for epoch in range(epochs):\n epoch_loss = 0.0\n count = 0\n for data, target in zip(train_data, train_target):\n data = np.reshape(data, (data.shape[0], 1, data.shape[1], data.shape[2])) / 128.0\n count = count + 1\n result = '{0:.4g}'.format((count / float(total_steps)) * 100.0)\n print(\"Progress {}% \\r\".format(result), end='\\r')\n optimizer.zero_grad()\n output = model(data)\n # this comes with data loading mechanism use target or target.long()\n # depending on network specifications.\n target = target.long()\n loss = F.nll_loss(output, target)\n epoch_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n print('Epoch ', epoch, ': ', epoch_loss / num_batches)\n return model", "title": "" }, { "docid": "38f4327e0aec2c67dde1fefcb79346b5", "score": "0.65897167", "text": "def train(self, dataloader=None, mode=True):\n self.check_initialized()\n if mode:\n self.model.train()\n else:\n self.model.eval()\n if dataloader is not None:\n self.dataloader = dataloader\n self.data_iter = iter(dataloader)", "title": "" }, { "docid": "0ac186713311ed2fd2581a71f5724551", "score": "0.65845776", "text": "def train(self, dataset, remaining_time_budget=None):\n # Convert training dataset to necessary format and\n # store as self.domain_dataset_train\n self.set_domain_dataset(dataset, is_training=True)\n\n # Train the model\n self.domain_model.train(self.domain_dataset_train,\n remaining_time_budget=remaining_time_budget)\n\n # Update self.done_training\n self.done_training = self.domain_model.done_training", "title": "" }, { "docid": "c3080b59a7b6b70d75946be289001667", "score": "0.6583978", "text": "def train(dataset):\n train_filename = f'../data/{dataset}/train'\n # train\n t = learn_transitions(train_filename)\n e = learn_emissions(train_filename)\n return t, e", "title": "" }, { "docid": "d60be20e4b746c3f03437bcb58dfb4eb", "score": "0.6577106", "text": "def train_model(self):\n pass", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.6569382", "text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.6569382", "text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "0784d9a91afc52af97137ea1c5bec5ea", "score": "0.65665835", "text": "def train(self):\n self.model.train()", "title": "" }, { "docid": "c056bf47e6d31bcaadfa70c0fad185d9", "score": "0.6559703", "text": "def train(self,dataset):\n data = []\n self._clusters=[]\n self._sample_shape=dataset.samples_original.shape[1:]\n for i_l, lab in enumerate(dataset.uniquelabels):\n d_tmp = dataset.samples_original[dataset.labels==lab]\n self._make_samples_4d(d_tmp)\n data.append(d_tmp)\n self.search_clusters(data)\n self._is_trained = True", "title": "" }, { "docid": "b5a7278f5812a3e80934aa13fbfaa39c", "score": "0.65540236", "text": "def train_classifier(self, model_name, dataset, exp):\n self.dataset = dataset\n self.exp = exp\n 
self.deep_learning_search_batch_epoch_grid(model_name)", "title": "" }, { "docid": "747a6b83ad08fdfe0af4f381bbdb4483", "score": "0.6533403", "text": "def train(self, data: np.ndarray, kludge=1e-10):\n X, y = super().__pretrain__(data, kludge)", "title": "" }, { "docid": "f4cdaf16b3db4b998b22c2d2f4d7a49c", "score": "0.6528828", "text": "def train_step(self, data):\n raise NotImplementedError('Should be implemented in derived class!')", "title": "" }, { "docid": "d2225db2ea4bf09c88f166c0856fd0b3", "score": "0.6528207", "text": "def train(data_dir, model_name, epochs):\n train_model(data_dir=data_dir, model_name=model_name, epochs=epochs)", "title": "" }, { "docid": "0f796ea8064e8531d4e70e1f259aeac4", "score": "0.65259916", "text": "def train(self, inputs, targets):\n raise NotImplementedError()", "title": "" }, { "docid": "b03fa9acf50111a122e5c7397a45074e", "score": "0.6508481", "text": "def train_step(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "0a664c964c07f9ee530b4e77e7faa96e", "score": "0.6505331", "text": "def train(train_data, model, optimizer, criterion, device):\r\n\r\n # switch to train mode\r\n model.train()\r\n\r\n # iterate through the dataset loader\r\n i = 0\r\n losses = []\r\n for (inp, target) in train_data:\r\n # transfer inputs and targets to the GPU (if it is available)\r\n inp = inp.to(device)\r\n target = target.to(device)\r\n\r\n # compute output, i.e. the model forward\r\n output = model(inp)\r\n\r\n # calculate the loss\r\n loss = criterion(output, target)\r\n # print(\"loss\", loss)\r\n losses.append(loss)\r\n\r\n print(\"loss {:.2}\".format(loss))\r\n # compute gradient and do the SGD step\r\n # we reset the optimizer with zero_grad to \"flush\" former gradients\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n avg_loss = torch.mean(torch.stack(losses)).item()\r\n print(\"avg.loss {:.2}\".format(avg_loss))\r\n return avg_loss", "title": "" }, { "docid": "37b7f0271a45d8eb51d753f87f94a72d", "score": "0.65050006", "text": "def train_data_loader(self):\n train_dataset = pytorchvideo.data.Kinetics(\n data_path = os.path.join(self._DATA_PATH,\"train\"), \n clip_sampler = pytorchvideo.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\n decode_audio = False,\n transform = get_transform() \n )\n\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset, \n batch_size = self._BATCH_SIZE, \n num_workers = self._NUM_WORKERS, \n )\n\n return train_data_loader", "title": "" }, { "docid": "f6d04d79da712795ca7a83ab3c7e4397", "score": "0.6500873", "text": "def train(**kwargs):\n print(\"==> TRAINING YOUR MODEL!\")", "title": "" }, { "docid": "18158ac07c79a724b16800dc0f2b93af", "score": "0.64884746", "text": "def train(self, X, y):\n self.model.fit(X, y)", "title": "" }, { "docid": "2dd90fcbbe8b5223aa3b6bf1268f10d2", "score": "0.64741826", "text": "def train(metric_fn):\n from ml_logger import logger\n from os.path import expanduser\n\n rope = np.load(expanduser(Args.data_path))\n dataset = PairDataset(rope[len(rope) // Args.k_fold:], K=Args.K)\n loader = DataLoader(dataset, batch_size=Args.batch_size, shuffle=True, num_workers=4)\n eval_dataset = PairDataset(rope[:len(rope) // Args.k_fold], K=Args.K)\n eval_loader = DataLoader(eval_dataset, batch_size=Args.batch_size, shuffle=True, num_workers=4)\n score_distribution(dataset.labels, f\"figures/train_scores.png\")\n score_distribution(eval_dataset.labels, f\"figures/eval_scores.png\")\n\n # used for the score distribution stats. 
Make smaller for faster training\n all_images = torch.tensor(np.concatenate(rope).transpose(0, 3, 1, 2)[::10] / 255, dtype=torch.float32,\n device=Args.device, requires_grad=False)\n traj_labels = torch.tensor(np.concatenate([np.ones(len(traj)) * i for i, traj in enumerate(rope)])[::10],\n device=Args.device, requires_grad=False)\n\n # used for visualization. Deterministic\n seq_gen = DataLoader(SeqDataset(rope, H=Args.vis_horizon, shuffle=False), batch_size=20, shuffle=False,\n num_workers=4)\n eval_trajs = next(iter(seq_gen)).to(Args.device)\n eval_x = eval_trajs[:, :1, :, :, :]\n\n optimizer = optim.Adam(metric_fn.parameters(), lr=Args.lr)\n\n # from torch_utils import RMSLoss\n # rms = RMSLoss()\n def evaluate(epoch, step):\n nonlocal seq_gen, all_images, traj_labels\n prefix = f\"figures/epoch_{epoch:04d}-{step:04d}\"\n for i, ((s, s_prime), y) in enumerate(tqdm(eval_loader), total=len(eval_loader.dataset) // Args.batch_size):\n if i < Args.vis_k and (epoch % Args.vis_interval == 0 or epoch < Args.vis_interval):\n s.requires_grad_(True)\n s_prime.requires_grad_(True)\n\n y_hat = metric_fn(s.to(Args.device), s_prime.to(Args.device)).squeeze()\n loss = F.smooth_l1_loss(y_hat, y.view(-1).to(Args.device))\n loss.backward()\n\n # diff = normalize(s_prime - s)\n # diff[:, :, :10, :10] = y[:, None, None, None]\n # diff[:, :, :10, 10:20] = y_hat[:, None, None, None]\n # _ = torch.cat([s, s_prime, diff, # diff image\n # normalize(s.grad), # activation of first image\n # normalize(s_prime.grad)], dim=1)\n # stack = _.reshape(-1, *_.shape[2:])[:50].detach().cpu().numpy()\n # logger.log_images(stack,\n # f\"figures/eval_pairs/epoch_{epoch:04d}-{step:04d}/activation_{i:04d}.png\", 5, 10)\n\n # visualize activation and samples\n # with torch.no_grad():\n # _ = torch.cat([s, s_prime, normalize(s_prime - s)], dim=1)\n # stack = _.reshape(-1, *_.shape[2:])[:30].cpu().numpy()\n # if i < Args.vis_k and epoch == 0:\n # logger.log_images(stack, f\"figures/sample_pairs/{i:02d}.png\", 5, 6)\n\n with torch.no_grad():\n y_hat = metric_fn(s.to(Args.device), s_prime.to(Args.device)).squeeze()\n correct = (y_hat.cpu() > 1).numpy() == (y.byte().cpu().numpy() > 1)\n logger.store_metrics(metrics={\"eval/accuracy\": correct}, y=y.mean(), sample_bias=y.numpy())\n\n # score vs timesteps\n with torch.no_grad():\n y_hat = metric_fn(eval_x, eval_trajs).squeeze()\n from plan2vec.plotting.visualize_traj_2d import local_metric_over_trajectory\n local_metric_over_trajectory(y_hat.cpu(), f\"{prefix}/score_vs_timesteps_{epoch:04d}.png\", ylim=(-0.1, 1.1))\n score_distribution(y_hat.cpu(), f\"{prefix}/in_trajectory_scores.png\", xlim=[-.1, 0.2])\n score_distribution(y_hat.cpu(), f\"{prefix}/in_trajectory_scores_full.png\", xlim=[-0.1, 1.2])\n\n y_hat = metric_fn(all_images[:, None, :, :, :], all_images[None, :1, :, :, :]).squeeze()\n score_distribution(y_hat.cpu(), f\"{prefix}/all_scores.png\", xlim=[-.1, 0.2])\n score_distribution(y_hat.cpu(), f\"{prefix}/all_scores_full.png\", xlim=[-0.1, 1.2])\n for _ in range(0, 10):\n y_hat = metric_fn(all_images[:, None, :, :, :], all_images[None, _ * 10: _ * 10 + 1, :, :, :]).squeeze()\n top_neighbors(all_images, all_images[_ * 100, :, :, :], y_hat, f\"{prefix}/top_neighbors_{_:04d}.png\")\n faraway_samples(all_images, all_images[_ * 100, :, :, :], y_hat,\n f\"{prefix}/faraway_samples_{_:04d}.png\")\n\n logger.split()\n for epoch in range(Args.num_epochs + 1):\n\n for i, ((s, s_prime), y) in enumerate(tqdm(loader), total=len(loader.dataset) // Args.batch_size):\n y_hat = 
metric_fn(s.to(Args.device), s_prime.to(Args.device))\n loss = F.smooth_l1_loss(y_hat.view(-1), y.view(-1).to(Args.device))\n\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n with torch.no_grad():\n logger.store_metrics(loss=loss.cpu().item(),\n y_hat=y_hat.mean().cpu().item(),\n y=y.mean().cpu().item(),\n accuracy=(y_hat.cpu() > 1).numpy() == (y.byte().cpu().numpy() > 1))\n\n if i % 1000 == 0:\n evaluate(epoch, step=i // 1000)\n\n if i % 100 == 0:\n logger.log_metrics_summary(default_stat=\"quantile\",\n key_values=dict(epoch=epoch + i / len(loader.dataset) * Args.batch_size), )\n\n if Args.save_weights:\n logger.save_module(metric_fn, \"models/local_metric.pkl\")", "title": "" }, { "docid": "33e15a106ea1b64b5ffc96f3beadcc57", "score": "0.6473998", "text": "def train(self, dataset):\n \"*** YOUR CODE HERE ***\" \n while(1):\n correct = True\n for x, y in dataset.iterate_once(1): \n yvec = nn.as_scalar(y)\n c = self.get_prediction(x)\n\n if yvec != c:\n correct = False\n nn.Parameter.update(self.w, x, yvec)\n if correct:\n break", "title": "" }, { "docid": "7160870e28fdcd15a517dc40f15aa351", "score": "0.64736617", "text": "def train(self, data):\n self._curr_data = data\n if data is not None:\n\t ## calling train_data function to find the label(prior) distribution and conditional distribution\n self._label_dist, self._cond_dist = train_data(data,self.smoothing) \n\t ## finding the labels in label_counts dictionary. For eg: if _label_dist is {good:3, bad:2}, then _label_dist_sample= {good,bad} \t ## grabs labels. \"samples\" is misleading.\n\t self._labels = self._label_dist.samples", "title": "" }, { "docid": "c211b38e16da023ccd7d20a4143c5e36", "score": "0.6470323", "text": "def train_single_model(model: TransformerModelWrapper, train_data: List[InputExample], config: TrainConfig,\n eval_config: EvalConfig = None, ipet_train_data: List[InputExample] = None,\n unlabeled_data: List[InputExample] = None, return_train_set_results: bool = True):\n\n device = torch.device(config.device if config.device else \"cuda\" if torch.cuda.is_available() else \"cpu\")\n if not ipet_train_data:\n ipet_train_data = []\n\n results_dict = {}\n\n model.model.to(device)\n\n if train_data and return_train_set_results:\n results_dict['train_set_before_training'] = evaluate(model, train_data, eval_config)['scores']['acc']\n\n all_train_data = train_data + ipet_train_data\n\n if not all_train_data and not config.use_logits:\n logger.warning('Training method was called without training examples')\n else:\n global_step, tr_loss = model.train(\n all_train_data, device,\n per_gpu_train_batch_size=config.per_gpu_train_batch_size,\n per_gpu_unlabeled_batch_size=config.per_gpu_unlabeled_batch_size,\n n_gpu=config.n_gpu,\n num_train_epochs=config.num_train_epochs,\n max_steps=config.max_steps,\n gradient_accumulation_steps=config.gradient_accumulation_steps,\n weight_decay=config.weight_decay,\n learning_rate=config.learning_rate,\n adam_epsilon=config.adam_epsilon,\n warmup_steps=config.warmup_steps,\n max_grad_norm=config.max_grad_norm,\n unlabeled_data=unlabeled_data if config.lm_training or config.use_logits else None,\n lm_training=config.lm_training,\n use_logits=config.use_logits,\n alpha=config.alpha,\n temperature=config.temperature\n )\n results_dict['global_step'] = global_step\n results_dict['average_loss'] = tr_loss\n\n if train_data and return_train_set_results:\n results_dict['train_set_after_training'] = evaluate(model, train_data, eval_config)['scores']['acc']\n\n 
return results_dict", "title": "" }, { "docid": "6b34e3076d60d7087ca55c16395369f8", "score": "0.6469995", "text": "def train(self, features, labels):\n raise NotImplementedError()", "title": "" }, { "docid": "fb6832128f3aedd4e62255ac22437ecc", "score": "0.6461335", "text": "def train(self, datasets: Dict[str, Dataset], collate_fn: Callable):\n config = self.config\n device = self.device\n artifact_root = os.path.join(config[\"artifact_dir\"], config[\"exp_id\"])\n\n optimizer = optim.Adam(self.model.parameters(),\n lr=config[\"learning_rate\"])\n if config[\"lr_scheduler\"]:\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, factor=0.5, patience=5)\n\n best_dev_f1 = 0\n best_dev_f1_epoch = 0\n best_test_f1 = 0\n best_test_f1_epoch = 0\n best_model_path = os.path.join(artifact_root, \"best_model.pth\")\n last_model_path = os.path.join(artifact_root, \"last_model.pth\")\n for epoch in range(1, 1 + config[\"num_epochs\"]):\n logging.info(\"Start training epoch %s\", epoch)\n self.model.train() # Turn on training mode (e.g. dropout)\n \n data = DataLoader(\n datasets[\"train\"], batch_size=config[\"batch_size\"],\n shuffle=True, collate_fn=collate_fn\n )\n \n start = time.perf_counter()\n num_batches = 0\n total_num_tokens = 0\n accumulated_loss = 0.0\n for tokens, token_chars, lengths, _, strings, labels in data:\n optimizer.zero_grad()\n \n num_batches += 1\n num_batch_tokens = torch.sum(lengths).item()\n total_num_tokens += num_batch_tokens\n \n tokens, token_chars, lengths, labels = \\\n tokens.to(device), token_chars.to(device), \\\n lengths.to(device), labels.to(device)\n\n emissions, transitions = \\\n self.model(tokens, token_chars, lengths, strings)\n # shape: [batch_size, max_num_tokens, label_vocab_size]\n loss = self.criterion(emissions, transitions, lengths, labels)\n accumulated_loss += loss.item() * num_batch_tokens\n \n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(),\n config[\"gradient_clip\"])\n optimizer.step()\n \n if num_batches % config[\"print_freq\"] == 0:\n logger.info(\n \"Epoch %s, Progress: %s, Loss: %.3f, \"\n \"speed: %.2f tokens per sec\", epoch,\n \"{}/{}\".format(num_batches, len(data)),\n accumulated_loss / total_num_tokens,\n total_num_tokens / (time.perf_counter() - start)\n )\n\n if config[\"train_with_dev\"]:\n test_f1 = self.eval(\n datasets[\"test\"], collate_fn,\n os.path.join(artifact_root,\n \"test_predictions_{}.txt\".format(epoch)))\n if test_f1 > best_test_f1:\n best_test_f1 = test_f1\n best_test_f1_epoch = epoch\n torch.save(self.model.state_dict(), best_model_path)\n logger.info(\"Best test F1 {} from epoch {}\".format(\n best_test_f1, best_test_f1_epoch))\n else:\n logger.info(\"Evaluating on dev set...\")\n dev_f1 = self.eval(datasets[\"dev\"], collate_fn)\n if dev_f1 > best_dev_f1:\n logger.info(\"Saving model - best so far...\")\n best_dev_f1 = dev_f1\n best_dev_f1_epoch = epoch\n torch.save(self.model.state_dict(), best_model_path)\n test_f1 = self.eval(datasets[\"test\"], collate_fn)\n if test_f1 > best_test_f1:\n best_test_f1 = test_f1\n best_test_f1_epoch = epoch\n torch.save(self.model.state_dict(), last_model_path)\n \n if config[\"lr_scheduler\"]:\n train_loss = accumulated_loss / total_num_tokens\n scheduler.step(train_loss)\n logger.info(\"Train loss this epoch: %.4f, new lr %s\",\n train_loss, optimizer.param_groups[0]['lr'])\n \n if not config[\"train_with_dev\"]:\n # Re-evaluate the best model and dump predictions\n self.model.load_state_dict(torch.load(best_model_path))\n logger.info(\"Evaluating 
best model on dev from epoch %s...\",\n best_dev_f1_epoch)\n self.eval(datasets[\"dev\"], collate_fn,\n os.path.join(artifact_root, \"dev_predictions_best.txt\"))\n logger.info(\"Evaluating best model on test...\")\n self.eval(datasets[\"test\"], collate_fn,\n os.path.join(artifact_root, \"test_predictions_best.txt\"))\n \n logger.info(\"Best test F1 seen %s from epoch %s\",\n best_test_f1, best_test_f1_epoch)", "title": "" }, { "docid": "647f1ea309f6475eca5b699c98e9fe7c", "score": "0.6459181", "text": "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = util.AverageMeter()\n epoch_time = util.Timer()\n # Run one epoch\n for idx, ex in enumerate(data_loader):\n train_loss.update(*model.update(ex)) # run on one batch\n\n if idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n\n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "title": "" }, { "docid": "87212cdff74b50af8d136a46d5cfaca7", "score": "0.6454787", "text": "def train_fn(model, dl, optimizer, criterion):\n\n # track total number of data points trained on\n n_samples = 0\n\n correct = 0\n total_loss = 0\n for X, y in tqdm.tqdm(dl):\n X = X.to(DEVICE)\n y = y.to(DEVICE)\n\n # get model output\n predictions = model(X)\n\n # get loss and update weights\n loss = criterion(predictions, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # update samples and total loss to compute accuracy at the end\n n_samples += X.shape[0]\n total_loss += loss.cpu().item()\n\n # add number of correct prediction sto correct\n predictions = torch.argmax(predictions, dim=1)\n correct += torch.sum(predictions == y).cpu().item()\n\n # return the average loss per sample along with the accuracy during training\n average_loss = total_loss / n_samples\n accuracy = correct / n_samples\n return average_loss, accuracy", "title": "" }, { "docid": "0fe420e51d3f906bac627d3316bc4b27", "score": "0.64542174", "text": "def train(self) -> TypeVar(\"Trainer\"):\n self._current_mode = \"train\"\n self._sequential.train()\n\n if self.has_criterion and isinstance(self.criterion, nn.Module):\n self.criterion.train()\n\n # if self.has_metrics and isinstance(self.metric, nn.Module):\n # self.metric.train()\n\n return self", "title": "" }, { "docid": "72242b151a5b7611539cfd65f3261bf8", "score": "0.6450363", "text": "def __train_model__(self, dataloader, lossfnc, biasfnc, optimizer, status_str, epoch_i,\n n_epochs):\n # Set up for training.\n self.train()\n losses = list()\n biases = list()\n nb_batches = dataloader.nbatches\n\n # Perform the training loop for this epoch.\n for batch_i, (X, y) in enumerate(dataloader):\n optimizer.zero_grad()\n net_out = self.forward(X)\n loss = lossfnc(net_out, y)\n\n bias = biasfnc(net_out=net_out, meas_true=y).detach().clone()\n biases += bias.cpu().numpy().tolist()\n bias = torch.mean(torch.square(bias), 0)\n\n loss.backward()\n optimizer.step()\n\n losses.append(loss.item())\n\n print(status_str.format(epoch_i + 1, n_epochs, batch_i + 1, nb_batches,\n 100. 
* (batch_i + 1) / nb_batches, losses[-1], bias[0], bias[1]), end='')\n #end for\n return losses, biases", "title": "" }, { "docid": "8e14928f7bdc84b868395f3670f94776", "score": "0.6447991", "text": "def train(self, dataset, remaining_time_budget=None):\n # Convert training dataset to necessary format and\n # store as self.domain_dataset_train\n logger.info(\"Note:train_process model.py starts train\")\n # if self.call_num==0:\n # dataset = dataset.shuffle(min(1000, self.train_num))\n start = time.time()\n self.tf_dataset_trainsformer.init_train_tfds(dataset, self.train_num)\n end = time.time()\n self.time_record[\"init_train_tfds\"] = end - start\n\n if \"train_num\" not in self.domain_model.feature_dict:\n self.domain_model.feature_dict[\"train_num\"] = self.train_num\n self.domain_model.feature_dict[\"class_num\"] = self.class_num\n self.domain_model.feature_dict[\"language\"] = self.domain_metadata['language']\n\n self.set_domain_dataset(dataset, is_training=True)\n logger.info(\"Note:train_process model.py set domain dataset finished, domain_model train starts.\")\n self.domain_model.time_record = self.time_record\n # Train the model\n\n # print(\"check domain_y\", self.domain_dataset_train_dict[\"y\"].shape)\n if self.call_num == -1:\n # self.domain_model.train_first_svm(self.domain_dataset_train_dict[\"x\"], self.domain_dataset_train_dict[\"y\"],\n # remaining_time_budget=remaining_time_budget)\n self.domain_model.train(self.domain_dataset_train_dict[\"x\"], self.domain_dataset_train_dict[\"y\"],\n remaining_time_budget=remaining_time_budget)\n else:\n\n self.domain_model.train(self.domain_dataset_train_dict[\"x\"], self.domain_dataset_train_dict[\"y\"],\n remaining_time_budget=remaining_time_budget)\n self.call_num += 1\n\n logger.info(\"Note:train_process model.py domain_model train finished.\")\n\n # Update self.done_training\n self.done_training = self.domain_model.done_training", "title": "" }, { "docid": "5c7834e9211fe63b939c1219a23a1e0c", "score": "0.64452046", "text": "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = utils.AverageMeter()\n epoch_time = utils.Timer()\n\n # Run one epoch\n for idx, ex in enumerate(data_loader):\n train_loss.update(*model.update(ex))\n\n if idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n\n logger.info('train: Epoch %d done. 
Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n\n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "title": "" }, { "docid": "031ab3d6a714d9c7b6b78012dcf68562", "score": "0.64446443", "text": "def train(self, session, train_dataset, val_dataset, train_dir):\n\n # some free code to print out number of parameters in your model\n # it's always good to check!\n # you will also want to save your model parameters in train_dir\n # so that you can use your trained model to make predictions, or\n # even continue training\n\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n logging.info(\"Number of training examples \" + str(len(train_dataset[0])))\n logging.info(\"Number of total validation examples \" + str(len(val_dataset[0])))\n\n # Each epoch we train on all of the data\n for epoch in range(self.start_epoch, self.epochs):\n logging.info(\"Running epoch \" + str(epoch))\n self.run_epoch(session, train_dataset, val_dataset, train_dir, epoch)\n logging.info(\"Saving model\")\n\n # Reset global step to reflect new epoch\n session.run(self.global_step.assign(0))\n\n self.saver.save(session, self.train_dir + '/' + self.saved_name + '_epoch' +\n str(epoch), global_step=0)", "title": "" }, { "docid": "78ce2682cc2e1899c6d3de85f2f31597", "score": "0.64352816", "text": "def train(self,data,classes):\n raise Unimplemented()", "title": "" }, { "docid": "fd27372bf4920ba8c89fce3e08c5d635", "score": "0.6434403", "text": "def train(self):\n self.clf = self.clf.fit(self.dataset['training']['features'], self.dataset['training']['labels'])", "title": "" }, { "docid": "4b7e612dde6008f928c2822dde6bdec1", "score": "0.64256895", "text": "def train(self, dl_train: DataLoader):\n\n x_train, y_train = dataloader_utils.flatten(dl_train)\n self.x_train = x_train\n self.y_train = y_train\n self.n_classes = len(set(y_train.numpy()))\n return self", "title": "" }, { "docid": "e4348c17bffde6876308bd6921354207", "score": "0.6423845", "text": "def train(self) -> None:\n self.training = True\n self.model.train()", "title": "" }, { "docid": "79fb153ca5750f53ead2d6d79f6d7734", "score": "0.64164615", "text": "def fit(self, x_train: list, y_train: torch.Tensor):\n raise NotImplementedError", "title": "" }, { "docid": "9e2f82105ec7fbb92e06c2a6504098a8", "score": "0.64154947", "text": "def train(net, trainloader, epochs, device: str):\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)\n net.train()\n for _ in range(epochs):\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n optimizer.zero_grad()\n loss = criterion(net(images), labels)\n loss.backward()\n optimizer.step()", "title": "" }, { "docid": "b0614596a9f0d6e87137124909f42963", "score": "0.64019907", "text": "def train():\n\tpipeline_manager.train()", "title": "" }, { "docid": "233ee4c90f3a3b68f28495a493674f07", "score": "0.6397135", "text": "def train(self):\n self.set_pipeline()\n if self.gridsearch:\n self.add_grid_search()\n if self.kwargs.get(\"estimator\") == 'NN':\n self.pipeline.fit(self.X_train, self.y_train)\n else:\n self.pipeline.fit(self.X_train, self.y_train)", "title": "" }, { "docid": "0859920b33aee505db6a2af9e5e5d669", "score": "0.63924646", "text": "def 
train(self):\n ##################\n # YOUR CODE HERE #\n ##################\n pass", "title": "" }, { "docid": "2aa86b9e114e0ce34e72947b4cbed04d", "score": "0.6390707", "text": "def train(self, X_train, y_train):\n self.linear_model.fit(X=X_train, y=y_train)", "title": "" }, { "docid": "4f82ccfc647260fe88d5bef42fac5899", "score": "0.6382375", "text": "def train(self, X, y):\n raise NotImplementedError", "title": "" }, { "docid": "45cced246cc58d2ee8193285876f886e", "score": "0.637674", "text": "def fit_partial(self, dataset):\n x, y = dataset\n if self.model is None:\n # first time, we build and compile the model\n self.fit_architecture(x, y)\n self.compile_model()\n\n from keras.utils import to_categorical\n return self.model.train_on_batch(x, to_categorical(y, self.states['n_classes']), **self.fit_kwargs)", "title": "" }, { "docid": "569056d959cd201b1c622af205e5cd7a", "score": "0.63727355", "text": "def train(net,optimizer, trainloader, epochs):\n criterion = torch.nn.CrossEntropyLoss()\n net.train()\n for _ in range(epochs):\n for images, labels in trainloader:\n images, labels = images.to(DEVICE), labels.to(DEVICE)\n optimizer.zero_grad()\n b_x = Variable(images) # batch x\n b_y = Variable(labels)\n loss = criterion(net(images), labels)\n loss.backward()\n optimizer.step()", "title": "" }, { "docid": "cc614efc0e63ce7715e55d253601c6e3", "score": "0.6372412", "text": "def train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n logger = get_root_logger(log_level=cfg.log_level)\n\n # start training\n if distributed:\n _dist_train(\n model,\n dataset,\n cfg,\n validate=validate,\n logger=logger,\n timestamp=timestamp,\n meta=meta)\n else:\n _non_dist_train(\n model,\n dataset,\n cfg,\n validate=validate,\n logger=logger,\n timestamp=timestamp,\n meta=meta)", "title": "" }, { "docid": "6595b690ad17f7b840f0cd9e0977bf86", "score": "0.6363362", "text": "def train(self, x_train: numpy.ndarray, y_train: numpy.ndarray,\n x_val: numpy.ndarray = None,\n y_val: numpy.ndarray = None) -> None:\n # This will be specific to model so should be implemented by\n # child classes\n raise NotImplementedError()", "title": "" }, { "docid": "271fd194c5ed02885c3b571828815d3a", "score": "0.63618654", "text": "def training():", "title": "" }, { "docid": "f4a3658a25768c16de1548a62e5dbe4d", "score": "0.63546747", "text": "def train_on_data_file(self,\r\n dataset_identifier,\r\n preprocessed_datasets_folder=config.preprocessed_datasets_folder,\r\n batch_size=1000,\r\n nb_epoch=10,\r\n verbose=0):\r\n filename = self.generate_dataset_filename(dataset_identifier)\r\n\r\n dataset = self.open_dataset_file(filename, preprocessed_datasets_folder)\r\n\r\n if dataset.empty:\r\n return\r\n\r\n X_train, y_train, _t0_train, X_val, y_val, _t0_val, _X_test, _y_test, _t0_test = self.generate_training_data(\r\n dataset)\r\n\r\n self.train(X_train, y_train, X_val, y_val, batch_size, nb_epoch, verbose)", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.6348088", "text": "def train(self):", "title": "" }, { "docid": "1dda59e213776f63b4219ebd6132148a", "score": "0.63423294", "text": "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n while 1:\n correct = True\n for x, y in dataset.iterate_once(1):\n \n xvec = nn.as_vector(x)\n yvec = nn.as_scalar(y)\n c = self.get_prediction(xvec)\n p = self.run(xvec)\n\n if yvec != c:\n correct = False\n cpw = self.w.copy()\n for i in range(len(self.w)):\n self.w[i] = cpw[i] + self.alpha * (yvec - p) * p * (1-p) * 
xvec[i]\n if correct:\n break", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.6341367", "text": "def train(self):\n pass", "title": "" }, { "docid": "5c563374de105a77da2efa26f54e4e44", "score": "0.63391346", "text": "def train(self, X, y):\n self.X_train = X\n self.y_train = y", "title": "" }, { "docid": "bb76176e42663b019287b9c3f2c637f8", "score": "0.6334745", "text": "def run_training(self, dataset):\n # Prepare data\n timed_dataset = prp.split_data(dataset, test_size=self.params[\"test_size\"])\n train_dataset, self.scaler = prp.scale_and_format(timed_dataset[0], timed_dataset[1], timed_dataset[2], timed_dataset[3])\n # Train\n self.model, history = mlp.run_training(train_dataset, self.params[\"layers_sizes\"], self.params[\"layers_activations\"], self.params[\"epochs_nb\"], self.params[\"batch_size\"], self.params[\"test_size\"])\n # Save\n #self.save()\n self.print_param()\n # return history\n return history", "title": "" }, { "docid": "4119790e00f14639638f3e32d5cf5911", "score": "0.63335294", "text": "def train_step(model: torch.nn.Module, \n dataloader: torch.utils.data.DataLoader, \n loss_fn: torch.nn.Module, \n optimizer: torch.optim.Optimizer,\n device: torch.device) -> Tuple[float, float]:\n # Put model in train mode\n model.train()\n\n # Setup train loss and train accuracy values\n train_loss, train_acc = 0, 0\n\n # Loop through data loader data batches\n for batch, (X, y) in enumerate(dataloader):\n # Send data to target device\n X, y = X.to(device), y.to(device)\n\n # 1. Forward pass\n y_pred = model(X)\n\n # 2. Calculate and accumulate loss\n loss = loss_fn(y_pred, y)\n train_loss += loss.item() \n\n # 3. Optimizer zero grad\n optimizer.zero_grad()\n\n # 4. Loss backward\n loss.backward()\n\n # 5. Optimizer step\n optimizer.step()\n\n # Calculate and accumulate accuracy metric across all batches\n y_pred_class = torch.argmax(torch.softmax(y_pred, dim=1), dim=1)\n train_acc += (y_pred_class == y).sum().item()/len(y_pred)\n\n # Adjust metrics to get average loss and accuracy per batch \n train_loss = train_loss / len(dataloader)\n train_acc = train_acc / len(dataloader)\n return train_loss, train_acc", "title": "" } ]
4dbf8edcdf7c02def36a3203902b1dc2
Add object to scene. When you add a new object to the scene, that object will then be drawn to the scene in every `draw` function call.
[ { "docid": "40fa2826ed55ed8f0ea314fa88b4a1e1", "score": "0.6973952", "text": "def add(self, obj: GameObject) -> None:\n self.objects[obj.get_id()] = obj", "title": "" } ]
[ { "docid": "1924230e8fc78f76f86e7c8506bfc203", "score": "0.7627554", "text": "def add_object(self, obj):\n found = self.lookup_object(obj)\n obj.scene = self\n if found:\n self.objects[found] = obj\n else:\n self.objects.append(obj)", "title": "" }, { "docid": "0cf6af1325b1350da3e24c7bac6850ac", "score": "0.7190459", "text": "def add_to_world(self, obj):\n if self.game:\n self.game.added.append(obj)\n else:\n ipdb.set_trace()", "title": "" }, { "docid": "f182404c09d3c78d0cd5a97fd67d3d68", "score": "0.6999933", "text": "def add_object(self, objectToAdd):\n self._manager.add_worldObject(objectToAdd)", "title": "" }, { "docid": "4b28f24b8ed67c4f3fdcc28cced562e3", "score": "0.689054", "text": "def add_scene(self, scene):\n self.hscene.add(scene)\n return scene", "title": "" }, { "docid": "5d62c8f12bb9c5e88b3b1c71d870e1eb", "score": "0.6726953", "text": "def _add_object(self,o):\n o.game = self\n o.uid = hashlib.md5(str(self.object_uid)).digest()\n self.object_uid += 1\n self.objects.append(o)\n if o.physical:\n if o.movable:\n self.broadphase_mov.append(o)\n self.broadphase_mov.sort(key=lambda o:(o._x))\n else:\n self.broadphase_stat.append(o)\n self.broadphase_stat.sort(key=lambda o:(o._x))\n o.added_to_game(self)", "title": "" }, { "docid": "b081ee910e24ff8c01af088fee6d9399", "score": "0.66711974", "text": "def add_object(self, grid_world_object):\n self.objects.append(grid_world_object)", "title": "" }, { "docid": "3c5e683a590e67b0e464ef4332c87734", "score": "0.66221505", "text": "def add(self, obj):\n self.objects.append(obj)\n if obj.gravity == 0:\n obj.gravity = self.gravity\n if obj.gravity_z == 0:\n obj.gravity_z = self.gravity_z", "title": "" }, { "docid": "278993195b7eb736be7004e64b468b13", "score": "0.65753007", "text": "def add(self, scene: SceneRecord):\n self.session.add(scene)", "title": "" }, { "docid": "14523d0a4ed1da5d74701fabfef0eece", "score": "0.65600187", "text": "def AddShape(self, object, addAfter = None):\n if not object in self._shapeList:\n if addAfter:\n self._shapeList.insert(self._shapeList.index(addAfter) + 1, object)\n else:\n self._shapeList.append(object)\n\n object.SetCanvas(self.GetCanvas())", "title": "" }, { "docid": "2f2a719d48f29813107bfce5aa6559b5", "score": "0.6524193", "text": "def add_background_object(self, obj):\n self.__add_to_all__(obj)\n self.noncollideables.add(obj)", "title": "" }, { "docid": "d22a6b193d96c268699a45a2dce54da5", "score": "0.64980465", "text": "def add_object(self, place_name, game_object):\n place = self.get(place_name)\n place.objects.append(game_object)\n game_object.place = place", "title": "" }, { "docid": "0604534358370140a65edcc2da4f9dc1", "score": "0.64435524", "text": "def _add_objects_on_scene(self):\n\n # Delete everything on the scene - no params mean everything\n self._scene.remove_world_object()\n # rospy.sleep(1)\n\n # Add objects to the scene\n # All objects need to be placed relative to the robot, hence the weird z-axis\n self._add_table(self.table1_name_, 0.0, 1.0, -(1.03)/2, quaternion_from_euler(0.0, 0.0, 0.0))\n self._add_table(self.table2_name_, 0.0, -1.0, -(1.03)/2, quaternion_from_euler(0.0, 0.0, 0.0))\n \n self._add_coke_can(self.coke_can_name_, 0.0, 0.75, 0.122/2.0, quaternion_from_euler(0.0, 0.0, 0.0))\n\n\n start = rospy.get_time()\n seconds = rospy.get_time()\n timeout = 2.0 # in seconds\n \n # Loop until the objects placed are on the scene or the time runs out\n while (seconds - start < timeout) and not rospy.is_shutdown():\n # Test if models are on scene\n is_known_table1 = 'table1' in 
self._scene.get_known_object_names()\n is_known_table2 = 'table2' in self._scene.get_known_object_names()\n is_known_coke_can = 'coke_can' in self._scene.get_known_object_names()\n\n if is_known_table1 and is_known_table2 and is_known_coke_can:\n return True\n\n # Sleep so that we give other threads time on the processor\n rospy.sleep(0.1)\n seconds = rospy.get_time()\n\n # If all objects don't appear until the timeout\n return False", "title": "" }, { "docid": "5d2efb073a150a01d53382f9f8efcce7", "score": "0.6419557", "text": "def add_object(self, object_id, object_type, x, y, z, quaternion, sel_cb=None):\n\n obj = ObjectItem(self.scene, object_id, object_type, x, y, z, quaternion, sel_cb)\n\n if object_id in self.selected_object_ids or object_type.name in self.selected_object_types:\n\n obj.set_selected(True)", "title": "" }, { "docid": "eb461f306b4ccb4d551d2d3ed7698b81", "score": "0.6361825", "text": "def object_data_add(context, obdata, operator=None, name=None):\n scene = context.scene\n layer = context.view_layer\n layer_collection = context.layer_collection or layer.active_layer_collection\n scene_collection = layer_collection.collection\n\n for ob in layer.objects:\n ob.select_set(False)\n\n if name is None:\n name = \"Object\" if obdata is None else obdata.name\n\n obj_act = layer.objects.active\n obj_new = bpy.data.objects.new(name, obdata)\n scene_collection.objects.link(obj_new)\n obj_new.select_set(True)\n obj_new.matrix_world = add_object_align_init(context, operator)\n\n if obj_act and obj_act.mode == 'EDIT' and obj_act.type == obj_new.type:\n bpy.ops.mesh.select_all(action='DESELECT')\n obj_act.select_set(True)\n bpy.ops.object.mode_set(mode='OBJECT')\n\n obj_act.select_set(True)\n scene.update() # apply location\n # layer.objects.active = obj_new\n\n # Match up UV layers, this is needed so adding an object with UV's\n # doesn't create new layers when there happens to be a naming mis-match.\n uv_new = obdata.uv_layers.active\n if uv_new is not None:\n uv_act = obj_act.data.uv_layers.active\n if uv_act is not None:\n uv_new.name = uv_act.name\n\n bpy.ops.object.join() # join into the active.\n if obdata:\n bpy.data.meshes.remove(obdata)\n\n bpy.ops.object.mode_set(mode='EDIT')\n else:\n layer.objects.active = obj_new\n if context.preferences.edit.use_enter_edit_mode:\n bpy.ops.object.mode_set(mode='EDIT')\n\n return obj_new", "title": "" }, { "docid": "b042928c597f02a00abc41fee1ec7d6b", "score": "0.6361377", "text": "def add_object(self, object):\n if object is not None:\n self.objects.append(object)", "title": "" }, { "docid": "e2d0ea9f087358c405d2505fc19f9bc4", "score": "0.63221186", "text": "def add_object(self, obj: Object) -> None:\n self._objects.append(obj)", "title": "" }, { "docid": "89ffbba387a13a20a720b1a61606993c", "score": "0.6285371", "text": "def draw_scene(self):\n pass", "title": "" }, { "docid": "f1509b162a7a7648f3656dacc9ac3848", "score": "0.6281282", "text": "def add_scene(self, scene, key):\r\n self.scenes[key] = scene\r\n scene.add(self)", "title": "" }, { "docid": "0585f0648e3a2f955e048d88701bb38d", "score": "0.6247958", "text": "def add_button_press(self):\n material = Substance.MATERIALS['cork']\n mass = 100000\n force_object = Physics.PhysicsObject(material, mass)\n self.force_objects.append(force_object)\n self.window.physics_canvas.add_physics_object(force_object)", "title": "" }, { "docid": "7e8b7257e0cd6f0f801187b48473aaf5", "score": "0.6232648", "text": "def drawActor(A):\n pf.canvas.addActor(A)\n pf.canvas.update()", "title": "" }, { "docid": 
"82c4fcf19eff0d612aa96d9943963bfb", "score": "0.6218931", "text": "def add_collideable(self, obj):\n self.__add_to_all__(obj)\n self.collideables.add(obj)", "title": "" }, { "docid": "ca7cfce5aca839cf378545ed0549e242", "score": "0.6204556", "text": "def __add_to_all__(self, obj):\n assert isinstance(obj, GameObject)\n self.all_objects.add(obj)", "title": "" }, { "docid": "779f1940e398995431ae7a62daf27682", "score": "0.614215", "text": "def add_child(self, obj):\n self.children.append(obj)", "title": "" }, { "docid": "b5181c1bbb34838c2bb93638a267384d", "score": "0.6134796", "text": "def add_enemy(self, obj):\n self.__add_to_all__(obj)\n self.enemies.add(obj)", "title": "" }, { "docid": "93d43763795409417b49c26f07faab12", "score": "0.6124855", "text": "def generate(self):\n self.game_objects.append(CoffeeDraw(self.surface, self))", "title": "" }, { "docid": "f8cf92f9bb4b9765cf33eb70dd9f5975", "score": "0.6109984", "text": "def visualize_obj(obj_path, *args, color=\"lightcoral\", **kwargs):\n print(\"Visualizing : \" + obj_path)\n scene = Scene(add_root=True)\n scene.add_from_file(obj_path, *args, c=color, **kwargs)\n\n return scene", "title": "" }, { "docid": "0cb6c5adbcad3d23553498bdb8a14d2a", "score": "0.6104775", "text": "def append(self, object):\r\n\r\n self.children.append(object)", "title": "" }, { "docid": "3fa9953eca22623b5a7deb19aae7eb62", "score": "0.60995394", "text": "def add(self, obj):\n self.elements.append(obj)", "title": "" }, { "docid": "a71b260e79eb665e4eec787cce734e7e", "score": "0.60690117", "text": "def add(self, obj):\n self._objects.append(weakref.ref(obj))\n self.resize()", "title": "" }, { "docid": "931ab9f1b4cecd24825570a73b8922f1", "score": "0.60532004", "text": "def add(self, obj):", "title": "" }, { "docid": "e6ade040d3552794570a827d9fbfa63a", "score": "0.60336214", "text": "def add_obj(self, obj):\n self._check_add_obj(obj)\n self._obj_list.append(obj)\n obj.index = self.get_n_obj()\n obj.father = self\n return True", "title": "" }, { "docid": "e63767649c56646dd823b4d4d948c79b", "score": "0.5986015", "text": "def user_add_box(self, rect):\n self.source.scene_box_added(rect)", "title": "" }, { "docid": "e5365dc26a711a3ea2f95accb1231359", "score": "0.59759563", "text": "def add_new_object(self, object_to_add):\n db.session.add(object_to_add)\n db.session.commit()", "title": "" }, { "docid": "2289b4fd638b6509918d25ff594ace8f", "score": "0.5951198", "text": "def add(self, obj):\n self.data.append(obj)", "title": "" }, { "docid": "e2fdefe637d3f6512626a5e3075be0da", "score": "0.59431344", "text": "def add_object(self, obj):\n dbsession = self.context.get_dbsession()\n dbsession.add(obj)\n dbsession.flush()", "title": "" }, { "docid": "da88e27fb0166d3e7fe1fbfabd5dfd04", "score": "0.5932498", "text": "def add(self, obj: object) -> None:\n self._contents.append(obj)", "title": "" }, { "docid": "af700f987b94def13e0d3e175df53892", "score": "0.59100515", "text": "def draw_object(draw, obj):\n draw.rectangle(obj.bbox, outline='red')\n textbox_w, textbox_h = app.font.getsize(obj.label)\n draw.rectangle ((obj.bbox[0]+2,obj.bbox[3]-22,obj.bbox[0]+2+textbox_w,obj.bbox[3]-22+textbox_h),fill='yellow')\n draw.text((obj.bbox[0]+2, obj.bbox[3] - 22), obj.label, fill='#0000', font=app.font)\n ##draw.rectangle ((obj.bbox[0]+2,obj.bbox[3]-22,obj.bbox[0]+2+textbox_w,obj.bbox[3]-22+textbox_h),fill='yellow')\n ##draw.text((obj.bbox[0]+2, obj.bbox[3] - 22), str(obj.score), fill='#0000', font=app.font)", "title": "" }, { "docid": "9cffd0d55ef9df3dcc1ec84ef16ae60c", "score": "0.5895869", 
"text": "def add_object( obj_file , obj_name=\"obj\" , pos_vec=None , orient_vec=None ):\n if( orient_vec == () ):\n orient_vec = None\n if( pos_vec == () ):\n pos_vec = None\n \n _ = SimObject( obj_name, obj_file, pos_vec , orient_vec )", "title": "" }, { "docid": "593e2cb385bd67b99b92a156b38eef7d", "score": "0.58918816", "text": "def add(self, obj):\n raise NotImplementedError(\"Subclass needed\")", "title": "" }, { "docid": "15dac3b2b789ea8c8532edc5fb1a20f9", "score": "0.5874847", "text": "def add(self, obj):\n\t\t# notify access for lazy load\n\t\tself._notifyAccess()\n\n\t\t# build all the objects before adding one to the list\n\t\tfor obj in self:\n\t\t\tpass\n\n\t\t# add to list of objects\n\t\tself._objects.insert(len(self), obj)\n\t\tself._total += 1", "title": "" }, { "docid": "b68d9a78e40cb24a69151211d2b8e012", "score": "0.5872763", "text": "def addToScene(airfoil, scene):\r\n scene.addItem(airfoil.contourPolygon)\r\n scene.addItem(airfoil.chord)\r\n airfoil.polygonMarkersGroup = scene. \\\r\n createItemGroup(airfoil.polygonMarkers)", "title": "" }, { "docid": "dac258feb14cefc23da794d849f04f1e", "score": "0.58474755", "text": "def add_node(self, node):\n if not isinstance(node, Node3D):\n raise ValueError(\"node must be a Node3D object\")\n\n self.nodes.append(node)", "title": "" }, { "docid": "f3221bcd4feaf305bf14b96923284222", "score": "0.58414346", "text": "def add(self, obj, data=None):\n raise NotImplementedError", "title": "" }, { "docid": "cf1a06738f9407d0e0dc9dc3e41294d6", "score": "0.58398515", "text": "def add(self, obj):\n self._content.append(obj)", "title": "" }, { "docid": "19159e45dbb4bdfa16104ad576715466", "score": "0.5814083", "text": "def add_player(self, obj):\n self.__add_to_all__(obj)\n self.players.add(obj)", "title": "" }, { "docid": "0a766c8f0ca21fea902020bab9bb54c7", "score": "0.57573867", "text": "def InsertShape(self, object):\n self._shapeList.insert(0, object)", "title": "" }, { "docid": "3d7d3c75cb6aa347c3dfeb90ddcee73d", "score": "0.5723016", "text": "def Draw(self, item=None):\n if item is None:\n objects_sorted = sorted([j for j in self.objects], key=lambda s: s.depth, reverse=False)\n # draw our objects onto the Scene\n for item in objects_sorted:\n # Handle drawing our map\n if isinstance(item, Map):\n pos = item.position\n self.surface.screen.blit(item.tmxhandler.image, pos)\n # Handle drawing sprites\n elif isinstance(item, DarkSprite):\n #self.surface.screen.blit(item.current_image, item.rect)\n # if sprites self.hide attribute is False.\n if not item.hide:\n # Draw the DarkSprite surface.\n if item.surface is not None:\n self.surface.screen.blit(item.surface, item.rect)\n # Draw the subsprites surfaces.\n for k in item.subsprites:\n if k.surface is not None:\n k.Update(False, False, None)\n self.surface.screen.blit(k.surface, k.rect)\n # Handle drawing UI overlay\n elif isinstance(item, ui.Overlay):\n pos = item.position\n item.Draw()\n self.surface.screen.blit(item.surface, pos)\n # draw our players onto the Scene\n for PLAYER in self.players:\n if isinstance(PLAYER, Player):\n self.surface.screen.blit(PLAYER.GetCurrentImage(), PLAYER.GetPosition())\n else:\n # Handle drawing our Map\n if isinstance(item, Map):\n pos = item.position\n self.surface.screen.blit(item.tmxhandler.image, pos)\n # Handle drawing players\n elif isinstance(item, Player):\n self.surface.screen.blit(item.GetCurrentImage(), item.GetPosition())\n # Handle drawing sprites\n elif isinstance(item, DarkSprite):\n if not item.hide:\n if item.surface is not None:\n 
self.surface.screen.blit(item.surface, item.rect)\n # Draw the subsprites surfaces.\n for k in item.subsprites:\n if k.surface is not None:\n k.Update(False, False, None)\n self.surface.screen.blit(k.surface, k.rect)\n # Handle drawing UI Overlay\n elif isinstance(item, ui.Overlay):\n item.Draw()\n pos = item.position\n self.surface.screen.blit(item.surface, pos)", "title": "" }, { "docid": "13ff1069fa04de65bc91fd469cf9f179", "score": "0.57206565", "text": "def on_draw(self):\n arcade.start_render() #what tells the arcade window to appear when we run it\n self.ball.draw() #what puts the ball onto the screen when we run the program", "title": "" }, { "docid": "0014bb0c0de5c2a2d99f374982c595c7", "score": "0.5713974", "text": "def on_draw(self):\r\n arcade.start_render()\r\n self.play.draw()\r\n self.button_exit.draw()", "title": "" }, { "docid": "95b144753b9b6a590114f5ca726b485e", "score": "0.5698714", "text": "def add_object(self, x, y, type, id):\n new_obj = {\"pos\":{\"x\":x,\"y\":y}, \"type\":type, \"r_obj_ID\": id}\n\n for obj in self.objects:\n if x == obj[\"pos\"][\"x\"] and y == obj[\"pos\"][\"y\"]:\n return \"that spot already full\"\n else:\n self.objects.append(new_obj)\n print(\"object added\")\n # if the list is empty, the for loop will nor run.\n self.objects.append(new_obj)\n print(\"first object in list\")", "title": "" }, { "docid": "865ab1631a95836382705a2e68c8bd58", "score": "0.56901836", "text": "def add(self, *args, **kargs):\n self.world.add_component(self, *args, **kargs)", "title": "" }, { "docid": "c6648bd572af3a4f544a803e438166a7", "score": "0.5686571", "text": "def add_item(self, obj):\n self.__add_to_all__(obj)\n self.items.add(obj)", "title": "" }, { "docid": "6acc2c97982963489553638a5b42a28a", "score": "0.5679017", "text": "def draw(self):\n self.obj_id = self.canvas.create_rectangle(\n self.x,\n self.y,\n self.x + self.width,\n self.y + self.height,\n tag = self.tag,\n width = 0,\n fill = self.color)", "title": "" }, { "docid": "6acc2c97982963489553638a5b42a28a", "score": "0.5679017", "text": "def draw(self):\n self.obj_id = self.canvas.create_rectangle(\n self.x,\n self.y,\n self.x + self.width,\n self.y + self.height,\n tag = self.tag,\n width = 0,\n fill = self.color)", "title": "" }, { "docid": "1cf15d155ccd123c70ddf656ff91e95c", "score": "0.56666696", "text": "def add(self, label, obj):\n\n self.args.append((label, obj))\n return self", "title": "" }, { "docid": "8fa8112eebaea4020f49663983b685f8", "score": "0.56598586", "text": "def draw(_delta_time):\n\n # Start the render. This must happen before any drawing\n # commands. 
We do NOT need an stop render command.\n arcade.start_render()\n\n update_coords()\n\n global state\n for boid in state['objects']:\n if boid.alive:\n arcade.draw_circle_filled(\n state['boids_xy'][boid.idx].real,\n state['boids_xy'][boid.idx].imag,\n boid.SIZE,\n boid.COLOR,\n )\n\n for obstacle in state['obstacles']:\n arcade.draw_circle_outline(\n state['obstacles_xy'][obstacle.idx].real,\n state['obstacles_xy'][obstacle.idx].imag,\n obstacle.SIZE,\n obstacle.COLOR,\n )", "title": "" }, { "docid": "50a0a891c9729b5d405329701b87f9e6", "score": "0.56466866", "text": "def cmd_add(self, obj, **kwargs):\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "d1b5981dcf50af3110598ac526b4fa2c", "score": "0.5643989", "text": "def draw(self, surface):\n AnimateSpriteObject.draw(self, surface)", "title": "" }, { "docid": "8f2a01aa16c03f02dcaefe47e7b93e51", "score": "0.5632726", "text": "def add_objects():\n add_collision_object(0.0, 0.0, -0.10, 2, 2, 0.2, 'table')\n # add_collision_object(0, 0.7, 0.5, 2, .2, 1, 'left_wall')\n add_collision_object(0.6, 0, 0.5, .2, 2, 1, 'rear_wall')\n # add_collision_object(0.4, -0.8, 0.5, .4, .4, 1, 'right_wall')\n add_collision_object(-0.95, -0, 0.35, .1, .1, .1, 'camera')", "title": "" }, { "docid": "3afb3557c0f36d224bb500550f256f85", "score": "0.5615834", "text": "def add(self, drawable):\n self.drawableSeq.insert(drawable)", "title": "" }, { "docid": "1361addead6f3732023630fef3da4dcc", "score": "0.56133604", "text": "def add_obj_A(self):\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = \"base_link\"\n box_pose.pose.position.x=0.23+0.09\n box_pose.pose.position.y=-0.68\n box_pose.pose.position.z=0.648\n box_pose.pose.orientation.w = 1.0\n box_name = \"box\"\n self.scene.add_box(box_name, box_pose, size=(0.05, 0.47, 0.04))\n\n\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = \"base_link\"\n box_pose.pose.position.x=-0.23+0.09\n box_pose.pose.position.y=-0.68\n box_pose.pose.position.z=0.648\n box_pose.pose.orientation.w = 1.0\n box_name = \"box1\"\n self.scene.add_box(box_name, box_pose, size=(0.05, 0.47, 0.04))\n\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = \"base_link\"\n box_pose.pose.position.x=0.0\n box_pose.pose.position.y=-0.68\n box_pose.pose.position.z=0.2\n box_pose.pose.orientation.w = 1.0\n box_name = \"base\"\n self.scene.add_box(box_name, box_pose, size=(1.5, 0.47, 0.85))\n\n return \"add_obj_A\"", "title": "" }, { "docid": "2de36574b334b8eb36784a3a1ddeac17", "score": "0.5590289", "text": "def add(obj_name):", "title": "" }, { "docid": "e54c75b0a8c4fba3d6faa65fd06215f7", "score": "0.5588237", "text": "def add_new_object(fobj, mainname, target, dispname=None):\n\n global dbcurs, verbose, src\n\n obj = objdata.ObjData(objname=mainname, dispname=dispname)\n fobj.names.discard(mainname)\n add_alias_set(obj, fobj.names)\n fobj.update_object(obj)\n obj.vicinity = target\n try:\n obj.put(dbcurs)\n except objdata.ObjDataError as e:\n print(\"Unexpected error adding new object\", obj.objname, \"for target\", target, e.args[0], e.args[1], file=sys.stderr)\n sys.exit(251)\n if verbose:\n if obj.is_target():\n print(\"Add target\", target, file=sys.stderr)\n else:\n print(\"Added object\", obj.objname, \"near to\", target, file=sys.stderr)", "title": "" }, { "docid": "804cd7fb4386a144cefa41f2cf9f4c98", "score": "0.5578757", "text": "def add(ctx, targets):\n ctx.obj[Keys.OBJ].add(targets)", "title": "" }, { "docid": "da53706632a4f322ed93892b53a0ebe8", "score": 
"0.5576557", "text": "def add_object(self, object_id=None, object_type=None, **kwargs):\n if not object_id:\n object_id = uuid4().hex\n return self.driver.add_object(object_id=object_id, object_type=object_type, **kwargs)", "title": "" }, { "docid": "b56b28de8c0a6125c553874578af8f3d", "score": "0.55585176", "text": "def add(self, pt):\n if len(pt) == 2:\n self.representation.AddNodeAtDisplayPosition(int(pt[0]), int(pt[1]))\n else:\n self.representation.AddNodeAtWorldPosition(pt)\n return self", "title": "" }, { "docid": "81931b3c5838ac87bbd44f20ddd6ae61", "score": "0.5551483", "text": "def Obj_add (self, object_in, object_param_in, object_s_location):\n loc_env_road_s = self.road_len\n tmp_s_index = np.min(np.where(loc_env_road_s >= object_s_location)) - 1\n self.object_list[tmp_s_index].add_object(object_in,object_param_in,object_s_location)", "title": "" }, { "docid": "f1b778a823a0ffe264201a90d1ac281f", "score": "0.5549248", "text": "def on_draw(self):\n\n # clear the screen to begin drawing\n arcade.start_render()\n\n # draw each object\n self.rifle.draw()\n\n for bullet in self.bullets:\n bullet.draw()\n\n # TODO: iterate through your targets and draw them...\n for target in self.targets:\n target.draw()\n\n self.draw_score()", "title": "" }, { "docid": "f1b778a823a0ffe264201a90d1ac281f", "score": "0.5549248", "text": "def on_draw(self):\n\n # clear the screen to begin drawing\n arcade.start_render()\n\n # draw each object\n self.rifle.draw()\n\n for bullet in self.bullets:\n bullet.draw()\n\n # TODO: iterate through your targets and draw them...\n for target in self.targets:\n target.draw()\n\n self.draw_score()", "title": "" }, { "docid": "85e808e9b57bb06471be0ae9bde87274", "score": "0.55477774", "text": "async def async_added_to_hass(self):\n self.hass.data[DOMAIN][\"entities\"][\"scene\"].append(self)", "title": "" }, { "docid": "7b3dc112872f5467085ca774e742ea54", "score": "0.5541414", "text": "def add(self,obj_idx,bbox):\n cls = None\n vel_x = 0\n vel_y = 0\n for frame in self.labels:\n labels = self.labels[frame]\n for box in labels:\n if int(box[2]) == obj_idx and float(box[8]) != 0:\n cls = box[3]\n vel_x = float(box[8])\n vel_y = float(box[9])\n break\n if cls is not None:\n break\n \n if cls is None: # try again but accepting boxes with no speed estimate\n for frame in self.labels:\n labels = self.labels[frame]\n for box in labels:\n if int(box[2]) == obj_idx:\n cls = box[3]\n break\n if cls is not None:\n break\n \n if cls is None:\n print(\"This object does not exist elsewhere. 
Input class:\")\n cls = self.keyboard_input()\n \n timestamp = \"\"\n try:\n for box in self.labels[self.frame_num]:\n if len(box[1]) > 0:\n timestamp = box[1]\n break\n except:\n pass\n \n size = 100\n newbox = np.array([bbox[0],bbox[1],bbox[0] + size,bbox[1],bbox[0],bbox[1] + size, bbox[0]+size,bbox[1] + size])\n newbox2 = newbox + size/2.0\n new_row = [self.frame_num,\"\",obj_idx,cls] + list(newbox2) + list(newbox)\n print(new_row)\n try:\n self.labels[self.frame_num].append(new_row)\n except:\n self.labels[self.frame_num] = [new_row]\n \n print(\"Added box for object {} to frame {}\".format(obj_idx,self.frame_num))\n \n self.realign(obj_idx,self.frame_num)\n #self.next() # for convenience", "title": "" }, { "docid": "842250c2d7c0cd9fd89d1c7809aeb023", "score": "0.55251324", "text": "def addObject(self, objType, structuralNavigationObject):\n\n self.enabledObjects[objType] = structuralNavigationObject", "title": "" }, { "docid": "842250c2d7c0cd9fd89d1c7809aeb023", "score": "0.55251324", "text": "def addObject(self, objType, structuralNavigationObject):\n\n self.enabledObjects[objType] = structuralNavigationObject", "title": "" }, { "docid": "62dff5cd3c1bd31164aa25f9d43d2603", "score": "0.5521952", "text": "def add_body(self, body):\r\n self.bodies.append(body)", "title": "" }, { "docid": "62dff5cd3c1bd31164aa25f9d43d2603", "score": "0.5521952", "text": "def add_body(self, body):\r\n self.bodies.append(body)", "title": "" }, { "docid": "beba4400d2a910db47d537b734ecd627", "score": "0.5517198", "text": "def _add_object(parent, gn_feature):\n obj = _Xml.SubElement(parent, _TAG_OBJECT)\n if not gn_feature:\n return\n gn_feature.write_elements(obj)", "title": "" }, { "docid": "213a8513f9ab768c54da7e76b6c56f3f", "score": "0.55042744", "text": "def spawn_object(self, obj_type, pos=None):\n radius = self.settings[\"object_radius\"]\n\n # max_speed = np.array(self.settings[\"maximum_speed\"])\n # speed = np.random.uniform(-max_speed, max_speed).astype(float)\n # speed = Vector2(float(speed[0]), float(speed[1]))\n if pos is not None:\n self.objects.append(GameObject(pos, obj_type, self.settings))\n else:\n pos = np.random.uniform([radius, radius, radius], np.array(self.size) - radius)\n pos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))\n self.objects.append(GameObject(pos, obj_type, self.settings))", "title": "" }, { "docid": "d747e004e892d93775e7eccdc40c5dd2", "score": "0.5493614", "text": "def add(self, obj):\n self._content[obj] = obj", "title": "" }, { "docid": "158c5a4700fd3653823c0c5411b7fbfa", "score": "0.54920566", "text": "def add(self, x, y):\n self.enemies_list.append(Enemy(x, y))\n self.add_count = 0", "title": "" }, { "docid": "d875eaaeb48bc285ca52eb9fe83b2db6", "score": "0.5484379", "text": "def add(self, pos):\n self.snake_list.append(pos)", "title": "" }, { "docid": "8cbc652eb0cb810dee3f157c52909a0c", "score": "0.54818267", "text": "def on_draw(self):\n\n # Start rendering\n arcade.start_render()\n\n # Draw a blue circle with a radius of 50 in the center of the screen\n arcade.draw_circle_filled(\n center_x=WIDTH // 2,\n center_y=HEIGHT // 2,\n radius=50,\n color=arcade.color.BLUE,\n num_segments=50,\n )\n\n # Draw a red-outlined square in the top-left corner of the screen\n arcade.draw_lrtb_rectangle_outline(\n left=50,\n top=HEIGHT - 50,\n bottom=HEIGHT - 100,\n right=100,\n color=arcade.color.RED,\n border_width=3,\n )\n\n # Draw an orange caption along the bottom in 60-point font\n arcade.draw_text(\n text=\"Hello, World! 
From Arcade!\",\n start_x=100,\n start_y=50,\n font_size=28,\n color=arcade.color.ORANGE,\n )", "title": "" }, { "docid": "c629b75452c507c2c4c2ba1bf4c82e9e", "score": "0.54598016", "text": "def draw(self):\r\n self.obj = box(pos = self.center,axis = self.i,up = self.j,size = self.size,color = self.color, make_trail = True)\r\n #self.draw_edges() \r\n if self.tray:\r\n self.dots.append(self.center)\r\n self.dotv.append(self.list_of_vertices[2])", "title": "" }, { "docid": "3585dbbd249e511dcc026467f5501612", "score": "0.54592645", "text": "def add_object(self, name, sort, position, **kwargs):\n obj = objects_map[sort](weakref.proxy(self), name, position, **kwargs)\n self._objects.append(obj)\n return self.get_object(obj.name)", "title": "" }, { "docid": "95ea7bff44270a7e3bc5724fc7227e77", "score": "0.54570484", "text": "def on_draw(self):\n arcade.start_render()\n self.draw_array(self.array)\n #if(self.moving):\n # self.array = self.updateLife()", "title": "" }, { "docid": "05584da4c93911a8fb20c669b3cda747", "score": "0.54514074", "text": "def on_draw(self):\n\n # clear the screen to begin drawing\n arcade.start_render()\n\n # TODO: draw each object\n # self.ship.draw()\n\n for asteroid in self.asteroids:\n asteroid.draw()", "title": "" }, { "docid": "c5ab7a052a319398024e9e8880d0e601", "score": "0.5449345", "text": "def update_scene(camera, cam_pos, cam_rot, is_vertical, scene, img_width, img_height, object_name, coords, size_factor):\n # Get the 3D cursor location\n cursor_pos = bpy.context.space_data.cursor_location\n # Get transformation matrix for vertical orientation\n vert_matrix = get_vertical_mode_matrix(is_vertical, cam_rot)\n # Set the camera position and rotation\n cam_rot = cam_rot.copy()\n cam_rot.rotate(vert_matrix)\n set_camera_transformation(camera, vert_matrix * cam_pos * size_factor + cursor_pos, cam_rot)\n # Apply the transformation matrix for vertical orientation\n for i in range(4):\n coords[i].rotate(vert_matrix)\n # Set the render resolution\n scene.render.resolution_x = img_width\n scene.render.resolution_y = img_height\n # Add the rectangle to the scene (at the 3D cursor location)\n bpy.ops.mesh.primitive_plane_add()\n rect = bpy.context.object\n # Rename the rectangle\n rect.name = object_name_append(object_name, \"_Cal\")\n # Set the correct size (local coordinates)\n for i in range(4):\n rect.data.vertices[rect.data.polygons[0].vertices[i]].co = coords[i] * size_factor", "title": "" }, { "docid": "ccd744d8510057de28640fd537613013", "score": "0.5449117", "text": "def add_to_queue(self, object_):\n self.queue.append(object_)", "title": "" }, { "docid": "1ed03fbb82387efc5ee85dace2f22e5b", "score": "0.544644", "text": "def add(self, obj):\n self.array = np.insert(self.array, self.len, obj)\n self.len += 1", "title": "" }, { "docid": "859f05b0199038c50002b9f7e1ab9f7b", "score": "0.54456127", "text": "def add(self, obj: object) -> None:\n self._contains.append(obj)", "title": "" }, { "docid": "79b39bf6d0a6e5f8792c69e15a238745", "score": "0.54455125", "text": "def add_object(self,\n name,\n xmin,\n ymin,\n xmax,\n ymax,\n pose='Unspecified',\n truncated=0,\n difficult=0):\n bndbox = Bndbox(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)\n obj = XmlObject(name=name,\n pose=pose,\n truncated=truncated,\n difficult=difficult,\n bndbox=bndbox)\n self.object.append(obj)\n return self", "title": "" }, { "docid": "8b4739dd1fb3fe3e0288e197dae5f090", "score": "0.54338574", "text": "def on_draw(self):\r\n self.clear()\r\n self.set_3d()\r\n glColor3d(1, 1, 1)\r\n 
#self.model.batch.draw()\r\n #self.draw_focused_block()\r\n self.set_2d()\r\n #self.draw_label()\r\n #self.draw_reticle()\r", "title": "" }, { "docid": "a8af624e71e92beba5796f47b7f2efa5", "score": "0.5431381", "text": "def add(self, *groups):\n\t\tspyral.sprite.Sprite.add(self, *groups);\n\t\tself.face.add(*groups);", "title": "" }, { "docid": "e10c6f91f2fee61c2798e0712e536315", "score": "0.5426278", "text": "def add_scene(self, description: str):\n self.scenes.append(Scene(description))\n return self.scenes[-1]", "title": "" }, { "docid": "d1c2f51df20aa1ab582f891f82720204", "score": "0.54234225", "text": "def push(self, obj):\n construct = import_module('.construct', package='ndk')\n assert isinstance(\n obj, construct.Construct), f'{obj} must be Construct instance'\n self.objects[obj.__object_type__][obj.pk] = obj", "title": "" }, { "docid": "fa3006a0a8313d16718db2ac955ea41e", "score": "0.5416583", "text": "def draw(self):\n for obj in self.objects:\n obj.draw()", "title": "" }, { "docid": "bb6673e3d783a385ce13dfca624ac3b4", "score": "0.5414977", "text": "def add_hero(self, Hero):\n self.heroes.append(Hero)", "title": "" }, { "docid": "bb6673e3d783a385ce13dfca624ac3b4", "score": "0.5414977", "text": "def add_hero(self, Hero):\n self.heroes.append(Hero)", "title": "" }, { "docid": "6d83e8f3ae21d938fec2390fcb3d6217", "score": "0.5403919", "text": "def on_draw(self):\n self.clear()\n #self.set_3d()\n #glColor3d(1, 1, 1)\n #self.model.batch.draw()\n self.set_2d()\n self.draw_label()\n #self.draw_reticle()", "title": "" }, { "docid": "ce12c6d054458fb5b2f524b83c66164f", "score": "0.540063", "text": "def add_object_to_list(self, obj):\n self.display_file.append(obj)\n\n item = ObjectItem(obj)\n self.main_window.items_model.appendRow(item)", "title": "" }, { "docid": "9e9590139ead965a3456d62d5006f0e4", "score": "0.53936833", "text": "def add(self, o):\n return getReference().add(o)", "title": "" } ]
a321327085845f89b15a4fd1c9bff20f
Respond to bullet-alien collisions.
[ { "docid": "f6d3eff3d1b5e970d3681a443bc145f3", "score": "0.7386176", "text": "def check_bullet_alien_collisions(ai_settings,screen, stats, sb, ship, aliens, super_aliens, bullets):\n\n # Check if any bullet collision with any alien\n bullet_alien_collision(ai_settings,screen, stats, sb, ship, aliens, bullets)\n\n # Check if any bullet collision with any super alien\n bullet_super_alien_collisions(ai_settings,screen, stats, sb, ship, super_aliens, bullets)\n \n # Check if totally fleet are down\n check_fleet_down(ai_settings, screen, stats, sb, aliens, super_aliens, bullets, ship )", "title": "" } ]
[ { "docid": "2da448b58ac9f807dd2ed67e78fb61c3", "score": "0.79089516", "text": "def handle_alien_bullets(self):\n for bullet in self.alien_bullet_sprite_list:\n if bullet.rect.colliderect(self.player.rect):\n # ammo hit the player\n bullet.kill() # remove the bullet so the same one won't collide with player again\n self.player.lives -= 1\n if self.player.lives < 1:\n self.end_game()\n else:\n # tell the microbit to vibrate\n self.s.write(str.encode(\"3\"))\n\n # clean up bullet if it is outside of screen\n if bullet.rect.y > self.height + 10:\n bullet.kill()", "title": "" }, { "docid": "7401c59850b5be6aafd8149b2d8e3e57", "score": "0.7706271", "text": "def check_bullet_alien_collisions(self):\n collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)\n if collisions:\n for aliens in collisions.values():\n self.stats.score += self.settings.alien_reward * len(aliens)\n self.sb.prep_score()\n self.check_high_score()\n\n if len(self.aliens) == 0:\n # Destroy all bullets and create a new fleet.\n self.bullets.empty()\n self.settings.increase_speed()\n self.stats.level += 1\n self.sb.prep_level()\n self.create_fleet()", "title": "" }, { "docid": "080b0debc815d7f27654a40d6523ec89", "score": "0.7608089", "text": "def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo):\r\n collisions = pygame.sprite.groupcollide(bullets, aliens, True, False, collided=alien_collision_check)\r\n # When collisions occur to aliens\r\n if collisions:\r\n for aliens_hit in collisions.values():\r\n for a in aliens_hit:\r\n # Set scores\r\n stats.score += ai_settings.alien_points[str(a.alien_type)]\r\n a.bullet_collide()\r\n sb.prep_score()\r\n check_high_score(stats, sb)\r\n # When collisons occur to ufo\r\n ufo_collide = pygame.sprite.groupcollide(bullets, ufo, True, False, collided=alien_collision_check)\r\n if ufo_collide:\r\n for ufo in ufo_collide.values():\r\n # Update scores\r\n for u in ufo:\r\n stats.score += u.score\r\n u.hit_ufo()\r\n sb.prep_score()\r\n check_high_score(stats, sb)\r\n if len(aliens) == 0:\r\n if ufo:\r\n for u in ufo.sprites():\r\n u.died()\r\n # Manages the alien bullets and manages level increases\r\n alien_bullets.empty()\r\n bullets.empty()\r\n stats.level += 1\r\n level_intro(ai_settings, screen, stats)\r\n # Manages music speed\r\n ai_settings.increase_base_speed()\r\n # Manages alien speed\r\n ai_settings.reset_alien_speed()\r\n # Calls to show scoreboard\r\n sb.prep_level()\r\n # Crate the fleet of aliens\r\n create_fleet(ai_settings, screen, ship, aliens)\r\n stats.next_speedup = len(aliens) - (len(aliens) // 5)\r\n stats.aliens_left = len(aliens)\r\n if stats.aliens_left <= stats.next_speedup and ai_settings.alien_speed_factor < ai_settings.alien_speed_limit:\r\n # Increases speed of aliens when certain aliens left\r\n ai_settings.increase_alien_speed()\r\n stats.next_speedup = stats.aliens_left - (stats.aliens_left // 5)", "title": "" }, { "docid": "341fc07b81ec381c32e00bdc208c4e31", "score": "0.7602166", "text": "def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):\n collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\n sound = Sound()\n if collisions:\n for aliens in collisions.values():\n stats.score += ai_settings.alien_points\n sb.prep_score()\n if len(aliens) == 0:\n sound.play_sound_win()\n bullets.empty()\n ai_settings.increase_speed()\n stats.level += 1\n sb.prep_level()\n create_fleet(ai_settings, screen, stats, ship, aliens)", "title": "" }, { 
"docid": "684866e599b1e9ee9262bfaaf367f9fd", "score": "0.7459866", "text": "def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):\n collisions = pygame.sprite.groupcollide(bullets, aliens, False, True)\n if collisions:\n for aliens in collisions.values():\n stats.score += ai_settings.alien_points * len(aliens)\n sb.prep_score()\n check_high_scores(stats, sb)\n # replenish the fleet\n if len(aliens) == 0:\n bullets.empty()\n # level-up!\n stats.level += 1\n sb.prep_level()\n ai_settings.increase_speed()\n call_alien_fleet(ai_settings, screen, ship, aliens)", "title": "" }, { "docid": "ef46dae93998d1297912d143222a6874", "score": "0.7454963", "text": "def _check_bullet_alien_collisions(self):\n # check if any bullets have hit aliens, if so remove both\n # groupcollide() \n # - checks for collision between 2 groups\n # - creates a ky value pair (key = bullet, value = alien)\n # - arguments for removing each element\n collisions = pygame.sprite.groupcollide( \n self.bullets, self.aliens, True, True\n )\n\n if collisions:\n # in case multiple aliens are hit in same loop\n # each value is a list of aliens hit by a single bullet\n for aliens in collisions.values(): \n self.stats.score += self.settings.alien_points * len(aliens)\n self.sb.prep_score() #updates image\n self.sb.check_high_score() #check everytime there's a hit\n\n if not self.aliens: # see if aliens group is empty\n # destroy existing bullets and create new fleet\n self.bullets.empty() # .empty() removes all sprites from group\n self._create_fleet()\n self.settings.increase_speed()\n\n # increase level\n self.stats.level += 1\n self.sb.prep_level() # update image", "title": "" }, { "docid": "0973f286cb1975b25d8cabc2c4f0a0ad", "score": "0.7432529", "text": "def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo):\n collisions = pygame.sprite.groupcollide(bullets, aliens, True, False, collided=alien_collision_check)\n if collisions:\n for aliens in collisions.values():\n for alien in aliens:\n stats.score += ai_settings.alien_points[str(alien.alien_type)]\n alien.begin_death()\n sb.prep_score()\n check_high_score(stats, sb)\n\n ufo_collision = pygame.sprite.groupcollide(bullets, ufo, True, False, collided=alien_collision_check)\n if ufo_collision:\n for ufos in ufo:\n stats.score = stats.score + ufos.score\n ufos.begin_death()\n sb.prep_high_score()\n check_high_score(stats, sb)\n\n if len(aliens) == 0:\n if ufo:\n for ufos in ufo.sprites():\n ufos.kill()\n # If the entire fleet is destroyed, start a new level\n bullets.empty()\n lasers.empty()\n ai_settings.increase_speed()\n # Increase level\n stats.level += 1\n sb.prep_level()\n create_fleet(ai_settings, screen, ship, aliens)", "title": "" }, { "docid": "e9c4acbaa07803051befa6dcd8efc45b", "score": "0.7386704", "text": "def check_collision_bullet_aliens (ai_settings , screen,stats, ship, bullets, aliens,score_board):\n collisions = pygame.sprite.groupcollide(bullets,aliens,True, True)\n if collisions:\n for value_aliens in collisions.values():\n stats.score += ai_settings.alien_points\n score_board.prep_score()\n check_high_score(stats,score_board)\n if len(aliens)== 0:\n #destroy existing bullets and create new fleet\n update_level(ai_settings,screen, stats,aliens,ship, bullets,score_board)", "title": "" }, { "docid": "c9353a8eb8ff8e325cac71de7618f0a1", "score": "0.738237", "text": "def check_ship_alien_bullet_collisions(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo):\r\n 
collide = pygame.sprite.spritecollideany(ship, alien_bullets)\r\n # Manages the collision of the ship\r\n if collide:\r\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo)", "title": "" }, { "docid": "aef11253df631c65d8049d0053b9ba28", "score": "0.73730403", "text": "def _check_bullet_allien_collisions(self):\r\n # Remove any bullets an allien that have collided\r\n collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True,True)\r\n if collisions:\r\n for aliens in collisions.values():\r\n self.stats.score += self.settings.alien_points * len(aliens)\r\n self.sb.prep_score()\r\n self.sb.check_high_score()\r\n se.alien_sound.play()\r\n\r\n if not self.aliens:\r\n # Destroy existing bullets and create new fleet\r\n self.start_new_level()", "title": "" }, { "docid": "f2d1ca1c193690ee5486aaba7f220232", "score": "0.73412806", "text": "def check_bullet_alien_collisions(ai_settings, screen, ship, aliens, bullets,stats,sb):\n\t# Remove any bullets and aliens that have collided.\n\tcollisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\n\t#print(collisions)\n\tif collisions:\n\t\tstats.score += ai_settings.alien_points\n\t\tsb.prep_score()\n\t\tprint(stats.score)\n \n\n\tif len(aliens) == 0:\n\t\t# Destroy existing bullets, and create new fleet.\n\t\tbullets.empty()\n\t\tai_settings.increase_speed()\n\t\tcreate_fleet(ai_settings, screen, ship, aliens)", "title": "" }, { "docid": "634436992a8f293682989798614e6422", "score": "0.7331525", "text": "def _check_bullet_alien_collisions(self):\n # Remove any bullets and aliens that have collided.\n collisions = pygame.sprite.groupcollide(\n self.bullets, self.aliens, True, True)\n\n if collisions:\n for aliens in collisions.values():\n self.stats.score += self.settings.alien_points * len(aliens)\n self.stats.aliens_destroyed += len(aliens)\n self.sb.prep_score()\n self.sb.check_high_score()\n\n if not self.aliens:\n # Destroy existing bullets and create new wave.\n self.bullets.empty()\n self.settings.increase_speed()\n\n # Increase wave.\n self._create_wave()\n self.stats.wave += 1\n self.sb.prep_wave()\n\n # Pause to allow player to get ready.\n self.stats.incoming_wave = True\n\n # Create timer to use when waiting for incoming wave of aliens.\n self.timer = ResumableTimer(1.0, self._incoming_wave)\n self.timer.start()", "title": "" }, { "docid": "902b57256fd280393ab5f9e34e66e5c7", "score": "0.7229551", "text": "def alien_collision_check(bullet, alien):\r\n if alien.dead:\r\n return False\r\n return pygame.sprite.collide_rect(bullet, alien)", "title": "" }, { "docid": "e679e809564939f461de2c7878b740ec", "score": "0.71961635", "text": "def alien_collision_check(bullet, alien):\n if alien.dead:\n return False\n return pygame.sprite.collide_rect(bullet, alien)", "title": "" }, { "docid": "903806e615479626bbde7d760588d70d", "score": "0.7193069", "text": "def bullet_super_alien_collisions(ai_settings,screen, stats, sb, ship, super_aliens, bullets):\n collisions = pygame.sprite.groupcollide(bullets, super_aliens, True, False)\n\n if collisions:\n for super_aliens_list in collisions.values():\n for super_alien in super_aliens_list:\n # If the super alien have two shoot\n if super_alien.scratch:\n # Delete super alien\n super_aliens.remove(super_alien)\n # Assign points\n assign_points(ai_settings, stats, sb, super_aliens=super_aliens_list)\n # Update high score\n check_high_score(stats, sb)\n else:\n # The super alien have one shoot\n super_alien.scratch = 1", "title": "" }, { "docid": 
"fbff9534d3edcd2b4af43336bce2db3e", "score": "0.7187185", "text": "def update_alienBullet(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo):\r\n # Create bullets\r\n bullets.update()\r\n alien_bullets.update()\r\n\r\n for bullet in bullets.copy():\r\n if bullet.rect.bottom <= 0:\r\n bullets.remove(bullet)\r\n for alien_bullet in alien_bullets.copy():\r\n if alien_bullet.rect.bottom > ai_settings.screen_height:\r\n alien_bullets.remove(alien_bullet)\r\n\r\n # Check for functions when contact\r\n check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo)\r\n check_ship_alien_bullet_collisions(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo)", "title": "" }, { "docid": "131208b9ae4d7b702e02c2a6e211a111", "score": "0.7112396", "text": "def _check_bullet_alien_ship_collisions(self):\n # Remove the alien ship if a bullet hits.\n collision = pg.sprite.groupcollide(self.bullets, self.fleet, True, True)\n if collision:\n self.stats.score += self.settings.point\n self.sb.prep_score()\n self.sb.check_high_score()\n\n if not self.fleet:\n self.bullets.empty()\n self._create_fleet()\n self.settings.increase_speed()", "title": "" }, { "docid": "7802f4629da81e93b8c7ada6bd4d000a", "score": "0.6862252", "text": "def handle_player_bullets(self):\n for bullet in self.player_bullet_sprite_list:\n # check if hit alien\n alien_hit_list = pygame.sprite.spritecollide(bullet, self.alien_sprite_list, dokill=True)\n\n for alien in alien_hit_list:\n bullet.kill() # removes sprite from all sprite lists\n explosion = Explosion(alien.rect.center)\n self.all_sprites_list.add(explosion)\n self.points += 1\n self.s.write(str.encode(\"4\"))\n\n # check if hit alien bullet\n alien_bullet_hit_list = pygame.sprite.spritecollide(bullet, self.alien_bullet_sprite_list, True)\n\n for alien_bullet in alien_bullet_hit_list:\n bullet.kill()\n self.points += 1\n self.s.write(str.encode(\"4\"))\n\n ufo_hit_list = pygame.sprite.spritecollide(bullet, self.ufo_sprites_list, True)\n\n for ufo in ufo_hit_list:\n bullet.kill()\n explosion = Explosion(ufo.rect.center)\n self.all_sprites_list.add(explosion)\n self.points += 500\n self.s.write(str.encode(\"4\"))\n\n # clean up bullet if it is outside of screen\n if bullet.rect.y < -10:\n bullet.kill()", "title": "" }, { "docid": "fb20977ee66f8f6b8e2faa6bafbb12b9", "score": "0.6850618", "text": "def check_bullet_ship_collision(settings, stats, screen, sb, ship,\n alien_groups, bullets, blockade):\n\n alien_fire = Group()\n for bullet in bullets:\n if bullet.direction == 1:\n alien_fire.add(bullet)\n\n if pygame.sprite.spritecollideany(ship, alien_fire):\n ship_hit_bullet(settings, stats, screen, sb, ship, alien_groups,\n bullets, blockade)", "title": "" }, { "docid": "22bf18ebc8b573bf95bfb3576c0207a1", "score": "0.67962635", "text": "def check_bullet_alien_collision(settings, stats, screen, sb, ship,\n alien_groups, bullets):\n\n ships_fire = Group()\n for bullet in bullets:\n if bullet.direction == -1:\n ships_fire.add(bullet)\n\n # Check if any bullets have hit aliens, remove alien if hit, dependant on\n # game settings remove bullets.\n for alien_column in alien_groups:\n if settings.powerbullets == True:\n collisions = pygame.sprite.groupcollide(ships_fire, alien_column, False, True)\n else:\n collisions = pygame.sprite.groupcollide(ships_fire, alien_column, True, True)\n\n if collisions:\n for alien in collisions.values():\n stats.score += settings.alien_points * len(alien_column)\n 
sb.prep_score()\n settings.increase_alien_fire()\n check_high_score(stats, sb)\n\n # Define the front line for alien fire.\n define_frontline(alien_column)\n\n # If the entire fleet is destroyed, start a new level.\n count = 0\n for alien_column in alien_groups:\n count += len(alien_column)\n if count == 0:\n make_new_level(settings, stats, screen, sb, ship, alien_groups, bullets)", "title": "" }, { "docid": "b7d80c4a3724982a5baf3e8b38f34666", "score": "0.6794478", "text": "def hit_by_bullet(self):\n pass", "title": "" }, { "docid": "b58c25d2497f0a0c888fc6adff969c11", "score": "0.6759232", "text": "def shipcollision(self):\n for bolt in self._bolts:\n if self._ship.shipcollides(bolt):\n self._blast2.play()\n self._bolts.remove(bolt)\n self.setLives(1)\n if self.lowest_alien() <= DEFENSE_LINE + ALIEN_HEIGHT//2:\n self.setLives(3)", "title": "" }, { "docid": "ea3d5e04bf64ca2b57dce9be8f22db52", "score": "0.6755185", "text": "def fire_bullet(ai_Settings,screen, ship, bullets):\n if len(bullets) < ai_Settings.bullets_allowed:\n new_bullet = Bullet(ai_Settings,screen,ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "e51b0b168b7f7b573bb5b1391e4351b2", "score": "0.6752768", "text": "def hit(self, bullet, player):\n if not self.inmune:\n self.health -= bullet.damage\n\n if self.health <= 0:\n if player.lifes <= 0:\n print(\"FIN DEL JUEGO\")\n sys.exit()\n else:\n player.lifes -= 1\n player.create_ship()", "title": "" }, { "docid": "95144abf27721cb8589a25110d1a0ede", "score": "0.66917086", "text": "def update(self):\n # Warm up the gun\n self.heat+=self.heat_change\n \n # Move the bullets\n self.rect.x+=self.change_x\n self.rect.y+=self.change_y\n \n # If the gun is warmed up, shoot the bullets\n if (self.heat>=2) and (self.firing == False):\n # Pull the trigger, BOOOMMMMMMMMMMMMMM!!\n self.firing=True\n \n # Calculate the velocity and angle of the bullets\n self.difference_y = aim.rect.y + 72 - self.rect.y \n self.difference_x = aim.rect.x + 100 - self.rect.x\n if self.difference_y>0 and self.difference_x<0:\n self.angle=math.pi+math.atan((self.difference_y/self.difference_x))\n elif self.difference_y<=0 and self.difference_x<0:\n self.angle=-1*math.pi+math.atan((self.difference_y/self.difference_x))\n elif self.difference_x==0 and self.difference_y<=0:\n self.angle=math.radians(-90)\n elif self.difference_x==0 and self.difference_y>0:\n self.angle=math.radians(90)\n else:\n self.angle=math.atan(self.difference_y/self.difference_x)\n\n self.change_x=float(self.speed*math.degrees(math.cos(self.angle)))\n self.change_y=float(self.speed*math.degrees(math.sin(self.angle))) \n \n # Set the position of the bullet to fit the gun's position\n elif self.firing == False:\n if gun_image.faceFront==True:\n self.rect.left=gun_image.rect.left\n if gun_image.faceFront==False:\n self.rect.right=gun_image.rect.right \n self.rect.y=gun_image.rect.y \n \n \n \n # Check the collision with other sprites \n for bullet in machine_gun_list:\n enemy_hit_list = pygame.sprite.spritecollide(bullet, current_level.enemy_list , False)\n block_hit_list = pygame.sprite.spritecollide(bullet, player.level.platform_list,False)\n boss_hit_list = pygame.sprite.spritecollide(bullet, current_level.boss_list , False)\n \n # If it hits enemy, remove the enemy\n if len(enemy_hit_list) > 0:\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet) \n sound1 = pygame.mixer.Sound(os.path.join(\"music\",\"shot.wav\"))\n sound1.play()\n for trap in enemy_hit_list:\n current_level.enemy_list.remove(trap)\n 
all_sprite_list.remove(trap)\n \n player.hit_num += len(enemy_hit_list)\n \n # If it hits platform, remove itself\n if len(block_hit_list) > 0 :\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet) \n \n # If it hits boss, remove boss's hp \n if len(boss_hit_list) > 0:\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet)\n for boss in current_level.boss_list:\n if boss.hp >= 2:\n boss.hp -= self.dmg\n if boss.hp <2:\n boss.hp = 0\n \n if boss.hp == 0:\n current_level.boss_list.remove(boss)\n \n for hp1 in current_level.hp_bar_group:\n hp1_length = hp1.length\n hp1_length = 600 * boss.hp /200\n hp1.length = hp1_length", "title": "" }, { "docid": "24b6d663f7b878314b81c1f84a9791f2", "score": "0.6654631", "text": "def update(self):\n # Warm up\n self.heat+=self.heat_change\n \n # Move the bullet\n self.rect.x+=self.change_x\n self.rect.y+=self.change_y\n \n # If warmed up, shoot the bullet\n if (self.heat>=2) and (self.firing == False):\n self.firing=True\n self.difference_y = aim.rect.y + 72 - self.rect.y \n self.difference_x = aim.rect.x + 100 - self.rect.x\n if self.difference_y>0 and self.difference_x<0:\n self.angle=math.pi+math.atan((self.difference_y/self.difference_x))\n elif self.difference_y<=0 and self.difference_x<0:\n self.angle=-1*math.pi+math.atan((self.difference_y/self.difference_x))\n elif self.difference_x==0 and self.difference_y<=0:\n self.angle=math.radians(-90)\n elif self.difference_x==0 and self.difference_y>0:\n self.angle=math.radians(90)\n else:\n self.angle=math.atan(self.difference_y/self.difference_x)\n\n self.change_x=float(self.speed*math.degrees(math.cos(self.angle+math.radians(self.angle_change))))\n self.change_y=float(self.speed*math.degrees(math.sin(self.angle+math.radians(self.angle_change))))\n \n # Change the facing direction\n elif self.firing == False:\n if gun_image.faceFront==True:\n self.rect.left=gun_image.rect.left\n if gun_image.faceFront==False:\n self.rect.right=gun_image.rect.right \n self.rect.y=gun_image.rect.y \n \n \n \n # If we hit something \n for bullet in machine_gun_list:\n enemy_hit_list = pygame.sprite.spritecollide(bullet, current_level.enemy_list , False)\n block_hit_list = pygame.sprite.spritecollide(bullet, player.level.platform_list,False)\n boss_hit_list = pygame.sprite.spritecollide(bullet, current_level.boss_list , False)\n \n # Hit enemy, kill them\n if len(enemy_hit_list) > 0:\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet) \n sound1 = pygame.mixer.Sound(os.path.join(\"music\",\"shot.wav\"))\n sound1.play()\n for trap in enemy_hit_list:\n current_level.enemy_list.remove(trap)\n all_sprite_list.remove(trap)\n # Count for the number of enemy which being killed \n player.hit_num += len(enemy_hit_list)\n \n # Hit platforms, remove the bullets\n if len(block_hit_list) > 0 :\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet) \n \n # Hit the boss, damage it \n if len(boss_hit_list) > 0:\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet)\n for boss in current_level.boss_list:\n if boss.hp >= 1:\n boss.hp -= self.dmg\n if boss.hp <1:\n boss.hp = 0\n \n if boss.hp == 0:\n current_level.boss_list.remove(boss)\n \n for hp1 in current_level.hp_bar_group:\n hp1_length = hp1.length\n hp1_length = 600 * boss.hp /200\n hp1.length = hp1_length", "title": "" }, { "docid": "8a8fdc70b3fa3bf700cb8c802f6f9ed2", "score": "0.6636125", "text": "def update(self):\n # Warm up the gun\n self.heat+=self.heat_change\n \n # Move the bullets\n self.rect.x+=self.change_x\n 
self.rect.y+=self.change_y\n \n # If the gun is warmed up, shoot the bullets\n if (self.heat>=60) and (self.firing == False):\n # Pull the trigger, BOOOMMMMMMMMMMMMMM!!\n self.firing=True\n \n # Calculate the velocity and angle of the bullets\n self.difference_y = aim.rect.y + 72 - self.rect.y \n self.difference_x = aim.rect.x + 100 - self.rect.x\n if self.difference_y>0 and self.difference_x<0:\n self.angle=math.pi+math.atan((self.difference_y/self.difference_x))\n elif self.difference_y<=0 and self.difference_x<0:\n self.angle=-1*math.pi+math.atan((self.difference_y/self.difference_x))\n elif self.difference_x==0 and self.difference_y<=0:\n self.angle=math.radians(-90)\n elif self.difference_x==0 and self.difference_y>0:\n self.angle=math.radians(90)\n else:\n self.angle=math.atan(self.difference_y/self.difference_x)\n\n self.change_x=float(self.speed*math.degrees(math.cos(self.angle)))\n self.change_y=float(self.speed*math.degrees(math.sin(self.angle))) \n \n # Set the position of the bullet to fit the gun's position\n elif self.firing == False:\n if gun_image.faceFront==True:\n self.rect.left=gun_image.rect.left\n if gun_image.faceFront==False:\n self.rect.right=gun_image.rect.right \n self.rect.y=gun_image.rect.y \n \n \n \n # Check the collision with other sprites \n for bullet in machine_gun_list:\n enemy_hit_list = pygame.sprite.spritecollide(bullet, current_level.enemy_list , False)\n block_hit_list = pygame.sprite.spritecollide(bullet, player.level.platform_list,False)\n boss_hit_list = pygame.sprite.spritecollide(bullet, current_level.boss_list , False)\n \n # If it hits enemy, remove the enemy\n if len(enemy_hit_list) > 0:\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet) \n sound1 = pygame.mixer.Sound(os.path.join(\"music\",\"shot.wav\"))\n sound1.play()\n for trap in enemy_hit_list:\n current_level.enemy_list.remove(trap)\n all_sprite_list.remove(trap)\n \n player.hit_num += len(enemy_hit_list)\n \n # If it hits platform, remove itself\n if len(block_hit_list) > 0 :\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet) \n \n # If it hits boss, remove boss's hp \n if len(boss_hit_list) > 0:\n machine_gun_list.remove(bullet)\n all_sprite_list.remove(bullet)\n for boss in current_level.boss_list:\n if boss.hp >= 4:\n boss.hp -= self.dmg\n if boss.hp < 4:\n boss.hp = 0\n \n if boss.hp == 0:\n current_level.boss_list.remove(boss)\n \n for hp1 in current_level.hp_bar_group:\n hp1_length = hp1.length\n hp1_length = 600 * boss.hp /200\n hp1.length = hp1_length", "title": "" }, { "docid": "0eb76d97571dd8fe8429455b15afd8ef", "score": "0.66359216", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "25c1f5f8ad78244463559bf429fa3aad", "score": "0.66174054", "text": "def bullet_firing(ai_settings, screen, ship, bullets):\n\n if len(bullets) < ai_settings.bullets_nr:\n new_bullet = ammo.Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "3767bc73ec13059c4278a5994ec94ceb", "score": "0.6599394", "text": "def _alienCollision(self,bolts,list):\n for x in bolts:\n for row in range(ALIEN_ROWS):\n for alien in range(ALIENS_IN_ROW):\n if list[row][alien]!=None and x!=None:\n if list[row][alien].collides(x):\n self._sound[2].play()\n list[row][alien] = None\n bolts.remove(x)\n self._speed = self._speed * 0.97", "title": "" }, { "docid": "9a3a934d770730e31573e1b0267116d8", 
"score": "0.65662557", "text": "def update_aliens(ai_settings,screen, ship,aliens,bullets, stats,score_board):\n check_fleet_edges(ai_settings, aliens)\n aliens.update()\n #look for alien_ship collision\n check_collision_aliens_ship(ai_settings, screen, ship, aliens, bullets,stats,score_board)\n check_aliens_bottom(ai_settings, screen, ship, aliens, bullets,stats)", "title": "" }, { "docid": "0c09152ff03bb2683a680489f8527a6b", "score": "0.6560284", "text": "def check_aliens_ship_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):\n ship_alien_collision = pygame.sprite.spritecollideany(ship, aliens)\n alien_on_the_bottom = alien_on_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)\n if ship_alien_collision or alien_on_the_bottom:\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)", "title": "" }, { "docid": "fe7068d05b9d3f2f8a3029f4ce9cd9fb", "score": "0.6558896", "text": "def _check_bullet_enemy_collision(self):\n # check for any bullet that have hit enemy\n # if so , get rid of the bullet and the enemy\n collisions = pygame.sprite.groupcollide(self.bullets, self.enemy, True, True)\n if not self.enemy:\n self.bullets.empty()\n self._create_fleet()\n self.setting.increase_speed()\n\n #increase level\n self.stats.level+=1\n self.score_board.prep_level()\n if collisions:\n for enemy in collisions.values():\n self.stats.score+=self.setting.enemy_point * len(enemy)\n self.score_board.prep_score()\n self.score_board.check_high_score()", "title": "" }, { "docid": "865f16188eadcfd35c1c786b6615dab4", "score": "0.65433615", "text": "def update_bullets(ai_Setting, screen,stats,sb, ship, aliens, bullets):\n bullets.update()\n #get rid of em\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n check_bullet_alien_collisions(ai_Setting,screen,stats,sb,ship,aliens,bullets)", "title": "" }, { "docid": "17858b0df4b540e98e2bc697da9fc945", "score": "0.6535927", "text": "def checkCollision(self):\n for bullet in self.bullets.copy():\n for _, collid in self.obstacles:\n if collid.rect_collide(bullet.getBbox())[0]:\n self.bullets.remove(bullet)\n break\n\n for enemy in self.enemies:\n collid_count = 0\n for _, collid in self.obstacles:\n if collid.rect_collide(enemy.getBbox())[0] or \\\n not (100 <= enemy.pos_x <= 800 and 100 <= enemy.pos_y <= 600):\n collid_count += 1\n enemy.change_angle()\n\n if collid_count > 0:\n enemy.setCollision(True)\n\n else:\n enemy.setCollision(False)\n\n if self.player.colliderect(enemy.getRectObject()):\n self.player.resetPreviousPos()\n enemy.resetPreviousPos()\n\n for bullet in self.bullets.copy():\n for tank in self.enemies.copy():\n if bullet.tankObject() == self.player and tank.colliderect(bullet.getRect()):\n self.enemies.remove(tank)\n self.bullets.remove(bullet)\n self.score += 1\n break\n\n if bullet.tankObject() != self.player and self.player.colliderect(bullet.getRect()):\n self.bullets.remove(bullet)\n self.lives -= 1", "title": "" }, { "docid": "327792fe212b5ec3752a206bef44ff30", "score": "0.65229464", "text": "def update_aliens(ai_settings, stats, screen, sb, ship, aliens, bullets):\n check_fleet_edges(ai_settings, aliens)\n aliens.update()\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)\n check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets)", "title": "" }, { "docid": "0d49dfae047d0a718f5fc74e207d9341", "score": "0.65218055", "text": "def ship_hit_bullet(settings, stats, screen, sb, ship, 
alien_groups, bullets,\n blockade):\n # Ship lost or game over.\n stats.ships_left -= 1\n if stats.ships_left:\n ship_lost(settings, screen, sb, ship, alien_groups, bullets, blockade)\n else:\n game_over(stats)", "title": "" }, { "docid": "bb04f28f97a293b5cc999404b88b1026", "score": "0.65211236", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\r\n if len(bullets) < ai_settings.bullets_allowed:\r\n # Create new bullet\r\n new_bullet = Bullet(ai_settings, screen, ship)\r\n # Ship fire bullets\r\n ship.fire_weapon()\r\n bullets.add(new_bullet)", "title": "" }, { "docid": "582ef292b45ac94b7c982af85e757ddd", "score": "0.6514705", "text": "def aliens_shoot(self):\n for alien in self.alien_sprite_list:\n if not self.game_over:\n bullet = alien.shoot()\n if bullet:\n self.alien_bullet_sprite_list.add(bullet)\n self.all_sprites_list.add(bullet)", "title": "" }, { "docid": "fa8a69ecdbfa7be5a4a1f36444ad2c11", "score": "0.65013677", "text": "def update_aliens(settings, aliens, ship, bullets, stats, scoreboard):\n aliens.update()\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(settings, aliens, ship, bullets, stats, scoreboard)\n\n check_aliens_bottom(settings, aliens, ship, bullets, stats, scoreboard)", "title": "" }, { "docid": "3a9283905bbb12aa1c1732219e4c1348", "score": "0.6485033", "text": "def update_aliens(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo):\r\n check_fleet_edges(ai_settings, aliens)\r\n aliens.update()\r\n # Update the alien collisions when ship is hit\r\n if pygame.sprite.spritecollideany(ship, aliens):\r\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo)\r\n check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, alien_bullets, bullets, ufo)\r\n # Allows aliens to fire bullets\r\n if aliens.sprites():\r\n fire_random_alien_bullet(ai_settings, screen, aliens, alien_bullets)", "title": "" }, { "docid": "60881b7f45a7aa27ca9e1a553da45827", "score": "0.64828396", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\n\t# create a new bullet and add it to the bullet's group\n\tif len(bullets) < ai_settings.bullets_allowed:\n\t\tnew_bullet =Bullet(ai_settings, screen, ship)\n\t\tbullets.add(new_bullet)", "title": "" }, { "docid": "8e802f8f224d8b6dcd23b9d2e1ae35d5", "score": "0.64631164", "text": "def check_laser_ship_collisions(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo):\n collide = pygame.sprite.spritecollideany(ship, lasers) \n if collide: \n ship_hit(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo)", "title": "" }, { "docid": "f8cd071b7a249f7c7b6beae151984587", "score": "0.6461937", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\n # Create a new bullet and add it to the bullets group\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n ship.ship_bullet_sound()\n bullets.add(new_bullet)", "title": "" }, { "docid": "b2c6264cf3dff674ee14387711a795b4", "score": "0.6457201", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\n # Create a new bullet and add it to the bullets group.\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "5d833d55945b0a4725014eca5da25b96", "score": "0.6452911", "text": "def _check_laser_alien_collisions(self):\n # Remove any lasers and aliens that have collided.\n collisions = pygame.sprite.groupcollide(\n self.lasers, self.aliens,\n 
self.settings.laser_collide_remove, True\n )\n\n if collisions:\n for aliens in collisions.values():\n self.stats.score += self.settings.alien_points * len(aliens)\n self.sb.prep_score()\n self.sb.check_high_score()\n\n self._start_new_level()", "title": "" }, { "docid": "28d99b8c2fe6da3b8cf08348512c7805", "score": "0.6415212", "text": "def fire_bullet(ai_settings, screen, hero, bullets):\n #Create a new bullet and add it to the bullets group.\n if len(bullets)< ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, hero)\n bullets.add(new_bullet)", "title": "" }, { "docid": "6240e60a5f2bfd81188bf69edbcba932", "score": "0.6405638", "text": "def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):\n bullets.update()\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)", "title": "" }, { "docid": "1f9d0d196f19e9cf8c83577bc1d6b074", "score": "0.64021236", "text": "def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):\n bullets.update()\n\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n\n check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)", "title": "" }, { "docid": "006d4b1aec90f7572ae69fe37fa3f341", "score": "0.64017767", "text": "def check_bullets_colisions(ai_settings, screen, stat, sb, ship, aliens, bullets):\n colisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\n if colisions:\n for aliens in colisions.values():\n stat.score += ai_settings.alien_points * len(aliens)\n sb.prep_score()\n check_high_score(stat, sb)\n\n if len(aliens) == 0:\n # Destroy existing bullets and create a new fleet\n bullets.empty()\n ai_settings.increase_speed()\n create_fleet(ai_settings, screen, ship, aliens)\n\n # Increase level\n stat.level += 1\n sb.prep_level()", "title": "" }, { "docid": "940ac4e2f92bcaecdc267dc40a7cb918", "score": "0.6379672", "text": "def update_bullets(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo):\n # Update bullet positions\n bullets.update()\n\n # Get rid of bullets that have disappeared\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n\n check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo)", "title": "" }, { "docid": "ca47420a084a568ac2663bcc352cab31", "score": "0.6367871", "text": "def update_aliens(ai_settings, screen, stats, sb, ship, aliens, super_aliens,bullets):\n check_fleet_edges(ai_settings, aliens, super_aliens)\n aliens.update()\n super_aliens.update()\n # Look for alien-ship collisions or aliens hitting the bottom of the screen.\n check_aliens_ship_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)", "title": "" }, { "docid": "5d61147e52cb43a08708ed0fa08a42c5", "score": "0.6359403", "text": "def fire_random_alien_bullet(ai_settings, screen, aliens, alien_bullets):\r\n # Alien fire randomly\r\n firing_alien = random.choice(aliens.sprites())\r\n if len(alien_bullets) < ai_settings.alien_bullet_allowed and (ai_settings.alien_bullet_stamp is None or (abs(pygame.time.get_ticks() - ai_settings.alien_bullet_stamp) > ai_settings.alien_bullet_time)):\r\n new_alien_bullet = AlienBullet(ai_settings, screen, firing_alien)\r\n # Fire from alien\r\n firing_alien.shoot_fire()\r\n # Create new bullet\r\n alien_bullets.add(new_alien_bullet)", "title": "" }, { "docid": "8af6f1e81b98455a689fdbd0aafc602e", 
"score": "0.6358192", "text": "def update_aliens(ai_settings, stats, screen, sb, ship, bullets, aliens):\n # check for hitting the edge and steer the fleet\n check_fleet_edges(ai_settings, aliens)\n aliens.update()\n # look for alien-ship collisions\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(ai_settings, stats, screen, sb, ship, bullets, aliens)\n # look for aliens hitting the bottom\n check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets)", "title": "" }, { "docid": "48bbd77efb79e74ba033e629adf29faa", "score": "0.63337797", "text": "def update_aliens(ai_settings,aliens,ship, game_stats, bullets, screen, sb):\r\n \"\"\"\r\nCheck if the fleet is at an edge,\r\nand then update the postions of all aliens in the fleet.\r\n\"\"\"\r\n check_fleet_edges(ai_settings,aliens)\r\n aliens.update()\r\n\r\n #Look for alien sheep collisions \r\n '''\r\n The method spritecollideany() takes two arguments: a sprite and a\r\ngroup. The method looks for any member of the group that’s collided with\r\nthe sprite and stops looping through the group as soon as it finds one member\r\nthat has collided with the sprite. Here, it loops through the group aliens\r\nand returns the first alien it finds that has collided with ship. '''\r\n if pygame.sprite.spritecollideany(ship,aliens):\r\n ship_hit(ai_settings, game_stats, bullets, aliens,ship, screen, sb)\r\n #Look for aliens hitting the bottom of the screen \r\n check_aliens_bottom(ai_settings, game_stats, screen, ship, aliens, bullets, sb)", "title": "" }, { "docid": "260e63c568b5404096a3fa931f9f2e52", "score": "0.6329429", "text": "def collision(self):\n for bolt in self._bolts:\n for row in range(len(self._aliens)):\n for alien in range(ALIENS_IN_ROW):\n if self._aliens[row][alien] != None:\n if self._aliens[row][alien].collides(bolt) and bolt.isPlayerBolt():\n self._aliens[row][alien] = None\n self._blast1.play()\n self._bolts.remove(bolt)", "title": "" }, { "docid": "ebd5cdfdc7c96abe815e5aa97c4a6886", "score": "0.6329112", "text": "def on_collide(self, **kwargs):\n pass", "title": "" }, { "docid": "b6f9d0d1c63b9332b10ecdd61f444393", "score": "0.63249004", "text": "def update_aliens(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo):\n check_fleet_edges(ai_settings, aliens)\n aliens.update()\n # Look for alien-ship collisions\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo)\n check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, lasers, bullets, ufo)\n if aliens.sprites():\n fire_laser(ai_settings, screen, aliens, lasers)", "title": "" }, { "docid": "8104344db2fadfa51ed7c3ee03d16e57", "score": "0.6298985", "text": "def item_hit_handler(bullet_sprite, item_sprite, _arbiter, _space, _data):\n bullet_sprite.remove_from_sprite_lists()\n item_sprite.remove_from_sprite_lists()", "title": "" }, { "docid": "05819e6b2b0d003fd436e1b359237a03", "score": "0.6285173", "text": "def update(self, delta_time):\n\n self.frame_count += 1\n\n # Loop through each enemy that we have\n for enemy in self.enemy_list:\n\n # First, calculate the angle to the player. 
We could do this\n # only when the bullet fires, but in this case we will rotate\n # the enemy to face the player each frame, so we'll do this\n # each frame.\n\n # Position the start at the enemy's current location\n start_x = enemy.center_x\n start_y = enemy.center_y\n\n # Get the destination location for the bullet\n dest_x = self.player_sprite.center_x\n dest_y = self.player_sprite.center_y\n\n # Do math to calculate how to get the bullet to the destination.\n # Calculation the angle in radians between the start points\n # and end points. This is the angle the bullet will travel.\n x_diff = dest_x - start_x\n y_diff = dest_y - start_y\n angle = math.atan2(y_diff, x_diff)\n\n # Set the enemy to face the player.\n enemy.angle = math.degrees(angle)-90\n\n # Shoot every 'fr' frames change of shooting each frame\n if self.frame_count % 10 == 0:\n bullet = arcade.Sprite(\"bullet.jpg\",0.2)\n bullet.center_x = start_x\n bullet.center_y = start_y\n\n # Angle the bullet sprite\n bullet.angle = math.degrees(angle)\n\n # Taking into account the angle, calculate our change_x\n # and change_y. Velocity is how fast the bullet travels.\n bullet.change_x = math.cos(angle) * BULLET_SPEED\n bullet.change_y = math.sin(angle) * BULLET_SPEED\n\n self.bullet_list.append(bullet)\n\n # Get rid of the bullet when it flies off-screen\n for bullet in self.bullet_list:\n if bullet.top < 0:\n bullet.kill()\n\n self.bullet_list.update()\n\n bullet_hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.bullet_list)\n\n # Loop through each colliding sprite, and update score\n for bullet in bullet_hit_list:\n bullet.kill()\n self.score -= 1\n if self.score==0:\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n print('PS-YOU HAD NO CHANCE OF SURVIVING')\n os._exit(1)", "title": "" }, { "docid": "265cdd2ab7542c7b431d6a2c05234caa", "score": "0.6281644", "text": "def wall_hit_handler(bullet_sprite, _wall_sprite, _arbiter, _space, _data):\n bullet_sprite.remove_from_sprite_lists()", "title": "" }, { "docid": "27024f16e128dfb5c7a6a46a96eeba0f", "score": "0.6279736", "text": "def update_aliens(ai_settings, stats, screen, ship, aliens, bullets):\n\n fleet_edge_checker(ai_settings,aliens)\n aliens.update()\n\n check_bottom_aliens(ai_settings,stats,screen,ship,aliens,bullets)\n\n\n # spritecollideany: The method looks for any member of the group that’s collided with\n # the sprite and stops looping through the group as soon as it finds one mem-\n # ber that has collided with the sprite.\n\n if pygame.sprite.spritecollideany(ship,aliens):\n ship_hit(ai_settings, stats, screen, ship, aliens, bullets)", "title": "" }, { "docid": "c0a9357094742cc161ac90c7731d7a57", "score": "0.627897", "text": "def check_alien_positions(self):\n for alien in self.alien_sprite_list:\n if alien.rect.colliderect(self.player.rect):\n # alien hit player\n self.s.write(str.encode(\"3\"))\n self.player.lives -= 1\n if self.player.lives < 1:\n self.end_game()\n else:\n alien.kill() # remove so won't collide with player again\n\n if alien.rect.bottom > self.height:\n # alien went off screen, end game\n self.s.write(str.encode(\"3\"))\n self.end_game()\n\n if len(self.alien_sprite_list) == 0:\n if not self.levelling_up:\n self.start_level_up()\n else:\n self.levelling_up += 1\n\n for ufo in self.ufo_sprites_list:\n if ufo.rect.left > self.width:\n ufo.kill()", "title": "" }, { "docid": "9516974fe8b5cab000ad8ec0d260a021", "score": "0.6276996", "text": "def collision(bugs_hit, player):\n\n for bug in bugs_hit:\n\n if not 
bug.touched:\n pygame.mixer.Sound.play(pygame.mixer.Sound(os.path.join(os.path.dirname(__file__), 'Images', 'damage.wav')))\n #pygame.mixer.Sound.play(pygame.mixer.Sound(\"damage.wav\")) # play sound effect for hitting a bug\n pygame.mixer.music.stop()\n bug.touched = True\n player.HP -= 1 # decrease HP of the player", "title": "" }, { "docid": "59c9e09e0decadfc17f13a46422e9003", "score": "0.6275205", "text": "def __init__(self, center, speed=2.0, bullet_range=screen_height/2, size=5, layer=100):\n \n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Set layer. Nice to be on the same layer as the one shooting\n self._layer = layer\n\n # My center as floats\n self.centerx = center[0]\n self.centery = center[1]\n \n # Time To Live (Bullet will die when travveled this far)\n self.bullet_range = bullet_range\n \n # How far have I traveled\n self.dist = 0.0\n \n # Speed of bullet \n self.speedx = math.cos(math.radians(player['dir'])) * speed\n self.speedy = math.sin(math.radians(player['dir'])) * speed\n \n # This is the speed the bullet inherits from the player\n self.start_speedx = player['dx'] \n self.start_speedy = player['dy'] \n \n # Create an empty image (surface). Needed by PyGame.\n # This could also be an image loaded from the disk.\n self.image = pygame.Surface([size, size])\n \n # Unlikely color is transparent background\n self.image.fill([1,2,3])\n self.image.set_colorkey([1,2,3])\n \n # Draw space ship\n pygame.draw.ellipse(self.image, RED, [0,0,size,size]) \n \n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()\n \n # Set initial position on screen\n self.rect.center = (self.centerx, self.centery)", "title": "" }, { "docid": "cce3c28d1a00a6c828f4bbce5f077749", "score": "0.6270363", "text": "def update_bullets(ai_settings, screen, stats, sb, ship, aliens, super_aliens, bullets):\n # Update bullet positions\n bullets.update()\n # Get rid of bullets that have disappeared.\n remove_old_bullets(bullets)\n # Check for any bullets that have hit aliens and if so, get rid of the bullet and the alien\n check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, super_aliens, bullets)", "title": "" }, { "docid": "9d60809118fc0a2b29178a7ed088c8d5", "score": "0.62681526", "text": "def check_alien_ship_collision(settings, stats, screen, sb, ship,\n alien_groups, bullets, blockade):\n for alien_column in alien_groups:\n if pygame.sprite.spritecollideany(ship, alien_column):\n ship_hit_alien(settings, stats, screen, sb, ship, alien_groups,\n bullets, blockade)", "title": "" }, { "docid": "cd6b449bd69e3231a447adc5d4599ebe", "score": "0.6267896", "text": "def _check_bullet_target_collisions(self):\n collisions = pygame.sprite.spritecollide(\n self.target, self.bullets, True)", "title": "" }, { "docid": "5fde6da995c597497643824877206365", "score": "0.625672", "text": "def is_collision(enemy_x,enemy_y,bullet_x,bullet_y):\n distance=math.sqrt((math.pow\n (enemy_x-bullet_x,2))+(math.pow(enemy_y-bullet_y,2)))\n return distance < 27", "title": "" }, { "docid": "5308b9106871ef2e6e7b412a6cd360eb", "score": "0.6253152", "text": "def kill_player_on_collision(player, enemy_list):\n for enemy in enemy_list:\n enemy_check = arcade.check_for_collision_with_list(\n player, enemy_list\n )\n for enemy in enemy_check:\n if enemy.health > 0:\n player.health = 0\n enemy.set_texture(3)", "title": "" }, { "docid": 
"2c4d631ac36b146d285a405fcd741af4", "score": "0.6200272", "text": "def fire_bullet(settings, screen, ship, bullets):\n if len(bullets) < settings.bullets_allowed:\n play_sound('shot.wav')\n new_bullet = Bullet(settings, screen, ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "d7f6fa32ba4080e810a578ec088009ec", "score": "0.6184798", "text": "def main():\n #hit boxes for spaceships\n red_rec=pygame.Rect(700,300,SPACESHIP_WIDTH,SPACESHIP_HEIGHT)#args are \"x,y,width,height\"\n yellow_rec=pygame.Rect(100,300,SPACESHIP_WIDTH,SPACESHIP_HEIGHT)#^\n \n #list of all bullets\n red_bullets=[]\n yellow_bullets=[]\n \n #initilize health stuff\n red_health=4\n yellow_health=4\n\n clock=pygame.time.Clock()#define clock object\n #flag and game loop\n running=True\n while running:\n #hard limit the fps\n clock.tick(FPS)#limit this while loop to 60 runs per second\n #loop through all of the events. So we can bind to them if needed\n for event in pygame.event.get():\n #bind to quit event\n if event.type==pygame.QUIT:\n running=False\n pygame.quit()\n #bind to keyDown event\n if event.type==pygame.KEYDOWN:\n # left ctrl\n if event.key==pygame.K_LCTRL and len(yellow_bullets)<MAX_RENDERED_BULLETS:\n bullet=pygame.Rect(yellow_rec.x+yellow_rec.width,yellow_rec.y+int(yellow_rec.height/2)-2,10,5)\n yellow_bullets.append(bullet)\n BULLET_FIRE_SOUND.play()\n # right ctrl\n if event.key==pygame.K_RCTRL and len(red_bullets)<MAX_RENDERED_BULLETS:\n bullet=pygame.Rect(red_rec.x,red_rec.y+int(red_rec.height/2)-2,10,5)\n red_bullets.append(bullet)\n BULLET_FIRE_SOUND.play()\n #bind to red hit event\n if event.type==RED_HIT:\n red_health-=1\n BULLET_HIT_SOUND.play()\n #bind to yellow hit event\n if event.type==YELLOW_HIT:\n yellow_health-=1\n BULLET_HIT_SOUND.play()\n #if something isn't event based it goes here\n winner_text=\"\"#this also works as a flag\n if red_health<=0: #decide winner\n winner_text=\"YELLOW WINS\"\n if yellow_health<=0: #decide winner\n winner_text=\"RED WINS\"\n if winner_text!=\"\":\n VICTORY_SCREAM.play()\n draw_winner(winner_text)\n break\n keys_pressed=pygame.key.get_pressed()#returns all keys pressed down during this frame\n #move yellow and redif needed\n yellow_handle_movement(keys_pressed,yellow_rec)\n red_handle_movement(keys_pressed,red_rec)\n #bullets move\n handle_bullets(yellow_bullets,red_bullets,yellow_rec,red_rec)\n draw_window(red_rec,yellow_rec,red_bullets,yellow_bullets,red_health,yellow_health)\n #when someone ends restart the game\n main()", "title": "" }, { "docid": "d4eaba908260bcaac74c8118865bb484", "score": "0.61807984", "text": "def update_aliens(ai_settings, stat, screen, sb, ship, aliens, bullets):\n check_fleet_edges(ai_settings, aliens)\n aliens.update()\n\n # Check colision alien-ship\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(ai_settings, stat, screen, sb, ship, aliens, bullets)\n\n # Looks for aliens at the bottom on the screen\n check_aliens_bottom(ai_settings, stat, screen, sb, ship, aliens, bullets)", "title": "" }, { "docid": "bbfe5de565fd3273fb143c7d459effca", "score": "0.61689997", "text": "def ship_hit_alien(settings, stats, screen, sb, ship, alien_groups, bullets,\n blockade):\n # Ship lost or game over.\n stats.ships_left -= 1\n if stats.ships_left:\n ship_lost_new_fleet(settings, screen, sb, ship, alien_groups, bullets,\n blockade)\n else:\n game_over(stats)", "title": "" }, { "docid": "47d3a190b847ecae26b14616d746b2d7", "score": "0.6168902", "text": "def handle_bullets(yellow_bullets,red_bullets,yellow,red):\n # yellow 
bullets\n for bullet in yellow_bullets:\n bullet.x+=BULLET_VEL\n #check for collisions\n if red.colliderect(bullet):\n #post event so we can interact with the list in the main function\n pygame.event.post(pygame.event.Event(RED_HIT))\n #remove the bullet\n yellow_bullets.remove(bullet)\n elif bullet.x+bullet.width>WIDTH:\n yellow_bullets.remove(bullet)\n # red bullets\n for bullet in red_bullets:\n bullet.x-=BULLET_VEL\n #check for collisions\n if yellow.colliderect(bullet):\n #post event so we can interact with the list in the main function\n pygame.event.post(pygame.event.Event(YELLOW_HIT))\n #remove the bullet\n red_bullets.remove(bullet)\n elif bullet.x<0:\n red_bullets.remove(bullet)", "title": "" }, { "docid": "01867ebf73e6d1460b7734ea6f68ecce", "score": "0.6161078", "text": "def collision_bullet_box(arbiter, _space, data):\r\n _bullet = arbiter.shapes[0]\r\n _box = arbiter.shapes[1]\r\n # Create a explosion\r\n explosion(_bullet.parent)\r\n if _box.parent.boxmodel.destructable:\r\n # If the bos is destructable reduce HP\r\n _box.parent.hp -= 1\r\n if _box.parent.hp <= 0:\r\n # If HP reaches 0, remove box\r\n space.remove(_box, _box.body)\r\n game_objects_list.remove(_box.parent)\r\n # Award point\r\n point_list[_bullet.parent.owner] += 1\r\n print_points()\r\n if _bullet.parent in game_objects_list:\r\n bullet_list.remove(_bullet.parent)\r\n game_objects_list.remove(_bullet.parent)\r\n space.remove(_bullet, _bullet.body)\r\n return False", "title": "" }, { "docid": "2bfc94141b1853c145ea54a65b29c727", "score": "0.615998", "text": "def fire_bullet(game_settings, screen, ship, bullets):\n if len(bullets) < game_settings.bullets_allowed:\n new_bullet = Bullet(game_settings, screen, ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "b43bf4513db6e3c60454a8d92402b735", "score": "0.61414135", "text": "def update(self, events, game):\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Fire a bullet if the user clicks the mouse button\n self.shoot(game)\n\n for bullet in self.bullet_list:\n # check if the lasers(bullet) hit anything in the block list(enemies)\n bullet_hit_list = pygame.sprite.spritecollide(bullet, game.enemy_list, True, collided=pygame.sprite.collide_mask)\n\n for enemy in bullet_hit_list:\n bullet.kill()\n game.score += enemy.points\n\n self.rect.center = pygame.mouse.get_pos() # center player to mouse\n self.rect.y = SCREEN_HEIGHT-130 # lock player to bottom of screen", "title": "" }, { "docid": "99451c39d828911a07a5e86c1a4662e0", "score": "0.61292213", "text": "def collision_bullet_bullet(arbiter, _space, data):\r\n _bullet = arbiter.shapes[0]\r\n # Create explosion\r\n explosion(_bullet.parent)\r\n if _bullet.parent in game_objects_list:\r\n bullet_list.remove(_bullet.parent)\r\n game_objects_list.remove(_bullet.parent)\r\n space.remove(_bullet, _bullet.body)\r\n return False", "title": "" }, { "docid": "d1b3512f999f0408244749d8bedebaf2", "score": "0.6128807", "text": "def check_collisions(self):\n\n # NOTE: This assumes you named your targets list \"targets\"\n for bullet in self.bullets:\n for asteroid in self.asteroids:\n # Make sure they are all alive before checking for a collision\n if bullet.alive and asteroid.alive:\n too_close = bullet.radius + asteroid.radius\n\n if (abs(bullet.center.x - asteroid.center.x) < too_close and\n abs(bullet.center.y - asteroid.center.y) < too_close):\n # its a hit!\n bullet.alive = False\n self.asteroid_split(asteroid)\n\n self.cleanup_zombies()", "title": "" }, { "docid": 
"ed5f061a2a8eebabd09a64d7acb6a3e1", "score": "0.6098354", "text": "def check_bullet_blockade_collision(bullets, blockade):\n pygame.sprite.groupcollide(bullets, blockade, True, True)", "title": "" }, { "docid": "ac6fdf76f35359535b06917066db9137", "score": "0.6060742", "text": "def _update_bullets(self):\r\n # update the bullets\r\n self.bullets.update()\r\n\r\n # Get rid of old bullets that have disappeared\r\n for bullet in self.bullets.copy():\r\n if bullet.rect.bottom <= 0:\r\n self.bullets.remove(bullet)\r\n\r\n # Check for any bullets that have hit aliens.\r\n # If so , get rid of the alien and the bullet.\r\n self._check_bullet_allien_collisions()", "title": "" }, { "docid": "aa5d60ef8539e9db08609bfcbb56031f", "score": "0.60524255", "text": "def checkForCollisions(self):\n hitList = pygame.sprite.spritecollide(self, self.groups()[0], False)\n for other in [x for x in hitList if x is not self]:\n otherType = type(other)\n if otherType is Bullet:\n other.increaseScore(config.kill_score)\n other.kill()\n self.killForReal()\n elif otherType is Terrain or otherType is Rocket:\n self.killForReal()\n elif otherType is FuelPlatform:\n self.acceleration = pygame.Vector2(0,0)\n self.velocity = pygame.Vector2(0,0)\n self.fuel += 1\n self.fuel = min(self.fuel, config.starting_fuel)", "title": "" }, { "docid": "16470626c87f1cf2d76d8112a14c8bff", "score": "0.60515195", "text": "def _update_aliens(self):\r\n\t\tself._check_fleet_edges()\r\n\t\tself.aliens.update()\r\n\r\n\t\t#Look for alien ship collision\r\n\t\tif pygame.sprite.spritecollideany(self.ship, self.aliens):\r\n\t\t\tself._ship_hit()\r\n\r\n\t\t#Look for aliens hitting bottom of the screen\r\n\t\tself._check_aliens_bottom()", "title": "" }, { "docid": "f0c882bfa01bedfaff308ea4cb0a82a7", "score": "0.6048236", "text": "def update_aliens(self):\n self.check_fleet_edges()\n self.aliens.update()\n\n # Now check for alien ship collisions.\n if pygame.sprite.spritecollideany(self.ship, self.aliens):\n self.lose_a_life()\n # Check if aliens hit the bottom.\n self.check_aliens_bottom()", "title": "" }, { "docid": "e55fd3b3062f7d6ea0d7c21e2b85dc2c", "score": "0.60408515", "text": "def fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed and self.stats.game_active:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)", "title": "" }, { "docid": "72ae8735c18b390f3fc53ce97d50e099", "score": "0.60367954", "text": "def update_bullets(ai_settings, screen, ship, bullets, target, stats, play_button):\n check_bullet_target_collisions(ai_settings, screen, target, bullets, stats, ship, play_button)\n\n bullets.update() # Change position of existing bullets\n\n for bullet in bullets.copy(): # Remove old bullets\n if bullet.rect.right >= screen.get_rect().right:\n bullets.remove(bullet)", "title": "" }, { "docid": "dda791b23baa9cb304643f5d647947d4", "score": "0.602092", "text": "def __init__(self, game, player, pos):\n self.image = game.imageLoader.item_img['Mbullet'] \n super().__init__(game, player, pos)\n \n self.speed = 2\n self.max_speed = 4\n self.damage = 3\n self.anim_speed = 100\n self.destroy_timer = 0\n self.hit_rect = pygame.Rect((0, 0), (int(5),int(5)))\n position = pygame.mouse.get_pos()\n Bangle = math.degrees(math.atan2(position[1]-(self.game.player.pos.y),position[0]-(self.game.player.pos.x)))\n A = self.game.player.pos.x + math.cos(math.radians(Bangle))*40\n B = self.game.player.pos.y + math.sin(math.radians(Bangle))*40\n 
self.bulletlst.append([math.atan2(position[1]-(self.game.player.pos.y),position[0]-(self.game.player.pos.x)),A,B])\n self.rect = self.image.get_rect()\n self.rect.center = self.pos\n self.hit_rect.center = self.rect.center", "title": "" }, { "docid": "4b789c6ae04450e8bccc95d8f844f925", "score": "0.600171", "text": "def iscollision(enemyX,enemyY,bulletX,bulletY):\n distance=math.sqrt((math.pow(enemyX-bulletX,2))+(math.pow(enemyY-bulletY,2)))\n if distance<27:\n return True\n else:\n return False", "title": "" }, { "docid": "115fb5230b7b575df05a4910faafce1c", "score": "0.5996788", "text": "def ballCollision(self, collObj):\n if collObj.__class__.__name__ == \"negCue\":\n return\n else:\n return GENballCollision(self, collObj)", "title": "" }, { "docid": "ab9274ebbccad9db3242994979ebc5d8", "score": "0.599673", "text": "def _fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)\n self.stats.bullets_fired += 1", "title": "" }, { "docid": "5319ff55ed1efbb54051d3f971e749eb", "score": "0.5984105", "text": "def handle_collision(self):\r\n for enemy in self._enemies:\r\n if enemy.hp > 0 and enemy.detect_collision(self._player):\r\n enemy.hp -= 1\r\n if enemy.is_enemy:\r\n self._player.hp -= 1\r\n self._health_bar.health = self._player.hp / self._player.max_hp\r\n if self._player.hp <= 0:\r\n self.game_over()\r\n else:\r\n self._score += enemy.score\r\n\r\n # Filter out dead enemies and projectiles\r\n self._enemies[:] = [enemy for enemy in self._enemies if enemy.hp > 0]", "title": "" }, { "docid": "e23ab68ae27cd8c1c815389cddedc99b", "score": "0.5979033", "text": "def _fire_bullet(self):\r\n\t\tif len(self.bullets) < self.settings.bullets_allowed:\r\n\t\t\tnew_bullet = Bullet(self)\r\n\t\t\tself.bullets.add(new_bullet)", "title": "" }, { "docid": "a6719ab2a102ea93b6fa8503368b92db", "score": "0.5974175", "text": "def _update_aliens(self):\r\n \r\n self._check_fleet_edges()\r\n self.aliens.update()\r\n\r\n # Look for alien-ship collisions .\r\n if pygame.sprite.spritecollideany(self.ship, self.aliens):\r\n self._ship_hit()\r\n \r\n #Look for aliens hitting the bottom of the screen\r\n self._check_aliens_bottom()", "title": "" }, { "docid": "40c20e49093839f2ba438be9b14259e6", "score": "0.59706134", "text": "def shoot(self):\r\n bullet = Bullet(self.rect.centerx,self.rect.bottom, enemy=True)\r\n all_sprites.add(bullet)\r\n mob_bullets.add(bullet)\r\n bullets.add(bullet)\r\n enemy_shoot_sound.play()", "title": "" }, { "docid": "6cc51601e8f6fd1b600057aa88236afc", "score": "0.59640795", "text": "def bullets_refresh(ai_settings,screen,ship,aliens,bullets):\n bullets.update()\n\n\n # deleting the invisible bullets\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet) #Remove function is inherited from SpriteS\n\n alien_bullet_collisions(ai_settings,screen,ship,aliens,bullets)", "title": "" }, { "docid": "0eae69658b28288f795883b7fae621dd", "score": "0.5950829", "text": "def _fire_bullet(self):\r\n if len(self.bullets) < self.settings.bullets_allowed:\r\n new_bullet = Bullet(self)\r\n self.bullets.add(new_bullet)\r\n se.bullet_sound.play()", "title": "" }, { "docid": "0f71f958c62fe95c6c7d90ca3e89f866", "score": "0.5937154", "text": "def fire(engine, inputs):\n if inputs['SPACE'] is True:\n if engine.objects[2].check_weapon_cooldown():\n pr = engine.objects[2].loadBullet()\n engine.objects.append(pr)\n engine.drawables.add(pr)\n pr.kill_function = engine.cleanup_projectile\n engine.collisions[pr] = 
[]\n for enemy in engine.enemy_list:\n engine.collisions[pr].append((enemy, enemy.get_killed))\n # Sound effect added from\n # https://www.zapsplat.com/music/science-fiction-weapon-gun-shoot-powerful-2/\n pew = pygame.mixer.Sound('assets/laser1.wav')\n pew.play()", "title": "" }, { "docid": "599ee9cbfcfb1048513fdd33a2175525", "score": "0.5936576", "text": "def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets, alien_bullets, ufo):\r\n # Update the alien when hit\r\n if ufo:\r\n for u in ufo.sprites():\r\n u.died()\r\n # Manges the when ship gets hit\r\n ship.death()\r\n ship.update()\r\n\r\n # Manges when ship animated if dead\r\n while ship.dead:\r\n screen.fill(ai_settings.bg_color)\r\n ship.blitme()\r\n pygame.display.flip()\r\n ship.update()\r\n\r\n # Subtract lives when ship hit\r\n if stats.ships_left > 0:\r\n # Decrement lives\r\n stats.ships_left -= 1\r\n\r\n # Remove aliens, alien bullets, and bullets off the screen\r\n aliens.empty()\r\n bullets.empty()\r\n alien_bullets.empty()\r\n\r\n # Reset aliens and speed\r\n ai_settings.reset_alien_speed()\r\n create_fleet(ai_settings, screen, ship, aliens)\r\n stats.next_speedup = len(aliens) - (len(aliens) // 5)\r\n stats.aliens_left = len(aliens.sprites())\r\n ship.center_ship()\r\n\r\n # Update the ships\r\n sb.prep_ships()\r\n else:\r\n # Manage the music\r\n ai_settings.stop_bgm()\r\n pygame.mixer.music.load('sounds/gameOver.wav')\r\n pygame.mixer.music.play()\r\n\r\n # Manage when functions the game ends\r\n stats.game_active = False\r\n pygame.mouse.set_visible(True)", "title": "" }, { "docid": "f602a64059bc2e7eeacd949af9225ecf", "score": "0.59319603", "text": "def fire(self, start_x, start_y, start_angle, x_vel, y_vel):\n if time.time() - self.last_fire_time > self.cooldown_time_in_sec:\n for spread in range(-2, 3):\n bullet = Bullet(start_x, start_y, start_angle + spread*3, x_vel, y_vel, self.screen_width, self.screen_height)\n self.bullets.add(bullet)\n self.last_fire_time = time.time()", "title": "" } ]
7e4a43f45c30c775c92adf178b8662af
Construct a Shannon-type partition of the information contained in `dist`.
[ { "docid": "fee24e7a88089622770130c49c7c61af", "score": "0.6277066", "text": "def __init__(self, dist):\n self.dist = dist\n self._partition()", "title": "" } ]
[ { "docid": "868849b8dc673a2701134d0f8961766d", "score": "0.5895211", "text": "def write_st_dist(self, dist):", "title": "" }, { "docid": "ec4a4790f9292e43a14d8bbd4fe30ac2", "score": "0.58232087", "text": "def init_dist(dist, ndim):\n if isinstance(dist, str):\n return ndim*(dist,)\n elif isinstance(dist, (list, tuple)):\n return tuple(dist)\n elif isinstance(dist, dict):\n return tuple([dist.get(i, 'n') for i in range(ndim)])\n else:\n DistError(\"Dist must be a string, tuple, list or dict\")", "title": "" }, { "docid": "7a6b5c6b024eef79befb2dd36adedfaa", "score": "0.56241417", "text": "def _partition(self):\n rvs = self.dist.get_rv_names()\n if not rvs:\n rvs = tuple(range(self.dist.outcome_length()))\n\n self._lattice = powerset_lattice(rvs)\n Hs = {}\n Is = {}\n atoms = {}\n new_atoms = {}\n\n # Entropies\n for node in self._lattice:\n Hs[node] = self._measure(self.dist, node) # pylint: disable=no-member\n\n # Subset-sum type thing, basically co-information calculations.\n for node in self._lattice:\n Is[node] = sum((-1)**(len(rv) + 1) * Hs[rv] for rv in self._lattice.descendants(node, include=True))\n\n # Mobius inversion of the above, resulting in the Shannon atoms.\n for node in self._lattice:\n kids = self._lattice.ascendants(node)\n atoms[node] = Is[node] - sum(atoms[child] for child in kids)\n\n # get the atom indices in proper format\n for atom, value in atoms.items():\n if not atom:\n continue\n\n a_rvs = tuple((_,) for _ in atom)\n a_crvs = tuple(sorted(set(rvs) - set(atom)))\n new_atoms[(a_rvs, a_crvs)] = value\n\n self.atoms = new_atoms", "title": "" }, { "docid": "e02d350dd8397bce8cc320e4fb192eed", "score": "0.55458903", "text": "def init_distdims(dist, ndim):\n reduced_dist = [d for d in dist if d != 'n']\n ndistdim = len(reduced_dist)\n if ndistdim > ndim:\n raise DistError(\"Too many distributed dimensions\")\n distdims = [i for i in range(ndim) if dist[i] != 'n']\n return tuple(distdims)", "title": "" }, { "docid": "2dec4717b7369ccea52ba77898c97c9a", "score": "0.54075754", "text": "def dist2param(cls, dist, dist_info=None):\n assert isinstance(dist, Distribution)\n assert dist_info is None or isinstance(dist_info, dict)\n dist_class = type(dist)\n\n if dist_class not in cls.dict_dist2param.keys():\n raise NotImplementedError(\"Conversion from distribution instance to parameters for distribution class '{}' \"\n \"not yet implemented\".format(dist_class))\n return cls.dict_dist2param[dist_class](dist, dist_info)", "title": "" }, { "docid": "4b1cd439ed5a1cb902f00ff2ff085c9f", "score": "0.53874254", "text": "def write_pr_dist(self, dist):", "title": "" }, { "docid": "c1981eac1e58c8c1f90a4d4afd6bc562", "score": "0.5383139", "text": "def __init__(self, dist, rvs=None, measures={'H': entropy}, cover=True, maxiter=None): # noqa: B006\n self.dist = dist\n self.rvs = sum(dist.rvs, []) if rvs is None else rvs\n self.measures = measures\n self.cover = cover\n self._partition(maxiter=maxiter)\n self._measure_of_interest = self.atoms", "title": "" }, { "docid": "24bd9dc74a928421657e38d322d0bffc", "score": "0.52913", "text": "def apply_quickshiftclustering(segments, feature_space, bandwidth_density=0.2, bandwidth_nn=1.0, min_cluster_size=100, num_clusters=None, directory_output=None, segments_name=\"Segments\", return_labels=False):\n X, labels = prepare_data(segments, feature_space)\n if X is None:\n return None\n \n print(\"{} - Calculating density...\".format(datetime.datetime.now()))\n nbrs = NearestNeighbors(radius=bandwidth_density).fit(X)\n density = []\n for el in X:\n 
density.append(get_point_density(el, X, nbrs))\n density = np.array(density)\n segments[\"logdensity\"] = np.log10(density)\n \n print(\"{} - Calculating cluster seeds...\".format(datetime.datetime.now()))\n nbrs = NearestNeighbors(radius=bandwidth_nn).fit(X)\n parent = []\n for i, point, point_density in zip(np.arange(X.shape[0]), X, density):\n dist, ind = nbrs.radius_neighbors([point]) # find neighbors within bandwith_nn of point\n ind = ind[np.argsort(dist)] # sorting neighbors according to distance\n ind = ind[0]\n ind = ind[density[ind] > point_density] # keep only neighbors with a higher density\n if ind.size != 0:\n parent.append(ind[0]) # point gets assigned to cluster of nearest neighbor with higher density\n else:\n parent.append(i) # point becomes cluster seed if no neighbors with higher density\n parent = np.array(parent)\n \n print(\"{} - Flattening forest of parent points...\".format(datetime.datetime.now()))\n old = np.zeros_like(parent)\n while (old != parent).any():\n old = parent\n parent = parent[parent]\n \n print(\"{} cluster seeds found.\".format(np.unique(parent).size))\n segments[\"cluster\"] = np.unique(parent, return_inverse=True)[1] # relabel clusters from 0:n\n \n if min_cluster_size:\n print(\"{} - Removing outlier clusters...\".format(datetime.datetime.now()))\n unique_clusters, unique_cluster_counts = np.unique(segments[\"cluster\"], return_counts=True)\n freq_all = np.array([unique_cluster_counts[i] for i in segments[\"cluster\"]])\n segments.loc[freq_all < min_cluster_size, [\"cluster\"]] = -1\n segments[\"cluster\"] = np.unique(segments[\"cluster\"], return_inverse=True)[1] # relabel clusters from 0:n \n num_realclusters = np.unique(segments[\"cluster\"]).size - 1\n num_outlierclusters = unique_clusters.size - num_realclusters\n print(\"{} real cluster seeds and {} outlier cluster seeds found.\".format(num_realclusters, num_outlierclusters))\n elif num_clusters: # TODO: verify\n print(\"{} - Selecting only the {} largest clusters...\".format(datetime.datetime.now(), num_clusters))\n unique_clusters, unique_cluster_counts = np.unique(segments[\"cluster\"], return_counts=True)\n freq_all = np.array([unique_cluster_counts[i] for i in segments[\"cluster\"]])\n unique_cluster_counts_sorted = np.sort(unique_cluster_counts) # ascending\n t_size = unique_cluster_counts_sorted[-num_clusters]\n segments.loc[freq_all < t_size, [\"cluster\"]] = -1\n segments[\"cluster\"] = np.unique(segments[\"cluster\"], return_inverse=True)[1] # relabel clusters from 0:n \n num_realclusters = np.unique(segments[\"cluster\"]).size - 1\n num_outlierclusters = unique_clusters.size - num_realclusters\n print(\"{} real cluster seeds and {} outlier cluster seeds found.\".format(num_realclusters, num_outlierclusters))\n else:\n print(\"Error! At least one of [min_cluster_size, num_clusters], should be specified. 
Aborting.\")\n if return_labels:\n return None, None\n return None\n\n if directory_output:\n outfile_pickle = os.path.join(directory_output, \"{}_QuickClust_{}_bd{}_bn{}_ms{}.pkl\".format(segments_name, feature_space, bandwidth_density, bandwidth_nn, min_cluster_size))\n print(\"{} - Saving cluster output to {}...\".format(datetime.datetime.now(), outfile_pickle))\n with open(outfile_pickle, \"wb\") as handle:\n pickle.dump(segments[\"cluster\"], handle)\n\n colors = ['coral', 'lightblue', 'blue', 'lightgreen', 'green', 'grey', 'purple',\n 'yellow', 'red', 'pink', 'saddlebrown', 'cyan', 'violet', 'olive']\n outfile_fig = os.path.join(directory_output, \"QuickshiftClustering_{}_bd{}_bn{}_ms{}.png\".format(feature_space, bandwidth_density, bandwidth_nn, min_cluster_size))\n print(\"{} - Plotting clusters and saving to {}...\".format(datetime.datetime.now(), outfile_fig))\n labels_mean = [el for el in labels if el.endswith(\"mean\") and (\"VV\" in el or \"VH\" in el)]\n fig, ax = pf.makeScatterSubplotsObjects(segments, labels_mean, colorlabel=\"cluster\", cmap=mpl.colors.ListedColormap(colors[:num_realclusters+1]))\n fig.set_size_inches(12,7)\n fig.suptitle('Object intensity means colored by cluster', fontsize=16)\n plt.savefig(outfile_fig)\n plt.close(fig)\n outfile_fig = os.path.join(directory_output, \"QuickshiftClustering_{}_bd{}_bn{}_ms{}_density.png\".format(feature_space, bandwidth_density, bandwidth_nn, min_cluster_size)) \n print(\"{} - Plotting segments density to {}...\".format(datetime.datetime.now(), outfile_fig))\n fig, ax = pf.makeScatterSubplotsObjects(segments, labels_mean, colorlabel=\"logdensity\", cmap='jet')\n fig.set_size_inches(12,7)\n fig.suptitle('Object intensity means colored by density', fontsize=16)\n plt.savefig(outfile_fig) \n plt.close(fig)\n outfile_fig = os.path.join(directory_output, \"QuickshiftClustering_{}_bd{}_bn{}_ms{}_spatial.png\".format(feature_space, bandwidth_density, bandwidth_nn, min_cluster_size)) \n print(\"{} - Plotting clusters spatially to {}...\".format(datetime.datetime.now(), outfile_fig))\n fig, ax = plt.subplots()\n segments.plot(column=\"cluster\", cmap=mpl.colors.ListedColormap(colors[:len(np.unique(segments[\"cluster\"]))]), ax=ax)\n fig.set_size_inches(13,10)\n plt.savefig(outfile_fig)\n plt.close(fig)\n\n if return_labels:\n return segments[\"cluster\"], labels\n return segments[\"cluster\"]", "title": "" }, { "docid": "ac5274a85c5f151888f0f811236771f8", "score": "0.5255929", "text": "def _dist_extraction(dist):\n\n dist_keys = list(\n set(sum([\n list(i.keys()) for i in dist\n ],[]))\n )\n dist_keys.sort()\n\n dist_values = [\n [i.get(j,0) for j in dist_keys] for i in dist\n ]\n\n dist_counts = {}\n for i in dist_values:\n dist_counts[tuple(i)] = dist_values.count(i)\n\n dist_dict = {\n 'states': tuple(dist_keys),\n 'counts': dist_counts\n }\n\n return dist_dict", "title": "" }, { "docid": "316bf10a22093f241d882e2475014014", "score": "0.5166735", "text": "def _create_heatmaps_from_dist(self, weighted_dist: Tensor,\n cls_labels: Tensor) -> Tensor:\n heatmaps = weighted_dist.new_zeros(\n (weighted_dist.shape[0], self.num_classes))\n for c in range(self.num_classes):\n inds = (cls_labels == c) # N\n if inds.int().sum() == 0:\n continue\n heatmaps[:, c] = torch.exp(-weighted_dist[:, inds].min(dim=1)[0])\n zeros = heatmaps[:, c] < 1e-4\n heatmaps[zeros, c] = 0\n return heatmaps", "title": "" }, { "docid": "a6c2d613523e4dc5e1d55facd6d3af7e", "score": "0.5166277", "text": "def create_dist(self, parameters):\n pass", "title": "" }, { 
"docid": "6ea95846049c5616e936f4b4ad0e13d8", "score": "0.50992316", "text": "def part_dist_func(dist, dist_params):\n ## get numpy function\n try:\n dist_func = getattr(np.random, dist)\n except AttributeError:\n raise AttributeError('Distribution \"{}\" not supported\\n'.format(dist))\n\n # if function should return one constant value\n try:\n if dist_params['low'] == dist_params['high']:\n return lambda size: [dist_params['low']] * size\n except KeyError:\n pass\n\n # else making partial function\n try:\n part_dist_func = partial(dist_func, **dist_params)\n part_dist_func(size=1)\n except TypeError:\n params = ','.join([str(x) + ':' + str(y) for x,y \n in dist_params.items()])\n msg = 'Params \"{}\" do not work with distribution \"{}\"\\n'\n raise TypeError(msg.format(params, dist)) \n\n return part_dist_func", "title": "" }, { "docid": "80fd0fc1d0616a03af2a6bd17503dcd6", "score": "0.5079796", "text": "def register(self, distribution):\n dist_name, entry_point_map = self._backend.scan_dist(distribution)\n entry_point_map = clean_map(entry_point_map)\n self._backend.write_dist_map(dist_name, entry_point_map)\n return dist_name, entry_point_map", "title": "" }, { "docid": "ac317ccf2097f44bff49bc7c371e1c48", "score": "0.50346255", "text": "def _partition(self, maxiter=None):\n names = self.dist.get_rv_names()\n if names:\n rvs = [names[i] for i in self.rvs]\n else:\n rvs = self.rvs\n\n self._lattice = dependency_lattice(rvs, cover=self.cover)\n dists = {}\n\n # Entropies\n for node in reversed(list(self._lattice)):\n try:\n parent = list(self._lattice._lattice[node].keys())[0]\n x0 = dists[parent].pmf\n except IndexError:\n x0 = None\n dists[node] = maxent_dist(self.dist, node, x0=x0, sparse=False, maxiter=maxiter)\n\n self.dists = dists\n\n atoms = defaultdict(dict)\n for name, measure in self.measures.items():\n for node in self._lattice:\n atoms[node][name] = measure(dists[node])\n\n self.atoms = atoms", "title": "" }, { "docid": "fd14f04918913bbf414329ac4bb98bb3", "score": "0.5005129", "text": "def shard(xs):\n return jax.tree_util.tree_map(\n lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs\n )", "title": "" }, { "docid": "e2d0a84c0fc2f2894f0d06b30b6ab858", "score": "0.50007623", "text": "def appropriate_partition(distance):\n if distance < 5000:\n return 400\n if distance < 20000:\n return 1000\n if distance < 40000:\n return 2000\n if distance < 100000:\n return 5000\n return 10000", "title": "" }, { "docid": "793da1a0c42280afa792e367571a5eb5", "score": "0.49605814", "text": "def new_cuts(partition):\n return {\n (node, neighbor)\n for node, neighbor in neighbor_flips(partition)\n if partition.crosses_parts((node, neighbor))\n }", "title": "" }, { "docid": "def4670d69eedff207d1103cbb1b1772", "score": "0.49217683", "text": "def __init__(self, dist, verbose=False):\r\n self.dist = dist\r\n self.verbose = verbose", "title": "" }, { "docid": "0c6e8dc1f3d071283b2ffa85cc9f721a", "score": "0.49200755", "text": "def cluster_by_dist(self, dir, species, feature='we', n_mels=24, fs=0, minlen=0.2, f_1=0, f_2=0, denoise=False, single=False,\n distance='dtw', max_clusters=10):\n import Segment\n import SignalProc\n from scipy import signal\n\n # Get flow and fhigh for bandpass from annotations\n lowlist = []\n highlist = []\n srlist = []\n for root, dirs, files in os.walk(str(dir)):\n for file in files:\n if file.lower().endswith('.wav') and file + '.data' in files:\n wavrate = wavio.readFmt(os.path.join(root, file))[0]\n srlist.append(wavrate)\n # Read the annotation\n 
segments = Segment.SegmentList()\n segments.parseJSON(os.path.join(root, file + '.data'))\n # keep the right species\n if species:\n thisSpSegs = segments.getSpecies(species)\n else:\n thisSpSegs = np.arange(len(segments)).tolist()\n for segix in thisSpSegs:\n seg = segments[segix]\n lowlist.append(seg[2])\n highlist.append(seg[3])\n print(lowlist)\n print(highlist)\n print(srlist)\n if f_1 == 0:\n f_1 = np.min(lowlist)\n if f_2 == 0:\n f_2 = np.median(highlist)\n\n if fs == 0:\n arr = [4000, 8000, 16000]\n pos = np.abs(arr - np.median(highlist) * 2).argmin()\n fs = arr[pos]\n\n print('fs: ', fs)\n\n if fs > np.min(srlist):\n print(fs)\n fs = np.min(srlist)\n\n if fs < f_2 * 2 + 50:\n f_2 = fs // 2 - 50\n\n minlen_samples = minlen * fs\n\n print('Frequency band:', f_1, '-', f_2)\n print('fs: ', fs)\n\n # Find the lower and upper bounds (relevant to the frq range), when the range is given\n if feature == 'mfcc' and f_1 != 0 and f_2 != 0:\n mels = librosa.core.mel_frequencies(n_mels=n_mels, fmin=0.0, fmax=fs / 2, htk=False)\n ind_flow = (np.abs(mels - f_1)).argmin()\n ind_fhigh = (np.abs(mels - f_2)).argmin()\n\n elif feature == 'we' and f_1 != 0 and f_2 != 0:\n linear = np.linspace(0, fs / 2, 62)\n ind_flow = (np.abs(linear - f_1)).argmin()\n ind_fhigh = (np.abs(linear - f_2)).argmin()\n\n # Ready for clustering\n max_clusters = max_clusters\n n_clusters = 0\n clusters = []\n for root, dirs, files in os.walk(str(dir)):\n for file in files:\n if file.lower().endswith('.wav') and file + '.data' in files:\n # Read the annotation\n segments = Segment.SegmentList()\n segments.parseJSON(os.path.join(root, file + '.data'))\n # keep the right species\n if species:\n thisSpSegs = segments.getSpecies(species)\n else:\n thisSpSegs = np.arange(len(segments)).tolist()\n\n # Sort the segments longest to shortest, would be a good idea to avoid making first class with only\n # one member :)\n segments_len = [segments[segix][1] - segments[segix][0] for segix in thisSpSegs]\n inds = np.argsort(segments_len)[::-1]\n sortedsegments = [segments[i] for i in inds]\n\n # Now find syllables within each segment, median clipping\n for seg in sortedsegments:\n if seg[0] == -1:\n continue\n audiodata = self.loadFile(filename=os.path.join(root, file), duration=seg[1] - seg[0],\n offset=seg[0], fs=fs, denoise=denoise, f1=f_1, f2=f_2)\n start = int(seg[0] * fs)\n sp = SignalProc.SignalProc(256, 128)\n sp.data = audiodata\n sp.sampleRate = fs\n sgRaw = sp.spectrogram(256, 128)\n segment = Segment.Segmenter(sp=sp, fs=fs)\n syls = segment.medianClip(thr=3, medfiltersize=5, minaxislength=9, minSegment=50)\n if len(syls) == 0: # Try again with FIR\n syls = segment.segmentByFIR(threshold=0.05)\n syls = segment.checkSegmentOverlap(syls) # merge overlapped segments\n syls = [[int(s[0] * fs), int(s[1] * fs)] for s in syls]\n\n if len(syls) == 0: # Sanity check, when annotating syllables tight,\n syls = [[0, int((seg[1] - seg[0]) * fs)]] # median clipping doesn't detect it.\n if len(syls) > 1:\n # TODO: samples to seconds\n syls = segment.joinGaps(syls, minlen_samples) # Merge short segments\n if len(syls) == 1 and syls[0][1] - syls[0][0] < minlen_samples: # Sanity check\n syls = [[0, int((seg[1] - seg[0]) * fs)]]\n temp = [[np.round((x[0] + start) / fs, 2), np.round((x[1] + start) / fs, 2)] for x in syls]\n print('\\nCurrent:', seg, '--> syllables >', minlen, 'secs ', temp)\n\n # Calculate features of the syllables in the current segment.\n f = []\n for s in syls:\n data = audiodata[s[0]:s[1]]\n if feature == 'mfcc': # MFCC\n mfcc 
= librosa.feature.mfcc(y=data, sr=fs, n_mfcc=n_mels)\n if f_1 != 0 and f_2 != 0:\n mfcc = mfcc[ind_flow:ind_fhigh, :] # Limit the frequency to the fixed range [f_1, f_2]\n mfcc_delta = librosa.feature.delta(mfcc, mode='nearest')\n mfcc = np.concatenate((mfcc, mfcc_delta), axis=0)\n mfcc = scale(mfcc, axis=1)\n # librosa.display.specshow(mfcc, sr=fs, x_axis='time')\n # m = [i for sublist in mfcc for i in sublist]\n f.append(mfcc)\n\n elif feature == 'we': # Wavelet Energy\n ws = WaveletSegment.WaveletSegment(spInfo={})\n we = ws.computeWaveletEnergy(data=data, sampleRate=fs, nlevels=5, wpmode='new')\n we = we.mean(axis=1)\n if f_1 != 0 and f_2 != 0:\n we = we[ind_flow:ind_fhigh] # Limit the frequency to a fixed range f_1, f_2\n f.append(we)\n elif feature == 'chroma':\n chroma = librosa.feature.chroma_cqt(y=data, sr=fs)\n # chroma = librosa.feature.chroma_stft(y=data, sr=fs)\n chroma = scale(chroma, axis=1)\n f.append(chroma)\n\n matched = False\n if n_clusters == 0:\n print('**Case 1: First class')\n newclass = self.class_create(label=n_clusters, syl=syls, features=f, f_low=seg[2],\n f_high=seg[3], segs=[(os.path.join(root, file), seg)],\n single=single, dist_method=distance)\n clusters.append(newclass)\n n_clusters += 1\n print('Created new class: Class ', \"'\", newclass[\"label\"], \"'\", ',\\tIn-class_d: ',\n newclass[\"d\"], '\\tf_low: ', newclass[\"f_low\"], '\\tf_high: ', newclass[\"f_high\"])\n matched = True\n if not matched:\n # See if the syllables in the current seg match with any existing class\n min_ds = [] # Keep track of the minimum distances to each class\n clusters = random.sample(clusters, len(clusters)) # Shuffle the clusters to avoid bias\n for c in range(len(clusters)):\n f_c = clusters[c][\"features\"] # features of the current class c\n dist_c = np.zeros((len(f_c), len(f))) # distances to the current class c\n for i in range(len(f_c)):\n for j in range(len(f)):\n if distance == 'dtw':\n d, _ = librosa.sequence.dtw(f_c[i], f[j], metric='euclidean')\n dist_c[i, j] = d[d.shape[0] - 1][d.shape[1] - 1]\n elif distance == 'xcor':\n corr = signal.correlate(f_c[i], f[j], mode='full')\n dist_c[i, j] = np.sum(corr) / max(len(f_c[i]), len(f[j]))\n\n # Min distance to the current class\n print('Distance to Class ', clusters[c][\"label\"], ': ', np.amin(dist_c[dist_c != 0]),\n '( In-class distance: ', clusters[c][\"d\"], ')')\n min_ds.append(np.amin(dist_c[dist_c != 0]))\n\n # Now get the clusters sorted according to the min dist\n ind = np.argsort(min_ds)\n min_ds = np.sort(min_ds)\n # make the cluster order\n clusters = [clusters[i] for i in ind]\n for c in range(len(clusters)):\n if (clusters[c][\"d\"] != 0) and min_ds[c] < (clusters[c][\"d\"] + clusters[c][\"d\"] * 0.1):\n print('**Case 2: Found a match with a class > one syllable')\n print('Class ', clusters[c][\"label\"], ', dist ', min_ds[c])\n # Update this class\n clusters[c] = self.class_update(cluster=clusters[c], newfeatures=f, newf_low=seg[2],\n newf_high=seg[3], newsyl=syls,\n newseg=(os.path.join(root, file), seg), single=single,\n dist_method=distance)\n matched = True\n break # found a match, exit from the for loop, go to the next segment\n\n elif c < len(clusters) - 1:\n continue # continue to the next class\n\n # Checked most of the classes by now, if still no match found, check the classes with only one\n # data point (clusters[c][\"d\"] == 0).\n # Note the arbitrary thr.\n if not matched:\n if distance == 'dtw':\n thr = 25\n elif distance == 'xcor':\n thr = 1000\n for c in range(len(clusters)):\n if 
clusters[c][\"d\"] == 0 and min_ds[c] < thr:\n print('**Case 3: In-class dist of ', clusters[c][\"label\"], '=', clusters[c][\"d\"],\n 'and this example < ', thr, ' dist')\n print('Class ', clusters[c][\"label\"], ', dist ', min_ds[c])\n # Update this class\n clusters[c] = self.class_update(cluster=clusters[c], newfeatures=f, newf_low=seg[2],\n newf_high=seg[3], newsyl=syls,\n newseg=(os.path.join(root, file), seg), single=single,\n dist_method=distance)\n matched = True\n break # Break the search and go to the next segment\n\n # If no match found yet, check the max clusters\n if not matched:\n if n_clusters == max_clusters:\n print('**Case 4: Reached max classes, therefore adding current seg to the closest '\n 'class... ')\n # min_ind = np.argmin(min_ds)\n # classes are sorted in ascending order of distance already\n for c in range(len(clusters)):\n if min_ds[c] <= 4 * clusters[c][\"d\"] or clusters[c][\"d\"] == 0:\n print('Class ', clusters[c][\"label\"], ', dist ', min_ds[c],\n '(in-class distance:', clusters[c][\"d\"], ')')\n # Update this class\n clusters[c] = self.class_update(cluster=clusters[c], newfeatures=f, newf_low=seg[2],\n newf_high=seg[3], newsyl=syls,\n newseg=(os.path.join(root, file), seg),\n single=single,\n dist_method=distance)\n matched = True\n break\n if not matched:\n print('Class ', clusters[0][\"label\"], ', dist ', min_ds[0],\n '(in-class distance:', clusters[0][\"d\"], ')')\n # Update this class\n # TODO: don't update the class as it is an outlier?\n clusters[0] = self.class_update(cluster=clusters[0], newfeatures=f, newf_low=seg[2],\n newf_high=seg[3], newsyl=syls,\n newseg=(os.path.join(root, file), seg), single=single,\n dist_method=distance)\n matched = True\n continue # Continue to next segment\n\n # If still no luck, create a new class\n if not matched:\n print('**Case 5: None of Case 1-4')\n newclass = self.class_create(label=n_clusters, syl=syls, features=f, f_low=seg[2], f_high=seg[3],\n segs=[(os.path.join(root, file), seg)], single=single,\n dist_method=distance)\n print('Created a new class: Class ', n_clusters + 1)\n clusters.append(newclass)\n n_clusters += 1\n print('Created new class: Class ', \"'\", newclass[\"label\"], \"'\", ',\\tin-class_d: ',\n newclass[\"d\"], '\\tf_low: ', newclass[\"f_low\"], '\\tf_high: ', newclass[\"f_high\"])\n\n print('\\n\\n--------------Clusters created-------------------')\n clustered_segs = []\n for c in range(len(clusters)):\n print('Class ', clusters[c]['label'], ': ', len(clusters[c]['segs']))\n for s in range(len(clusters[c]['segs'])):\n print('\\t', clusters[c]['segs'][s])\n if single:\n clustered_segs.append([clusters[c]['segs'][s][0], clusters[c]['segs'][s][1],\n [clusters[c]['features'][s]], clusters[c]['label']])\n else:\n clustered_segs.append([clusters[c]['segs'][s][0], clusters[c]['segs'][s][1], clusters[c]['label']])\n\n # Clustered segments\n print('\\n\\n################### Clustered segments ############################')\n for s in clustered_segs:\n print(s)\n return clustered_segs, fs, n_clusters, 1\n # return clustered_dataset, fs, nclasses, duration", "title": "" }, { "docid": "785bf293a844a28fba7858e88babe092", "score": "0.4908542", "text": "def compute_distinctiveness_from_dist(\n norm_dist, dcvs_power, dcvs_max_clip, dcvs_min_clip\n):\n # expondent to augment distinctiveness scores.\n # clip the distinctiveness at this fraction\n clip_range = dcvs_max_clip - dcvs_min_clip\n # apply distinctivness normalization\n _tmp = np.clip(norm_dist, dcvs_min_clip, dcvs_max_clip)\n np.subtract(_tmp, 
dcvs_min_clip, out=_tmp)\n np.divide(_tmp, clip_range, out=_tmp)\n np.power(_tmp, dcvs_power, out=_tmp)\n dstncvs = _tmp\n return dstncvs", "title": "" }, { "docid": "22a134e731caeb59dda9a4ca28d7fcd6", "score": "0.48802838", "text": "def sample(dist, num_samples=1):\n\n cdf = cumsum(dist)\n r = uniform(size=num_samples) * cdf[-1]\n\n return cdf.searchsorted(r)", "title": "" }, { "docid": "35191c13ad1dd8f2699d070d980731b1", "score": "0.4878911", "text": "def get_abstract_dist(dist):\n if dist.is_homogeneous():\n n_variables = dist.outcome_length()\n n_symbols = len(dist.alphabet[0])\n d = AbstractDenseDistribution(n_variables, n_symbols)\n else:\n class D(object):\n n_variables = dist.outcome_length()\n n_elements = np.prod(list(map(len, dist.alphabet)))\n\n def parameter_array(self, indexes, cache=None):\n return brute_marginal_array(dist, indexes, rv_mode='indexes')\n d = D()\n\n return d", "title": "" }, { "docid": "531f440f47949ba8c9ffa161087f81b3", "score": "0.48763838", "text": "def partition_at_level(dendrogram, level) :\r\n partition = dict()\r\n partition = dendrogram[0].copy()\r\n for index in range(1, level + 1) :\r\n for node, community in partition.items() :\r\n partition[node] = dendrogram[index][community]\r\n return partition", "title": "" }, { "docid": "c5c8f34a7af855a17bd762b183973cd2", "score": "0.48473218", "text": "def sample_from_dist(self, dist):\n roll = random.random()\n total = 0\n for k, v in dist:\n total += v\n if total >= roll:\n return k\n raise ValueError('Invalid distribution: {}'.format(dist))", "title": "" }, { "docid": "736fdff74d6ee7c9b1fec17868803103", "score": "0.48301646", "text": "def redistribute_vertices(geom, dist):\n if geom.geom_type == 'LineString':\n num_vert = int(round(geom.length / dist))\n if num_vert == 0:\n num_vert = 1\n return [geom.interpolate(float(n) / num_vert, normalized=True)\n for n in range(num_vert + 1)]\n elif geom.geom_type == 'MultiLineString':\n parts = [redistribute_vertices(part, dist)\n for part in geom]\n return type(geom)([p for p in parts if not p.is_empty])\n else:\n raise ValueError('unhandled geometry {}'.format(geom.geom_type))", "title": "" }, { "docid": "0e1b85c4c823245a00c0314fff8c8b42", "score": "0.48175696", "text": "def partition(array_to_partition, start_index, end_index, partition_style):\n if partition_style.lower()[0] == \"f\":\n return start_index\n\n elif partition_style.lower()[0] == \"l\":\n return end_index - 1\n\n elif partition_style.lower()[0] == \"m\":\n # Find the median of the first, middle and last elements.\n x = array_to_partition[start_index]\n y = array_to_partition[end_index - 1]\n z = array_to_partition[int(math.floor((end_index+start_index-1)/2))]\n med = int(numpy.median([x,y,z]))\n\n # Return the index corresponding to the calculated median.\n if med == x:\n return start_index\n elif med == y:\n return end_index - 1\n else:\n return int(math.floor((end_index+start_index-1)/2))\n\n elif partition_style.lower()[0] == \"r\":\n return math.floor(random.random() * end_index + 1)", "title": "" }, { "docid": "58ea09add90c373facf3efb3c16e9834", "score": "0.4812949", "text": "def agglomerative(X, dist):\n h = scipy.cluster.hierarchy\n Z = h.linkage(X, 'ward')\n clust = h.fcluster(Z, t=dist, criterion=\"distance\")\n fig = plt.figure()\n dn = h.dendrogram(Z, color_threshold=dist)\n plt.show()\n return clust", "title": "" }, { "docid": "ad448bb07079320ac543d77eb6b18918", "score": "0.4783239", "text": "def get_train_action_dist_cls(self) -> Type[Distribution]:\n raise NotImplementedError", 
"title": "" }, { "docid": "ca4d81ff4a81722393684419d52403cb", "score": "0.47253284", "text": "def make_draws(dist, params, size=200):\n return dist(**params).rvs(size)", "title": "" }, { "docid": "604dfa3d53309fe8af0386effbcd3af9", "score": "0.4688263", "text": "def _support(dist):\n\n if isinstance(dist, bernoulli_lib.Bernoulli):\n return tf.range(2), 0\n elif isinstance(dist, categorical_lib.Categorical):\n return tf.range(tf.shape(dist.probs_parameter())[-1]), 0\n elif isinstance(dist, sample_lib.Sample):\n # The support of `tfd.Sample` is the n-fold cartesian product\n # of the supports of the underlying distributions where\n # `n` is the total size of the sample shape.\n\n sample_shape, n = dist._expand_sample_shape_to_vector( # pylint: disable=protected-access\n dist.sample_shape, 'expand_sample_shape')\n p, rank = _support(dist.distribution)\n product = _power(p, n)\n new_shape = tf.concat([tf.shape(product)[:-1], sample_shape], axis=-1)\n\n new_rank = rank + tf.compat.v2.compat.dimension_value(\n sample_shape.shape[0])\n return tf.reshape(product, new_shape), new_rank\n else:\n raise ValueError('Unable to find support for distribution ' +\n str(dist))", "title": "" }, { "docid": "177b1f3e63f5ee86018397baab1dcbff", "score": "0.46585467", "text": "def partition(mat, n_parts):\n \n # Combine weights of directed edges to obtain undirected graph:\n mat = mat+mat.T\n\n # Convert matrix into METIS-compatible form:\n g = nx.from_numpy_matrix(np.array(mat, dtype=[('weight', int)])) \n n = g.number_of_nodes()\n e = g.number_of_edges()\n xadj = np.empty(n+1, int)\n adjncy = np.empty(2*e, int)\n eweights = np.empty(2*e, int)\n end_node = 0\n xadj[0] = 0\n for i in g.node:\n for j, a in g.edge[i].items():\n adjncy[end_node] = j\n eweights[end_node] = a['weight']\n end_node += 1\n xadj[i+1] = end_node\n\n # Compute edge-cut partition:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n cutcount, part_vert = pymetis.part_graph(n_parts, xadj=xadj,\n adjncy=adjncy, eweights=eweights)\n\n # Find nodes in each partition:\n part_map = {}\n for i, p in enumerate(set(part_vert)):\n ind = np.where(np.array(part_vert) == p)[0]\n part_map[p] = ind\n return part_map", "title": "" }, { "docid": "49b65cfc19fe8355cead2027524db893", "score": "0.46409166", "text": "def extract_cluster(self, n, extraction_type='copy', to='ClusterCuts' ):\n for img_path in self.images.loc[self.cluster_indices[n]]:\n image_name = os.path.basename(img_path)\n\n if not os.path.exists(to):\n os.mkdir(to)\n\n if extraction_type == 'copy':\n shutil.copyfile(img_path, os.path.join(to, image_name))\n else:\n shutil.move(img_path, os.path.join(to, image_name))", "title": "" }, { "docid": "3fd1d4e34199cf3ee02b9e2ffe6e849b", "score": "0.463741", "text": "def _dist_output(self, params):\n loc, scale_diag = params\n return tf.contrib.distributions.MultivariateNormalDiag(\n loc, scale_diag, name=\"game_output\")", "title": "" }, { "docid": "07c5e8f6ff4f551749d2cd03b3c99c43", "score": "0.4627575", "text": "def walk_dist_mat(dist):\n i, j = 0, 0\n shp = np.array(dist.shape)\n while not_finished(i, j, shp):\n i, j = step(dist, i, j)\n out = is_out(i, j, shp)\n\n return dist[i, j] / np.sum(shp) if not out else np.inf", "title": "" }, { "docid": "7570daf9dd91aa7182a479ff27615a63", "score": "0.46150094", "text": "def input_fn(data_dir,\n subset,\n num_shards, \n batch_size,\n use_distortion_for_training=True):\n with tf.device('/cpu:0'):\n use_distortion = subset == 'train' and use_distortion_for_training\n dataset = 
shadownet.ShadownetDataSet(data_dir, subset, use_distortion)\n inputdata, input_labels = dataset.make_batch(batch_size)\n\n if num_shards <= 1:\n # No GPU available or only 1 GPU.\n num_shards = 1\n\n feature_shards = tf.split(inputdata, num_shards)\n label_shards = tf.sparse_split(sp_input=input_labels, num_split=num_shards, axis=0)\n return feature_shards, label_shards", "title": "" }, { "docid": "923d6e95729b99ea22e3aa4a55534531", "score": "0.46091202", "text": "def logBinning(dist,base):\n # histogram\n maximum=int(math.log(dist[0],base))+1\n hist=np.zeros(maximum)\n # add cluster size each range\n for x in dist:\n hist[int(math.log(x,base))]+=1\n # generate x axis\n x_hist=np.zeros(maximum)\n for i in range(maximum):\n x_hist[i]=(base**(i+1)+base**(i))*0.5\n # divide by range\n for i in range(maximum):\n hist[i]/=(base**(i+1)-base**i)\n return x_hist,hist", "title": "" }, { "docid": "5d837cb2b94d79a96ec5a4da289e2478", "score": "0.46026734", "text": "def get_zs(size, distribution):\n dist_dict = dict([('normal', np.random.normal), ('uniform', np.random.uniform)])\n return dist_dict[distribution](size=(2, 1, size))", "title": "" }, { "docid": "babfc2a1c2fae3b539cdc7847beb8291", "score": "0.4571051", "text": "def _assign_cluster(self, distance: np.ndarray) -> np.ndarray:\n cluster = np.argmin(distance, axis=1)\n return cluster", "title": "" }, { "docid": "babfc2a1c2fae3b539cdc7847beb8291", "score": "0.4571051", "text": "def _assign_cluster(self, distance: np.ndarray) -> np.ndarray:\n cluster = np.argmin(distance, axis=1)\n return cluster", "title": "" }, { "docid": "57f956e634a7a6805986d3a55a79bf43", "score": "0.456725", "text": "def _categorical_param2dist(cls, params, dist_info):\n dist = torch.distributions.Categorical(probs=params)\n return dist", "title": "" }, { "docid": "181e755f6fe101574c155ea4fb147a9a", "score": "0.4565932", "text": "def slice_with_dist_attr(tensor, dist_attr):\n dims_mapping = dist_attr[\"dims_mapping\"]\n process_shape = dist_attr[\"process_shape\"]\n process_group = dist_attr[\"process_group\"]\n # slice the tensor with dist_attr\n partition_index_list = Converter._get_split_indices(\n tensor.shape, dims_mapping, process_shape, process_group\n )\n sliced_tensor_list = Converter.split(\n tensor, partition_index_list, len(partition_index_list)\n )\n # get the current tensor's index in sliced_tensor_list\n rank_id = paddle.distributed.get_rank()\n sliced_tensor_index = Converter._get_sliced_index(\n rank_id, tensor.shape, dims_mapping, process_shape, process_group\n )\n if sliced_tensor_index not in range(len(sliced_tensor_list)):\n raise ValueError(\n \"Fail to slice tensor with dist_attr '{}'.\".format(\n str(dist_attr)\n )\n )\n sliced_tensor = sliced_tensor_list[sliced_tensor_index]\n return sliced_tensor", "title": "" }, { "docid": "df025ea80905a0135df56d262e495d46", "score": "0.45635515", "text": "def sharded_type_as(args, kwargs, pg):\n st = args[0]\n tensor = args[1]\n if isinstance(tensor, ShardedTensor):\n tensor = tensor.local_tensor()\n new_local_shards = []\n for shard in st.local_shards():\n new_local_shards.append(Shard(shard.tensor.type_as(tensor), shard.metadata))\n st_meta = copy.deepcopy(st._metadata)\n st_meta.tensor_properties.dtype = tensor.dtype\n return new_local_shards, st_meta", "title": "" }, { "docid": "1009c051345d45af462f484b4fbe1318", "score": "0.45090303", "text": "def partition(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "1009c051345d45af462f484b4fbe1318", "score": "0.45090303", "text": 
"def partition(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "49f6e61b13bed59ef058d8fb5b277219", "score": "0.4507008", "text": "def hpd(dist, alpha, *args):\n # Freeze dist if args provided\n if args:\n dist = dist(*args)\n\n def interval_length(start_):\n return dist.ppf(start_ + alpha) - dist.ppf(start_)\n # find start of cdf interval that minimizes the pdf interval length\n start = fmin(interval_length, 1-alpha, ftol=1e-8, disp=False)[0]\n # return interval as array([low, high])\n return dist.ppf([start, alpha + start])", "title": "" }, { "docid": "1a5048761a09b5bc4ea785cdc9478db8", "score": "0.450556", "text": "def flatten(self, weight_partition: _torch.Tensor):\n permute = self.cluster_permute\n dim = self.cluster_dim\n\n if permute and len(permute) == len(weight_partition.size()):\n weight_partition = weight_partition.permute(permute)\n\n num_misalignment = _torch.numel(weight_partition) % dim\n\n pad = None\n if num_misalignment:\n weight_partition = weight_partition.flatten()\n pad = weight_partition[-num_misalignment:]\n weight_partition = weight_partition[:-num_misalignment]\n\n return weight_partition.reshape(-1, dim), pad", "title": "" }, { "docid": "cd02e0a1bcf49791fa9fbc1460c92a84", "score": "0.45040667", "text": "def _categorical_draw(cls, dist, num_particles):\n assert isinstance(dist, torch.distributions.Categorical)\n span = dist.probs.shape[-1]\n b_shape = dist.batch_shape\n s_shape = torch.Size(num_particles)\n\n particles = torch.ones(s_shape + b_shape)\n for i in range(span):\n particles[i] = particles[i] * i\n\n # Weights obtained from probs attribute, by simply permuting the last dimension to first dimension\n n_dims = len(dist.probs.shape)\n dims = [n_dims-1, ] + [i for i in range(n_dims - 1)]\n weights = dist.probs.clone().permute(dims) # clone to prevent accidental in-place value change\n\n # Since we are effectively drawing particles uniformly from the finite discrete domain, the sampling pdf is also\n # uniform\n sampling_log_densities = 0\n\n return particles, weights, sampling_log_densities", "title": "" }, { "docid": "3b10e90350bb0e51e448394b77705c9f", "score": "0.4483107", "text": "def string_to_distribution(distn_name,\n **kwargs):\n\n # Check for edge cases\n if ('scale', np.inf) in kwargs.items():\n return ss.uniform(scale = 2*np.pi)\n\n distn_name = get_canonical_distn_name(distn_name)\n try:\n rv_gen = getattr(ss, distn_name)\n except AttributeError:\n raise AttributeError(f\"distribution {distn_name} unknown.\")\n\n return rv_gen(**kwargs)", "title": "" }, { "docid": "cd86fc05b47593bcbd2277e6a9934f62", "score": "0.44766492", "text": "def _uniforme(self, dist):\r\n identify_labels = self.labels[np.where(dist < self.h)]\r\n # print(identify_labels)\r\n return self._count_targets(identify_labels)", "title": "" }, { "docid": "d68f8d4736a4f2f3f095ce2c06d143d6", "score": "0.44730452", "text": "def make_spectrogramm (orig, ndft, noverlap = None): \n if noverlap is None:\n noverlap = ndft / 2\n noverlap = int (noverlap)\n\n starts = np.arange (0, len (orig), ndft - noverlap, dtype = int)\n # leave windows greater or equal to ndft sample size\n starts = starts[starts + ndft < len (orig)]\n xns = []\n for start in starts:\n # short dft\n window = get_all_xns (orig[start : start + ndft])\n xns.append (window)\n spec = np.array (xns).T\n #rescale (standart procedure)\n spec = 10 * np.log10 (spec)\n assert spec.shape[1] == len (starts)\n return (starts, spec)", "title": "" }, { "docid": "e693a8fd9ee89262503d1d8a0bfe1c39", 
"score": "0.44713953", "text": "def _create_partition(self, device, parttype, fstype, start, size):\n\n # Start is included to the size so we need to substract one from the end.\n end = start + size - 1\n logger.debug(\"Added '%s' partition, sectors %d-%d, size %d sectors\",\n parttype, start, end, size)\n\n cmd = \"parted -s %s unit s mkpart %s\" % (device, parttype)\n if fstype:\n cmd += \" %s\" % fstype\n cmd += \" %d %d\" % (start, end)\n\n return exec_native_cmd(cmd, self.native_sysroot)", "title": "" }, { "docid": "38ed0f50ecb2878e45efda22578f1872", "score": "0.44710994", "text": "def write_dist(self, distname):\n dist = self.pr_dist_from_name(distname)\n self.write_pr_dist(dist)", "title": "" }, { "docid": "b1b2d8cd0aba1fe2bdc9be91f17606d2", "score": "0.44528744", "text": "def __cluster(self):\n shift_points = PointShifter().shift_points(self.X, self.bandwidth, self.min_dist)\n c_assign = ClusterCreator(dist_tolerance=self.dist_tolerance) \\\n .cluster_points(shift_points)\n \n return c_assign", "title": "" }, { "docid": "f10ffb7f8858cbd170239831edf05b90", "score": "0.44443253", "text": "def make_clusters(d, band, intensity, radius, minpoints):\n points = d[band, intensity]\n xy_arrays = np.array(points)\n\n dbs = DBSCAN(radius, minpoints).fit(xy_arrays)\n\n core_samples = dbs.core_sample_indices_\n labels = dbs.labels_\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\n clusters = [xy_arrays[labels == i] for i in range(n_clusters_)]\n\n retval = []\n\n def starmap(func, iterable):\n gunc = lambda x: func(*x)\n return map(gunc, iterable)\n\n for cluster in clusters:\n cp = ClusterPoints(band, intensity, starmap(Coord, cluster.tolist()))\n retval.append(cp)\n del d\n return retval", "title": "" }, { "docid": "b80db35e87dc92a4cfcdf40533201544", "score": "0.44367275", "text": "def get_dist_map(self, dist=None):\n return self._backend.get_dist_map(dist=dist)", "title": "" }, { "docid": "a607804159d3f80146f636609a34603c", "score": "0.44328302", "text": "def sample_with_dist(self, dist, k):\n rand = np.random.random()\n sample = min([np.sqrt(float(i + 1) / k) for i in range(5) if rand < dist[i]])\n return sample", "title": "" }, { "docid": "380d4bda7bc929449222cb01566a0fe4", "score": "0.44310394", "text": "def map_from_dist(dist_arr, batch_size=80, use_gpu=False, print_time=False):\n # Convert embedding dictionary to torch tensors\n probe_emb_list = []\n gallery_emb_list = []\n probe_class_list = []\n gallery_class_list = []\n for i,c in enumerate(sorted(emb_dict)):\n if len(emb_dict[c]) > min_gallery:\n for emb in emb_dict[c]:\n probe_emb_list.append(emb.unsqueeze(0))\n probe_class_list.append(i)\n for emb in emb_dict[c]:\n gallery_emb_list.append(emb.unsqueeze(0))\n gallery_class_list.append(i)\n\n # Convert the embedding list to a torch tensor\n probe_emb_tsr = torch.cat(probe_emb_list)\n gallery_emb_tsr = torch.cat(gallery_emb_list)\n m, _ = probe_emb_tsr.size()\n n, _ = gallery_emb_tsr.size()\n\n # Convert the list of classes corresponding to the embedding list\n # to a torch tensor\n probe_class_tsr = torch.LongTensor(probe_class_list)\n gallery_class_tsr = torch.LongTensor(gallery_class_list)\n\n # Keep track of the average precision for each probe/gallery\n avg_precision_tsr = torch.zeros(m)\n\n # Precomputed torch.range tensor to speed up the computation\n range_tsr = torch.arange(0, n-1)\n\n # Keep track of time spent in different parts of function for profiling\n dist_mat_time = 0.0\n comp_map_time = 0.0\n\n # Index for the current probe/gallery\n j = 0\n\n # The 
class tensor, full embedding tensor, and a precomputed torch.range\n # tensor are converted to CUDA tensors which use GPU.\n if use_gpu:\n probe_class_tsr = probe_class_tsr.cuda()\n gallery_class_tsr = gallery_class_tsr.cuda()\n probe_emb_tsr = probe_emb_tsr.cuda()\n gallery_emb_tsr = gallery_emb_tsr.cuda()\n range_tsr = range_tsr.cuda()\n\n # Batchify the computation\n for i in range(0, m, batch_size):\n st1 = time.time()\n\n # Carve out the mini-batch from the full embedding tensor\n probe_emb_tsr_part = probe_emb_tsr[i:i+batch_size, :]\n if use_gpu:\n probe_emb_tsr_part = probe_emb_tsr_part.cuda()\n\n # Compute squared differences for all embedding pairs\n dist_tsr = ((gallery_emb_tsr.unsqueeze(0) - probe_emb_tsr_part.unsqueeze(1))**2).sum(dim=2)\n\n dt1 = time.time()\n dist_mat_time += (dt1 - st1)\n\n # Vectorize MAP\n st2 = time.time()\n for dist_row in dist_tsr:\n # Sort dist row, take all but first element (the identity)\n _, sorted_idx = torch.sort(dist_row)\n gallery_idx = sorted_idx[1:]\n # Get the class indeces corresponding to the sorted distances\n probe_class = probe_class_tsr[j]\n sorted_class_row = gallery_class_tsr[gallery_idx]\n # Produce binary array by comparing the sorted class row to the probe class\n binary_class_row = sorted_class_row==probe_class\n # Get indeces of matches\n match_idx_row = range_tsr[binary_class_row == True]\n # Get match counts\n match_count_row = range_tsr[:len(match_idx_row)]+1\n # Get total counts up to each match\n tot_count_row = match_idx_row+1\n # Divide element-wise to get precision\n precision_arr = match_count_row/tot_count_row\n # Take mean of precision array to get average precision\n avg_precision = torch.mean(precision_arr)\n # Accumulate average precision\n avg_precision_tsr[j] = avg_precision\n # Increment index for probe/gallery\n j += 1\n dt2 = time.time()\n comp_map_time += (dt2 - st2)\n # Take the mean of the average precision tensor to get the 'complete' MAP\n amap = torch.mean(avg_precision_tsr)\n # Print time profiling info\n if print_time:\n print('Total dist mat time: {:2.2f}'.format(dist_mat_time))\n print('Total comp map time: {:2.2f}'.format(comp_map_time))\n # Return 'complete' MAP result\n return amap", "title": "" }, { "docid": "0e8f075aee9772e5cfe8a2510783a43e", "score": "0.44272098", "text": "def MDs_sample(partitions):\n MDs = []\n for partition in partitions:\n MDs.append(np.median(partition))\n return MDs", "title": "" }, { "docid": "e9355ae10d03b4989f5cf250257150e8", "score": "0.4419922", "text": "def test_spread(self):\n\n num_samples = 20\n\n # Fingerprints are expensive to generate, so we're only going to \n # generate 20 of them. This is a fairly small number, so we can't\n # really count on them displaying any sort of fine-grained statistical\n # properties. Therefore, we will use the following paradigm: Take the\n # fingerprint-lengths and divide them into two 'buckets.' If the \n # lengths are random, then the two buckets should have roughly the\n # same number. But we can't count on that due to the law of small \n # numbers. Instead, we use the fact that there is only a 1-in-2^20 \n # (roughly 1 in a million) chance that all the lengths end up in the\n # same bucket.\n \n lengths = [len(self.dist.generate()) for i in xrange(num_samples)]\n \n # Bucket 1: top half of the range or bottom half?\n \n midpoint = (self.dist.MAX_FINGERPRINT_SIZE \n + self.dist.MIN_FINGERPRINT_SIZE) / 2\n if all([x < midpoint for x in lengths]):\n self.fail(\"All fingerprint-lengths in bottom half of range. 
\" +\n self.seed_msg)\n\n \n if all([x > midpoint for x in lengths]):\n self.fail(\"All fingerprint-lengths in top half of range. \" +\n self.seed_msg)\n \n # Bucket #2: is the length odd or even\n \n if all([x % 2 == 0 for x in lengths]):\n self.fail(\"All fingerprint-lengths are even. \" +\n self.seed_msg)\n \n if all([x % 2 == 1 for x in lengths]):\n self.fail(\"All fingerprint-lengths are odd. \" +\n self.seed_msg)", "title": "" }, { "docid": "ce837162fa58c79a34ac7f152deb8d33", "score": "0.441501", "text": "def ishigami_input_dist_object():\n return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)", "title": "" }, { "docid": "9d848f61493e014fa31963eb3ae31238", "score": "0.44121325", "text": "def _partition(self):\n for c in self._clusters:\n c.clear()\n\n points_in_dataset=self._dataset.getContents()\n\n for p in range(len(points_in_dataset)):\n\n cluster=self._nearest(self._dataset.getPoint(p))\n\n cluster.addIndex(p)", "title": "" }, { "docid": "9955d92f7e01ad0155cf8c4ab8c64332", "score": "0.44117486", "text": "def deflatten(self, weight_partition: _torch.Tensor, target_size: _Tuple, pad: _torch.Tensor):\n permute = self.cluster_permute\n\n if pad is not None:\n weight_partition = _torch.cat([weight_partition.flatten(), pad])\n\n if permute and len(permute) == len(target_size):\n cur_shape = [target_size[i] for i in permute]\n\n weight_partition = weight_partition.reshape(cur_shape)\n weight_partition = weight_partition.permute(\n _torch.argsort(_torch.Tensor(permute)).tolist()\n )\n assert weight_partition.size() == target_size\n\n return weight_partition.reshape(target_size)", "title": "" }, { "docid": "bc777af214abac411b924040cb97c0c3", "score": "0.44077876", "text": "def construct_pseudo_classes(self, bootstrap, data, name=None):\n kmeans = KMeans(n_clusters=self.n_classes).fit(bootstrap)\n partition = pd.Series(\n DecisionTreeClassifier().fit(bootstrap, kmeans.labels_).predict(data),\n index=data.index,\n )\n if name:\n partition.name = name\n return partition", "title": "" }, { "docid": "9b0f0b7ec44857a97877be92299e4177", "score": "0.44016182", "text": "def insert_rv(dist, idx, sigalg):\n from itertools import chain\n\n if idx == -1:\n idx = dist.outcome_length()\n\n if not (0 <= idx <= dist.outcome_length()):\n raise IndexError('Invalid insertion index.')\n\n # Provide sane sorting of atoms\n atoms = atom_set(sigalg)\n atoms = [sorted(atom) for atom in atoms]\n atoms.sort(key=lexico_key)\n labels = range(len(atoms))\n if dist._outcome_class == str:\n # Then the labels for the new random variable must be strings.\n labels = map(str, labels)\n\n # Create an index from outcomes to atoms.\n atom_of = {}\n for label, atom in zip(labels, atoms):\n for outcome in atom:\n atom_of[outcome] = label\n\n if idx == dist.outcome_length():\n def new_outcome_ctor(outcome, ctor=dist._outcome_ctor):\n new_outcome = [ outcome, [atom_of[outcome]] ]\n return ctor(chain.from_iterable(new_outcome))\n elif idx == 0:\n def new_outcome_ctor(outcome, ctor=dist._outcome_ctor):\n new_outcome = [ [atom_of[outcome]], outcome ]\n return ctor(chain.from_iterable(new_outcome))\n else:\n def new_outcome_ctor(outcome, ctor=dist._outcome_ctor):\n new_outcome = [ outcome[:idx], [atom_of[outcome]], outcome[idx:] ]\n return ctor(chain.from_iterable(new_outcome))\n\n d = dit.modify_outcomes(dist, new_outcome_ctor)\n return d", "title": "" }, { "docid": "5f1ae503eadb4262bbbd72a53e1bd5fe", "score": "0.43986323", "text": "def makecluster(self):\n\t\tself.clusterList = []\n\t\tfor did in self.docids:\n\t\t\tcluster = 
[]\n\t\t\tcluster = cluster + [did]\n\t\t\tself.clusterList = self.clusterList + [cluster]\n\t\t\n\t\tl = len(self.clusterList)\n\n\t\twhile l > 30:\n\t\t\tmindist = 100000000\n\n\t\t\ts = 0\n\t\t\tfor i in range(0,l):\n\t\t\t\tfor j in range(i+1,l):\n\t\t\t\t\ts = self.clusterDistanceType2( i , j)\n\t\t\t\t\tif s < mindist:\n\t\t\t\t\t\tmindist = s\n\t\t\t\t\t\tclustindx1 = i\n\t\t\t\t\t\tclustindx2 = j\n\t\t\tself.clusterList[clustindx1] = self.clusterList[clustindx1] + self.clusterList[clustindx2]\n\t\t\tt = self.clusterList[clustindx2]\n\t\t\tself.clusterList.remove( t )\n\t\t\tl = len(self.clusterList)\n\t\t\t\n\t\tprint self.clusterList", "title": "" }, { "docid": "d505d352a26f638c54a4b934b3fda430", "score": "0.43973306", "text": "def create_shards(input_list, size):\n shard, i = [], 0\n for element in input_list:\n shard.append(element)\n i += 1\n if i == size:\n yield shard\n shard, i = [], 0\n\n if i > 0:\n yield shard", "title": "" }, { "docid": "9d7c45f93bf756e18d51dfdcaf9c46c6", "score": "0.43909502", "text": "def __init__(self,\n distance: Union[Distance, str],\n dim: int,\n verbose: bool,\n **kwargs):", "title": "" }, { "docid": "bfae5b1f98afa60634d6f45b72d1f0a7", "score": "0.43838245", "text": "def create_density_map(gridfile, map_type='density_map', den_threshold=None, den_scale=5):\n hfile = h5py.File(gridfile, 'r')\n data_names = list(hfile['Data'].keys())\n data = {}\n for dn in data_names: # extract all data sets\n data[dn] = hfile['Data'][dn].value.transpose()\n\n x_vals = np.unique(data['coords'][:, 0]) # x values in grid\n n_x = len(x_vals) # number of x values\n y_vals = np.unique(data['coords'][:, 1])\n n_y = len(y_vals)\n z_vals = np.unique(data['coords'][:, 2])\n n_z = len(z_vals)\n resolution = [np.mean(np.diff(x_vals)), np.mean(np.diff(y_vals)), np.mean(\n np.diff(z_vals))] # it is exported as a cubic grid so resolutions should be equal in all dimensions\n min_coords = data['coords'].min(axis=0) # minimum used to translate density map in Fishualizer.draw_density_map()\n\n cluster_names = []\n for dn in data_names:\n if dn != 'coords':\n data[dn + '_nf'] = np.reshape(data[dn], (n_x, n_y, n_z)) # put cluster densities in new format (1D -> 3D)\n cluster_names.append(dn)\n # nf_cluster_names = [x for x in list(data.keys()) if x[-3:] == '_nf'] # list of names to use\n # colours = {nf_cluster_names[0]: [255, 0, 0, 0],\n # nf_cluster_names[1]: [0, 255, 0, 0],\n # nf_cluster_names[2]: [0, 0, 255, 0],\n # nf_cluster_names[3]: [128, 128, 0, 0],\n # nf_cluster_names[4]: [0, 128, 128, 0]} # colours of clusters # TODO: import color dict\n nf_cluster_names = ['positive_mixed_nf', 'negative_mixed_nf', 'posnegderiv_high95_nf']\n colours = {nf_cluster_names[0]: [255, 0, 113, 0],\n nf_cluster_names[1]: [0, 255, 157, 0],\n nf_cluster_names[2]: [184, 134, 11, 0]} # hard coded colors of regression clusters Migault et al., 2018\n # maxnorm = {cn: data[cn].max() for cn in nf_cluster_names} # max density per cluster (for colour normalization)\n maxnorm = {cn: 0.0005 for cn in nf_cluster_names} # uniform max density (for colour normalization)\n\n dataplot = np.zeros((n_x, n_y, n_z) + (4,), dtype=np.ubyte) # create 4D data matrix to plot (x,y,z,RGBA)\n\n ## Assign RGBA values in series\n if den_threshold is None:\n if map_type == 'density_map':\n den_threshold = 0 # DENSITY MAP\n elif map_type == 'hard_threshold':\n den_threshold = 0.00005 # HARD THRESHOLD MAP\n for x in range(n_x): # loop through all coords to assign RGBA\n for y in range(n_y):\n for z in range(n_z):\n max_den = 0\n 
for cn in nf_cluster_names: # check all clusters to find max one\n if (data[cn][x, y, z] > den_threshold) and (data[cn][x, y, z] > max_den):\n max_den = np.maximum(data[cn][x, y, z], max_den) # DENSITY MAP\n dataplot[x, y, z, :] = colours[cn]\n if map_type == 'density_map':\n dataplot[x, y, z, 3] = (max_den / maxnorm[cn] * 100) * den_scale # DENSITY MAP\n elif map_type == 'hard_threshold':\n dataplot[x, y, z, 3] = 100 # HARD THRESHOLD MAP\n\n return dataplot, resolution, min_coords", "title": "" }, { "docid": "ba450baa2472371e55b6f11bd7e33658", "score": "0.43825862", "text": "def _clustmap(G_normalized_weight_sources_tuple):\n return nx.clustering(*G_normalized_weight_sources_tuple)", "title": "" }, { "docid": "99424568e3dba0d71f7c2fa11514576e", "score": "0.4378374", "text": "def partition_by(f, seq):\n ...", "title": "" }, { "docid": "17971de00478c0ba0b8526ca2fed18d9", "score": "0.4372067", "text": "def pos(t, dist) :\n t.pu()\n t.fd(dist)\n t.pd()", "title": "" }, { "docid": "d74f7c683a2b64e517455722e7bacb90", "score": "0.437192", "text": "def eval_dist(dist, model, loader, args, n_samples_per_dist):\n preds_all = []\n labels_all = []\n\n # Set the correct test distribution\n loader.sampler.set_sub_dist(dist)\n\n counter = 0\n for images, labels, group_id in loader:\n\n labels = labels.detach().numpy()\n images = images.to(args.device)\n\n logits = model(images).detach().cpu().numpy()\n preds = np.argmax(logits, axis=1)\n\n preds_all.append(preds)\n labels_all.append(labels)\n counter += len(images)\n\n if counter >= n_samples_per_dist:\n break\n\n preds_all = np.concatenate(preds_all)\n labels_all = np.concatenate(labels_all)\n\n return preds_all, labels_all", "title": "" }, { "docid": "98f3df62bdc6975160484ce0ff10ffd7", "score": "0.43712255", "text": "def partition2RDD(partition, sc):\n numParts = len(set([partition[node] for node in partition]) )\n return sc.paralelize([(partition[node], node) for node in partition]).partitionBy(numParts, partitionFunc=identityHash)", "title": "" }, { "docid": "a74977bc881ba3139f6b44db700e8a1c", "score": "0.43676135", "text": "def _reconstruct_dist(eliminated_dist, action_labels, num_actions):\n reconstructed_payoff = np.zeros(num_actions)\n reconstructed_payoff[np.ix_(*action_labels)] = eliminated_dist\n return reconstructed_payoff", "title": "" }, { "docid": "7d3e66bca792068ccc7d7ae951186ffe", "score": "0.4364321", "text": "def samp(pro, dist, attribute):\n acc_pro = 0\n for i, item in enumerate(dist):\n acc_pro += item\n if pro < acc_pro:\n return attribute[i]\n return", "title": "" }, { "docid": "dc8a2f7777cae58802b246230e8e3668", "score": "0.43623877", "text": "def get_quantiles(dist,alpha = 0.68, method = 'median'):\n ordered_dist = dist[np.argsort(dist)]\n param = 0.0\n # Define the number of samples from posterior\n nsamples = len(dist)\n nsamples_at_each_side = int(nsamples*(alpha/2.)+1)\n if(method == 'median'):\n med_idx = 0\n if(nsamples%2 == 0.0): # Number of points is even\n med_idx_up = int(nsamples/2.)+1\n med_idx_down = med_idx_up-1\n param = (ordered_dist[med_idx_up]+ordered_dist[med_idx_down])/2.\n return param,ordered_dist[med_idx_up+nsamples_at_each_side],\\\n ordered_dist[med_idx_down-nsamples_at_each_side]\n else:\n med_idx = int(nsamples/2.)\n param = ordered_dist[med_idx]\n return param,ordered_dist[med_idx+nsamples_at_each_side],\\\n ordered_dist[med_idx-nsamples_at_each_side]", "title": "" }, { "docid": "da75e469700c40a32483a61dbbb2cc48", "score": "0.43606746", "text": "def make_pdf(dist, params, size=10000):\n\n # Separate 
parts of parameters\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n # Get sane start and end points of distribution\n start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)\n end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)\n\n # Build PDF and turn into pandas Series\n x = np.linspace(start, end, size)\n y = dist.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n\n return pdf", "title": "" }, { "docid": "da75e469700c40a32483a61dbbb2cc48", "score": "0.43606746", "text": "def make_pdf(dist, params, size=10000):\n\n # Separate parts of parameters\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n # Get sane start and end points of distribution\n start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)\n end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)\n\n # Build PDF and turn into pandas Series\n x = np.linspace(start, end, size)\n y = dist.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n\n return pdf", "title": "" }, { "docid": "da75e469700c40a32483a61dbbb2cc48", "score": "0.43606746", "text": "def make_pdf(dist, params, size=10000):\n\n # Separate parts of parameters\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n # Get sane start and end points of distribution\n start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)\n end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)\n\n # Build PDF and turn into pandas Series\n x = np.linspace(start, end, size)\n y = dist.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n\n return pdf", "title": "" }, { "docid": "da75e469700c40a32483a61dbbb2cc48", "score": "0.43606746", "text": "def make_pdf(dist, params, size=10000):\n\n # Separate parts of parameters\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n # Get sane start and end points of distribution\n start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)\n end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)\n\n # Build PDF and turn into pandas Series\n x = np.linspace(start, end, size)\n y = dist.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n\n return pdf", "title": "" }, { "docid": "2a98e01585acb67d3752d70ad924a69e", "score": "0.43534005", "text": "def compute_dist_map(self):\n # Build an hyper surface with three spatial dimensions + 1 dose dimension\n hypSurfDim = self.ref_img.shape + (self.ndbins,)\n hypSurf = np.ones( hypSurfDim )\n\n # Fill each layer of the dose axis\n # Dose points are set to 0\n\n lookup = np.digitize(self.ref_img,self.dbins) - 1 # lookup contains the index of dose bins\n\n for i in range(self.ndbins):\n dose_points = lookup == i\n if self.ndim == 3:\n hypSurf[:,:,:,i][dose_points] = 0\n # simple (naive) interpolation. See Fig. 2 au Chen 2009\n hypSurf = self._interp_dose_along_ax3(hypSurf,lookup,0)\n hypSurf = self._interp_dose_along_ax3(hypSurf,lookup,1)\n hypSurf = self._interp_dose_along_ax3(hypSurf,lookup,2)\n # Here, we could try to mask layer by layer all position of pixels below threshold\n # to speed up calculation (only w/ skfmm)\n elif self.ndim == 2:\n hypSurf[:,:,i][dose_points] = 0\n # simple (naive) interpolation. See Fig. 
2 au Chen 2009\n hypSurf = self._interp_dose_along_ax2(hypSurf,lookup,0)\n hypSurf = self._interp_dose_along_ax2(hypSurf,lookup,1)\n # Here, we could try to mask layer by layer all position of pixels below threshold\n # to speed up calculation (only w/ skfmm)\n else:\n raise IndexError('Only 2 and 3 spatial dimension supported at this moment')\n\n dst = edt(hypSurf,sampling=self.delta)\n # dst = skfmm.distance(hypSurf)\n\n self.dist_map = dst", "title": "" }, { "docid": "c94c78eddf51c3e9e5bcf4a10749a57f", "score": "0.43506244", "text": "def Dist(dist_name, input_file, output_file):\n D = dist_dict[dist_name] # instantiate class from dictionary\n dist = D()\n text = input_file.read()\n\n dist.analyze(text)\n\n output_file.write( dist.to_readable() )", "title": "" }, { "docid": "f5753109d0b485df6fdc0fb3c0840190", "score": "0.43493623", "text": "def distortPoints(undistorted, K, D, distorted=None, alpha=None):\n pass", "title": "" }, { "docid": "a04bdf638680d5ad50d8ad76c4fc8d82", "score": "0.43489859", "text": "def get_dunn_index(fdist, *clusters):\n\n if len(clusters)<2:\n raise ValueError, \"At least 2 clusters are required\"\n\n intra_dist = []\n for c in clusters:\n for i in c.get_leaves():\n if i is not None:\n # item intraclsuterdist -> Centroid Diameter\n a = fdist(i.profile, c.profile)*2\n intra_dist.append(a)\n max_a = numpy.max(intra_dist)\n inter_dist = []\n for i, ci in enumerate(clusters):\n for cj in clusters[i+1:]:\n # intracluster dist -> Centroid Linkage\n b = fdist(ci.profile, cj.profile)\n inter_dist.append(b)\n min_b = numpy.min(inter_dist)\n\n if max_a == 0.0:\n D = 0.0\n else:\n D = min_b / max_a\n return D", "title": "" }, { "docid": "3aee7bf61cb2092149c955b9d5986379", "score": "0.43452087", "text": "def make_pdf(dist, params, size=10000):\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n start = dist.ppf(0.001, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.001, loc=loc, scale=scale)\n end = dist.ppf(0.999, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.999, loc=loc, scale=scale)\n\n x = np.linspace(start, end, size)\n y = dist.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n return pdf", "title": "" }, { "docid": "7af9b45fad1e50edae02b239a652b65a", "score": "0.4337785", "text": "def _dist(self):\n raise NotImplementedError('Must be called in a derived instance')", "title": "" }, { "docid": "3c540e489f147b30b7a49d11ecc47b08", "score": "0.43324348", "text": "def sdist():\n pass", "title": "" }, { "docid": "2828cfc706d790d503d3fe6cb96679b8", "score": "0.43302605", "text": "def fulldim_mds_analysis(df):", "title": "" }, { "docid": "b2aeff2f750f639ab3091e813ed3a241", "score": "0.43227732", "text": "def decoder_dist(self, dist_parameter):\n pass", "title": "" }, { "docid": "ab18773b21de1e420708d40fb25fcb6a", "score": "0.43172854", "text": "def concat_segment(stitched_img,distance, mile_to_start,segment_num, segment_start,\n segment_end, concat_width, x_width):\n\n img_h = stitched_img.shape[0]\n segment_width = distance / segment_num # segment width, e.g 1.2m\n seg_num_img_int = round(10 / segment_width)\n seg_num_img = 10 / segment_width # number of segment on single image\n seg_pix_integral = round(img_h / seg_num_img)\n\n # calculate index of segment,\n # get y coordinates of all segment\n order = segment_start < segment_end\n y_cord = [y * seg_pix_integral for y in range(seg_num_img_int)]\n seg_num_div = int(mile_to_start // 10) # 计算距里程起点经过的管片数量,单张图片管片数*这个数\n seg_num_mod = round(mile_to_start % 10)\n seg_num_pass = round(seg_num_div * 
seg_num_img) + round(seg_num_mod / segment_width)\n if order:\n segment_id = [seg_num_pass + i for i in range(len(y_cord))]\n else:\n segment_id = [seg_num_pass - i for i in range(len(y_cord))]\n\n # if mile_to_start < 0,\n # which means that first segment showed up.else occupied segments.\n if mile_to_start < 0:\n seg_num_img = round((10 + mile_to_start) / segment_width)\n start_y = round(abs(mile_to_start) / 10 * 4000)\n y_cord = [start_y + y * seg_pix_integral for y in range(seg_num_img)]\n if order:\n segment_id = [segment_start + i for i in range(len(y_cord))]\n else:\n segment_id = [segment_start - i for i in range(len(y_cord))]\n\n bgr = np.zeros((img_h, concat_width, 3), dtype=np.uint8)\n for i in range(len(segment_id)):\n cv2.putText(bgr, \"#{}\".format(segment_id[i]), (x_width, 300 + y_cord[i]),\n cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 5,\n color=(0, 0, 255), thickness=3)\n stitched_img = cv2.hconcat([stitched_img, bgr])\n return stitched_img,segment_id", "title": "" }, { "docid": "6cb267103bf0b679139459c56a57bcd3", "score": "0.43162936", "text": "def partition(current_insts, available_atts, split_att, att_domains):\n d = myutils.group_dict(current_insts, available_atts, split_att, uniques=att_domains[split_att])\n available_atts.remove(split_att)\n return d, available_atts", "title": "" }, { "docid": "6f31c00451bd2e37488af70efd99737e", "score": "0.43138304", "text": "def initialiseAlignTypeDist(self, dataset, loadTypeDist={}):\n typeDist = defaultdict(float)\n typeTotalCount = 0\n for (f, e, alignment) in dataset:\n # Initialise total_f_e_type count\n for (f_i, e_i, typ) in alignment:\n typeDist[typ] += 1\n typeTotalCount += 1\n\n # Calculate alignment type distribution\n for typ in typeDist:\n typeDist[typ] /= typeTotalCount\n # Manually override alignment type distribution\n for typ in loadTypeDist:\n typeDist[typ] = loadTypeDist[typ]\n\n # Create typeIndex and typeList\n self.typeList = []\n self.typeIndex = {}\n for typ in typeDist:\n self.typeList.append(typ)\n self.typeIndex[typ] = len(self.typeList) - 1\n self.typeDist = np.zeros(len(self.typeList))\n for h in range(len(self.typeList)):\n self.typeDist[h] = typeDist[self.typeList[h]]\n return", "title": "" }, { "docid": "1da26892e508fcc2fea3aa4ce80b748e", "score": "0.4301198", "text": "def partitions(self):\n raise NotImplementedError", "title": "" }, { "docid": "a3918a91b1469a4b2ebfe534ff6f3112", "score": "0.43004316", "text": "def get_inference_action_dist_cls(self) -> Type[Distribution]:\n raise NotImplementedError", "title": "" }, { "docid": "19fd7251bcacb0e19237694fce2e4903", "score": "0.42997217", "text": "def show_dendrogram_cut_by_distance(clustermap, dist, axis=1):\n if axis not in (0, 1):\n raise ValueError(\"Valid choices for `axis` are (0, 1).\")\n\n if axis == 0:\n dend = clustermap.dendrogram_row\n ax = clustermap.ax_row_dendrogram\n else:\n dend = clustermap.dendrogram_col\n ax = clustermap.ax_col_dendrogram\n\n xmin = np.array(dend.independent_coord).min()\n xmax = np.array(dend.independent_coord).max()\n rng = xmax - xmin\n\n plot_kwargs = dict(\n linestyle='--',\n color='k',\n alpha=0.3\n )\n\n if axis == 0:\n ax.plot(\n [dist, dist],\n [xmin - 0.1 * rng, xmax + 0.1 * rng],\n **plot_kwargs\n )\n else:\n ax.plot(\n [xmin - 0.1 * rng, xmax + 0.1 * rng],\n [dist, dist],\n **plot_kwargs\n )", "title": "" }, { "docid": "1dcb4c8015b1d69a1f42f58c2c132b47", "score": "0.42981935", "text": "def prep_region(ts, num_snps, L, filter, neg1):\n gt_matrix = ts.genotype_matrix().astype(float)\n snps_total = 
gt_matrix.shape[0]\n\n positions = [round(variant.site.position) for variant in ts.variants()]\n assert len(positions) == snps_total\n dist_vec = [0] + [(positions[j+1] - positions[j])/L for j in \\\n range(snps_total-1)]\n\n # when mirroring real data\n if filter:\n return util.process_gt_dist(gt_matrix, dist_vec, num_snps, filter=True,\\\n rate=0.3, neg1=neg1)\n else:\n return util.process_gt_dist(gt_matrix, dist_vec, num_snps, neg1=neg1)", "title": "" }, { "docid": "1f29c99cbaa429d72df739d80eb91b26", "score": "0.4289312", "text": "def make_pdf(self, dist, params, size = 10000):\n\n # Separate parts of parameters\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n # Get sane start and end points of distribution\n start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)\n end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)\n\n # Build PDF and turn into pandas Series\n x = np.linspace(start, end, size)\n y = dist.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n\n \n return pdf", "title": "" }, { "docid": "2979d91e16e33e12c439cfadc445d160", "score": "0.4286568", "text": "def sharded_init_partition_spec_fn(\n params, params_partition_spec, partition_spec_for_statistics\n ):\n # Parallel lists of spec, and params.\n param_pspec_flat, _ = jax.tree_flatten(\n params_partition_spec, is_leaf=lambda x: x is None\n )\n params_flat, treedef = jax.tree_flatten(params)\n assert param_pspec_flat\n assert params_flat\n # Step is replicated across cores.\n # None means cores.\n local_stats_flat = []\n num_statistics = 0\n for param, param_pspec in zip(params_flat, param_pspec_flat):\n param_clone = jnp.zeros(param.shape, dtype=param.dtype)\n preconditioner = preconditioner_from_params(param_clone)\n shapes = preconditioner.shapes_for_preconditioners()\n sizes = []\n\n index_start = num_statistics\n if not _skip_preconditioning(param):\n sizes = [s[0] for s in shapes]\n shapes = preconditioner.shapes_for_preconditioners()\n num_statistics += len(shapes)\n\n qdtype = quantized_dtype_for_momentum_buffers(param)\n m1_pspec = param_pspec\n m2_pspec = param_pspec\n m1_scale_pspec = []\n m2_scale_pspec = []\n if qdtype != jnp.float32:\n m1_scale_pspec = _remove_leading_sharding_annotation(m1_pspec)\n m2_scale_pspec = _remove_leading_sharding_annotation(m2_pspec)\n\n local_stats_flat.append(\n LocalShardedParameterStats(\n QuantizedValue(\n param_pspec, [], [], jnp.float32, False, list(param.shape)\n ),\n QuantizedValue(\n m1_pspec, [], m1_scale_pspec, qdtype, False, list(param.shape)\n ),\n QuantizedValue(\n m2_pspec, [], m2_scale_pspec, qdtype, False, list(param.shape)\n ),\n init_training_metrics_pspec(),\n index_start,\n sizes,\n )\n )\n\n local_stats = jax.tree_unflatten(treedef, local_stats_flat)\n global_stats = GlobalShardedParameterStats(\n partition_spec_for_statistics,\n partition_spec_for_statistics,\n pjit.PartitionSpec(),\n )\n count_pspec = pjit.PartitionSpec()\n return ShampooState(\n count=count_pspec, stats=ShardedShampooStats(global_stats, local_stats)\n )", "title": "" }, { "docid": "ff3ea34b3bf72232eef3291e40ea34b8", "score": "0.42855233", "text": "def clustering(init_rec_field1, fin_rec_field1, dst, mode='RGB', show=True): \n \n import numpy as np\n import matplotlib.pyplot as plt\n import scipy as scp\n \n from sklearn.cluster import AgglomerativeClustering\n from scipy.stats import wasserstein_distance\n from scipy.cluster.hierarchy import dendrogram, linkage\n \n if mode == 
'RGB':\n channels = 3\n else:\n channels = 4\n \n # first gen: generate clusters and dendograms with earth mover distance\n dist = np.zeros(shape=(16,16))\n for i in range(16):\n for j in range(16):\n dist[i,j] = wasserstein_distance(init_rec_field1[:,:,:channels,i].flatten(), \n init_rec_field1[:,:,:channels,j].flatten())\n \n cluster = AgglomerativeClustering(n_clusters=5, affinity='precomputed', linkage=\"average\")\n clusters = cluster.fit_predict(dist)\n \n # plot dendogram\n sq_dist = scp.spatial.distance.squareform(dist)\n linkage_matrix = linkage(sq_dist, \"average\")\n dendrogram(linkage_matrix)\n plt.title(\"[DENDOGRAM RECEPTIVE FIELDS, FIRST GENERATION]: earth mover distance, linkage 'avg'.\")\n \n if show == True:\n plt.show()\n else:\n pass \n # last gen: generate clusters and dendograms with earth mover distance\n dist = np.zeros(shape=(16,16))\n for i in range(16):\n for j in range(16):\n dist[i,j] = wasserstein_distance(fin_rec_field1[:,:,:channels,i].flatten(), \n fin_rec_field1[:,:,:channels,j].flatten())\n \n cluster = AgglomerativeClustering(n_clusters=5, affinity='precomputed', linkage=\"average\")\n clusters = cluster.fit_predict(dist)\n \n # plot dendogram\n sq_dist = scp.spatial.distance.squareform(dist)\n linkage_matrix = linkage(sq_dist, \"average\")\n dendrogram(linkage_matrix)\n plt.title(\"[DENDOGRAM RECEPTIVE FIELDS, LAST GENERATION]: earth mover distance, linkage 'avg'.\")\n \n if show == True:\n plt.show()\n else:\n pass", "title": "" }, { "docid": "acc718a2c538bd339cc24db7117588f8", "score": "0.42821962", "text": "def genClusters(dataSet,T,Z1=0):\n if Z1 >= len(dataSet) or Z1 < 0:\n Z1 = 0\n zs = [dataSet[Z1]]\n zsi = [Z1]\n for i in range(len(dataSet)):\n minDist = util.distEclud(dataSet[i],zs[0])\n #print(minDist)\n for z in zs:\n dist = util.distEclud(dataSet[i],z)\n if dist < minDist:\n minDist = dist\n if minDist > T:\n zs.append(dataSet[i])\n zsi.append(i)\n return zs,zsi", "title": "" } ]
ca841fb507bc6522b9eec458a6c55011
Normalizing Filter for scaling data in incoming array so the max value is equal to 1
[ { "docid": "ed9c720cbe03e2c99b02ea9557002bb4", "score": "0.6344063", "text": "def normalized(x):\n maxi = 0\n X = np.zeros(len(x))\n for i in range(0, len(x)):\n if (x[i] > maxi):\n maxi = float(x[i])\n for i in range(0,len(x)):\n X[i] = x[i]/maxi\n return X", "title": "" } ]
[ { "docid": "6e1d1cee28ebf937cf6fbee5e8fe60df", "score": "0.6841447", "text": "def normalize(data):\r\n array_min = data.min()\r\n array_max = data.max()\r\n\r\n return ((data - array_min) / (array_max - array_min))\r\n # return new_array\r", "title": "" }, { "docid": "6dec42334eaa302098651d3c7d9907f8", "score": "0.67459035", "text": "def channelwised_normalize(batch_image):\n for si in range(batch_image.shape[0]):\n for ci in range(batch_image.shape[1]):\n channel = batch_image[si, ci,:,:]\n mi = torch.min(channel)\n ma = torch.max(channel)\n batch_image[si, ci,:,:] = (channel - mi)/(ma - mi)\n return batch_image", "title": "" }, { "docid": "c09050503467f3a1ff2a99656a0a953b", "score": "0.66750187", "text": "def normalize(data):\r\n max_p = np.max(data[:, :, :, :])\r\n min_p = np.min(data[:, :, :, :])\r\n norm_data = (data - min_p)/(max_p - min_p)\r\n return norm_data", "title": "" }, { "docid": "1d86a908ab0f928aa589e526c0c439d1", "score": "0.66646963", "text": "def _denormalize(self, data):\n for column in data.columns:\n max_value = self._scaling_params[str(column)]['max']\n min_value = self._scaling_params[str(column)]['min']\n if max_value != min_value:\n a = (max_value - min_value) / (self.MAX - self.MIN)\n b = max_value - a * self.MAX\n data[column] = a * data[column] + b\n else:\n data[column] = 0\n return data", "title": "" }, { "docid": "2de003bc995d9603e91184cb1ee050b4", "score": "0.6661903", "text": "def normalize_(self):\n \n self.norm = (self.data - self.min) / (self.max - self.min)", "title": "" }, { "docid": "dafb4bb83065963fdf95422e134b2c04", "score": "0.66502404", "text": "def scaler(self,X,k):\n for i in range(len(X)):\n # X_std\n X_std = (X - X.min(axis=0))/(X.max(axis=0) - X.min(axis=0))\n # 0 -1 minmax \n X_scaled = X_std * (X.max(axis=0) - X.min(axis=0)) + X.min(axis=0)\n\n X2 = np.round(X_scaled * k)\n X_new = np.where(X2==0, 1, X2) \n return X_new", "title": "" }, { "docid": "0c8f96a5945a21ea271bcbbcecc3a0c1", "score": "0.66497123", "text": "def normalize_minmax(data:np.ndarray):\n return (data - np.min(data,axis=0))/(np.max(data,axis=0) - np.min(data,axis=0))", "title": "" }, { "docid": "c3e391292de6762f908b8251c5c10b4e", "score": "0.6648533", "text": "def normalize(img):\n return img / img.max()", "title": "" }, { "docid": "e4a3e079eb3afffb21d9bce20a9b93fb", "score": "0.6646755", "text": "def normalize_data(data):\n\n scaler = preprocessing.MinMaxScaler()\n scaler.fit(data)\n data = scaler.transform(data)\n return data", "title": "" }, { "docid": "cd9dcd8d4b2a00b732d3f201adb6edac", "score": "0.659969", "text": "def normalize(x):\n min_x = np.min(x, axis=0)\n \n max_x = np.max(x, axis=0)\n mindiff = x-min_x\n diff = max_x-min_x\n\n x[:, diff > 0] = mindiff[:, diff > 0]/diff[ diff > 0]\n return x", "title": "" }, { "docid": "473a726cd9e35dd3dd1ef7fb1d5d8e5e", "score": "0.6595892", "text": "def normalize_input(x):\n min_value = min(x)\n max_value = max(x)\n normalized = np.zeros(x.shape)\n for i in range(len(x)):\n normalized[i] = (float(float(x[i] - min_value) / float(max_value - min_value)))\n return normalized", "title": "" }, { "docid": "38c58f09d9c70a9d80db821471f752c5", "score": "0.6578339", "text": "def scale(data):\n mu = np.sum(data) / len(data)\n std = np.sqrt(np.sum(np.square(data - mu) / (len(data) - 1)))\n return (data - mu) / (2 * std)\n #return (data - min(data))/(max(data)-min(data))", "title": "" }, { "docid": "d3576d4e23bba98aa5216ef1b6418991", "score": "0.65715754", "text": "def normalize_img(image):\n max_val = np.amax(image)\n if max_val == 0:\n 
return image\n return image / max_val", "title": "" }, { "docid": "9d5b84eba97e716e36cd090ea424dadb", "score": "0.65640086", "text": "def renormalize(img, max_value, min_value):\n return (img - min_value) / (max_value - min_value)", "title": "" }, { "docid": "c8b2da8ec67bdcb8fbe907a852d2b91c", "score": "0.6562849", "text": "def normalize(x):\n # TODO: Implement Function\n return x/np.max(x)", "title": "" }, { "docid": "95e310cacecab64308627c55aa1fe503", "score": "0.6546714", "text": "def normalize(array):\n arr_min = np.min(array)\n arr_max = np.max(array)\n return (array - arr_min) / (arr_max - arr_min + K.epsilon())", "title": "" }, { "docid": "6b800541541f1ee458343bda4d71e9bd", "score": "0.6531619", "text": "def rescale_arr(arr):\n max_amp = np.max(arr)\n return arr / max_amp", "title": "" }, { "docid": "efd1711d17392ae4da4e0c9b509c8c86", "score": "0.6506317", "text": "def normalize(inputs):\n\n mean = np.mean(inputs)\n max_element = np.max(inputs)\n min_element = np.min(inputs)\n\n scaled_inputs = np.copy(inputs)\n\n for index in range(len(inputs)):\n scaled_inputs[index] = (inputs[index] - mean) / (max_element - min_element)\n\n return scaled_inputs", "title": "" }, { "docid": "dd6147492932c98b97d4b78aa332966c", "score": "0.6501437", "text": "def normalize(data):\n if data.shape[0] == 0:\n return data\n\n max_ = data.max()\n min_ = data.min()\n\n return (data - min_) / (max_ - min_) if max_ - min_ != 0 else data - min_", "title": "" }, { "docid": "a627071a3e43e5d051a141fd511a67de", "score": "0.6496745", "text": "def normalize(x):\n return x / x.sum(axis=0) # only difference", "title": "" }, { "docid": "a72bab6405c54abca421a714a7981774", "score": "0.6472494", "text": "def scaler(X):\n ans = np.zeros(X.shape)\n for i in range(X.shape[0]):\n for j in range(0,X.shape[1],2):\n\n MIN, MAX = X[i,j:j+2].min(), X[i,j:j+2].max()\n if MIN != MAX:\n ans[i][j] = (X[i][j] - MIN) / (MAX - MIN) *2.0 -1\n ans[i][j+1] = (X[i][j+1] - MIN)/(MAX - MIN) *2.0 - 1\n return np.asarray(ans)", "title": "" }, { "docid": "fe8e22fcd34c78be5e8ab552cfe341d6", "score": "0.64614636", "text": "def LocalMax(self, data, kwidth, scaling, out = None):", "title": "" }, { "docid": "a4ddd8cd7f99b5645f47cded601eb0eb", "score": "0.64562833", "text": "def normalize(image):\n\n image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)\n image[image>1] = 1.\n image[image<0] = 0.\n return image", "title": "" }, { "docid": "097aaa683562940db468c02aeabce2bc", "score": "0.645458", "text": "def normalize(X):\n return X / X.sum(axis=0)", "title": "" }, { "docid": "50374f460ba4d881d8fa8cfa318c71cd", "score": "0.6450449", "text": "def normalize(data, _min, _max):\n\n new_data = (data - _min) / (_max - _min)\n\n # check if feature is constant, will be nan in new_data\n np.place(new_data, np.isnan(new_data), 1)\n\n return new_data", "title": "" }, { "docid": "cfa34ad1443c33ec62c8842bda812c69", "score": "0.64460695", "text": "def normalize(img):\n\n return (img-np.min(img))/(np.max(img)-np.min(img))\n # return img / np.max(img)", "title": "" }, { "docid": "2261d22a12b8d459e4b946bfafc74bf4", "score": "0.64362127", "text": "def Normalize(data):\n m = np.mean(data)\n mx = max(data)\n mn = min(data)\n return (data - m) / (mx - mn)", "title": "" }, { "docid": "36391fe8843aad2e4697d49d0d1d6638", "score": "0.6429708", "text": "def normalize_arr_of_imgs(arr):\n return arr/127.5 - 1.\n # return (arr - np.mean(arr)) / np.std(arr)", "title": "" }, { "docid": "3cb1301bcb14c274957bf73b2a9e5d87", "score": "0.6428638", "text": "def normalization(data):\n 
_range = np.max(data) - np.min(data)\n return (data - np.min(data)) / _range", "title": "" }, { "docid": "dd4f35a30b0cf46e4e5ba9052cf5568c", "score": "0.6413419", "text": "def rescale_data(self, arr):\n minval = np.min(arr)\n maxval = np.max(arr)\n scaled_arr = np.subtract(arr, minval)\n scaled_arr = np.divide(scaled_arr, maxval-minval)\n return scaled_arr", "title": "" }, { "docid": "48e28a718f0cda8c5c4af2ab4fbf49fa", "score": "0.64052314", "text": "def normalize(img: np.array, mean: list=[0.485, 0.456, 0.406], std: list=[0.229, 0.224, 0.225], max_value: float=255) -> np.array: \r\n mean = np.array(mean, dtype=np.float32)\r\n mean *= max_value\r\n std = np.array(std, dtype=np.float32)\r\n std *= max_value\r\n\r\n img = img.astype(np.float32)\r\n img = img - mean \r\n img = img / std\r\n\r\n return img", "title": "" }, { "docid": "d4d579b8a24dc40ddc3db9a17b5d837a", "score": "0.6362239", "text": "def normalize(img, value_range_out, value_range_in, clip=False, dtype=None):\n new_value_min, new_value_max = value_range_out\n old_value_min, old_value_max = value_range_in\n\n # Cast to float array if needed:\n temp_img = np.array(img, dtype=float)\n if clip:\n temp_img[temp_img < old_value_min] = old_value_min\n temp_img[temp_img > old_value_max] = old_value_max\n\n # Need to catch the following case separately as it produces nan otherwise:\n if abs(old_value_max - old_value_min) < np.finfo(float).eps:\n scale_factor = 1.\n else:\n scale_factor = 1. * (new_value_max - new_value_min) / (old_value_max - old_value_min)\n if not dtype:\n dtype = float\n return np.array(scale_factor * (temp_img - old_value_max) + new_value_max, dtype=dtype)", "title": "" }, { "docid": "79f0ed97bda42d58e65aa0ef28956f01", "score": "0.63557446", "text": "def normalize(img):\n imin = np.min(img)\n imax = np.max(img)\n\n return (img - imin) / (imax - imin)", "title": "" }, { "docid": "fe2d683a45cff81c60ffb2dab2baaee9", "score": "0.6335322", "text": "def normalize(img, globalscale=False):\n if globalscale:\n maxval = np.amax(img)\n minval = np.amin(img)\n img = (img - minval) / (maxval - minval + 1E-16)\n\n\n else:\n img = [(img[..., i] - np.min(img[..., i])) / (np.ptp(img[..., i]) + 1E-16) for i in range(img.shape[-1])]\n img = np.rollaxis(np.float32(np.array(img)), 0, 4)\n return img", "title": "" }, { "docid": "68ac69cee26274680a9170a32cdab8d3", "score": "0.633379", "text": "def normalize(x):\n x = np.array(x) /255\n return x.reshape(x.shape[0],28,28,1)", "title": "" }, { "docid": "0f715fb775f30c308a4b93f6aeb66baf", "score": "0.6324391", "text": "def normalise(X):\n X = (X - np.max(X)) / (np.max(X) - np.min(X))\n X = X+1\n\n return X", "title": "" }, { "docid": "da552b6291e15596fdd48fe841f17245", "score": "0.6315851", "text": "def scaling(data):\n scaler = preprocessing.MinMaxScaler()\n # scales in the range of -1 to 1\n scaled_data = scaler.fit_transform(data, (-1, 1))\n return scaled_data", "title": "" }, { "docid": "ce6281ab0f2863ffb26b0c56287898a7", "score": "0.63083106", "text": "def normalize(x, maximum=1, axis=None, out=None):\n if axis is None and not np.isscalar(maximum):\n raise TypeError(\"If axis is not specified, maximum must be a scalar\")\n\n maximum = np.max(np.abs(x), axis=axis) / maximum\n if axis is not None:\n maximum = np.expand_dims(maximum, axis=axis)\n return np.true_divide(x, maximum, out)", "title": "" }, { "docid": "c06fd3a91dd4e7f0e6eeaa8dbe7d9590", "score": "0.63078886", "text": "def normalize(arr):\n arr_max = np.max(arr)\n c = 0\n for val in arr:\n c += exp(val - arr_max)\n c = log(c) + 
arr_max\n return c", "title": "" }, { "docid": "d9cd4250fa5b6c4cbd9ae7fdc460bfb6", "score": "0.6307624", "text": "def normalize(min, max, array):\r\n\r\n normalized = (array - min) / (max - min)\r\n\r\n return normalized", "title": "" }, { "docid": "0ec4d4e5d208b357b9afc03afc442415", "score": "0.6300195", "text": "def normalize(samples, min, max):\r\n # type: (np.ndarray, float, float) -> np.ndarray\r\n return (samples - min) / (max - min)", "title": "" }, { "docid": "61a8a3360f00c702d6f43c54d019d7b6", "score": "0.6294927", "text": "def filter_normalize_pert(A):\n epsilon = 1e-12\n\n A = A.reshape(A.shape[0], -1).T\n idx = np.isfinite(A).all(axis=0)\n A = A[:, idx]\n norms = la.norm(A, axis=0)\n\n idx = norms >= epsilon\n A[:, np.logical_not(idx)] = 0.0\n A[:, idx] /= norms[idx]\n\n return A", "title": "" }, { "docid": "95dad18ca0252f683415967d52fb3952", "score": "0.6289869", "text": "def normalization_test(data,train_limits): \r\n normal_data = data\r\n normalized_data = (normal_data - train_limits[0]) / (train_limits[1] - train_limits[0])\r\n normalized_data[normalized_data < 0] = 0\r\n normalized_data[normalized_data > 1] = 1\r\n return normalized_data", "title": "" }, { "docid": "7bd68e61375a37aace797f13dbe6422d", "score": "0.6279818", "text": "def standardize_filtered(tx):\n N = np.shape(tx)[1];\n for i in range(N): \n f = tx[:,i] == -999\n d = tx[:,i] != -999\n temp = tx[d, i]\n mean = np.mean(temp)\n std = np.std(temp)\n tx[d,i] = [(x - mean) / std for x in tx[d,i]] \n tx[f,i] = [0 for _ in tx[f,i]]\n\n return tx", "title": "" }, { "docid": "3bb1e51f099ccf5bc64783e9d9f63554", "score": "0.62784624", "text": "def _normalize(self,x):\n return np.array((x - np.min(x)) / (np.max(x) - np.min(x)))", "title": "" }, { "docid": "9a615cde12627cba4eeb66f14421e615", "score": "0.62771714", "text": "def normalize_data(img):\n assert isinstance(img, np.ndarray)\n \n normalized_image = (img - np.min(img))/(np.max(img) - np.min(img))\n \n return normalized_image", "title": "" }, { "docid": "389ff840b09c2d5ee9e278d641b050c2", "score": "0.62720877", "text": "def _mean_filter(n):\r\n return np.full((n, n), 1) / n", "title": "" }, { "docid": "7935ee6eea5228be8e1e5704372d24d0", "score": "0.62682354", "text": "def __call__(self, **params):\n logger.info(\"Executing Filter: Normalization\")\n # type*bounds check via Parameter\n _ = self.instance(**params)\n # sanitize arguments\n params = param.ParamOverrides(self, params)\n # type validation is done, now replacing max_worker with an actual integer\n self.max_workers = clamp_max_workers(params.max_workers)\n logger.debug(f\"max_worker={self.max_workers}\")\n\n # use median filter to remove outliers from flats and darks\n # NOTE: this will remove the random noises coming from the environment.\n self.flats = np.median(params.flats, axis=0)\n self.darks = np.median(params.darks, axis=0)\n # apply normalization\n _bg = self.flats - self.darks\n _bg[_bg <= 0] = 1e-6\n params.arrays = params.arrays - self.darks\n arrays_normalized = np.true_divide(params.arrays, _bg, dtype=np.float32)\n\n # return\n logger.info(\"FINISHED Executing Filter: Normalization\")\n\n return arrays_normalized", "title": "" }, { "docid": "31dc8e0505c01709f42976779e0c797b", "score": "0.6261641", "text": "def normalize_image(img):\n min_val = np.min(img)\n max_val = np.max(img)\n img -= min_val\n img /= (max_val - min_val)\n\n return img", "title": "" }, { "docid": "458d57f9a295d76ff9306169431e16ee", "score": "0.6247436", "text": "def normalize(self,array):\n minimum = min(array)\n maximum 
= max(array)\n normarray = np.zeros(len(array))\n for index in range(0,len(array)):\n normarray[index] = (array[index] - minimum)/(maximum - minimum)\n return(normarray)", "title": "" }, { "docid": "21cd5ad8fdfd7bd004921538ff74379e", "score": "0.62460524", "text": "def feature_normalize(self, x):\n\n # Initialize normalization params on training data set\n if self.mu is None or self.sigma is None:\n self.mu = np.mean(x, axis=0)\n self.sigma = np.std(x, axis=0)\n zero_mask = self.sigma == 0\n self.sigma += zero_mask # ensure no division by zero (if == 0, set = 1)\n\n return (x - self.mu) / self.sigma", "title": "" }, { "docid": "19629122f900c43c1faa655cd60b07e3", "score": "0.6244957", "text": "def normalize(img):\n max_val = img.max()\n min_val = img.min()\n\n if min_val == max_val:\n return img\n\n def transform(pixel):\n x1, y1 = (min_val, 0)\n x2, y2 = (max_val, 1)\n return (y2 - y1) / (x2 - x1) * (pixel - x1) + y1\n\n return iter_pixel(img, transform)", "title": "" }, { "docid": "1fcc82260e4566ae385007c230ecb9cb", "score": "0.6244335", "text": "def _normalize(self, data, train=True):\n normalized = data.copy()\n\n for column in data.columns:\n if train:\n max_value = data[column].max()\n min_value = data[column].min()\n self._scaling_params[str(column)] = {\n 'max': float(max_value),\n 'min': float(min_value)\n }\n else:\n max_value = self._scaling_params[str(column)]['max']\n min_value = self._scaling_params[str(column)]['min']\n if max_value != min_value:\n a = (self.MAX - self.MIN) / (max_value - min_value)\n b = self.MAX - a * max_value\n normalized[column] = a * data[column] + b\n else:\n normalized[column] = 0\n\n return normalized", "title": "" }, { "docid": "261ca0378fa7133f6bebac92e5190c64", "score": "0.62402546", "text": "def normalize_inputs(inputs, min_feature_values, max_feature_values):\n for i in range(len(min_feature_values)):\n if min_feature_values[i] == 0.0:\n min_feature_values[i] = 0.00000000001\n # duplicate min and max _feature_values for each row of a given item:\n min_reshaped = np.array([min_feature_values,]*inputs.shape[1])\n max_reshaped = np.array([max_feature_values,]*inputs.shape[1])\n\n # duplicate the duplicated min and max _feature_values for item in batch:\n min_reshaped = np.array([min_reshaped]*inputs.shape[0])\n max_reshaped = np.array([max_reshaped]*inputs.shape[0])\n\n # normalize x as x = x-min/(max-min)\n max_minus_min = max_reshaped - min_reshaped\n max_minus_min[max_minus_min == 0.0] = 0.00000000001\n\n normalized_inputs = (inputs - min_reshaped) / (max_minus_min)\n normalized_inputs[normalized_inputs > 1.0] = 1.0\n return normalized_inputs", "title": "" }, { "docid": "2865e6751a0c0b311d69c01a6418adbc", "score": "0.6228746", "text": "def normalize_0_1(img): \n\n img /= 255. 
\n# img = np.reshape(img, (y_size, x_size, num_channels))\n\n return img", "title": "" }, { "docid": "7b0d14c93f749b5a1de5403ffc10144a", "score": "0.6215181", "text": "def minmax_normalize(array):\n return (array - array.min()) / (array.max() - array.min())", "title": "" }, { "docid": "610ee0f134439e6a60fd69fb42c1acb2", "score": "0.6206816", "text": "def normalise_input(features):\n features = np.array(features,dtype=np.float32) \n return np.multiply(features, 1.0 / 255.0)", "title": "" }, { "docid": "496f46ba42a236a4597fc2e87b34d404", "score": "0.61995757", "text": "def normalisation(x):\n x /= 127.5\n x -= 1.\n\n x[..., 3] -= 103.939\n x[..., 2] -= 116.779\n x[..., 1] -= 123.68\n return x", "title": "" }, { "docid": "02a0b8f35c7f36ab69e8ddb9cf17cc3a", "score": "0.61925083", "text": "def scale(data):\n n = N.min(data, 0)\n m = N.max(data, 0)\n\n data -= n\n data /= (m-n)\n return data", "title": "" }, { "docid": "b65f1c32a3a71d448a046514f5fb4587", "score": "0.61895263", "text": "def normalize(self, raw_data, max_list, min_list):\n max_list, min_list = np.array(max_list), np.array(min_list)\n diffs = max_list - min_list\n for i in np.arange(raw_data.shape[1]):\n raw_data[:, i] = (raw_data[:, i] - min_list[i]) / diffs[i]\n # Checking the boundaries\n raw_data[raw_data > 1] = 0.99\n raw_data[raw_data < 0] = 0.00\n return raw_data", "title": "" }, { "docid": "0ccb9599612fa2f8516b7610c2fcdb69", "score": "0.6178755", "text": "def adaptive_filt(image):\r\n ig = gaussian_filter(image, sigma=10)\r\n mean_image = uniform_filter(image, size=10)\r\n istd = sqrt(uniform_filter((mean_image - image) ** 2, size=10))\r\n im = mean(istd)\r\n return (image - ig) / (istd + im) + mean(image)", "title": "" }, { "docid": "780533f392ba6f4c8c7af2965ced4344", "score": "0.6176102", "text": "def scale_normalize(image):\n min_val = np.min(image)\n max_val = np.max(image)\n val_range = max_val - min_val\n \n scaled = image / val_range\n if np.min(scaled) < 0:\n return scaled + np.abs(np.min(scaled))\n else:\n return scaled - np.abs(np.min(scaled))", "title": "" }, { "docid": "702c31fcd4100dec48d4cf7b782548e2", "score": "0.61749136", "text": "def normalize(data):\n\treturn np.float32(data/255.)", "title": "" }, { "docid": "1f1e41ecf3f8159856eb7334d0f7a52d", "score": "0.617358", "text": "def forward(self, x_orig):\n if x_orig.numel() == 0:\n return x_orig\n x = x_orig.detach()\n x = x.to(self.scale.dtype)\n #_scale = x.abs().mean() * 2 / (self.quant_max ** 0.5)\n _scale = x.abs().max() / self.quant_max\n self.scale.copy_(_scale)\n return x_orig", "title": "" }, { "docid": "f8795f5e91641fa6b2bb300161220556", "score": "0.61732244", "text": "def normalize(self, data):\n data_ = np.array(data)\n if np.std(data_) < self.epsilon:\n return np.zeros(len(data_))\n\n return (data_ - np.average(data_)) / np.std(data_)", "title": "" }, { "docid": "f5478f9f45020eecbfea7dc4e0e99fd9", "score": "0.6172167", "text": "def normalize_among_channels(x):\n mean_time = np.expand_dims(x.mean(axis=1),axis=1)\n std_time = np.expand_dims(x.std(axis=1), axis=1)\n return (x-mean_time)/std_time", "title": "" }, { "docid": "3d7f529e81837e618bc74317ebeaaf25", "score": "0.6160013", "text": "def min_max_normalize(data):\n data[:] = (data - data.min()) / (data.max() - data.min())\n return data", "title": "" }, { "docid": "beb2021eee9aa055ed46d4ff58e3669c", "score": "0.61586905", "text": "def _normalize_feature_values(self):\n self.column_max_values = np.max(self.x, axis=0)\n self.column_min_values = np.min(self.x, axis=0)\n _, column = 
np.shape(self.features)\n for j in range(column):\n self.x[:, j] = (self.x[:, j] - self.column_min_values[j]) / (\n self.column_max_values[j] - self.column_min_values[j])", "title": "" }, { "docid": "5a974d35619fe44fd029fd7fcd41c305", "score": "0.6146461", "text": "def normalization_minmax(arrayX_train_to_consider): \n \n # find min and max value for each body pair\n min_, max_ = finding_minmax(arrayX_train_to_consider)\n \n # add filler values to get the arrays to be of the right shape\n min_ = dup_cols(min_, 0, num_dups=149)\n max_ = dup_cols(max_, 0, num_dups=149)\n min_ = min_[..., np.newaxis]\n max_ = max_[..., np.newaxis]\n \n # apply the normalization formula\n videos_normalized_2 =[]\n for i in range(len(arrayX_train_to_consider)):\n formula_normalization = (arrayX_train_to_consider[i] - min_) / (max_ - min_) \n videos_normalized_2.append(formula_normalization)\n \n arr = np.array(videos_normalized_2) \n return arr", "title": "" }, { "docid": "1b3db06ab3117d14b044d27d46cd1f86", "score": "0.61456597", "text": "def standardize(x):\n x -= np.mean(x, axis=0)\n #print(x)\n x /= np.std(x, axis=0)\n\n return x", "title": "" }, { "docid": "094ca8ff8648f735b17e3df9a9b05840", "score": "0.6134808", "text": "def scale0to1(img):\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "title": "" }, { "docid": "f0c2c0158d38cd051c667882486f105d", "score": "0.6128994", "text": "def mnih_normalise_input(features):\n features = features.astype(np.float32)\n avg = features.mean()\n stddev = features.std()\n features = features - avg\n features = np.multiply(features, 1.0 / stddev)\n return np.multiply(features, 1.0 / features.max())", "title": "" }, { "docid": "6c57a41568ab3f535de122504ff3efc3", "score": "0.6116332", "text": "def normalize_image(x):\n\n # covert always to float32 to keep compatibility with opencv\n x = x.astype(np.float32)\n h, w, d = x.shape\n for i in range(d):\n \tx[..., i] -= MEAN[i]\n \tx[..., i] /= STD[i]\n \n return x", "title": "" }, { "docid": "63df7abf0670d84b3a3ac6b2e9f10754", "score": "0.61116886", "text": "def samele_wise_normalization(data):\n data.astype(np.float32)\n if np.max(data) == np.min(data):\n return np.ones_like(data, dtype=np.float32) * 1e-6\n else:\n return 1.0 * (data - np.min(data)) / (np.max(data) - np.min(data))", "title": "" }, { "docid": "63df7abf0670d84b3a3ac6b2e9f10754", "score": "0.61116886", "text": "def samele_wise_normalization(data):\n data.astype(np.float32)\n if np.max(data) == np.min(data):\n return np.ones_like(data, dtype=np.float32) * 1e-6\n else:\n return 1.0 * (data - np.min(data)) / (np.max(data) - np.min(data))", "title": "" }, { "docid": "7dfcee36a6b3f23b3075793ed5bd50a1", "score": "0.6096373", "text": "def minmax_normalize(data, min_=None, max_=None, axis=0):\n data_ = np.copy(data)\n if min_ is None: \n min_ = np.min(data_,axis=axis)\n min_ = np.expand_dims(min_,axis=0)\n \n if max_ is None:\n max_ = np.max(data_,axis=axis)\n max_ = np.expand_dims(max_,axis=0)\n \n min_[np.where(min_ == max_)] = 0\n\n return (data_ - min_) / (max_ - min_)", "title": "" }, { "docid": "783e6c0400373af8670837a7adc2d8ac", "score": "0.6094109", "text": "def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)", "title": "" }, { "docid": "33831811dc480907843a9e622706d77b", "score": "0.6093751", "text": "def normalization_zero2one_numpy(array):\n min_value, max_value = array.min(), array.max()\n new_array = (array - 
min_value) / (max_value - min_value)\n return new_array", "title": "" }, { "docid": "cab625983c3a1631215b184d58bf6f94", "score": "0.608145", "text": "def normalized(self):\r\n return self.__class__(array=self, mask=self.mask, normalize=True)", "title": "" }, { "docid": "546669dea2c2bb0da622dfae2af9a3fd", "score": "0.6074354", "text": "def normalise_rows(self):\n for row_index in range(self.data.shape[0]):\n self.data[row_index, :] /= np.nanmax(self.data[row_index, :])\n self.normalised = True", "title": "" }, { "docid": "887e4f2501504f1b66535c86cb2bffd7", "score": "0.6068105", "text": "def minmax_normalize(data: np.ndarray) -> np.ndarray:\n minval, maxval = np.min(data), np.max(data)\n if minval == maxval:\n norm_data = data * 0\n else:\n norm_data = (data - minval) / (maxval - minval)\n return norm_data", "title": "" }, { "docid": "9031a7091fd6ad983c1b58edca8fb642", "score": "0.6067915", "text": "def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x", "title": "" }, { "docid": "ff3ff11611d5b86fb569bb5f0f7b103c", "score": "0.60673004", "text": "def normalize(array):\n return array / np.sqrt(np.sum(array**2))", "title": "" }, { "docid": "8826b933206fc21a945cadcb72025f01", "score": "0.60549915", "text": "def normalize(x):\n return x/tf.sqrt(tf.reduce_sum(x**2, axis=-1, keep_dims=True)+1e-6)", "title": "" }, { "docid": "7896a0bf33ba1afdf8d4863ccca743c2", "score": "0.6049382", "text": "def normalize(image: np.array):\n mean = np.mean(image)\n std = image.std()\n if std==0:\n std = 1 \n new_image = (image - mean) / std\n\n return new_image", "title": "" }, { "docid": "1e1db04f19a4bfc6c48a9e76f7d5192e", "score": "0.604773", "text": "def unnormalise_range(array, normalised_range, unnormalised_range, constrain_range=True):\n print('array min/max: ', [np.min(array), np.max(array)])\n rescaled_array = (array - normalised_range[0]) / (normalised_range[1] - normalised_range[0]) # between 0 and 1\n print('rescaled_array min/max: ', [np.min(rescaled_array), np.max(rescaled_array)])\n if constrain_range:\n rescaled_array[rescaled_array < 0.0] = 0.0\n rescaled_array[rescaled_array > 1.0] = 1.0\n rescaled_array = unnormalised_range[0] + (rescaled_array * (unnormalised_range[1] - unnormalised_range[0]))\n return rescaled_array", "title": "" }, { "docid": "99d2ae39afb2ab3fdcbe0add7ec66815", "score": "0.6046139", "text": "def scale0to1(img):\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "title": "" }, { "docid": "dbf534a49d87f59458e1bfdc1e714ef0", "score": "0.604295", "text": "def rescale(data):\n data = data.map(lambda x: 0 if isnan(x) else x)\n mnvals = data.reduce(minimum)\n mxvals = data.reduce(maximum)\n if sum(mnvals < 0) == 0:\n data = data.map(lambda x: uint8(255 * (x - mnvals)/(mxvals - mnvals)))\n else:\n mxvals = maximum(abs(mxvals), abs(mnvals))\n data = data.map(lambda x: uint8(255 * ((x / (2 * mxvals)) + 0.5)))\n return data", "title": "" }, { "docid": "e1927fbc6045f5139c08422216896b50", "score": "0.6034584", "text": "def standardizeNormalize(data):\r\n data_mean = np.mean(data[:, :, :, :], axis=0, keepdims=True)\r\n data = (data - data_mean)\r\n max_p = np.max(data[:, :, :, :])\r\n min_p = np.min(data[:, :, :, :])\r\n stdnorm_data = ((data - min_p)/(max_p - min_p)) - 0.5\r\n return stdnorm_data", "title": "" }, { "docid": "0a8c79d8943bfd08e3d925ccf16bc5ab", "score": "0.60342735", 
"text": "def normalize(vals):\n min_val = np.min(vals)\n max_val = np.max(vals)\n return (vals - min_val) / (max_val - min_val)", "title": "" }, { "docid": "68e3e8de783092b26cd50382806afd0f", "score": "0.60312843", "text": "def normalize_greyscale(image_data):\n # ToDo: Implement Min-Max scaling for greyscale image data\n a = 0.1\n b = 0.9\n greyscale_min = 0\n greyscale_max = 255\n return a + ( ( (image_data - greyscale_min)*(b - a) )/( greyscale_max - greyscale_min ) )", "title": "" }, { "docid": "0908f72cda5bf07be19adafd625ec1d9", "score": "0.6028488", "text": "def softmax(x):\r\n\r\n return x / x.sum(axis=0) # only difference\r", "title": "" }, { "docid": "f3a63c14f2d9a5e816eff0b62d15f7d0", "score": "0.6026137", "text": "def normalize_features(array):\n array_normalized = (array-array.mean())/array.std()\n print \"array.mean() \", array.mean()\n return array_normalized", "title": "" }, { "docid": "8a8d7abfdf597cbe8a32c7b18de58677", "score": "0.60212505", "text": "def normalize(signal):\n return (signal - np.mean(signal)) / np.std(signal)", "title": "" }, { "docid": "4ea10d801a38cb6190c3a8af03bd87ae", "score": "0.6009582", "text": "def standardize(x: np.ndarray):\r\n return (x - x.mean(axis=0)) / np.std(x, axis=0)", "title": "" }, { "docid": "a2ddd31ecbffa44be4e4bcd7b704e3cb", "score": "0.60030174", "text": "def normalize_data():\n\n samples_p = sorted(samples_path.glob(f\"sample_params*.npy\"))\n samples_s = sorted(samples_path.glob(f\"sample_spectra*.npy\"))\n print(samples_p,samples_s)\n sample_params = np.load(samples_p[-1])\n sample_spectra = np.load(samples_s[-1])#[:,:105]\n print(\"min_param\",sample_params.min(axis=0))\n print(\"max_param\",sample_params.max(axis=0))\n print(\"Loaded samplesize of \",sample_params.shape,sample_spectra.shape,params_mask.shape,spec_delete)\n sample_params = sample_params[:,params_mask]\n print(\"min_param\",sample_params.min(axis=0))\n print(\"max_param\",sample_params.max(axis=0))\n sample_spectra = np.delete(sample_spectra,len(sample_spectra[0])+spec_delete,axis=1)\n print(sample_spectra[0,:10])\n print(\"Filtered samplesize of \",(sample_params.shape),(sample_spectra.shape))\n print(\"w_x\")\n mu_x,w_x = (np.mean(sample_params, 0),whitening_matrix(sample_params))\n print(\"max_param:\",np.max(np.dot(sample_params - mu_x, w_x)),np.min(np.dot(sample_params - mu_x, w_x)))\n w_dev_x, w_dev_y = stddev_matrix(sample_params), stddev_matrix(sample_spectra)\n print(\"max_param_dev:\",np.max(np.dot(sample_params - mu_x, w_dev_x)),np.min(np.dot(sample_params - mu_x, w_dev_x)))\n\n print(\"w_y\")\n mu_y,w_y = (np.mean(sample_spectra, 0),whitening_matrix(sample_spectra))#130 seconds\n re = np.dot(sample_spectra - mu_y, w_y)\n print(\"mu_x_y\", mu_x,mu_y)\n print(\"w_x_y\", w_x,w_y)\n print(\"w_dev_x_y\", w_dev_x, w_dev_y)\n\n\n print(re[:10])\n print(\"1:\",np.max(re,axis=1))\n print(\"1:\",np.argmax(re,axis=1))\n print(\"0:\",np.max(re,axis=0))\n print(\"0:\",np.argmax(re,axis=0))\n\n obs = np.dot(re[:,:], np.linalg.inv(w_y)) + mu_y\n print(np.max(obs),np.min(obs))\n print(obs[:2])\n print(sample_spectra[:2])\n\n print(\"w_dev_y\")\n w_dev_x, w_dev_y = stddev_matrix(sample_params), stddev_matrix(sample_spectra)\n\n #w_x = w_dev_x\n x_s = np.dot(sample_params - mu_x, w_dev_x)\n params = (np.dot(x_s, np.linalg.inv(w_x)) + mu_x)\n \n print(\"min_param\",x_s.min(axis=0))\n print(\"max_param\",x_s.max(axis=0))\n print(\"min_param\",params.min(axis=0))\n print(\"max_param\",params.max(axis=0))\n print(\"w_y123\")\n w_y1, w_y2, w_y3 = 
whitening_matrix(sample_spectra[:,:spec_length]), whitening_matrix(sample_spectra[:,spec_length:2*spec_length]), whitening_matrix(sample_spectra[:,2*spec_length:3*spec_length])\n w_dev_y4, w_y4 = stddev_matrix(sample_spectra[:,3*spec_length:]), whitening_matrix(sample_spectra[:,3*spec_length:])\n print(\"mu_w_x\", mu_x,w_x)\n print(\"mu_w_y\", mu_y,w_y)\n print(\"w_dev_x_y\", w_dev_x, w_dev_y)\n np.save(normed_path.joinpath(\"mu_x.npy\"),mu_x)\n np.save(normed_path.joinpath(\"w_x.npy\"),w_x)\n np.save(normed_path.joinpath(\"w_y1.npy\"),w_y1)\n np.save(normed_path.joinpath(\"w_y2.npy\"),w_y2)\n np.save(normed_path.joinpath(\"w_y3.npy\"),w_y3)\n np.save(normed_path.joinpath(\"w_y4.npy\"),w_y4)\n np.save(normed_path.joinpath(\"w_dev_y4.npy\"),w_dev_y4)\n np.save(normed_path.joinpath(\"mu_y.npy\"),mu_y)\n np.save(normed_path.joinpath(\"w_y.npy\"),w_y)\n np.save(normed_path.joinpath(\"w_dev_x.npy\"),w_dev_x)\n np.save(normed_path.joinpath(\"w_dev_y.npy\"),w_dev_y)", "title": "" }, { "docid": "523013e29832f1f3cae806ebd747718f", "score": "0.6000739", "text": "def normalise(x):\n mean = np.mean(x, axis=0)\n variance = np.var(x, axis=0)\n x = ((x - mean)/ (variance ** 0.5))\n return x", "title": "" }, { "docid": "c8afc2470e658da8fc0766cb9a60bd9c", "score": "0.60000086", "text": "def preprocess_minmax(img: np.array) -> np.array:\n im_min = np.percentile(img, 2)\n im_max = np.percentile(img, 98)\n im_range = im_max - im_min\n # print(f'percentile 2 {im_min}, percentile 98 {im_max}, im_range {im_range}')\n\n # Normalise to the percentile\n img = img.astype(np.float32)\n img = (img - im_min) / im_range\n img = img.clip(0, 1)\n\n return img", "title": "" }, { "docid": "1fa537ef32bbbf8eca55c06eab974f08", "score": "0.5996008", "text": "def normalize_greyscale(image_data):\n a = 0.1\n b = 0.9\n greyscale_min = 0\n greyscale_max = 255\n return a + ( ( (image_data - greyscale_min)*(b - a) )/( greyscale_max - greyscale_min ) )", "title": "" }, { "docid": "291811d2ac45eeb0c7fdd59bb9feb1b9", "score": "0.59910536", "text": "def normalize_data(data_in):\n data_init = np.array(data_in)\n normalized = np.zeros_like(data_init)\n normalized[:, 1:, :] = data_init[:, 1:, :] / data_init[:, 0:1, :] - 1\n\n unnormalized = data_init[2400:int(normalized.shape[0] + 1), 0:1, 20]\n\n return normalized, unnormalized", "title": "" } ]
b60ca2ec9e7d03edaa107d4c760a27f7
ID that can be used to find errors in the log files.
[ { "docid": "c7aef613a851f171aa2b4e8b4ee17ba2", "score": "0.5365587", "text": "def error_code(self) -> str:\n return pulumi.get(self, \"error_code\")", "title": "" } ]
[ { "docid": "d5c865f049bd4181090bf5764b5dba62", "score": "0.6419121", "text": "def get_id(self):\n import re\n last_line = self.list_of_logs[-1]\n if re.search(\"error\", last_line, re.IGNORECASE):\n return None\n elif re.search(\"successfully built\", last_line, re.IGNORECASE):\n try:\n return re.findall('[0-9a-f]{12}', last_line)[-1]\n except IndexError:\n raise Exception(\"Docker image ID could not be found but build \"\n \"error was not found.\")", "title": "" }, { "docid": "a5c8c00e109f977f73d618c391e97ee3", "score": "0.6030724", "text": "def error_name(self):\n return self.ERROR_CODES.get(self.code)", "title": "" }, { "docid": "f6103eb234d2784c8beb0118fd0b2efe", "score": "0.59261906", "text": "def error_logging(self, name):\n self.error += 1\n with open(generate_path(\"outputs/errors.txt\"), \"a\") as file_:\n file_.write(\n '%s \\n\\n %s \\n '\n '================================ \\n\\n'\n % (name, traceback.format_exc())\n )\n\n print \"Error %d occurred.\" % self.error", "title": "" }, { "docid": "2572e6520bdfeff19fa42d5057f185c2", "score": "0.5808633", "text": "def error_finder(self, target_file):\n target_handle = open(target_file, 'r')\n verified_ids = set()\n\n for lines in target_handle:\n if lines.startswith(\"VERSION\"):\n m = locusre.match(lines)\n ID = m.group(1)\n print (ID)\n verified_ids.add(ID)\n\n target_handle.close()\n return verified_ids", "title": "" }, { "docid": "7b40206436883b50f9585475e29f013a", "score": "0.5777111", "text": "def parse_logfiles(path2log, string, filenames = 'err.*'):\n logfiles = glob(join(path2log,filenames))\n logfiles = sorted(logfiles, key = lambda f: int(basename(f).split('.')[-1]))\n ids = array([int(basename(f).split('.')[-1]) for f in logfiles])\n logging.info(\"Found {0:n} log files\".format(len(ids)))\n # list of indices where string was found\n idx = []\n # loop through files\n for i,lf in enumerate(logfiles):\n\tfor line in open(lf):\n\t if string in line: idx.append(i)\n logging.info(\"Found {0:n} log files that contain {1:s} which will be removed from missing files\".format(len(idx),string))\n return ids[idx]", "title": "" }, { "docid": "3091601806cc29cb59611554d307c6db", "score": "0.5763872", "text": "def does_errfile_exist(task_id):\n name = CFG_LOGDIR + \"/bibsched_task_\" + str(task_id) + \".err\"\n if os.path.exists(name):\n return name\n else:\n return None", "title": "" }, { "docid": "08485aaf6e080334738437166099b284", "score": "0.5756114", "text": "def get_errors(self, job_id):", "title": "" }, { "docid": "c6caf60d3d2030218afd61149f031932", "score": "0.57547843", "text": "def error(self):\n return self.directory / ERROR", "title": "" }, { "docid": "6efc372d3bda314cc896e5aebcdc74d1", "score": "0.5737534", "text": "def id(self):\n run_id = self.run_id\n if not run_id:\n run_id = os.path.basename(os.path.dirname(self.dirn))\n return \"%s:%s\" % (run_id,self.name)", "title": "" }, { "docid": "cc9c36f762f1c7f99252ee73da4334c9", "score": "0.572397", "text": "def unique_id(self):\n return _cdma_swig.pac_err_cal_sptr_unique_id(self)", "title": "" }, { "docid": "4d294b0bcfb14ef19f4389fe98cec3ab", "score": "0.5716521", "text": "def get_error_logfile(self):\n from tempfile import gettempdir\n return gettempdir() + \"/rayonix_detector_error.log\"", "title": "" }, { "docid": "4355aa8d48df4e84ffa799cc8d64a276", "score": "0.57106245", "text": "def __get_filename():\n ChunkLogger.__file_count += 1\n\n return os.path.join(C.HOME, 'var/lib/logger', '{0:010d}.log'.format(ChunkLogger.__file_count))", "title": "" }, { "docid": 
"634c1dafaafdee5004ea7ec7015bcc7e", "score": "0.56857806", "text": "def errorMessage(self) -> str:\n\t\treturn f'{self.error.error.name} error in {self.filename}:{self.error.line} - {self.error.message}'", "title": "" }, { "docid": "b532db5db33f6dc53afcf3c8bd7d6ce2", "score": "0.56714135", "text": "def error(self,message):\n if not Logger.__instance :\n Logger.__instance = Logger.FileLogger()\n Logger.__instance.log.error(message)", "title": "" }, { "docid": "8f44ab1506ffa098e55f322328c39736", "score": "0.56505", "text": "def error_log(self):\n return self._error_log", "title": "" }, { "docid": "bdd403abf8c34725b9324d0c9ce0528c", "score": "0.5620692", "text": "def get_error(self) -> FileErrorStr:\n return FILE_ERROR.inverse[self.error()]", "title": "" }, { "docid": "89bfa3eabc9b1fc548f525263a9cced3", "score": "0.55788606", "text": "def exceptions_log_path(cls, for_pid=None, in_dir=None):\n if for_pid is None:\n intermediate_filename_component = ''\n else:\n assert(isinstance(for_pid, IntegerForPid))\n intermediate_filename_component = '.{}'.format(for_pid)\n in_dir = in_dir or cls._log_dir\n return os.path.join(\n in_dir,\n 'logs',\n 'exceptions{}.log'.format(intermediate_filename_component))", "title": "" }, { "docid": "14e124df4eba811cac71b097d6d068af", "score": "0.55747056", "text": "def flow_log_id(self) -> str:\n return pulumi.get(self, \"flow_log_id\")", "title": "" }, { "docid": "4ff43ae118e61a002b15e5ebaf2a2ee9", "score": "0.55693185", "text": "def unique_id(self) -> str:\n #_LOGGER.error(self._uid)\n return self._uid", "title": "" }, { "docid": "9b85b095fe69ac0cf90ab7fc5fa4e09d", "score": "0.55591583", "text": "def ERROR(self): # pylint: disable=C0103", "title": "" }, { "docid": "2e0be188de0623023d7c9ed8fb67e36c", "score": "0.5541449", "text": "def get_run_id(self) -> str:", "title": "" }, { "docid": "9569e299594aa03801e11392f5a63732", "score": "0.5540247", "text": "def create_sb_error_index(error_name):\n return int(hashlib.sha1(error_name).hexdigest()[:8], 16)", "title": "" }, { "docid": "31200a32aab304b3fea6969333204735", "score": "0.55324274", "text": "def GetError(self):", "title": "" }, { "docid": "6a775447a19bfd1e8dda6feaa65006d3", "score": "0.5512932", "text": "def aggregation_key(self) -> str:\n stacktrace_head = self.error.stacktrace[:]\n if len(self.error.stacktrace) > MAX_TRACES:\n stacktrace_head = stacktrace_head[:MAX_TRACES]\n error_hash = self._hash_exception(\"\".join(stacktrace_head))\n return f\"{self.error.cls}@{error_hash}\"", "title": "" }, { "docid": "efeeba0e16adb01159a39a7f62a41c5c", "score": "0.5480327", "text": "def _get_err_prefix(fobj, method):\n return f\"method '{method}()' failed for file '{fobj._stream_name_}'\"", "title": "" }, { "docid": "b32d813c615d7908a88ede80bf1c8e77", "score": "0.54744905", "text": "def _get_error_at(self, pos, filename):\n if fn2k(filename) in self.errors:\n for (start, end), error_msg in self.errors[fn2k(filename)].items():\n if pos >= start and pos <= end:\n return error_msg\n\n return None", "title": "" }, { "docid": "3b1fd0fb1003159c24b93c3b2cab5d46", "score": "0.54590344", "text": "def get_err_prefix(fobj, method):\n return f\"method '{method}()' failed for file '{fobj._orig_fpath_}'\"", "title": "" }, { "docid": "316c294ea1a2869c0bcc9946ba23a132", "score": "0.545448", "text": "def _qualifiedFileName(self):\n return os.path.join(CHECK_RESULTS_DIR, self.logfile)", "title": "" }, { "docid": "431904d214d11e82366ff1dfa7ee4743", "score": "0.544798", "text": "def diagnostic(failure):\n _, _, tb = sys.exc_info()\n info = 
traceback.extract_tb(tb)\n filename, line, _, _ = info[-1]\n where = filename if len(filename) < 18 else '..' + filename[-18:]\n why = ' (%s)' % failure if failure else \"\"\n return '%s (%d) -> %s%s' % (where, line, type(failure).__name__, why)", "title": "" }, { "docid": "d06e903e2de2ef8b7074e543a49bb003", "score": "0.5414331", "text": "def _extract_id(self):\n regex = re.compile(r'\\/(.*?)\\/',re.IGNORECASE)\n id = regex.search(self.response['issue'][0]['diagnostics']).group(1)\n return id", "title": "" }, { "docid": "42de080abddc25be9e225288f899a991", "score": "0.5405159", "text": "def error(self, line_number, offset, text, check):\r\n code = text[:4]\r\n if self._ignore_code(code):\r\n return\r\n if code in self.counters:\r\n self.counters[code] += 1\r\n else:\r\n self.counters[code] = 1\r\n self.messages[code] = text[5:]\r\n # Don't care about expected errors or warnings\r\n if code in self.expected:\r\n return\r\n if self.print_filename and not self.file_errors:\r\n print((self.filename))\r\n self.file_errors += 1\r\n self.total_errors += 1\r\n return code", "title": "" }, { "docid": "20b25a115ebaf94b0f035f07746a97a2", "score": "0.53783864", "text": "def id(self) -> str:\n # Generates a unique Id based on the name of the file\n return generate_uuid_based_on_str(self.unix_relative_path)", "title": "" }, { "docid": "53f40e38770eaa2cd79652f37e6e4736", "score": "0.5372783", "text": "def error( self , msg ):\n self.errLog.prnt( \"ERROR:\" , msg )", "title": "" }, { "docid": "7c034262a4072ccbf93daac9b85122a3", "score": "0.5366218", "text": "def get_current_errors(self):\n pass", "title": "" }, { "docid": "b03d4865be86f5f56d48d221ccffd18d", "score": "0.5356097", "text": "def __str__(self):\n filename = \"<unknown>\" if not hasattr(self.file_obj, \"filename\") else self.file_obj.filename\n\n return \"Parse error in %s:%d: %s\" % (\n filename,\n self.line_number,\n self.message\n )", "title": "" }, { "docid": "b09545c4846887227c029d70e16f1386", "score": "0.5346217", "text": "def get_log_file_name(self):\n return self.log_file_name", "title": "" }, { "docid": "06ab66628a75bb9428e2913b2adca82a", "score": "0.5342414", "text": "def parse_error_file(self, path_i):\n # | - parse_error_file\n tmp = 42\n print(tmp)\n # __|", "title": "" }, { "docid": "e15df582260f6fbf01fae13f7510e147", "score": "0.5338092", "text": "def getErrorMsg(self):", "title": "" }, { "docid": "f1d29f775386069623f3846666ba0abd", "score": "0.53279865", "text": "def errorLog(self, txt):\r\n self._log.error(txt)", "title": "" }, { "docid": "6bc9f25ae5bec0955528f5e3cf9116f6", "score": "0.5327017", "text": "def get_ident():\n return -1", "title": "" }, { "docid": "01c88ceeb3553e136b0c195976d366f5", "score": "0.53239876", "text": "def error(self, e, *args, **kwargs):\n error_class = e.__class__.__name__ #取得錯誤類型\n detail = e.args[0] #取得詳細內容\n cl, exc, tb = sys.exc_info() #取得Call Stack\n errMsg = ''\n for lastCallStack in traceback.extract_tb(tb):\n # lastCallStack = traceback.extract_tb(tb)[-1] #取得Call Stack的最後一筆資料\n fileName = lastCallStack[0] #取得發生的檔案名稱\n lineNum = lastCallStack[1] #取得發生的行號\n funcName = lastCallStack[2] #取得發生的函數名稱\n errMsg += \"File \\\"{}\\\", line {}, in {}: [{}] {}\\n\".format(fileName, lineNum, funcName, error_class, detail)\n # print(errMsg)\n print('[ERROR] %s' % errMsg)\n self.logger.error(errMsg, *args, **kwargs)", "title": "" }, { "docid": "183bc1ebdf3fd3d49d6747be9036136d", "score": "0.5321294", "text": "def _get_latest_run_id(self):\n max_run_id = 0\n for path in 
glob.glob(\"{}/{}_[0-9]*\".format(self.tensorboard_log_path, self.tb_log_name)):\n file_name = path.split(os.sep)[-1]\n ext = file_name.split(\"_\")[-1]\n if self.tb_log_name == \"_\".join(file_name.split(\"_\")[:-1]) and ext.isdigit() and int(ext) > max_run_id:\n max_run_id = int(ext)\n return max_run_id", "title": "" }, { "docid": "b4e53debe18be71a2766cfb463ccfb57", "score": "0.5319104", "text": "def getDetectorID(self):\n if self.verbose:\n simpleLog(\"%s:%s() started...\" % (self.name, self.pfuncname()))\n return self.name", "title": "" }, { "docid": "0f716bf2a14a5112323e322039afbbb3", "score": "0.5316269", "text": "def log_errors(validation_result):\n total_file_count = len(validation_result[\"path_listing\"])\n validated_files_count = total_file_count - len(validation_result[\"path_tracking\"])\n if validated_files_count == 0:\n lgr.error(\"No valid BIDS files were found.\")\n for entry in validation_result[\"schema_tracking\"]:\n if entry[\"mandatory\"]:\n lgr.error(\n \"The `%s` regex pattern file required by BIDS was not found.\",\n entry[\"regex\"],\n )\n for i in validation_result[\"path_tracking\"]:\n lgr.warning(\"The `%s` file was not matched by any regex schema entry.\", i)", "title": "" }, { "docid": "7b367d38d77cc60ad9fda09807ed79bf", "score": "0.5315563", "text": "def __str__(self) :\n return '%s at line number %d ' % (repr(self.err), self.lineno)", "title": "" }, { "docid": "0f5548061431bc32a8bb5e4ec52f3dfa", "score": "0.53099066", "text": "def error(self, msg):\n func = inspect.currentframe().f_back.f_code\n\n if self.__date__ != datetime.today().date():\n self.__configure_file_handler__()\n\n return self.__logger__.error(\"ERRR: %s on %s in %s: %i\" % (\n str(msg),\n func.co_name,\n func.co_filename,\n func.co_firstlineno\n ))", "title": "" }, { "docid": "2ca9f488b56fe9feb35958697094efa6", "score": "0.5309032", "text": "def log_error(error_log):\n global errors\n logging.error(error_log)\n errors = errors + 1", "title": "" }, { "docid": "aa38dc57df98935fc6327de5b462b97b", "score": "0.53077465", "text": "def logParserError(self, msg):", "title": "" }, { "docid": "e4cd301d766797c957ee80b5169d8c9a", "score": "0.53056246", "text": "def error(msg):\n\t_l.error(msg)", "title": "" }, { "docid": "0ac66e3fa65ffeec4648ca9d05ddd533", "score": "0.52646744", "text": "def error_pos(\n self\n ) -> int:\n return self._error_pos", "title": "" }, { "docid": "0e448f257727656153ee196dea92857d", "score": "0.526286", "text": "def errors(self):\n return self.__error_logs", "title": "" }, { "docid": "1e44b35b156417c0aced354d23a6538e", "score": "0.5260769", "text": "def error(self, msg):\n self.log(ERROR, msg)", "title": "" }, { "docid": "ca3941359fd1c8784c9d2fd7908abecb", "score": "0.5250113", "text": "def getLogLocation(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "0e01b3310e3b555a84380f4b42c385e0", "score": "0.52499205", "text": "def datalogger_error(self):\n\t\tcode = self.datalogger_status()[0]\n\n\t\tif code in [DL_STATE_NONE, DL_STATE_RUNNING, DL_STATE_WAITING, DL_STATE_STOPPED]:\n\t\t\treturn None\n\t\telif code == DL_STATE_INVAL:\n\t\t\treturn \"Invalid Parameters for Datalogger Operation\"\n\t\telif code == DL_STATE_FSFULL:\n\t\t\treturn \"Target Filesystem Full\"\n\t\telif code == DL_STATE_OVERFLOW:\n\t\t\treturn \"Session overflowed, sample rate too fast.\"\n\t\telif code == DL_STATE_BUSY:\n\t\t\treturn \"Tried to start a logging session while one was already running.\"", "title": "" }, { "docid": "7c97ed5c1464fd202f9fdc6ddffac6ef", "score": "0.524776", "text": "def 
get_error_path():\n return home_path + error_path", "title": "" }, { "docid": "95dd9060afee02e209d50b8f26c25bc2", "score": "0.5234664", "text": "def getErrorType(self,jobId):\n if self.getFileStatusLabelByJobId(jobId) == \"header_error\":\n # Header errors occurred, return that\n return \"header_errors\"\n elif self.interfaces.jobDb.getJobById(jobId).number_of_errors > 0:\n # Row errors occurred\n return \"row_errors\"\n else:\n # No errors occurred during validation\n return \"none\"", "title": "" }, { "docid": "935cc0a05b582f49fbbe3becc1be8457", "score": "0.52343273", "text": "def i_error(self):\n return self._i_error", "title": "" }, { "docid": "4b130b593635d89fc14442d6a107ece2", "score": "0.5233843", "text": "def get_review_id(log_file):\n\n # Initialize variables\n review_id = 0\n\n # Import the log data\n with open(log_file, 'r') as input_fh:\n review_id_data = input_fh.readlines()\n\n # Find the review ID\n for line in review_id_data:\n if 'Successfully created review' in line:\n review_id = re.split(' ', line.strip())[-1].replace('.', '')\n\n return review_id", "title": "" }, { "docid": "0f8d198a862b802996dc71eec6c5a1be", "score": "0.522742", "text": "def error_msg(self):\n return self._error", "title": "" }, { "docid": "0f8d198a862b802996dc71eec6c5a1be", "score": "0.522742", "text": "def error_msg(self):\n return self._error", "title": "" }, { "docid": "38eced6980aaa7d0e36e0fcf983757be", "score": "0.52209985", "text": "def getTypeId(self,typeName):\n return self.getIdFromDict(\n ErrorType, \"TYPE_DICT\", \"name\", typeName, \"error_type_id\")", "title": "" }, { "docid": "7ce7fea8c11535e840e69e9617d08e99", "score": "0.5211922", "text": "def __parse_error_str(self, err_str):\n pattern = r'(.*):(\\d+):(\\d+):\\s+error:(.*)$'\n m = re.match(pattern, err_str)\n return '\\n\\nIn file : {0}\\nOn line : {1}\\nerror :{2}'.format(\n m.group(1), m.group(3), m.group(4))\n # return '\\n\\nIn file : {0}\\nOn line : {1}\\nOn column : {2}\\nerror\n # :{3}'.format(m.group(1), m.group(2), m.group(3), m.group(4))", "title": "" }, { "docid": "836052ae6dbb9b6832b0ef0bbb563816", "score": "0.5207436", "text": "def last_error(self):\n ...", "title": "" }, { "docid": "20caa98a66f57b71c1234daaa964d602", "score": "0.52028924", "text": "def get_error_line_number(output, reg_string=DEFAULT_ERROR_REG_STRING):\n return int(re.search(reg_string, output).groups()[0])", "title": "" }, { "docid": "738a77e102efd26c50ab57bd43e4ffa1", "score": "0.51991636", "text": "def show_created_file_errors(errors: List[files_creator.Error]) -> None:\n if not errors:\n return\n\n print(\"\\nErrors:\")\n for error in errors:\n print(f\" - {colorama.Fore.RED}{error.error}{colorama.Fore.RESET}: {error.path.parent}/{colorama.Fore.YELLOW}{error.path}{colorama.Fore.RESET}\")", "title": "" }, { "docid": "e3fc6f6ba0c6795bfa1716f8add89096", "score": "0.5197649", "text": "def log_error(self, message):\n ts = int(time.time())\n print(\"! ERROR %i ... 
%s\" % (ts, message))", "title": "" }, { "docid": "62da037a51539fbb42038610ed3ace00", "score": "0.5195633", "text": "def get_log_file_path(log_id: 'DaemonID') -> Tuple[str, 'DaemonID']:\n from .models.enums import IDLiterals\n from .stores import get_store_from_id\n\n if IDLiterals.JWORKSPACE == log_id.jtype:\n workspace_id = log_id\n filepath = get_workspace_path(log_id, 'logs', 'logging.log')\n else:\n workspace_id = get_store_from_id(log_id)[log_id].workspace_id\n filepath = get_workspace_path(workspace_id, 'logs', log_id, 'logging.log')\n return filepath, workspace_id", "title": "" }, { "docid": "975c0508290bdd86904f614ebf3af99d", "score": "0.51907945", "text": "def createUniqueIdentifier(self):\n # This naming scheme follows the conventions used for creating\n # RUNID names. We've found this allows these files to be more\n # easily located and shared with other users when debugging\n # The tempfile.mkstemp method restricts the file to only the user,\n # and does not guarantee a file name can that easily be identified.\n now = datetime.now()\n username = pwd.getpwuid(os.geteuid()).pw_name\n ident = \"%s_%02d_%02d%02d_%02d%02d%02d\" % (\n username,\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second,\n )\n return ident", "title": "" }, { "docid": "80bd601b0b38685a3ed973418a3823df", "score": "0.51892185", "text": "def lineno():\n return str(' - ValidateUtility - line number: '+str(inspect.currentframe().f_back.f_lineno))", "title": "" }, { "docid": "70158dc33adbd8899e62a21ffd26cd01", "score": "0.5187974", "text": "def cause_id(self):\n return self._cause_id", "title": "" }, { "docid": "d1f4d05234b7d24e1aa2c918b18cad79", "score": "0.5186146", "text": "def lineno():\n return str(' - IamUserParser- line number: '+str(inspect.currentframe().f_back.f_lineno))", "title": "" }, { "docid": "382ac3fb442be0d1615d7e72953c38b1", "score": "0.5184507", "text": "def error(self, msg):", "title": "" }, { "docid": "dd6f6c0f6612e5349c89a87d8ecbf130", "score": "0.51844215", "text": "def _print_ids_with_topology_error(self, message, records):\r\n self.logger.info(\"==========================================\")\r\n self.logger.info(message + \": \")\r\n self.logger.info(records)\r\n self.logger.info(\"==========================================\")", "title": "" }, { "docid": "e43020c2a0c589f7c03c1c6223718c3c", "score": "0.51777756", "text": "def _get_lsn(err_log_path):\n with open(err_log_path) as error_log:\n for line in error_log:\n pattern = \"xtrabackup: The latest check point (for incremental):\"\n if line.startswith(pattern):\n lsn = line.split()[7].strip(\"'\")\n return int(lsn)\n elif \"The latest check point (for incremental):\" in line:\n idx = 10 if \"mariabackup\" in line else 11\n return int(line.split()[idx].strip(\"'\"))\n raise MySQLSourceError(\"Could not find LSN in XtraBackup error output %s\" % err_log_path)", "title": "" }, { "docid": "547e9ff7b7ef7640823b2553ef7009ab", "score": "0.5173717", "text": "def logError(err):\n log.failure(None, err)\n return err", "title": "" }, { "docid": "676bea9d1f39acce8ebf8917cc8db626", "score": "0.5170566", "text": "def run_id(self):\n return self._run_id", "title": "" }, { "docid": "d94c39ae5a7cebaf3c4d4635c7b21106", "score": "0.5167959", "text": "def error(self, message):\n return Error(\"{0}({1}): {2}\\n{3}\"\n .format(self.filename, self.lineno, message, self))", "title": "" }, { "docid": "4f4122f86b31b9c9a7be14076ee4c184", "score": "0.5162581", "text": "def get_logfile_name(self):\n if 
self.env.config.get('logging','log_type').lower()!='file': return None\n name = self.env.config.get('logging','log_file')\n fpath, fname = os.path.split(name)\n if not fpath: name = os.path.join(self.env.path,'log',name)\n if not os.path.exists(name): raise IOError\n self.env.log.debug('Logfile name: %s' % (name,))\n return name", "title": "" }, { "docid": "fccdc82b17c9a56c5d6d62acbd0c8bae", "score": "0.51546794", "text": "def GetRunNo(self):\n return self.jidfilename.split('_')[3]", "title": "" }, { "docid": "70357985c14cdb936e1425cb198398de", "score": "0.5148681", "text": "def log_error(self, resource, msg):\n self.logger.error(\"[%s] %s - %s\" % (resource['id'], resource['name'], msg))", "title": "" }, { "docid": "1c4aff85ed0c257ec5b7290a57528b6f", "score": "0.51479584", "text": "def log_error(e):\r\n print(e)", "title": "" }, { "docid": "a456220af556b698bddbff36cb6933c1", "score": "0.5146256", "text": "def id(self) :\n\t\ttry :\n\t\t\treturn self._id\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "e8b9e47837c9a966d60239255dd67436", "score": "0.5143519", "text": "def msg_id(self):", "title": "" }, { "docid": "f6f616b05cd10ea285b6f8ed6ed0d8cf", "score": "0.5136492", "text": "def stderr(self):\n return self.details[RTFOLEObjectAnalysis.KEY_STDERR]", "title": "" }, { "docid": "f939b680332d9674d73e01667fe9f4d0", "score": "0.5135546", "text": "def itkDynamicLoader_LastError() -> \"char const *\":\n return _ITKCommonBasePython.itkDynamicLoader_LastError()", "title": "" }, { "docid": "2f4cdecaf9a3c6c29d72ca03710b6c6f", "score": "0.5134755", "text": "def log_file(self):\n return self.root + '/' + os.path.basename(self.root) + '.log'", "title": "" }, { "docid": "465302dde52e38bac98419ffe3f91b72", "score": "0.5132", "text": "def get_id(properties):\n return hash(properties['log'])", "title": "" }, { "docid": "791152b438445d28561911133ec5ada3", "score": "0.51299137", "text": "def log_file(self) -> str:\n return self._log_file", "title": "" }, { "docid": "d9759b026fcc7d031f558d4a53460d51", "score": "0.51273876", "text": "def _get_run_id(\n run: mlflow.entities.Run,\n ) -> str:\n return run.info.run_uuid", "title": "" }, { "docid": "c95ac52503655dedc2f31d90fba61dd9", "score": "0.5126699", "text": "def log_error(self, e):\n print(e)", "title": "" }, { "docid": "9722a19aaeda219d2d1d1107f3f53840", "score": "0.5125385", "text": "def error(self) -> Optional[str]:\n return self._error", "title": "" }, { "docid": "712e5af20a2e8530e93b32e42fdb7c77", "score": "0.5107286", "text": "def get_error(self, error_id):\n return self._make_request('get', '/users/%s/errors/%s' % (self.user_id, error_id))[0]", "title": "" }, { "docid": "8f664b2f7dea4a492e64ee74634da149", "score": "0.5104701", "text": "def _idname(self): \n if self.layout == 0:\n idname = self.idpath.split(\"/\")[-2]\n else:\n idname = self.idpath.split(\"/\")[-4]\n pass\n return idname", "title": "" }, { "docid": "aef49b2c01ab2551a1373570f2a8b408", "score": "0.5104373", "text": "def error_list(self):\n error = str(self.__instr.ask('SYST:ERR?'))\n return error", "title": "" }, { "docid": "16bc7b7e8bd02a19c4baa7be6ff6286f", "score": "0.51036", "text": "def id(self): # pylint: disable=C0103\n return self._id", "title": "" }, { "docid": "fe4701ed09b8064caf9c994123650dca", "score": "0.5101651", "text": "def _get_error_from_log(self, line):\n #m = re.search(r\"ticky: ERROR ([\\w* ]* [\\w* \\'? ]*)\", line)\n\n error_reg_exp = r\"ticky: ERROR ([\\w* \\'? 
\\w*]*)\"\n m = re.search(error_reg_exp, line)\n if m != None:\n cleaned = m.group(1).strip()\n return cleaned", "title": "" }, { "docid": "7d13e5136d9133a4f9fc8f3be98be7ab", "score": "0.510136", "text": "def _id(self):\n return self.path if self.path.endswith(os.path.sep) else self.path + os.path.sep", "title": "" }, { "docid": "b7c49611f328a57042941968ad6eb9c9", "score": "0.5095167", "text": "def lineno():\n return str(' - SecurityGroupParser- line number: '+str(inspect.currentframe().f_back.f_lineno))", "title": "" }, { "docid": "e0caf3cffb8a112fb65a5bb9a7998903", "score": "0.50945425", "text": "def get_error(self):\n error_text = self.ins.ask(\"SYST:ERR?\")\n error_list = error_text.split(\",\")\n code = int(error_list[0])\n message = error_list[1]\n return code,message", "title": "" }, { "docid": "30d1cec1cd985db8138178284eb57828", "score": "0.5093901", "text": "def get_detailed_error(self):\n return _common_run(self, '+CEER')[0]", "title": "" } ]
b97fc381e1474d4b8691b703445d2fe9
Enable/Disable best practices validation for all items in the model.
[ { "docid": "e9b7d93b3391eae201f07c6faec0ae1c", "score": "0.6128238", "text": "def enable_best_practices(self, enabled=True):\n for item in self._data:\n item.validate_best_practices = enabled\n\n self.reset_results()", "title": "" } ]
[ { "docid": "e438db992897457c6a9e2dc4169a1ad7", "score": "0.67385113", "text": "def validate_all(self):\n \n pass", "title": "" }, { "docid": "7850a42c15eedad44f2c8a953044cf06", "score": "0.65433544", "text": "def set_validated(self):\n self.__validationerrors=[]\n self.set_property('secondary-icon-stock',None)", "title": "" }, { "docid": "4cccb3360e59359386696e97d0736356", "score": "0.63523126", "text": "def custom_validate(self):\n pass", "title": "" }, { "docid": "95333abc997a0cef771b591ce472ede5", "score": "0.6188237", "text": "def validate(self):\n super().validate()", "title": "" }, { "docid": "b3a074875b56a93a59e3d41b31c8dbb7", "score": "0.6150778", "text": "def _enable_relaxed_validation(cls) -> None:\n cls._relaxed_validation = True", "title": "" }, { "docid": "e051ca0016c5231ee350ecbdb4490dcd", "score": "0.61434686", "text": "def _disable_relaxed_validation(cls) -> None:\n cls._relaxed_validation = False", "title": "" }, { "docid": "7b5ea72eadfd62850a85cd677cbf1418", "score": "0.6111745", "text": "def validations_allowed(self, validations_allowed):\n\n self._validations_allowed = validations_allowed", "title": "" }, { "docid": "37e508963ec7492b2088a9efbb5bfed1", "score": "0.6085779", "text": "def validate(self):\n self.__validate()", "title": "" }, { "docid": "0024656b0a6e1233ffa558ce2490ca23", "score": "0.6010872", "text": "def validate(self, item):\n raise NotImplementedError()", "title": "" }, { "docid": "68c65a8c26544f72314dc4233e9acbee", "score": "0.6006676", "text": "def validate(self):\n for key, option in self.options.items():\n if key not in self.option_replace:\n option.validate()", "title": "" }, { "docid": "f982c5e690482f5d1e0039a4a6fe9c3e", "score": "0.5987196", "text": "def validate(cls, item):\n raise NotImplementedError", "title": "" }, { "docid": "bc4620ab347e0bbcdc33abd0d0f8b4a7", "score": "0.5952346", "text": "def validate_all(self):\n for name, field in self._fields.items():\n value = getattr(self, name)\n\n if value is None:\n if field.required:\n raise ValidationError('{!r} is required'.format(name), field=field, model=self)\n else:\n self._validate_field(field, value)\n\n map_errors(ValidationError, model=self)(self.validate)()", "title": "" }, { "docid": "40f67ae55f009efa688fb4cdbbc94ea0", "score": "0.59274685", "text": "def _set_strict_validation_for_class(cls, item_class) -> None:\n if item_class in cls._relaxed_item_classes:\n cls._relaxed_item_classes.remove(item_class)", "title": "" }, { "docid": "05f10485e6d1e36066de385716f39b99", "score": "0.59178936", "text": "def set_strict_validation_for_class(item_class) -> None:\n _OPItemValidationPolicy._set_strict_validation_for_class(item_class)", "title": "" }, { "docid": "e7695da60c6d3c7ded5043ead58f41d4", "score": "0.5916899", "text": "def enable_relaxed_validation() -> None:\n _OPItemValidationPolicy._enable_relaxed_validation()", "title": "" }, { "docid": "5db383b209d8817f6789aba9f6a43055", "score": "0.5911091", "text": "def validate(self):\n\n\t\tself.check_developer_mode()\n\n\t\tself.validate_name()\n\n\t\tself.set_defaults_for_single_and_table()\n\t\tself.set_defaults_for_autoincremented()\n\t\tself.scrub_field_names()\n\t\tself.set_default_in_list_view()\n\t\tself.set_default_translatable()\n\t\tvalidate_series(self)\n\t\tself.set(\"can_change_name_type\", validate_autoincrement_autoname(self))\n\t\tself.validate_document_type()\n\t\tvalidate_fields(self)\n\t\tself.check_indexing_for_dashboard_links()\n\n\t\tif not 
self.istable:\n\t\t\tvalidate_permissions(self)\n\n\t\tself.make_amendable()\n\t\tself.make_repeatable()\n\t\tself.validate_nestedset()\n\t\tself.validate_child_table()\n\t\tself.validate_website()\n\t\tself.validate_virtual_doctype_methods()\n\t\tself.ensure_minimum_max_attachment_limit()\n\t\tvalidate_links_table_fieldnames(self)\n\n\t\tif not self.is_new():\n\t\t\tself.before_update = frappe.get_doc(\"DocType\", self.name)\n\t\t\tself.setup_fields_to_fetch()\n\t\t\tself.validate_field_name_conflicts()\n\n\t\tcheck_email_append_to(self)\n\n\t\tif self.default_print_format and not self.custom:\n\t\t\tfrappe.throw(_(\"Standard DocType cannot have default print format, use Customize Form\"))", "title": "" }, { "docid": "d2614f99fdc8bc220a8cbf8dc7758d57", "score": "0.59072834", "text": "def validate(self):\n self._validator.validate(self)", "title": "" }, { "docid": "4b3ce83f073e814a58ba542874ceae87", "score": "0.59024376", "text": "def clean(self):\n model_name = self.get_model_name()\n if model_name not in self.allowed_models:\n raise ValidationError(\n f\"{self.content_type} is not in {self.allowed_models} field.\"\n )", "title": "" }, { "docid": "a4d36b3fed15f1d34be314cd4502ef70", "score": "0.5867896", "text": "def validate(self):\n pass # pragma: no cover", "title": "" }, { "docid": "e4e0173184adc4cc8625d729abd11a08", "score": "0.58585674", "text": "def validate(self):\n return 1 # override", "title": "" }, { "docid": "7efe7561700028d346a6182ffa934fe4", "score": "0.58531725", "text": "def validate(self):\r\n #TODO: Implement validation method\r\n pass", "title": "" }, { "docid": "7efe7561700028d346a6182ffa934fe4", "score": "0.58531725", "text": "def validate(self):\r\n #TODO: Implement validation method\r\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5830031", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5830031", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5830031", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5830031", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5830031", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5830031", "text": "def validate(self):\n pass", "title": "" }, { "docid": "582c1d5e117167a3d435334b360922d7", "score": "0.58178884", "text": "def __validate(self):\n pass", "title": "" }, { "docid": "530ee39a75c9a6b5fe185f84fdb96563", "score": "0.58103323", "text": "def validate(self):\r\n extra = {}\r\n for name in self._fields:\r\n inline = getattr(self.__class__, 'validate_%s' % name, None)\r\n if inline is not None:\r\n extra[name] = [inline]\r\n\r\n return super(Form, self).validate(extra)", "title": "" }, { "docid": "530ee39a75c9a6b5fe185f84fdb96563", "score": "0.58103323", "text": "def validate(self):\r\n extra = {}\r\n for name in self._fields:\r\n inline = getattr(self.__class__, 'validate_%s' % name, None)\r\n if inline is not None:\r\n extra[name] = [inline]\r\n\r\n return super(Form, self).validate(extra)", "title": "" }, { "docid": "dea95f9f4c2712faaf9b05606e3dfa2f", "score": "0.5800606", "text": "def _validate(self):\n return True", "title": "" }, { "docid": "21949404d4c367e489d1c6614e0202ae", "score": "0.5794496", "text": "def validateQuick(self):\n # Cassettes are always valid\n 
self.component.update({\"validationerror\":False})\n return True", "title": "" }, { "docid": "e2e88e0b99599ebe74e0c34b7217d64e", "score": "0.57900685", "text": "def _validate(self):\n pass", "title": "" }, { "docid": "d47578b22de9a2177d6db56f9d69864b", "score": "0.57598037", "text": "def validate(self):\n errors = []\n self._validate_helper(errors)", "title": "" }, { "docid": "9cff42048710331abfd79fb3b1b46458", "score": "0.575241", "text": "def disable_relaxed_validation() -> None:\n _OPItemValidationPolicy._disable_relaxed_validation()", "title": "" }, { "docid": "2f1793578e3cac248e56dadd79eb248d", "score": "0.57469815", "text": "def validate(self):\n return True", "title": "" }, { "docid": "2f1793578e3cac248e56dadd79eb248d", "score": "0.57469815", "text": "def validate(self):\n return True", "title": "" }, { "docid": "2f1793578e3cac248e56dadd79eb248d", "score": "0.57469815", "text": "def validate(self):\n return True", "title": "" }, { "docid": "81dab3cdb27d6daa42769a165ce18277", "score": "0.5746471", "text": "def enforce_all(self, **validator_spec):\n\n self.__register_spec(None, validator_spec)", "title": "" }, { "docid": "427076455413bdd8671b6c7c9b56fdb9", "score": "0.57417643", "text": "def _validate(self):\r\n return True", "title": "" }, { "docid": "ad8c83b3016501eaadd824615d15d448", "score": "0.5741719", "text": "def validate(self):\n self._validate()", "title": "" }, { "docid": "ad8c83b3016501eaadd824615d15d448", "score": "0.5741719", "text": "def validate(self):\n self._validate()", "title": "" }, { "docid": "ad8c83b3016501eaadd824615d15d448", "score": "0.5741719", "text": "def validate(self):\n self._validate()", "title": "" }, { "docid": "ad8c83b3016501eaadd824615d15d448", "score": "0.5741719", "text": "def validate(self):\n self._validate()", "title": "" }, { "docid": "73fe65bd2ce88d16688e0a1d4aa5a20f", "score": "0.5712967", "text": "def _on_validated(self):\n\n self._accept_btn.setEnabled(not self._form_widget.has_errors() and self._valid_fields)", "title": "" }, { "docid": "35f4edbbfeb62e1778f2ecfcd5faaa80", "score": "0.57055986", "text": "def test_can_turn_off_required(self):\n model = Model()\n \n class Foo(model.Entity):\n bar = Text(required=True)\n baz = Text()\n \n obj = {'baz':'y'}\n result = model.Foo.validator.validate(obj, enforce_required=False)\n self.assertEquals(result, obj)", "title": "" }, { "docid": "a6f27c55b7fb9ad16c8391ca5bc9bf68", "score": "0.56991494", "text": "def validate(self):\r\n return True", "title": "" }, { "docid": "2e0dd8d9fe75e8fce5e097fd5d98fe99", "score": "0.56783754", "text": "def validate(self):\n self._validate()\n self.is_validated = True", "title": "" }, { "docid": "ab1fc46df8ffd08eb6fe0c518109e306", "score": "0.56765956", "text": "def __validate(self):\n if self.model not in self.models:\n raise NameError(\"The model {name} is not valid. 
Please choose from {models}\".format(name = self.model, models = self.models))\n\n if type(self.evaluate) != bool:\n raise TypeError(\"Evaluate must be bool\")", "title": "" }, { "docid": "4a4cecb8917e250bce22e2fcc5381d7b", "score": "0.5675694", "text": "def setup_validation(self, client):\n raise NotImplementedError(\"Please fix me.\")", "title": "" }, { "docid": "6bf5f74b6914ba9853bafa84147490f1", "score": "0.5669355", "text": "def __get_validators__(cls):\n yield from super().__get_validators__()\n yield cls.custom_validate", "title": "" }, { "docid": "172f1e33b98bd98ca23dbeae020b8c6d", "score": "0.5665902", "text": "def wrap_model_clean_methods():\n for model in ContentType.objects.filter(FeatureQuery(\"custom_validators\").get_query()):\n model_class = model.model_class()\n model_class.clean = custom_validator_clean(model_class.clean)", "title": "" }, { "docid": "f56eded7bf11877a4fc29b361f66c803", "score": "0.566051", "text": "def set_relaxed_validation_for_class(item_class) -> None:\n _OPItemValidationPolicy._set_relaxed_validation_for_class(item_class)", "title": "" }, { "docid": "bdb348d11a0aec6a552fca49089cf6c6", "score": "0.5657457", "text": "def _set_relaxed_validation_for_class(cls, item_class) -> None:\n cls._relaxed_item_classes.add(item_class)", "title": "" }, { "docid": "10f71c72a4256c577dcff7107cb95868", "score": "0.56555516", "text": "def post_validate(self):\n pass", "title": "" }, { "docid": "fcd9ef1ef9d47d29e1d45b907d25d267", "score": "0.56453145", "text": "def validate(self):\r\n\t\tself.validate_delivery_note()\r\n\t\tself.validate_items_mandatory()\r\n\t\tself.validate_case_nos()\r\n\t\tself.validate_qty()\r\n\r\n\t\tfrom erpnext.utilities.transaction_base import validate_uom_is_integer\r\n\t\tvalidate_uom_is_integer(self, \"stock_uom\", \"qty\")\r\n\t\tvalidate_uom_is_integer(self, \"weight_uom\", \"net_weight\")", "title": "" }, { "docid": "5890706c1e11dbf0ca296192c8ee0dd1", "score": "0.564348", "text": "def onValidate(self):\r\n pass", "title": "" }, { "docid": "780b7231bea971a060c7b58c4109d888", "score": "0.5642863", "text": "def validations_enabled(self, validations_enabled):\n\n self._validations_enabled = validations_enabled", "title": "" }, { "docid": "00e3ba4db782f96cddeb33afcd749ca4", "score": "0.5638912", "text": "def validate(self):\n return False", "title": "" }, { "docid": "a6b5748dcf30264e7896a536b6b89134", "score": "0.5624827", "text": "def validation(self):\n return self._mode == ModelMode.VALIDATION", "title": "" }, { "docid": "9df387b45d3e95cca9935d6a2bf57152", "score": "0.5622554", "text": "def validations():\n return gen_validations()", "title": "" }, { "docid": "4434555c51e4bceec3e0bf86a54c3dfa", "score": "0.56133944", "text": "def _validate(self):\n self._validate_type_and_enum()\n\n self._validate_index_feature_mapping()\n\n if self._domain is not None:\n self._validate_domain()\n\n self._validate_tensors()\n\n self._validate_baselines()", "title": "" }, { "docid": "096625355bf51f64bfa53a81edd85bf3", "score": "0.5580633", "text": "def validate(self):\n if not self.editable:\n assert self.populate_from is not NotSet, u\"If field (%s) is not editable, you must set populate_from\" % self.name", "title": "" }, { "docid": "f61e07eb2ec4ee727043bb2295c8ab89", "score": "0.5580276", "text": "def disable_required_field_validation(self) -> bool:\n return pulumi.get(self, \"disable_required_field_validation\")", "title": "" }, { "docid": "61b594276275bf2b1a0ea137954b5b74", "score": "0.5580167", "text": "def clean(self):\n if self.check_valid_abc:\n 
try:\n conform_abc(self.abc)\n except AttributeError as e:\n raise ValidationError({'abc': e})\n if self.tune.abc_tune_fingerprint == self.abc_tune_fingerprint:\n raise ValidationError({'abc': 'This setting’s tune is not a variation on the main tune.'})\n if any(x.abc_tune_fingerprint == self.abc_tune_fingerprint for x in Setting.objects.exclude(id=self.id)):\n raise ValidationError({'abc': 'This setting is not a variation of another.'})", "title": "" }, { "docid": "73548df03a522d40ac9ac0c6e10299be", "score": "0.5557022", "text": "def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "title": "" }, { "docid": "73548df03a522d40ac9ac0c6e10299be", "score": "0.5557022", "text": "def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "title": "" }, { "docid": "80eb17ddda52334e124eefde42a6ee9d", "score": "0.55545145", "text": "def pre_validate(self, form):\r\n pass", "title": "" }, { "docid": "80eb17ddda52334e124eefde42a6ee9d", "score": "0.55545145", "text": "def pre_validate(self, form):\r\n pass", "title": "" }, { "docid": "80eb17ddda52334e124eefde42a6ee9d", "score": "0.55545145", "text": "def pre_validate(self, form):\r\n pass", "title": "" }, { "docid": "88db2c8f0f198f54852aa2d83c8f559b", "score": "0.5549542", "text": "def validate(self):\n self.spec_data.validate()\n # NOTE: the rule property is supposed to be assigned in subclasses.\n self.rule.validate()", "title": "" }, { "docid": "77f7d3c6859cb9fdbc2764bb4db2b39e", "score": "0.55457556", "text": "def _validate(self):\n raise NotImplementedError('Must be implemented in subclass')", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.5543486", "text": "def validate(self):", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.5543486", "text": "def validate(self):", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.5543486", "text": "def validate(self):", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.5543486", "text": "def validate(self):", "title": "" }, { "docid": "914408fe5fd9a398b5ec82e8c2c59cb3", "score": "0.5541353", "text": "def add_validators(self):\n pass", "title": "" }, { "docid": "bf62b3e9d1a1e841a5b5b942dc66b292", "score": "0.55384326", "text": "def validate(self):\n raise NotImplementedError('validate method not implemented.')", "title": "" }, { "docid": "bf62b3e9d1a1e841a5b5b942dc66b292", "score": "0.55384326", "text": "def validate(self):\n raise NotImplementedError('validate method not implemented.')", "title": "" }, { "docid": "b0a97ca8861819cf2320dd86d080c9d8", "score": "0.55240875", "text": "def validate(self):\n raise NotImplementedError(\"This method needs to be implemented in \\\nchild classes!\")", "title": "" }, { "docid": "2060254fc82d8b64cbf35efb3989d927", "score": "0.5519314", "text": "def validate(self, kwargs):\n\t\tsuper().validate(kwargs)", "title": "" }, { "docid": "e29893c07cfd41b5f5fb820c60cde210", "score": "0.5509722", "text": "def validate(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e29893c07cfd41b5f5fb820c60cde210", "score": "0.5509722", "text": "def validate(self):\n raise NotImplementedError()", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.54957616", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": "9a8b4bda9badf2cdd93a620768125dc4", "score": "0.5495082", "text": "def validate(self):\n extra = {}\n for name in 
self._fields:\n inline = getattr(self.__class__, 'validate_%s' % name, None)\n if inline is not None:\n extra[name] = [inline]\n\n return super(TornadoForm, self).validate(extra)", "title": "" }, { "docid": "94380481a71730a71cf1f91d8bfbc7a8", "score": "0.54904103", "text": "def _propagate(self, validate, items, has=hasattr):\n for item in (x for x in items if has(x, '_set_descriptor')):\n subdesc = validate(item)\n if subdesc is None:\n errmsg = \"\\n\".join((\n \"'%s' no puede ir dentro de '%s'\" % (item.name, self.name),\n \"(posibles valores: '%s')\" % \"', '\".join(self.descriptor.keys()),\n ))\n raise AssertionError(errmsg)\n item._set_descriptor(subdesc)", "title": "" }, { "docid": "ec52b9715978054624c35b6d6d78c18f", "score": "0.54723436", "text": "def validate(self, value):\n # Use the parent's handling of required fields, etc.\n super().validate(value)", "title": "" }, { "docid": "e6c97d4b00000e395385c28e7c805326", "score": "0.5462694", "text": "def validate(self) -> bool:\n raise NotImplementedError() # TODO", "title": "" }, { "docid": "b72c46ffe8e270d376992ecb6e5440ce", "score": "0.54614085", "text": "def _validate(self):\n if self.is_misconfigured():\n raise MisconfiguredElement(self)\n super()._validate()\n if self.validator is not None and not self._is_empty(): # Non-empty value\n self.validator.validate(self)", "title": "" }, { "docid": "9fd5be8815eec28595b5d0b1e135d75f", "score": "0.54578704", "text": "def setupValidators(self):\n self.noOfTermsInput.setValidator(QtGui.QIntValidator())\n self.regularizationConstantInput.setValidator(GuiUtils.DoubleValidator())\n self.maxDistanceInput.setValidator(GuiUtils.DoubleValidator())\n self.minQInput.setValidator(GuiUtils.DoubleValidator())\n self.maxQInput.setValidator(GuiUtils.DoubleValidator())\n self.slitHeightInput.setValidator(GuiUtils.DoubleValidator())\n self.slitWidthInput.setValidator(GuiUtils.DoubleValidator())", "title": "" }, { "docid": "cc63810cb82a25b0401614a87aa690b3", "score": "0.5446698", "text": "def test_model_multiple_choice_run_validators(self):\n for i in range(30):\n Person.objects.create(name=\"Person %s\" % i)\n\n self._validator_run = False\n def my_validator(value):\n self._validator_run = True\n\n f = forms.ModelMultipleChoiceField(queryset=Person.objects.all(),\n validators=[my_validator])\n\n f.clean([p.pk for p in Person.objects.all()[8:9]])\n self.assertTrue(self._validator_run)", "title": "" }, { "docid": "551aff0f87ba24de0b45125b1dbdea49", "score": "0.5444019", "text": "def run_validators(self, **kwargs):\n meta.model_info(self).run_validators(self, **kwargs)", "title": "" }, { "docid": "471ccd3cc26b6ec028f9d1a2cb8a9ec5", "score": "0.54428864", "text": "def validate(self, validation):\n return True", "title": "" }, { "docid": "616a4792aa1231fc313cca40f0016ca8", "score": "0.54370433", "text": "def validate(self):\r\n for validator in self.exttype_validators:\r\n validator.validate(self.ext_type)", "title": "" }, { "docid": "b90eec29dbec0b5c6c0ad327e450d782", "score": "0.54320055", "text": "def validate(self):\n self._validate_materials()\n self._validate_processes()\n self._validate_arcs()", "title": "" }, { "docid": "075d61a3e2049ba0034d1e44db2aedc8", "score": "0.5430011", "text": "def validate(self) -> None:\n pass", "title": "" }, { "docid": "a026dbd225a604e92a2da35ff28c92eb", "score": "0.54221004", "text": "def clean(self):\n cleaned_data = super().clean()\n con_name = cleaned_data.get('constraint_name')\n con_descr = cleaned_data.get('constraint_descr')\n con_type = 
cleaned_data.get('constraint_type')\n con_min = cleaned_data.get('constraint_min')\n con_max = cleaned_data.get('constraint_max')\n con_list = cleaned_data.get('constraint_list')\n self.validate_constraint_fields(\n con_name,\n con_descr,\n con_type,\n con_min,\n con_max,\n con_list\n )\n return", "title": "" }, { "docid": "466f5277c399c54877b19230d605325d", "score": "0.5408873", "text": "def changes_not_allowed(self):\n self.sm_Itext.Enable(False)\n self.sm_Ilabel.Enable(False)\n self.sm_Vtext.Enable(False)\n self.sm_Vlabel.Enable(False)", "title": "" }, { "docid": "3a740125c67f22ded83781b40c7590bc", "score": "0.5408001", "text": "def checkModelFormValues(self):\n return True", "title": "" } ]
32ab93180ad5e9e197cf9d5f682e4033
set_test(self) Set the default test for the cipher.
[ { "docid": "b1b9d037a48909c5d03ec5c61560f832", "score": "0.80053985", "text": "def set_test(self):\r\n\t\tself.test_plain = 'Help me I am under attack.'\r\n\t\tself.test_cipher = 'HENTEIDTLAEAPMRCMUAK'\r\n\t\tself.test_kwargs = {'n': 4}", "title": "" } ]
[ { "docid": "5d2b0cc46955f4a7aa07f3a74f53bcc4", "score": "0.8267654", "text": "def set_test(self):\r\n\t\tself.test_cipher = 'LXFOPVEFRNHR'\r\n\t\tself.test_plain = 'ATTACKATDAWN'\r\n\t\tself.test_kwargs = {'key': 'LEMON'}", "title": "" }, { "docid": "abc7ad75e8c28691219b37f21f63f512", "score": "0.82423824", "text": "def set_test(self):\r\n\t\tself.test_plain = 'THEDOGRANFAR'\r\n\t\tself.test_cipher = 'RRGTAAOHFNDE'\r\n\t\tself.test_kwargs = {}", "title": "" }, { "docid": "b0ca15c6f6e34e1157e632110965a7f2", "score": "0.81557626", "text": "def set_test(self):\r\n\t\tself.test_plain = 'Affine cipher'\r\n\t\tself.test_cipher = 'Ihhwvc swfrcp'\r\n\t\tself.test_kwargs = {'n': (5, 8), 'case': True}", "title": "" }, { "docid": "f52448d9e6982efa8a68081cfb1f1630", "score": "0.81212574", "text": "def set_test(self):\r\n\t\tself.test_plain = 'SOLITAIREX'\r\n\t\tself.test_cipher = 'KIRAKSFJAN'\r\n\t\tself.test_kwargs = {'key': 'CRYPTONOMICON'}", "title": "" }, { "docid": "903c31e578dce96bcf85d096321a9b2a", "score": "0.8054377", "text": "def set_test(self):\r\n\t\tself.test_plain = 'We are discovered, flee at once.'\r\n\t\tself.test_cipher = 'WECRLTEERDSOEEFEAOCAIVDEN'\r\n\t\tself.test_kwargs = {}", "title": "" }, { "docid": "748ef77bceaa36c9161251010e3b15eb", "score": "0.800313", "text": "def set_test(self):\r\n\t\tself.test_plain = 'Flee at once'\r\n\t\tself.test_cipher = 'UAEOLWRINS'\r\n\t\tself.test_kwargs = {'alphabet': 'BGWKZQPNDSIOAXEFCLUMTHYVR'}", "title": "" }, { "docid": "913f15989fb25d7e8607030e1281cf57", "score": "0.7945952", "text": "def set_test(self):\r\n\t\tself.test_plain = 'We are discovered, flee at once.'\r\n\t\tself.test_cipher = 'EVLNEACDTKESEAQROFOJDEECUWIREE'\r\n\t\tself.test_kwargs = {'padding': 'QKJEU', 'key': 'ZEBRAS'}", "title": "" }, { "docid": "a4e684926d327e1f5682371310db48d9", "score": "0.79417723", "text": "def set_test(self):\r\n\t\tself.test_plain = 'The quick brown fox jumps over the lazy dog.'\r\n\t\tself.test_cipher = 'WKH TXLFN EURZQ IRA MXPSV RYHU WKH ODCB GRJ.'\r\n\t\tself.test_kwargs = {}", "title": "" }, { "docid": "50b5313e93fabfc9cb84c294a3cb1152", "score": "0.79222655", "text": "def set_test(self):\r\n\t\tself.test_plain = 'attack at once'\r\n\t\tself.test_cipher = 'FAXDFADDDGDGFFFAFAXXAFAFX'\r\n\t\tself.test_kwargs = {'key': 'CARGO', 'alphabet': 'BTALPDHOZKQFVSNGICUXMREWY', 'padding': 'XGFDA'}", "title": "" }, { "docid": "7232ebc0c07de89c469e04e761472c47", "score": "0.7820975", "text": "def set_test(self):\r\n\t\tself.test_plain = 'WELL DONE IS BETTER THAN WELL SAID.'\r\n\t\tself.test_cipher = 'OAHQ HCNY NX TSZJRR HJBY HQKS OUJY.'\r\n\t\tself.test_kwargs = {'alphabet': ['PTLNBQDEOYSFAVZKGJRIHWXUMC', 'HXUCZVAMDSLKPEFJRIGTWOBNYQ']}", "title": "" }, { "docid": "37f8fe9a2e8965969811acc02ff52089", "score": "0.77122974", "text": "def set_test(self, plain = '', cipher = '', **kwargs):\r\n\t\tself.test_plain = plain\r\n\t\tself.test_cipher = cipher\r\n\t\tself.test_kwargs = kwargs", "title": "" }, { "docid": "93b448754b8b9a9f1f5bb3c771bf24b5", "score": "0.77043474", "text": "def set_test(self):\r\n\t\tself.test_plain = 'DYNAMITE WINTER PALACE'\r\n\t\tself.test_cipher = '37 106 62 36 67 47 86 26 104 53 62 77 27 55 57 66 55 36 54 27'\r\n\t\tself.test_kwargs = {'alphabet': 'ZEBRASCDFGHIKLMNOPQTUVWXY', 'deletes': 'J', 'modular': False}\r\n\t\tself.test_kwargs['key'] = 'RUSSIAN'", "title": "" }, { "docid": "676513a5b15823df35a74969fedef02c", "score": "0.75343984", "text": "def set_test(self):\r\n\t\tself.test_plain = 'ATTACK AT DAWN'\r\n\t\tself.test_cipher = 
'3113212731223655'\r\n\t\tself.test_kwargs = {'alphabet': 'ET AON RISBCDFGHJKLMPQ/UVWXYZ.'}", "title": "" }, { "docid": "63d51a340bdf337f6fdfda2e29c9d640", "score": "0.6981828", "text": "def test(self, test):\n self._test = test", "title": "" }, { "docid": "3c963aade5e955a41445071162555445", "score": "0.67851824", "text": "def test(self, test):\n\n self._test = test", "title": "" }, { "docid": "3c963aade5e955a41445071162555445", "score": "0.67851824", "text": "def test(self, test):\n\n self._test = test", "title": "" }, { "docid": "ef11cc5f5b6a1462cc00b6286e5a1f52", "score": "0.6527384", "text": "def setTestingMode(self, isTesting):\n pass", "title": "" }, { "docid": "b4c82e6a2b555ebb3963d86b9caec9a5", "score": "0.64851534", "text": "def set_test_mode(self, mode):\n\t\tself.test_mode = mode", "title": "" }, { "docid": "d51e444331251b5c8b2d8aae324bbe0b", "score": "0.64349794", "text": "def enable_test(self):\n\t\tself.test = True\n\t\treturn", "title": "" }, { "docid": "83089ffa678da107ce55b7443c99797c", "score": "0.6402525", "text": "def testset(self, value):\n self._testset = value", "title": "" }, { "docid": "5e9eb53d07f50bd9d7a07035b22349ab", "score": "0.6264571", "text": "def SetTest(self, Test):\n self.StatTest = Test", "title": "" }, { "docid": "0d4ab4333e7761a17379149bc0b463d6", "score": "0.618259", "text": "def testname(self, testname):\n self._testname = testname", "title": "" }, { "docid": "2640aaec8fdcfd3af4b5cae0b99dc667", "score": "0.61034507", "text": "def setUp(self):\n self.cipher = ecrypto.PseudonymCipher(_KEY1)", "title": "" }, { "docid": "45422aa93466c4b0eeae6b50843c82ec", "score": "0.6004756", "text": "def enable_test_mode(self) -> None:\r\n self.keys_view.enable_test_mode()", "title": "" }, { "docid": "187af96cadd9045a365197a341cbdd4b", "score": "0.59794366", "text": "def setTestMode(self, value):\n self.port.flushInput()\n self.port.flushOutput()\n self.port.write(\"testmode \" + value + \"\\n\")\n self.readResponseString()", "title": "" }, { "docid": "7ccd7eac23992d42437bf4e53ac5d99d", "score": "0.5970466", "text": "def setUp(self):\n self.encryption = multi_factor_settings.DEFAULT_ENCRYPTION_CLASS()", "title": "" }, { "docid": "19e3e40641cdb506ec9052884a645a3d", "score": "0.5968629", "text": "def setUp(self):\n self.cipher = ecrypto.ProbabilisticCipher(_KEY1)", "title": "" }, { "docid": "efba308654d10cd1348d481176854ca6", "score": "0.58731025", "text": "def letterboxing_test(self, letterboxing_test):\n\n self._letterboxing_test = letterboxing_test", "title": "" }, { "docid": "1463610c07b1386cf86e2715112466ae", "score": "0.57615143", "text": "def setUp(self):\n self.cipher = ecrypto.HomomorphicIntCipher(_KEY1)", "title": "" }, { "docid": "b692e352d7321b0aec3257083249631f", "score": "0.5717863", "text": "def __init__(self):\r\n\t\tself.defaults = {'alphabet': string.ascii_uppercase, 'book': False, 'case': False, \r\n\t\t\t'combines': ['IJ'], 'decode': False, 'deletes': '',\r\n\t\t\t'indexes': '12345', 'irregular': False, 'key': 'PHLEGM', 'n': 3, 'padding': 'QXJZ'}\r\n\t\tself.change_defaults()\r\n\t\tself.set_test()", "title": "" }, { "docid": "4dcd21c97cc1289ac9f5803bd56c4ced", "score": "0.5545426", "text": "def setTestDir(self, test_dir):\n self.test_dir = test_dir", "title": "" }, { "docid": "63aced943c5849683ae13e527804fdf6", "score": "0.55369955", "text": "def setUp(self):\n self.cipher = ecrypto.HomomorphicFloatCipher(_KEY1)", "title": "" }, { "docid": "211871a9367c24f8113b04f7ab3281cd", "score": "0.5533565", "text": "def setTest(self, seqNum):\n\n if seqNum < 0 
or seqNum >= len(self.sequence):\n raise IndexError('test number must be 0..%d' % (len(self.sequence)))\n \n self.seqNum = seqNum\n return self.describeTest()", "title": "" }, { "docid": "844294b9363b3d7239c044f3744e3179", "score": "0.5525789", "text": "def testSetUp(self):\n pass", "title": "" }, { "docid": "844294b9363b3d7239c044f3744e3179", "score": "0.5525789", "text": "def testSetUp(self):\n pass", "title": "" }, { "docid": "844294b9363b3d7239c044f3744e3179", "score": "0.5525789", "text": "def testSetUp(self):\n pass", "title": "" }, { "docid": "3c02f906e23aeec4dd92745f6f376401", "score": "0.5467919", "text": "def compression_test(self, compression_test):\n\n self._compression_test = compression_test", "title": "" }, { "docid": "a2a909dd0a1da49c902df8a0dcaab95d", "score": "0.5459391", "text": "def testsuite(self, testsuite):\n self._testsuite = testsuite", "title": "" }, { "docid": "18436484a90be7d402b07b7175f9cc6a", "score": "0.5410165", "text": "def cadence_test(self, cadence_test):\n\n self._cadence_test = cadence_test", "title": "" }, { "docid": "4982bab207d95fd3a888c40d54246cbd", "score": "0.53784", "text": "def set_test_image(self, test_image: TestImage):\n self.__test_image = test_image", "title": "" }, { "docid": "9b0ee2b948bd421af33d9e3e28327e34", "score": "0.53556484", "text": "def test_manager(self, test_mode, test_param):\n \n try:\n if test_mode == 't1':\n self.tester_test_mode = \"t1\"\n print(\"Automatic learning for 0..9 has been turned ON!\")\n self.tester_img_flag = True\n self.tester_utf_flag = True\n self.tester_exit_flag = False\n self.tester_burst_skip_flag = False\n self.tester_utf_handler = True\n self.tester_variation_handler = True\n self.tester_variation_counter = runtime_data.parameters[\"Auto_tester\"][\"variation_default\"]\n runtime_data.variation_counter_actual = runtime_data.parameters[\"Auto_tester\"][\"variation_default\"]\n self.tester_utf_default = runtime_data.parameters[\"Auto_tester\"][\"utf_default\"]\n self.tester_utf_counter_actual = runtime_data.parameters[\"Auto_tester\"][\"utf_default\"]\n self.tester_num_to_inject = self.tester_utf_default\n self.tester_burst_skip_counter = runtime_data.parameters[\"Auto_tester\"][\"tester_burst_skip_counter\"]\n\n elif test_mode == 't2':\n self.tester_test_mode = \"t2\"\n self.tester_img_flag = True\n self.tester_utf_flag = True\n self.tester_exit_flag = False\n self.tester_burst_skip_flag = False\n self.tester_utf_handler = False\n self.tester_variation_handler = True\n self.tester_variation_counter = runtime_data.parameters[\"Auto_tester\"][\"variation_default\"]\n runtime_data.variation_counter_actual = runtime_data.parameters[\"Auto_tester\"][\"variation_default\"]\n self.tester_utf_default = -1\n self.tester_utf_counter_actual = -1\n self.tester_num_to_inject = int(test_param)\n self.tester_burst_skip_counter = runtime_data.parameters[\"Auto_tester\"][\"tester_burst_skip_counter\"]\n print(\" <<< Automatic learning for variations of number << %s >> has been turned ON! 
>>>\"\n % test_param)\n\n else:\n print(\"Error detecting the test mode...\")\n return\n\n finally:\n toggle_test_mode()\n self.tester_test_id = test_id_gen()\n runtime_data.tester_test_stats[\"genome_id\"] = runtime_data.genome_id\n print('Genome_id = ', runtime_data.genome_id)\n runtime_data.tester_test_stats[\"test_id\"] = self.tester_test_id\n self.tester_testing_has_begun = True", "title": "" }, { "docid": "9cdbfea1376d75057f5d4468615ba406", "score": "0.5347837", "text": "def set_test_mode(self):\n self.train_mode = False", "title": "" }, { "docid": "a1ebaa71108d6596b82bc460afecb503", "score": "0.53246427", "text": "def configure(self, fixture):\n\n TestCase.configure(self, fixture)\n # Always specify test option to ensure the mongos will terminate.\n if \"test\" not in self.options:\n self.options[\"test\"] = \"\"", "title": "" }, { "docid": "78bf422f07ddb0e48eb4a45ff1e0a141", "score": "0.5312713", "text": "def change_defaults(self):\r\n\t\tplain = string.ascii_uppercase\r\n\t\tcipher = 'EPLRAGFSOXVCWTIBZDHNYKMJUQ'\r\n\t\tself.defaults['alphabet'] = [plain, cipher]", "title": "" }, { "docid": "59ed2a7c4b85d0b280efab0087bf8eb5", "score": "0.53069013", "text": "def test(self, text = '', verbose = 1, **kwargs):\r\n\t\t# set up the parameters\r\n\t\tif text:\r\n\t\t\ttest_cipher = ''\r\n\t\telse:\r\n\t\t\ttext = self.test_plain\r\n\t\t\ttest_cipher = self.test_cipher\r\n\t\t\tkwargs = self.test_kwargs\r\n\t\tneokwargs = self.defaults.copy()\r\n\t\tneokwargs.update(kwargs)\r\n\t\t# encipher the plain text\r\n\t\tcipher = self.__call__(text, **neokwargs)\r\n\t\tif verbose:\r\n\t\t\tprint('CIPHER:', cipher)\r\n\t\t# test the cipher text if possible\r\n\t\tif test_cipher:\r\n\t\t\tcipher_match = SequenceMatcher(a = cipher, b = test_cipher).ratio()\r\n\t\t\tif verbose:\r\n\t\t\t\tprint('CIPHER RATIO:', cipher_match)\r\n\t\telse:\r\n\t\t\tcipher_match = -1\r\n\t\t# decipher the cipher text\r\n\t\tneokwargs['decode'] = True\r\n\t\tplain = self.__call__(cipher, **neokwargs)\r\n\t\tif verbose:\r\n\t\t\tprint('PLAIN:', plain)\r\n\t\t# test against the original text\r\n\t\tif neokwargs['case']:\r\n\t\t\tmatch = SequenceMatcher(a = text, b = plain, isjunk = lambda x: x in ' .')\r\n\t\telse:\r\n\t\t\tmatch = SequenceMatcher(a = text.upper(), b = plain.upper(), isjunk = lambda x: x in ' .')\r\n\t\tif verbose:\r\n\t\t\tprint('RATIO:', match.ratio())\r\n\t\treturn (cipher, cipher_match, plain, match.ratio())", "title": "" }, { "docid": "51262b0e50f19bfeabbd206ecbc400f8", "score": "0.5294815", "text": "def _test(self):\n pass", "title": "" }, { "docid": "51262b0e50f19bfeabbd206ecbc400f8", "score": "0.5294815", "text": "def _test(self):\n pass", "title": "" }, { "docid": "79a177c3320c2437eca08d6e99b23dc4", "score": "0.528752", "text": "def stripe_test(self, stripe_test):\n\n self._stripe_test = stripe_test", "title": "" }, { "docid": "a945d7eaa55bfc047b687e107243ccde", "score": "0.5284663", "text": "def apply_test_settings(self, s, test_case):\n s.settings = copy(test_case['settings'])\n print(color.YELLOW, 'will run test with custom settings:',\n '\\n{}'.format(s.settings), color.RESET)", "title": "" }, { "docid": "6d11425f69b278ece6f4885b1494b9e9", "score": "0.52276856", "text": "def setUp(self):\n keyring.set_keyring(_KeyringTestBackend())", "title": "" }, { "docid": "bda122cfbb7371250a01f0df37851fa0", "score": "0.521779", "text": "def set_random_seed() -> None:\n test.set_random_seeds()", "title": "" }, { "docid": "16434976747cef30fe40c7371ebdc408", "score": "0.5200294", "text": "def 
set_encryption(self, on, ttype):\n if on and ttype:\n self.lbl_encryption.set_label(str(ttype))\n if on and not ttype:\n self.lbl_encryption.set_label(_('Secured'))\n if not on:\n self.lbl_encryption.set_label(_('Unsecured'))", "title": "" }, { "docid": "16434976747cef30fe40c7371ebdc408", "score": "0.5200294", "text": "def set_encryption(self, on, ttype):\n if on and ttype:\n self.lbl_encryption.set_label(str(ttype))\n if on and not ttype:\n self.lbl_encryption.set_label(_('Secured'))\n if not on:\n self.lbl_encryption.set_label(_('Unsecured'))", "title": "" }, { "docid": "8e30d2d2da20c5b159ae61295a071a15", "score": "0.51977533", "text": "def gen_test(self):\n pass", "title": "" }, { "docid": "0fd50546620f85ecfcc49380ec42a305", "score": "0.5190131", "text": "def _test(self):\n return", "title": "" }, { "docid": "67aa00873473b6cb95104d66f61cdcb7", "score": "0.5167325", "text": "def beforeTest(self, test):\n self._timer = time()", "title": "" }, { "docid": "e81f4b97cd5c9d83babb54a349b85ff8", "score": "0.5163835", "text": "def change_defaults(self):\r\n\t\tself.defaults['case'] = True", "title": "" }, { "docid": "e81f4b97cd5c9d83babb54a349b85ff8", "score": "0.5163835", "text": "def change_defaults(self):\r\n\t\tself.defaults['case'] = True", "title": "" }, { "docid": "9072b8f26957542c6f407c5b6fd96147", "score": "0.5158605", "text": "def setUp( self ):\n self.string = \"abc\"", "title": "" }, { "docid": "f5c3def2dc3f09ef5f940ff549ed40bf", "score": "0.5147628", "text": "def hdr_test(self, hdr_test):\n\n self._hdr_test = hdr_test", "title": "" }, { "docid": "9ce3656993173d1c2c6f280db2496156", "score": "0.51446456", "text": "def test_set_random_seed(self) -> None:\n tuning = Tuning()\n tuning.set_random_seed(123456)\n self.assertEqual(tuning.random_seed, 123456)", "title": "" }, { "docid": "a000098a90303b4a9ce6f98f42f66011", "score": "0.5133164", "text": "def init_test(self):\n self.controller.send()\n self.controller.wait_for_test()", "title": "" }, { "docid": "19191cd3edbe55d88b6cac5003278d35", "score": "0.5105784", "text": "def run_test(self):\n raise NotImplementedError(\"run_test must be implemented by TestCase subclasses\")", "title": "" }, { "docid": "06abf4a119b6d9caad1f71a4526012ee", "score": "0.5082879", "text": "def test_c(self):\n self.language = 'c'\n self.run_example()\n self.language = None", "title": "" }, { "docid": "ac1f1e2c2db5c89c255af019c57ba507", "score": "0.5079507", "text": "def setUpClass(cls):\n\t\tcls.enviroment = 'testing'", "title": "" }, { "docid": "4f97d79043ebe3629c8244d052d28621", "score": "0.507515", "text": "def setdefault(self, key, default=None):\n pass", "title": "" }, { "docid": "9ab1da9e31ae662a89e9d00e43522e16", "score": "0.5066614", "text": "def set_load_test(val):\n global LOAD_TEST\n LOAD_TEST = val", "title": "" }, { "docid": "e569ac178e2af9bdd8067117655cfced", "score": "0.5040342", "text": "def change_defaults(self):\r\n\t\tself.defaults['mutation'] = mutate_sequence\r\n\t\tself.defaults['n'] = 5\r\n\t\tplain = string.ascii_uppercase\r\n\t\tcipher = plain[13:] + plain[:13]\r\n\t\tself.defaults['alphabet'] = [plain, cipher]", "title": "" }, { "docid": "9b58cbaa988ee7266250f2222c58af6e", "score": "0.50388265", "text": "def testSetUp(self):", "title": "" }, { "docid": "225e94c5ad38eec58f806474eb4cf2cd", "score": "0.50321525", "text": "def test_main(self):\n # Initializing key variables\n pass", "title": "" }, { "docid": "97cf193daa2a1c4a03f0e72b79b25162", "score": "0.5031628", "text": "def set_testing_time(self, time):\n self.testing_time = time\n return 
self", "title": "" }, { "docid": "5f8cc7a6834de96a3181bfde75a53da9", "score": "0.50246185", "text": "def __init__(self, cipher_string):\n self.cipher_string = cipher_string", "title": "" }, { "docid": "b2df82905a651927958f621caa7a9482", "score": "0.5023334", "text": "def set_up_test_case(self, test_case, test_fixture, **kwargs):\n # type: (TestCase, TestFixture, **Any) -> None", "title": "" }, { "docid": "7c11f9da3e39d3eb5b11011f6e3f600a", "score": "0.5019505", "text": "def _enable_test_runner(self):\n\n if not hasattr(self, \"test_runner\"):\n return\n\n test_runner = self.test_runner.lower()\n if test_runner == \"pytest\":\n self._enable_pytest()\n elif test_runner.startswith(\"nose\"): # allow nosetests... etc\n self.test_runner = \"nose\" # exact name for importing though\n self._enable_nosetest()\n else:\n self.test_runner = \"unittest\"\n self._enable_unittest()\n\n TestRunner.pypackage(self) # XXX after runner_args are set\n self.cmdclass = {\"test\": TestRunner}", "title": "" }, { "docid": "1311b5e73679057cd9da6681c00e0902", "score": "0.5019021", "text": "def test(self,**kwargs):\n return 'idk'", "title": "" }, { "docid": "1311b5e73679057cd9da6681c00e0902", "score": "0.5019021", "text": "def test(self,**kwargs):\n return 'idk'", "title": "" }, { "docid": "47a0c07d225d6401eddccad0b9531c1e", "score": "0.50010914", "text": "def gen_test(self):\n if self.byte_pos >= self.file_len-4:\n # Strategy complete\n return None\n new_case = self.seed.clone()\n new_case.data[self.byte_pos:self.byte_pos+4] = b'\\xff\\xff\\xff\\x7f'\n new_case.mutations += 'set byte %x to +4 = 0xffffffff\\n' % (self.byte_pos)\n self.byte_pos += 1\n return new_case", "title": "" }, { "docid": "25b68d08862868a9e4674f957e841696", "score": "0.49948642", "text": "def test(self):\n\t\tpass", "title": "" }, { "docid": "27303ba1580f443c5895e3f791d526ed", "score": "0.49878332", "text": "def test_python(self):\n self.language = 'python'\n self.run_example()\n self.language = None", "title": "" }, { "docid": "814d711600e428f4acb833307fe8914c", "score": "0.49749544", "text": "def startTest(self, test):\n TestResult.startTest(self, test)\n self.parent.write('%s%s\\n' % (ResponseUTStartTest,\n unicode((unicode(test), test.shortDescription()))))", "title": "" }, { "docid": "430fd8ae59bc9180c21fa0d9371678d9", "score": "0.49719095", "text": "def test___init__(self):\n pass", "title": "" }, { "docid": "430fd8ae59bc9180c21fa0d9371678d9", "score": "0.49719095", "text": "def test___init__(self):\n pass", "title": "" }, { "docid": "c3749dc1c2467426a8f2d02bc404a8a9", "score": "0.496728", "text": "def setUp(self):\n test_utils.set_three_rounds()", "title": "" }, { "docid": "c5cabb77113287bd1c31762ccc21b9bd", "score": "0.49618837", "text": "def setUpClass(cls):\n super(TestClassifier, cls).setUpClass()\n cls.acl_active_table = ''", "title": "" }, { "docid": "926ef697d9a3e4683dd8f43f6cab48ec", "score": "0.49582717", "text": "def setEnvironment(self, oSession, oTxsSession, oTestVm):\n self.oTest = tdCtxTest(oSession, oTxsSession, oTestVm);\n return self.oTest;", "title": "" }, { "docid": "69e2b652405a01dd4e31071d3b820ac6", "score": "0.49567485", "text": "def test_set_config(self):\n pass", "title": "" }, { "docid": "2324e00eeacd3742b24c6bf6c12c08b0", "score": "0.49561948", "text": "def chroma_level_test(self, chroma_level_test):\n\n self._chroma_level_test = chroma_level_test", "title": "" }, { "docid": "a4a4d9bbd5bbacc2f16665799771422a", "score": "0.4946226", "text": "def _enable_unittest(self):\n\n self._runner = unittest\n 
self.runner_args = getattr(self, \"runner_args\", None)\n if self.runner_args is None:\n self.runner_args = []\n else:\n self._configured_runner_args = True", "title": "" }, { "docid": "288f1fa831057ee2d7eb79dad508194d", "score": "0.4941643", "text": "def black_level_test(self, black_level_test):\n\n self._black_level_test = black_level_test", "title": "" }, { "docid": "282f4b364d31882586143db4cc433ba8", "score": "0.49381766", "text": "def test(self):\n return self._test", "title": "" }, { "docid": "9d5fd420a4e990ccf28290ed5eb3f23c", "score": "0.4934439", "text": "def runTest(self):\n if self.flag == 1:\n if app_config.SERVER_MODE is False:\n self.skipTest(\n \"Can not run Kerberos Authentication in the Desktop mode.\"\n )\n self.test_unauthorized()\n elif self.flag == 2:\n if app_config.SERVER_MODE is False:\n self.skipTest(\n \"Can not run Kerberos Authentication in the Desktop mode.\"\n )\n self.test_authorized()\n elif self.flag == 3:\n if app_config.SERVER_MODE is False:\n self.skipTest(\n \"Can not run Kerberos Authentication in the Desktop mode.\"\n )\n self.test_update_ticket()", "title": "" }, { "docid": "e2e2ffa36cf5f21d8869e200ca2d4957", "score": "0.4932988", "text": "def setUp(self):\n if self.skip_tests:\n return skip_this_test()\n super(ESTestCase, self).setUp()", "title": "" }, { "docid": "fcf84fcab032d3327b07ef04c6db50a4", "score": "0.49300912", "text": "def suicide(self, suicide):\n self.__suicide = test_input(suicide)", "title": "" }, { "docid": "ddb4b9e25f098edf6ef415164eed134c", "score": "0.49266", "text": "def test_set_new_value(self):", "title": "" }, { "docid": "343d0cce8356ac3e92c0de489f4efcd1", "score": "0.49261302", "text": "def test(self):\n pass", "title": "" }, { "docid": "f0baec97f010b1e414443800fc4cf017", "score": "0.49255598", "text": "def flash_test(self, flash_test):\n\n self._flash_test = flash_test", "title": "" }, { "docid": "db90b7d6d4b94c175e356128e0a74170", "score": "0.49242058", "text": "def test_set_with_str(self):\n presser_modes = ['auto', 'off', 'on']\n for presser_mode in presser_modes:\n expected_output = 'x-presser-mode {}'.format(presser_mode)\n self.assertEqual(set_presser_mode(presser_mode), expected_output)", "title": "" }, { "docid": "f414c91d1712112d6fb656af04b93c8e", "score": "0.4916614", "text": "def setSolo(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "0ba6a35896db320d5aab9ab37c1b52a6", "score": "0.4914972", "text": "def change_defaults(self):\r\n\t\tself.defaults['mutation'] = mutate_bellaso\r\n\t\tcipher = string.ascii_uppercase[13:] + string.ascii_uppercase[:13]\r\n\t\tself.defaults['alphabet'] = [string.ascii_uppercase, cipher]\r\n\t\tself.defaults['n'] = 5", "title": "" }, { "docid": "fc41ee50b568baee00a5e7f54996a743", "score": "0.49086806", "text": "def test_case(self):\n pass", "title": "" } ]
ba9232b89afd208790466ffcc6ae83e3
Test get model factory on CPU.
[ { "docid": "c63ab4ef24642f0cd5bad78718a743ce", "score": "0.7142461", "text": "def test_get_model_cpu():\n arch = {'conv1_filters': 20, 'conv2_filters': 50, 'output_classes': 10}\n model = get_model('lenet5', F.nll_loss, arch, torch.device('cpu'), 0)\n\n assert isinstance(model, QLeNet5)\n assert model.loss_fn == F.nll_loss\n assert next(model.parameters()).device.type == 'cpu'", "title": "" } ]
[ { "docid": "33d4e4f053607709d6aba0227c9d267f", "score": "0.69362444", "text": "def test_cpu_model():\n\n trainer_options = dict(\n progress_bar=False,\n experiment=get_exp(),\n max_nb_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.4\n )\n\n model, hparams = get_model()\n\n run_gpu_model_test(trainer_options, model, hparams, on_gpu=False)", "title": "" }, { "docid": "7b3be9ac1856e960f4e88c07a6bb843f", "score": "0.6830463", "text": "def test_createmodelthread(self):\n pass", "title": "" }, { "docid": "a02f08a94640c8ddd8a789eb93621908", "score": "0.6554953", "text": "def test_get_model_single_gpu():\n if not torch.cuda.is_available():\n return\n\n arch = {'conv1_filters': 20, 'conv2_filters': 50, 'output_classes': 10}\n model = get_model('lenet5', F.nll_loss, arch, torch.device('cuda:0'), 1)\n\n assert isinstance(model, QLeNet5)\n assert model.loss_fn == F.nll_loss\n assert next(model.parameters()).device.type == 'cuda'", "title": "" }, { "docid": "2914c7599d3e79fe89647b3eab05bb79", "score": "0.64572996", "text": "def test_build_model(self):\n with fa.app.test_request_context():\n fa.app.preprocess_request()\n conn = fa.g.rdb_conn\n shutil.copy(pjoin(DATA_DIR, \"test_classes.npy\"),\n pjoin(cfg.FEATURES_FOLDER, \"TEMP_TEST01_classes.npy\"))\n shutil.copy(pjoin(DATA_DIR, \"test_features.csv\"),\n pjoin(cfg.FEATURES_FOLDER, \"TEMP_TEST01_features.csv\"))\n r.table(\"projects\").insert({\"id\": \"abc123\",\n \"name\": \"abc123\"}).run(conn)\n r.table(\"features\").insert({\"id\": \"TEMP_TEST01\",\n \"projkey\": \"abc123\",\n \"name\": \"TEMP_TEST01\",\n \"created\": \"abc123\",\n \"headerfile_path\": \"HEADPATH.dat\",\n \"zipfile_path\": \"ZIPPATH.tar.gz\",\n \"featlist\": [\"a\", \"b\", \"c\"]}).run(conn)\n rv = fa.buildModel(project_name=\"abc123\",\n featureset_name=\"TEMP_TEST01\",\n model_type=\"RF\")\n res_dict = json.loads(rv.data)\n while \"currently running\" in fa.check_job_status(res_dict[\"PID\"]):\n time.sleep(1)\n new_model_key = res_dict[\"new_model_key\"]\n entry = r.table(\"models\").get(new_model_key).run(conn)\n assert \"results_msg\" in entry\n assert os.path.exists(pjoin(cfg.MODELS_FOLDER,\n \"TEMP_TEST01_RF.pkl\"))\n model = joblib.load(pjoin(cfg.MODELS_FOLDER, \"TEMP_TEST01_RF.pkl\"))\n assert hasattr(model, \"predict_proba\")\n os.remove(pjoin(cfg.MODELS_FOLDER, \"TEMP_TEST01_RF.pkl\"))\n os.remove(pjoin(cfg.FEATURES_FOLDER, \"TEMP_TEST01_classes.npy\"))\n os.remove(pjoin(cfg.FEATURES_FOLDER, \"TEMP_TEST01_features.csv\"))", "title": "" }, { "docid": "5ff7d2de07eb0f7f1841801cb39873e2", "score": "0.643934", "text": "def test_get_model(client, create_corpus):\n import json\n corpus_id = create_corpus()\n\n model_data = {\n \"name\": \"Test model\",\n \"beamWidth\": 1,\n \"corpusID\": corpus_id,\n \"decodingMergeRepeated\": True,\n \"earlyStoppingSteps\": 1,\n \"numberLayers\": 2,\n \"hiddenSize\": 2,\n \"maximumEpochs\": 2,\n \"minimumEpochs\": 1,\n \"maximumTrainingLER\": 0.4,\n \"maximumValidationLER\": 0.8,\n }\n\n response = client.post(\n '/v0.1/model',\n data=json.dumps(model_data),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 201\n model_response_data = json.loads(response.data.decode('utf8'))\n model_id = model_response_data['id']\n\n assert 'maximumTrainingLER' in model_response_data, model_response_data\n assert model_response_data['maximumTrainingLER'] == 0.4\n assert model_response_data['maximumValidationLER'] == 0.8\n\n response = client.get(\n '/v0.1/model/{}'.format(model_id),\n )\n assert 
response.status_code == 200\n model_get_data = json.loads(response.data.decode('utf8'))\n\n assert model_get_data[\"minimumEpochs\"] == 1\n assert model_get_data[\"maximumEpochs\"] == 2\n assert model_get_data[\"maximumTrainingLER\"] == 0.4\n assert model_get_data[\"maximumValidationLER\"] == 0.8", "title": "" }, { "docid": "a86f78df4b7215a3b083e1e3e599f61b", "score": "0.6363653", "text": "def test_all_features_cpu_model():\n\n trainer_options = dict(\n gradient_clip=1.0,\n overfit_pct=0.20,\n track_grad_norm=2,\n print_nan_grads=True,\n progress_bar=False,\n experiment=get_exp(),\n accumulate_grad_batches=2,\n max_nb_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.4\n )\n\n model, hparams = get_model()\n run_gpu_model_test(trainer_options, model, hparams, on_gpu=False)", "title": "" }, { "docid": "a30f3e29585271f99113aca90ebf06cf", "score": "0.63500464", "text": "def test_get_model_multi_gpu():\n if torch.cuda.device_count() <= 1:\n return\n\n arch = {'conv1_filters': 20, 'conv2_filters': 50, 'output_classes': 10}\n model = get_model('lenet5', F.nll_loss, arch, torch.device('cuda:0'), 2)\n\n assert isinstance(model, nn.DataParallel)\n assert model.module.loss_fn == F.nll_loss", "title": "" }, { "docid": "24313cc57278358a3287faf7b5812970", "score": "0.6337154", "text": "def get_cpu_model():\n cpu_model = None\n try:\n with open('/proc/cpuinfo', 'r') as f:\n lines = f.readlines()\n for line in lines:\n ret = re.match('\\s*model name\\s*:([\\S\\s]+)$', line.strip())\n if ret:\n grps = ret.groups()\n if grps:\n cpu_model = grps[0]\n break\n except Exception, e:\n return None, 'Error retrieving CPU model : %s' % str(e)\n else:\n return cpu_model, None", "title": "" }, { "docid": "a4e41622b8a965202441d924292c7185", "score": "0.63149273", "text": "def test_simple_cpu():\n hparams = get_hparams()\n model = LightningTestModel(hparams)\n\n save_dir = init_save_dir()\n\n # exp file to get meta\n trainer_options = dict(\n max_nb_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.1,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n # traning complete\n assert result == 1, 'amp + ddp model failed to complete'\n\n clear_save_dir()", "title": "" }, { "docid": "1a3af260d3b6d02e1504d699f5ca8438", "score": "0.62659115", "text": "def test_check_cpu_load(self):\n pass", "title": "" }, { "docid": "914b539e455efd72837fb6d36ce8416d", "score": "0.6252628", "text": "def test_build_model_proc(self):\n with fa.app.test_request_context():\n fa.app.preprocess_request()\n conn = fa.g.rdb_conn\n r.table(\"features\").insert({\"id\": \"TEMP_TEST01\",\n \"name\": \"TEMP_TEST01\"}).run(conn)\n r.table(\"models\").insert({\"id\": \"TEMP_TEST01\",\n \"name\": \"TEMP_TEST01\"}).run(conn)\n shutil.copy(pjoin(DATA_DIR, \"test_classes.npy\"),\n pjoin(cfg.FEATURES_FOLDER, \"TEMP_TEST01_classes.npy\"))\n shutil.copy(pjoin(DATA_DIR, \"test_features.csv\"),\n pjoin(cfg.FEATURES_FOLDER, \"TEMP_TEST01_features.csv\"))\n fa.build_model_proc(\"TEMP_TEST01\", \"TEMP_TEST01\",\n \"RF\", \"TEMP_TEST01\")\n entry = r.table(\"models\").get(\"TEMP_TEST01\").run(conn)\n assert \"results_msg\" in entry\n assert os.path.exists(pjoin(cfg.MODELS_FOLDER,\n \"TEMP_TEST01_RF.pkl\"))\n model = joblib.load(pjoin(cfg.MODELS_FOLDER, \"TEMP_TEST01_RF.pkl\"))\n assert hasattr(model, \"predict_proba\")\n os.remove(pjoin(cfg.MODELS_FOLDER, \"TEMP_TEST01_RF.pkl\"))\n os.remove(pjoin(cfg.FEATURES_FOLDER, \"TEMP_TEST01_classes.npy\"))\n os.remove(pjoin(cfg.FEATURES_FOLDER, 
\"TEMP_TEST01_features.csv\"))", "title": "" }, { "docid": "c24291050b7ef2011d9960739cc598dc", "score": "0.6198713", "text": "def test_model(self, architecture):\n x = np.random.rand(5, 10).astype('float32')\n sess = tf.InteractiveSession()\n if architecture == 'exu_nam':\n model = models.NAM(\n num_inputs=x.shape[1], num_units=1024, shallow=True, activation='exu')\n elif architecture == 'relu_nam':\n model = models.NAM(\n num_inputs=x.shape[1], num_units=64, shallow=False, activation='relu')\n elif architecture == 'dnn':\n model = models.DNN()\n else:\n raise ValueError('Architecture {} not found'.format(architecture))\n out_op = model(x)\n sess.run(tf.global_variables_initializer())\n self.assertIsInstance(sess.run(out_op), np.ndarray)\n sess.close()", "title": "" }, { "docid": "64efdccf8fd5f54cf3a8e19125a31af2", "score": "0.6189348", "text": "def get_cpu(rfo, api=1, unit=1):\n\n res = rfo.get(f\"/redfish/v{api}/Systems/{unit}\")\n if res.status != 200:\n print(f\"Error: {res.status}: {res.read}\")\n return \"XXX\"\n return res.dict['ProcessorSummary']['Model']", "title": "" }, { "docid": "7ad0be7ff50113ad9c926b9fca4aa5ef", "score": "0.6186646", "text": "def test_get_pretrained_fasterrcnn():\n assert type(get_pretrained_fasterrcnn(4)) == FasterRCNN", "title": "" }, { "docid": "39e929bc961228b369401f41ace5308c", "score": "0.616412", "text": "def test_RecSysModel():\n model = RecSysModel()\n\n with pytest.raises(NotImplementedError):\n model.train(None)\n model.predict(None)\n model.save_model(None)\n model.load_model(None)", "title": "" }, { "docid": "3d0d8559cf9a96c79d60d66dc2e88b22", "score": "0.6163282", "text": "def test_single_gpu_model():\n if not torch.cuda.is_available():\n warnings.warn('test_single_gpu_model cannot run.'\n ' Rerun on a GPU node to run this test')\n return\n model, hparams = get_model()\n\n trainer_options = dict(\n progress_bar=False,\n max_nb_epochs=1,\n train_percent_check=0.1,\n val_percent_check=0.1,\n gpus=[0]\n )\n\n run_gpu_model_test(trainer_options, model, hparams)", "title": "" }, { "docid": "425e140e10e4203597cb15007ec6b20f", "score": "0.61366665", "text": "def get_model(model_type:str):\n factory_obj = None\n try:\n factory_obj = model_loading_factory(model_type)\n except ValueError as e:\n print(e)\n return factory_obj", "title": "" }, { "docid": "54f43db04aade788aecfbb939f44a713", "score": "0.61342", "text": "def test_device_affinity(self):\n class Model(nn.Module):\n\n def __init__(self):\n super(Model, self).__init__()\n self.conv = nn.Conv2d(1, 1, 1)\n self.bn = nn.BatchNorm2d(1)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n model = Model()\n model.qconfig = torch.quantization.get_default_qat_qconfig(torch.backends.quantized.engine)\n device = torch.device('cuda:0')\n model.to(device)\n torch.quantization.prepare_qat(model, inplace=True)\n model_devices = {p.device for p in model.parameters()} | \\\n {p.device for p in model.buffers()}\n self.assertEqual(len(model_devices), 1)\n model_device = next(iter(model_devices))\n self.assertEqual(model_device, device)\n\n # ensure that running an input on CUDA works without any needed changes\n input = torch.randn(4, 1, 4, 4, device=device)\n model(input)", "title": "" }, { "docid": "f4c4a0278aeae1fabdd3588528b46250", "score": "0.6116593", "text": "def test_test_cpu_get(self):\n response = self.client.open(\n '/api/testCpu',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", 
"title": "" }, { "docid": "af50a94e8e845c666adc368278b95f7f", "score": "0.61059195", "text": "def test_functional() -> None:\n backbones = [\n \"alexnet\",\n \"resnet18\",\n \"resnet34\",\n \"resnet50\",\n \"resnet101\",\n \"resnext50_32x4d\",\n \"resnext101_32x8d\",\n \"wide_resnet50_2\",\n \"wide_resnet101_2\",\n \"densenet121\",\n \"densenet161\",\n \"densenet169\",\n \"densenet201\",\n \"googlenet\",\n \"mobilenet_v2\",\n \"mobilenet_v3_large\",\n \"mobilenet_v3_small\",\n ]\n assert CNNModel.postproc([1, 2]) == 1\n\n b = 4\n h = w = 512\n samples = torch.from_numpy(RNG.random((b, h, w, 3)))\n\n # Dummy entry, will generate ValueError if \"try\" fails without running the loop.\n backbone = \"empty\"\n try:\n for backbone in backbones:\n model = CNNModel(backbone, num_classes=1)\n model_ = model_to(on_gpu=ON_GPU, model=model)\n model.infer_batch(model_, samples, on_gpu=ON_GPU)\n except ValueError as exc:\n msg = f\"Model {backbone} failed.\"\n raise AssertionError(msg) from exc\n\n # skipcq\n with pytest.raises(ValueError, match=r\".*Backbone.*not supported.*\"):\n CNNModel(\"shiny_model_to_crash\", num_classes=2)", "title": "" }, { "docid": "3d1ca0cf2155b531186ba3bc4172d87f", "score": "0.60995907", "text": "def test_model():\n # Setup training/testing environment\n setup_env()\n # Construct the model\n model = setup_model()\n # Load model weights\n cp.load_checkpoint(cfg.TEST.WEIGHTS, model)\n logger.info(\"Loaded model weights from: {}\".format(cfg.TEST.WEIGHTS))\n # Create data loaders and meters\n test_loader = data_loader.construct_test_loader()\n test_meter = meters.TestMeter(len(test_loader))\n # Evaluate the model\n test_epoch(test_loader, model, test_meter, 0)", "title": "" }, { "docid": "6b944aa18eb5f641d31e155d870120e1", "score": "0.6075272", "text": "def test_model_loading(real_model):\n PredictionModel.load_model()\n\n if real_model:\n assert PredictionModel.pipeline is not None\n else:\n assert PredictionModel.pipeline is None", "title": "" }, { "docid": "cc4194275b758834c3136182af18e413", "score": "0.6069828", "text": "def ModelSelector(config: dict, test_mode=False) -> tf.keras.Model:\n # Get details\n model_name = config[\"model_name\"]\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n\n print(\"[INFO]: Selecting model: {0} with input size: {1}\".format(model_name, (image_width, image_height, 3)))\n\n if model_name == \"UNet-Dice\":\n return UNetDice((image_width, image_height, 3), test_mode)\n\n elif model_name == \"UNet\":\n return UNet((image_width, image_height, 3), test_mode)\n\n elif model_name == \"ResUNet\":\n return ResUNet((image_width, image_height, 3), test_mode)\n\n elif model_name == \"ResUNet++\":\n return ResUNetPlusPlus((image_width, image_height, 3), test_mode)\n\n elif model_name == \"DeepLabv3\":\n return Deeplabv3(input_shape=(image_width, image_height, 3), classes=1, backbone=\"xception\")\n\n elif model_name == \"FCN8\":\n return FCN8((image_width, image_height, 3), test_mode)\n\n elif model_name == \"SegNet\":\n return SegNet((image_width, image_height, 3), test_mode)\n\n elif model_name == \"UNet-Attn\" or model_name == \"UNet-GuidedAttn\":\n return UNetAttn((image_width, image_height, 3), test_mode)\n\n elif model_name == \"Dilated-ResFCN\":\n return DilatedResFCN((image_width, image_height, 3), test_mode)\n\n elif model_name == \"SE-UNet\":\n return SEUNet((image_width, image_height, 3), test_mode)\n\n elif model_name == \"Dilated-UNet\":\n return DilatedUNet((image_width, image_height, 3), test_mode)\n\n elif 
model_name == \"GAR-Net-Experimental\":\n return GARNetExperimental((image_width, image_height, 3), test_mode)\n\n elif model_name == \"GAR-Net\":\n return GARNet((image_width, image_height, 3), test_mode)\n\n else:\n raise NotImplementedError", "title": "" }, { "docid": "6c24b82e9940d1ac0f18c751d494d01b", "score": "0.6064554", "text": "def test_start_gpu_card_model_with_single_model_cpu(self):\n args = default_args()\n args.model = [self.model_dir]\n args.port = 9696\n\n p = Process(target=start_gpu_card_model, kwargs={\"gpu_mode\": False, \"port\": args.port, \"args\": args})\n p.start()\n os.system(\"sleep 5\")\n\n assert count_process_num_on_port(9696) == 1\n assert check_gpu_memory(0) is False\n\n # batch = False\n brcp_class, brpc_prob = self.predict(batch=False)\n print(brcp_class, brpc_prob)\n assert brcp_class == [985]\n assert brpc_prob == [0.9341399073600769]\n\n # batch_size = 2\n brcp_class, brpc_prob = self.predict(batch=True, batch_size=2)\n print(brcp_class, brpc_prob)\n assert brcp_class == [985, 985]\n assert brpc_prob == [0.9341403245925903, 0.9341403245925903]\n\n kill_process(9696, 1)", "title": "" }, { "docid": "622707387f97d6092d4260b897c32180", "score": "0.60258585", "text": "def test_get_model(self):\r\n ac = get_model(\"ac\", \"mlp\")\r\n p = get_model(\"p\", \"mlp\")\r\n v = get_model(\"v\", \"mlp\")\r\n v_ = get_model(\"v\", \"cnn\")\r\n\r\n assert ac == MlpActorCritic\r\n assert p == MlpPolicy\r\n assert v == MlpValue\r\n assert v_ == CNNValue", "title": "" }, { "docid": "efc5c0c6efbe3c00d6694abe2adf1d59", "score": "0.5967683", "text": "def test_get_model(self):\n # just returns query if it's already a substitution model\n for mod in (CNFGTR(), WG01(), GN()):\n got = get_model(mod)\n self.assertEqual(id(got), id(mod))\n\n with self.assertRaises(ValueError):\n # unknown model raises exception\n _ = get_model(\"blah\")", "title": "" }, { "docid": "97a123b4b50506c91b0941415151cf12", "score": "0.59639555", "text": "def get_model(model_name: str, num_classes:int , model_type:str) -> torch.nn.Module:\n if model_type not in ['memory','encoder_memory','std']:\n raise ValueError(f'modality (model type) must be one of [\\'memory\\',\\'encoder_memory\\',\\'std\\'], not {model_type}.')\n if model_name == 'efficientnet':\n if model_type=='memory':\n model = efficientnet.MemoryEfficientNetB0(num_classes)\n elif model_type == 'encoder_memory':\n model = efficientnet.EncoderMemoryEfficientNetB0(num_classes)\n else:\n model = efficientnet.EfficientNetB0(num_classes)\n elif model_name == 'resnet18':\n if model_type=='memory':\n model = resnet.MemoryResNet18()\n elif model_type == 'encoder_memory':\n model = resnet.EncoderMemoryResNet18()\n \n else:\n model = resnet.ResNet18()\n elif model_name == 'shufflenet':\n if model_type=='memory':\n model = shufflenet.MemoryShuffleNetV2(net_size=0.5)\n elif model_type == 'encoder_memory':\n model = shufflenet.EncoderMemoryShuffleNetV2(net_size=0.5)\n else:\n model = shufflenet.ShuffleNetV2(net_size=0.5)\n elif model_name == 'densenet':\n if model_type=='memory':\n model = densenet.memory_densenet_cifar()\n elif model_type == 'encoder_memory':\n model = densenet.encoder_memory_densenet_cifar()\n else:\n model = densenet.densenet_cifar()\n elif model_name == 'googlenet':\n if model_type=='memory':\n model = googlenet.MemoryGoogLeNet()\n elif model_type == 'encoder_memory':\n model = googlenet.EncoderMemoryGoogLeNet()\n else:\n model = googlenet.GoogLeNet()\n elif model_name == 'mobilenet':\n if model_type=='memory':\n model = 
mobilenet.MemoryMobileNetV2(num_classes)\n elif model_type == 'encoder_memory':\n model = mobilenet.EncoderMemoryMobileNetV2(num_classes)\n else:\n model = mobilenet.MobileNetV2(num_classes)\n else:\n raise ValueError(\"Error: input model name is not valid!\")\n\n \n return model", "title": "" }, { "docid": "da548ab5c56775a7a3eed6be4cbdd10e", "score": "0.59607047", "text": "def single_proc_test():\n\n # Setup logging\n lu.setup_logging()\n # Show the config\n logger.info('Config:\\n{}'.format(cfg))\n\n # Fix the RNG seeds (see RNG comment in core/config.py for discussion)\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n # Configure the CUDNN backend\n torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK\n\n # Evaluate the model\n test_model()", "title": "" }, { "docid": "761a0708c3704210337c1abe7e97e0f3", "score": "0.5958491", "text": "def get_model(model_name, kwargs):\n\n # If the user isn't \"sallamander\", I'm assuming it's being run on \n # a dedicated instance, so we'll want to use all available cores. \n n_usable_cores = multiprocessing.cpu_count() \\\n if os.environ['USER'] != 'sallamander' else 2\n\n rand_seed = kwargs.get('rand_seed', 24)\n\n if model_name == 'logit': \n model = LogisticRegression(random_state=rand_seed)\n elif model_name == 'random_forest': \n model = RandomForestClassifier(random_state=rand_seed, \n n_jobs=n_usable_cores)\n elif model_name == 'extra_trees': \n model = ExtraTreesClassifier(random_state=rand_seed, n_jobs=n_usable_cores)\n elif model_name == 'gboosting': \n model = GradientBoostingClassifier(random_state=rand_seed)\n elif model_name == 'neural_net': \n model = KerasNet(kwargs)\n elif model_name == 'xgboost': \n model = XGBClassifier(seed=rand_seed)\n else: \n raise Exception(\"Invalid model name! 
Try again...\") \n\n return model", "title": "" }, { "docid": "c8a4f029e4a0b2b8bbdafb2949d3bcd6", "score": "0.5957632", "text": "def test_get_model() -> None:\n dataset_args = {'dataset_fraction': 0.01}\n network_args = {'input_shape': (64, 64, 3),\n 'num_classes': 1000}\n model = train_model.get_model(dataset_args, network_args)\n assert model.network.layers[0].output_shape[1] == (64 * 64 * 3)\n assert model.network.layers[-1].output_shape[1] == 1000\n dataset_args = {'dataset_fraction': 0.001}\n network_args = {'input_shape': (32, 32, 3),\n 'num_classes': 999}\n model = train_model.get_model(dataset_args, network_args)\n assert model.network.layers[0].output_shape[1] == (32 * 32 * 3)\n assert model.network.layers[-1].output_shape[1] == 999", "title": "" }, { "docid": "d12a543f22519535f224665ccf49af7c", "score": "0.5954071", "text": "async def test_load_models_concurrently(self, start_serve_with_context):\n\n signal = SignalActor.remote()\n\n async def model_load_func(model_id: str):\n await signal.wait.remote()\n return\n\n multiplexer = _ModelMultiplexWrapper(\n model_load_func, None, max_num_models_per_replica=1\n )\n stop_model_ids_pusher_thread(multiplexer)\n\n loop = get_or_create_event_loop()\n tasks = [\n loop.create_task(multiplexer.load_model(\"1\")),\n loop.create_task(multiplexer.load_model(\"2\")),\n loop.create_task(multiplexer.load_model(\"3\")),\n ]\n await asyncio.sleep(1)\n assert len(multiplexer.models) == 0\n assert len(multiplexer._model_load_tasks) == len(tasks)\n assert multiplexer._push_multiplexed_replica_info\n signal.send.remote()\n done, _ = await asyncio.wait(tasks, timeout=1)\n assert len(done) == len(tasks)\n assert len(multiplexer.models) == 1\n assert \"3\" in multiplexer.models\n assert len(multiplexer._model_load_tasks) == 0", "title": "" }, { "docid": "8ac2050beaa96b72326d7452c03f0c1e", "score": "0.59504926", "text": "def create_dummy_model():\n np.random.seed(123)\n num_timesteps = 100\n num_channels = 2\n num_samples_train = 5\n num_samples_val = 3\n X_train = np.random.rand(\n num_samples_train,\n num_timesteps,\n num_channels)\n y_train = to_categorical(np.array([0, 0, 1, 1, 1]))\n X_val = np.random.rand(num_samples_val, num_timesteps, num_channels)\n y_val = to_categorical(np.array([0, 1, 1]))\n best_model, best_params, best_model_type, knn_acc = find_architecture.find_best_architecture(\n X_train, y_train, X_val, y_val, verbose=False, subset_size=10,\n number_of_models=1, nr_epochs=1)\n return(best_model)", "title": "" }, { "docid": "1028dc494c7f67cb729ad4e90d6768f4", "score": "0.59292245", "text": "def test_create_model(self):\n self.assertNotEqual(self._get_from_db(self.model), None)", "title": "" }, { "docid": "20509bfeaa59c2e587c2a21110f5b5ea", "score": "0.5926998", "text": "def test_models_run_cuda(self):\n if torch.cuda.is_available():\n # move model to cuda\n model = GMMEM(self.k, self.test_data,\n max_iter=self.max_iter).cuda()\n self.assertTrue(\"cuda\" in model.loc.device.type)\n self.assertTrue(\"cuda\" in model.pi.device.type)\n self.assertTrue(\"cuda\" in model.cov.device.type)\n\n # fit model\n model.fit(self.test_data.cuda())\n self.assertTrue(\"cuda\" in model._comp.loc.device.type)\n self.assertTrue(\n \"cuda\" in model._comp.covariance_matrix.device.type)\n self.assertTrue(\"cuda\" in model._mix.logits.device.type)\n\n model(self.test_data.cuda())\n model.cpu()\n self.assertTrue(\"cpu\" in model.loc.device.type)\n self.assertTrue(\"cpu\" in model.pi.device.type)\n self.assertTrue(\"cpu\" in model.cov.device.type)\n\n 
self.assertTrue(\"cpu\" in model._comp.loc.device.type)\n self.assertTrue(\n \"cpu\" in model._comp.covariance_matrix.device.type)\n self.assertTrue(\"cpu\" in model._mix.logits.device.type)", "title": "" }, { "docid": "ea02d70ffa2aab4ee12ebb79c4b53b6e", "score": "0.59240234", "text": "def test_model(self):\n if not self.st:\n self.st = self.State(exec_mode=ExecMode.TEST)\n self._create_model()\n print('\\nTESTING:')\n self.st.exec_mode = ExecMode.TEST\n self.st.out_type = OutputType.RECONSTRUCTION\n self._test()", "title": "" }, { "docid": "c158383a087624829e2db434201d5cb8", "score": "0.5919412", "text": "def test_model_build():\n import scnym\n\n torch.manual_seed(1)\n np.random.seed(1)\n\n model = scnym.model.CellTypeCLF(\n n_genes=10000,\n n_cell_types=10,\n n_hidden=64,\n n_layers=2,\n residual=False,\n init_dropout=0.0,\n )\n\n assert hasattr(model, \"classif\")\n assert ~hasattr(model, \"dan\")\n\n model = scnym.model.CellTypeCLF(\n n_genes=10000,\n n_cell_types=10,\n n_hidden=64,\n n_layers=2,\n residual=False,\n init_dropout=0.0,\n track_running_stats=False,\n )\n\n return", "title": "" }, { "docid": "67077583c8ba4318cce13b45c3df7bb7", "score": "0.5916131", "text": "def setup_model():\n # Build the model\n model = builders.build_model()\n logger.info(\"Model:\\n{}\".format(model))\n # Log model complexity\n logger.info(logging.dump_log_data(net.complexity(model), \"complexity\"))\n # Transfer the model to the current GPU device\n err_str = \"Cannot use more GPU devices than available\"\n assert cfg.NUM_GPUS <= torch.cuda.device_count(), err_str\n cur_device = torch.cuda.current_device()\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n # Set complexity function to be module's complexity function\n model.complexity = model.module.complexity\n return model", "title": "" }, { "docid": "a69a6b69b55e023518df03171a0bfff2", "score": "0.5888434", "text": "def run_model():\n config = Config('config.yaml')\n if config.model == 'GRU':\n model = GRU()\n elif config.model == 'LSTM':\n model = LSTM()\n elif config.model == 'CNN':\n model = CNN()\n else:\n model = CNN_LSTM()\n model.run()", "title": "" }, { "docid": "4c68a502de73e7d1f785cb00933c55db", "score": "0.5871214", "text": "def test_cpu_features(test_microvm_with_api, network_config):\n vm = test_microvm_with_api\n vm.spawn()\n vm.basic_config()\n _tap, _, _ = vm.ssh_network_config(network_config, \"1\")\n vm.start()\n _check_cpu_features_arm(vm)", "title": "" }, { "docid": "b76be89ea73de5d3246242d1a69c463c", "score": "0.5847327", "text": "def test_get_learning_model(self):\n self.assertEqual(get_learning_model('testmodel').__class__, TestModel)", "title": "" }, { "docid": "06651f948534c486f234e6e6bedb1e9b", "score": "0.5844523", "text": "def load_model_test():\n model_path = pkg_resources.resource_filename('hwrt', 'misc/')\n model_file = os.path.join(model_path, \"model.tar\")\n utils.load_model(model_file)", "title": "" }, { "docid": "ae4ab40527df1cbab7ae52e99427d6b6", "score": "0.5841597", "text": "def test_get_inference_service(self):\n pass", "title": "" }, { "docid": "1f9c0e2cf250daf21e4e203057f8055d", "score": "0.5835671", "text": "def test_genModel(self):\n self.performTestForParams()", "title": "" }, { "docid": "6b2c492df971bda8be71fd01ad9d4b5d", "score": "0.5812875", "text": 
"def _test_model_clf():\n return CClassifierSVM()", "title": "" }, { "docid": "e9b1375a2ccee267d24e1c78b931a10f", "score": "0.5799221", "text": "def test(time_limit=_DEFAULT_TIME_LIMIT, random=None):\n physics = mujoco.Physics.from_xml_string(*get_model_and_assets())\n task = Primitives(random=random)\n return control.Environment(physics, task, time_limit=time_limit)", "title": "" }, { "docid": "6f8d1b8c0b954dbbe9641da98c9dd35a", "score": "0.5789226", "text": "def test_get_model(\n decoy: Decoy,\n subject: InstrumentCore,\n mock_engine_client: EngineClient,\n) -> None:\n decoy.when(\n mock_engine_client.state.pipettes.get_model_name(pipette_id=subject.pipette_id)\n ).then_return(\"pipette-model\")\n assert subject.get_model() == \"pipette-model\"", "title": "" }, { "docid": "04c540464f66c60116386a4e7b3aa0a3", "score": "0.5788855", "text": "def test_model(mlp_model):\n print(f'[INFO]: Testing model on sample input...')\n tmp = torch.ones(1, 1).to('cuda:0')\n out = mlp_model(tmp)\n print(f'[INFO]: Test Result is: ', out.detach().cpu().numpy())", "title": "" }, { "docid": "09612a263dcee4b40931cfa8aaf650b7", "score": "0.5786217", "text": "def init_cpu():\n # Remove the arguments to avoid interferences\n sys.argv = [sys.argv[0]]\n # Ask the runtime to avoid using GPU/GPU memory\n os.environ[\"CPU_ONLY_TEST\"] = \"1\"\n\n global LLM, SSM, GenerationConfig, GenerationResult\n from .serve import LLM, SSM, GenerationConfig, GenerationResult", "title": "" }, { "docid": "c5def130bddb93bef63d43a4599b0e55", "score": "0.5779355", "text": "def test(self):\n self.load_model()\n self.model.to(self.device)\n # Spawn data now, but the metric callable later\n data = self.get_testing_data()\n # Run the test logic (automatically stores results)\n self.test_on(data, self.snapshot)", "title": "" }, { "docid": "20dc5e57f38d91bb9791a18fc9212920", "score": "0.57773095", "text": "def test_model_loading():\n train_1 = np.random.rand(200, 20)\n train_2 = np.random.rand(200, 10)\n\n tests = {\n \"\": [[20,10], MODELS],\n }\n module = importlib.import_module(\"multiviewae\")\n for cfg, [dim, models] in tests.items():\n for m in models:\n class_ = getattr(module, m)\n if len(cfg) != 0:\n model = class_(cfg=abspath(join(dirname( __file__ ), cfg)), input_dim=dim)\n else: \n model = class_(input_dim=dim)\n \n model.fit(train_1, train_2, max_epochs=1, batch_size=200)\n\n print(\"RESULTS: \", m)\n loaded_model = class_.load_from_checkpoint(join(model.cfg.out_dir, \"last.ckpt\"))\n recon = loaded_model.predict_reconstruction(train_1, train_2)\n print_results(\"recon last.ckpt\", recon)\n loaded_model = class_.load_from_checkpoint(join(model.cfg.out_dir, \"model.ckpt\"))\n recon = loaded_model.predict_reconstruction(train_1, train_2)\n print_results(\"recon model.ckpt\", recon)", "title": "" }, { "docid": "1f99a8c88a7ee2c45b9416662c15a0db", "score": "0.57761216", "text": "def test_start_gpu_card_model_with_single_model_gpu(self):\n args = default_args()\n args.model = [self.model_dir]\n args.port = 9696\n args.gpu_ids = [\"0,1\"]\n\n p = Process(target=start_gpu_card_model, kwargs={\"gpu_mode\": True, \"port\": args.port, \"args\": args})\n p.start()\n os.system(\"sleep 10\")\n\n assert count_process_num_on_port(9696) == 1\n assert check_gpu_memory(0) is True\n assert check_gpu_memory(1) is True\n\n # batch = False\n brcp_class, brpc_prob = self.predict(batch=False)\n print(brcp_class, brpc_prob)\n assert brcp_class == [985]\n assert brpc_prob == [0.9341405034065247]\n\n # batch_size = 2\n brcp_class, brpc_prob = 
self.predict(batch=True, batch_size=2)\n print(brcp_class, brpc_prob)\n assert brcp_class == [985, 985]\n assert brpc_prob == [0.9341405034065247, 0.9341405034065247]\n\n kill_process(9696, 3)", "title": "" }, { "docid": "09ae215079b8b78eab76e5782baa36d7", "score": "0.5768969", "text": "def run_model(self):", "title": "" }, { "docid": "ae92dad78d88dfd39d145b552848004c", "score": "0.576172", "text": "def test_create_inference_service(self):\n pass", "title": "" }, { "docid": "6caea33c1a851fab831a1f0fa936b793", "score": "0.575827", "text": "def test_to(self):\n module_list = [crypten.nn.Linear(10, 10) for _ in range(3)]\n model = crypten.nn.Sequential(*module_list)\n\n model_cpu = model.to(\"cpu\")\n cpu = torch.device(\"cpu\")\n for param in model_cpu.parameters():\n self.assertEqual(param.device, cpu)\n for buffer in model_cpu.buffers():\n self.assertEqual(buffer.device, cpu)\n\n model_cpu = model.cpu()\n for param in model_cpu.parameters():\n self.assertEqual(param.device, cpu)\n for buffer in model_cpu.buffers():\n self.assertEqual(buffer.device, cpu)\n\n if torch.cuda.is_available():\n cuda = torch.device(\"cuda:0\")\n model_cuda = model.cuda()\n for param in model_cuda.parameters():\n self.assertEqual(param.device, cuda)\n for buffer in model_cuda.buffers():\n self.assertEqual(buffer.device, cuda)\n\n model_cuda = model.to(\"cuda:0\")\n for param in model_cuda.parameters():\n self.assertEqual(param.device, cuda)\n for buffer in model_cuda.buffers():\n self.assertEqual(buffer.device, cuda)", "title": "" }, { "docid": "6c99bd37fe9c7705680080133f3e956b", "score": "0.57474357", "text": "def test_models_run_cuda(self):\n if torch.cuda.is_available():\n # move model to cuda\n model = GMMDescent(self.k, self.test_data).cuda()\n self.assertTrue(\"cuda\" in model.loc.device.type)\n self.assertTrue(\"cuda\" in model.pi.device.type)\n self.assertTrue(\"cuda\" in model.cov.device.type)\n\n # fit model\n self.assertTrue(\"cuda\" in model._comp.loc.device.type)\n self.assertTrue(\n \"cuda\" in model._comp.covariance_matrix.device.type)\n self.assertTrue(\"cuda\" in model._mix.logits.device.type)\n\n model(self.test_data.cuda())\n model.cpu()\n self.assertTrue(\"cpu\" in model.loc.device.type)\n self.assertTrue(\"cpu\" in model.pi.device.type)\n self.assertTrue(\"cpu\" in model.cov.device.type)\n\n self.assertTrue(\"cpu\" in model._comp.loc.device.type)\n self.assertTrue(\n \"cpu\" in model._comp.covariance_matrix.device.type)\n self.assertTrue(\"cpu\" in model._mix.logits.device.type)", "title": "" }, { "docid": "5dfc50f81cb9ca741ec734423bbb6c75", "score": "0.574345", "text": "def test_model(args):\n print(\"=====> Check if the cached file exists \")\n if not os.path.isfile(args.inform_data_file):\n print(\"%s is not found\" %(args.inform_data_file))\n dataCollect = CamVidTrainInform(args.data_dir, args.classes, train_set_file= args.dataset_list, \n inform_data_file = args.inform_data_file) #collect mean std, weigth_class information\n datas = dataCollect.collectDataAndSave()\n if datas is None:\n print('Error while pickling data. 
Please check.')\n exit(-1)\n else:\n print(\"%s exists\" %(args.inform_data_file))\n datas = pickle.load(open(args.inform_data_file, \"rb\"))\n \n print(args)\n global network_type\n \n if args.cuda:\n print(\"=====> Use gpu id: '{}'\".format(args.gpus))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n if not torch.cuda.is_available():\n raise Exception(\"No GPU found or Wrong gpu id, please run without --cuda\")\n \n args.seed = random.randint(1, 10000)\n print(\"Random Seed: \", args.seed)\n torch.manual_seed(args.seed)\n if args.cuda:\n torch.cuda.manual_seed(args.seed) \n cudnn.enabled = True\n\n M = args.M\n N = args.N\n model = CGNet.Context_Guided_Network(classes= args.classes, M= M, N= N)\n network_type=\"CGNet\"\n print(\"=====> current architeture: CGNet_M%sN%s\"%(M, N))\n total_paramters = netParams(model)\n print(\"the number of parameters: \" + str(total_paramters))\n print(\"data['classWeights']: \", datas['classWeights'])\n weight = torch.from_numpy(datas['classWeights'])\n print(\"=====> Dataset statistics\")\n print(\"mean and std: \", datas['mean'], datas['std'])\n \n # define optimization criteria\n criteria = CrossEntropyLoss2d(weight, args.ignore_label)\n if args.cuda:\n model = model.cuda()\n criteria = criteria.cuda()\n \n #load test set\n train_transform= transforms.Compose([\n transforms.ToTensor()])\n testLoader = data.DataLoader(CamVidValDataSet(args.data_dir, args.test_data_list,f_scale=1, mean= datas['mean']),\n batch_size = args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True)\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=====> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n #model.load_state_dict(convert_state_dict(checkpoint['model']))\n model.load_state_dict(checkpoint['model'])\n else:\n print(\"=====> no checkpoint found at '{}'\".format(args.resume))\n \n cudnn.benchmark= True\n\n print(\"=====> beginning test\")\n print(\"length of test set:\", len(testLoader))\n mIOU_val, per_class_iu = test(args, testLoader, model, criteria)\n print(mIOU_val)\n print(per_class_iu)", "title": "" }, { "docid": "3b291d858204fdd5f6e9da869ed9b8bb", "score": "0.5738931", "text": "def init_model(self, model):\n if not HAVE_SOESGP:\n model = \"knn\"\n print \"Sorry, SOESGP/STORKGP not available, defaulting to knn\"\n \n if model == \"knn\":\n # self.mdl = KNeighborsRegressor(n_neighbors=5)\n self.mdl = ActInfKNN(self.idim, self.odim)\n elif model == \"soesgp\":\n self.mdl = ActInfSOESGP(self.idim, self.odim)\n elif model == \"storkgp\":\n self.mdl = ActInfSTORKGP(self.idim, self.odim)\n else:\n print \"unknown model, FAIL, exiting\"\n import sys\n sys.exit(1)", "title": "" }, { "docid": "37b919e36143d7087074fd5c200152d5", "score": "0.573432", "text": "def test_factory_default(get_camera):\n camera = get_camera\n\n camera.factory_default()", "title": "" }, { "docid": "f5a01149fdd7a3096e66f8a3a48f80cd", "score": "0.57341105", "text": "def initialize_cpu():\n logging.info(\"Using default (CPU) strategy\")\n return tf.distribute.get_strategy()", "title": "" }, { "docid": "6d5825d3e4561932c0e5b3e3d3b58f41", "score": "0.57154286", "text": "def get_cpu_model(self):\n return self.get_match_data(CPU_INFO_FILE, 'model name\\s+:\\s+(.*)')", "title": "" }, { "docid": "57445242da5bcfb4a107ccc82f83c9ef", "score": "0.57048535", "text": "def init_detection_model(args: Dict) -> CRAFT:\n args = Map(args)\n if not os.path.exists(args.trained_model):\n raise ValueError(\"Incorrect path for text 
detection model\")\n net = CRAFT() # initialize\n\n if args.cuda:\n net.load_state_dict(copyStateDict(torch.load(args.trained_model)))\n else:\n net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))\n\n if args.cuda:\n net = net.cuda()\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = False\n\n net.eval()\n return net", "title": "" }, { "docid": "e00c5c1dba4f2abc52dbc273abddbd6c", "score": "0.57000494", "text": "def setUp(self):\n self._fs_name = 'fs'\n self._service = Service()\n self._service.setUp()\n time.sleep(1)\n self._proxy = get_object(TOP_OBJECT)\n ((self._pool_object_path, _), _, _) = Manager.Methods.CreatePool(\n self._proxy,\n {\n 'name': self._POOLNAME,\n 'redundancy': (True, 0),\n 'force': False,\n 'devices': _DEVICE_STRATEGY.example()\n }\n )\n self._pool_object = get_object(self._pool_object_path)\n (created, _, _) = Pool.Methods.CreateFilesystems(\n self._pool_object,\n {'specs': [self._fs_name]}\n )\n self._filesystem_object_path = created[0][0]\n Manager.Methods.ConfigureSimulator(self._proxy, {'denominator': 8})", "title": "" }, { "docid": "25caf44462f979aa79fbf04b1ded977a", "score": "0.569894", "text": "def getCPUmodel():\n # model name : Intel(R) Xeon(TM) CPU 2.40GHz\n # cache size : 512 KB\n # gives the return string \"Intel(R) Xeon(TM) CPU 2.40GHz 512 KB\"\n\n cpumodel = ''\n cpucache = ''\n modelstring = ''\n try:\n f = open('/proc/cpuinfo', 'r')\n except Exception, e:\n tolog(\"Could not open /proc/cpuinfo: %s\" % str(e))\n else:\n re_model = re.compile('^model name\\s+:\\s+(\\w.+)')\n re_cache = re.compile('^cache size\\s+:\\s+(\\d+ KB)')\n\n # loop over all lines in cpuinfo\n for line in f.readlines():\n # try to grab cpumodel from current line\n model = re_model.search(line)\n if model:\n # found cpu model\n cpumodel = model.group(1)\n\n # try to grab cache size from current line\n cache = re_cache.search(line)\n if cache:\n # found cache size\n cpucache = cache.group(1)\n\n # stop after 1st pair found - can be multiple cpus\n if cpumodel and cpucache:\n # create return string\n modelstring = cpumodel + \" \" + cpucache\n break\n \n f.close()\n\n # default return string if no info was found\n if not modelstring:\n modelstring = \"UNKNOWN\"\n\n return modelstring", "title": "" }, { "docid": "81aefd43fdb10efdd67fed6018e3bce9", "score": "0.56986743", "text": "def load_model(self, params):\n if self.options.load_model_sleep_interval > 1e-7:\n interval = random.random() * self.options.load_model_sleep_interval\n self.logger.info(f'Sleeping for {interval} seconds')\n time.sleep(interval + 1e-7)\n\n # Initialize models.\n model = self.model_class(self.option_map_for_model, params)\n\n if self.options.load:\n self.logger.info(f'Loading model from {self.options.load}')\n if self.options.omit_keys:\n self.logger.info(f'Omitting keys {self.options.omit_keys}')\n\n if self.options.replace_prefix:\n replace_prefix = [\n item.split(\",\")\n for item in self.options.replace_prefix\n ]\n self.logger.info(\n f'replace_prefix for state dict: {replace_prefix}')\n else:\n replace_prefix = []\n\n model.load(\n self.options.load,\n omit_keys=self.options.omit_keys,\n replace_prefix=replace_prefix,\n check_loaded_options=self.options.check_loaded_options)\n\n self.logger.info(\n f'Finished loading model from {self.options.load}')\n\n if self.options.onload:\n for func in self.options.onload:\n try:\n getattr(model, func)()\n self.logger.info('Called function {func!s} for model')\n except BaseException:\n self.logger.info('Calling function 
{func!s} failed!')\n raise\n if self.options.use_fp16:\n old_step = model.step\n model = FP16Model(self.option_map_for_model, params, model)\n model.step = old_step\n if torch.cuda.is_available() and \\\n self.options.gpu is not None and \\\n self.options.gpu >= 0:\n model.cuda(self.options.gpu)\n\n return model", "title": "" }, { "docid": "e1a866bad0b0cfacecccf31ec633da22", "score": "0.56962955", "text": "def test_create_model(self):\n target = next(self.target_generator)\n\n self.assertTrue(Target, type(target))\n self.assertEqual(45, target.latitude)\n self.assertEqual(-72, target.longitude)\n self.assertEqual('McDonalds', target.location)\n self.assertEqual('127.0.0.{0}'.format(self.generator_count), target.ip)\n\n try:\n self.service.create_model()\n except AttributeError:\n pass\n else:\n raise AssertionError()", "title": "" }, { "docid": "afe86bcaa5cad48d3bff7478d9fa216f", "score": "0.5695756", "text": "def setup_system(self):\n\n cpu = self.options['cpu']\n if cpu:\n with tf.device('\\cpu:0'):\n self.pred = self.setup_network(self.X_placeholder, self.y_placeholder, self.is_training)\n else:\n self.pred = self.setup_network(self.X_placeholder, self.y_placeholder, self.is_training)", "title": "" }, { "docid": "8d328fd675730d8b4ded615c8dd5fa6c", "score": "0.56788635", "text": "def test_get_hardware_status(self):\n pass", "title": "" }, { "docid": "6787bd8ba1603e4e2ef8a3931f695b05", "score": "0.5669129", "text": "def test_PhysicalSMFactory(mock_physical_params, mock_exp, test_reflections):\n physicalmodel = PhysicalSMFactory.create(\n mock_physical_params, mock_exp, test_reflections\n )\n assert isinstance(physicalmodel, PhysicalScalingModel)\n assert physicalmodel.configdict[\"lmax\"] == (\n mock_physical_params.parameterisation.lmax\n )\n assert physicalmodel.components[\"absorption\"].n_params == 24\n assert list(physicalmodel.components[\"absorption\"].parameters) == [0.0] * 24", "title": "" }, { "docid": "ce95100a1bea37c4249955e38addd184", "score": "0.56671226", "text": "def test_load_tf_device(self):\r\n dev = qml.device(\"strawberryfields.tf\", wires=2, cutoff_dim=5)\r\n assert dev.num_wires == 2\r\n assert dev.cutoff == 5\r\n assert dev.hbar == 2\r\n assert dev.shots is None\r\n assert dev.short_name == \"strawberryfields.tf\"", "title": "" }, { "docid": "e00bf322cf2881c7b5fdd764049526f7", "score": "0.5662224", "text": "def test_device_id(self):\n cpu_device = torch.device(\"cpu\")\n composable_module = CompositeParamModel(device=cpu_device)\n for param in composable_module.parameters():\n assert (\n param.device == cpu_device\n ), \"Expects module to be initialized on CPU for this unit test\"\n fully_shard(\n composable_module,\n policy=ModuleWrapPolicy({UnitModule}),\n device_id=self.rank,\n )\n for param in composable_module.parameters():\n self.assertEqual(param.device, torch.device(\"cuda\", self.rank))", "title": "" }, { "docid": "44b6a827581d570d1d81ee63b552c0ad", "score": "0.56537116", "text": "def create_model(args):\n\n client = CloudMlClient()\n model = ModelService(args.model_name, args.model_version, args.model_uri)\n\n if args.model_args:\n model.model_args = args.model_args\n if args.cpu_limit:\n model.cpu_limit = args.cpu_limit\n if args.memory_limit:\n model.memory_limit = args.memory_limit\n if args.gpu_limit:\n model.gpu_limit = int(args.gpu_limit)\n if args.framework:\n model.framework = args.framework\n if args.framework_version:\n model.framework_version = args.framework_version\n if args.docker_image:\n model.docker_image = args.docker_image\n if 
args.docker_command:\n model.docker_command = args.docker_command\n if args.replicas:\n model.replicas = int(args.replicas)\n if args.prepare_command:\n model.prepare_command = args.prepare_command\n if args.finish_command:\n model.finish_command = args.finish_command\n if args.node_selector_key:\n model.node_selector_key = args.node_selector_key\n if args.node_selector_value:\n model.node_selector_value = args.node_selector_value\n if args.watch:\n model_name = args.model_name\n model_version = args.model_version\n\n response = client.create_model_service(model)\n if not isinstance(response, str):\n print_model_service_info(response)\n if args.watch:\n print(\"\\nThe model is creating, feel free to Ctrl+C to stop watching\\n\")\n print(\"{:32} {:4} {:16} {:32} {:32}\".format(\n \"Model_NAME\",\n \"VERSION\",\n \"STATE\",\n \"CREATED\",\n \"UPDATED\"))\n while True:\n watch_response = client.describe_model_service(model_name, model_version)\n if not isinstance(watch_response, str):\n print(\"{:32} {:4} {:16} {:32} {:32}\".format(\n watch_response[\"model_name\"],\n watch_response[\"model_version\"],\n color_util.colorize_state(watch_response[\"state\"]),\n watch_response[\"create_time\"],\n watch_response[\"update_time\"]))\n if watch_response[\"state\"] == constant.MODEL_STATE_RUNNING:\n return\n try:\n time.sleep(constant.JOB_WATCH_INTERVAL)\n except KeyboardInterrupt:\n return\n else:\n return\n else:\n print(\"response: {}\".format(response))", "title": "" }, { "docid": "1c0bd3caa9e763c8a7dcb42938f8d64a", "score": "0.56515145", "text": "def model_provider(model_name, trained = True, PATH = None, cuda = True, eval = True, half = False, parallel = False, dicts = False, NUM_CLS = 3, NUM_SEG = 1):\n assert(model_name in ['seresnet', 'seresnext', 'efficientb5', 'efficientb4', \n 'unet_effib4', 'unet_effib5', 'unet_seresnext50',\n \"fpn_effib4\", 'fpn_effib5', 'fpn_seresnext50'])\n \n # Classification\n if model_name == 'seresnext':\n model = SeResNext_50(num_classes = NUM_CLS)\n elif model_name == 'seresnet':\n model = SeResNet_50(num_classes = NUM_CLS)\n elif model_name == 'efficientb5':\n model = EfficientNet_b5(num_classes = NUM_CLS)\n elif model_name == 'efficientb4':\n model = EfficientNet_b4(num_classes = NUM_CLS)\n \n # Unet\n elif model_name == 'unet_effib5':\n model = smp.Unet('efficientnet-b5', encoder_weights='imagenet', classes=NUM_SEG)\n elif model_name == 'unet_effib4':\n model = smp.Unet('efficientnet-b4', encoder_weights='imagenet', classes=NUM_SEG)\n elif model_name == 'unet_seresnext50':\n model = smp.Unet('se_resnext50_32x4d', encoder_weights='imagenet', classes=NUM_SEG)\n elif model_name == 'unet_inceptionv4':\n model = smp.Unet('inceptionv4', encoder_weights='imagenet', classes=NUM_SEG)\n \n # FPN \n elif model_name == 'fpn_effib4':\n model = smp.FPN('efficientnet-b4', encoder_weights='imagenet', classes=1)\n elif model_name == 'fpn_effib5':\n model = smp.FPN('efficientnet-b5', encoder_weights='imagenet', classes=1)\n elif model_name == 'fpn_seresnext50':\n model = smp.FPN('se_resnext50_32x4d', encoder_weights='imagenet', classes=1)\n elif model_name == 'fpn_inceptionv4':\n model = smp.FPN('inceptionv4', encoder_weights='imagenet', classes=1) \n \n # whether using saved state dict\n if trained:\n params = torch.load(PATH)\n\n if dicts:\n model.load_state_dict(remove_dataparallel(params))\n else:\n model.load_state_dict(remove_dataparallel(params[\"state_dict\"]))\n\n model = model.half() if half else model\n model = model.cuda() if cuda else model\n model = model.eval() 
if eval else model\n model = torch.nn.DataParallel(model) if parallel else model\n \n return model", "title": "" }, { "docid": "db04fd6ae737f7f95a369464fed03820", "score": "0.5650796", "text": "def run_model():\n params = initialize()", "title": "" }, { "docid": "0f740139c42483db71798d8a9bf52101", "score": "0.5646651", "text": "def def_model(self, model):\n\n if model == \"lasso\":\n return make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))\n elif model == \"ENet\":\n return make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))\n elif model == \"KRR\":\n return KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)\n elif model == \"GBoost\":\n return GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=4, max_features='sqrt',\n min_samples_leaf=15, min_samples_split=10, loss='huber', random_state=5)\n\n elif model == \"Lgb\":\n return lgb.LGBMRegressor(objective='regression',num_leaves=5, learning_rate=0.05, n_estimators=720,\n max_bin=55, bagging_fraction=0.8, bagging_freq=5, feature_fraction=0.2319,\n feature_fraction_seed=9, bagging_seed=9, min_data_in_leaf=6,\n min_sum_hessian_in_leaf=1)\n else:\n raise NameError('The model is not included or the name is incorrect')", "title": "" }, { "docid": "ee5c10abe0fe9a1f385528275560bc76", "score": "0.5641105", "text": "def get_model(config, gpuid):\n with tf.name_scope(config.modelname), tf.device('/gpu:%d' % gpuid):\n model = Model(config, '%s' % config.modelname)\n return model", "title": "" }, { "docid": "6853a0d27fff3f5b453206b412c52e92", "score": "0.5638931", "text": "def cpu_test(session):\n cpu_lib = None\n try:\n cpu_lib = export_cpu_add_lib()\n test_add(session, cpu_lib, \"/cpu:0\")\n finally:\n if cpu_lib is not None:\n os.remove(cpu_lib)", "title": "" }, { "docid": "a988d621e181e0b6367a82a79256ef7f", "score": "0.5635823", "text": "def setUp(self):\n self._service = Service()\n self._service.setUp()\n time.sleep(1)\n self._proxy = get_object(TOP_OBJECT)\n ((poolpath, _), _, _) = Manager.Methods.CreatePool(\n self._proxy,\n {\n 'name': self._POOLNAME,\n 'redundancy': (True, 0),\n 'force': False,\n 'devices': []\n }\n )\n self._pool_object = get_object(poolpath)\n Manager.Methods.ConfigureSimulator(self._proxy, {'denominator': 8})", "title": "" }, { "docid": "be890163f446f7ffcc5611bef6acea44", "score": "0.5631051", "text": "def testModel(self):\n\n with self.assertRaises(ImportError):\n OnnxModel(None)", "title": "" }, { "docid": "d1787fab5e41688c97a920a0cf09c31b", "score": "0.56163126", "text": "def __init__(self, hps, gpu_mode=True, reuse=False):\n self.hps = hps\n assert hps.model_mode in ['train', 'eval', 'eval_sample', 'sample']\n # with tf.variable_scope('SCC', reuse=reuse):\n if not gpu_mode:\n with tf.device('/cpu:0'):\n print('Model using cpu.')\n self.build_model()\n else:\n print('-' * 100)\n print('model_mode:', hps.model_mode)\n print('Model using gpu.')\n self.build_model()", "title": "" }, { "docid": "ff922928935e2c1880cb11afe16ab3a3", "score": "0.56137884", "text": "def load_model(model_name):\n if model_name == 'quartznet':\n print('Using QuartzNet model')\n return stt_nemo.load_model(QUARTZNET_MODEL_PATH)\n elif model_name == 'jasper':\n print('Using Jasper model')\n return stt_nemo.load_model(JASPER_MODEL_PATH)\n elif model_name == 'deepspeech':\n print('Using DeepSpeech model')\n return stt_deepspeech.load_model(DEEPSPEECH_MODEL_PATH, DEEPSPEECH_SCORER_PATH)", "title": "" }, { "docid": "dfd46fe460e83a5a116a3f33938c6902", "score": 
"0.5613048", "text": "def test_set_model(self):\n d1 = Devices(\"PC\", \"Dell\", \"xps\", \"2018\",\"3rd Floor\")\n d1.set_model(\"MacBook Pro\")\n self.assertEqual(d1.get_model(), \"MacBook Pro\")", "title": "" }, { "docid": "8e2d1fb9c338c1c701de67d280b3308f", "score": "0.5608899", "text": "def test_models_run(self):\n model = GMMEM(self.k, self.test_data, max_iter=self.max_iter)\n model.fit(self.test_data)\n model(self.test_data)", "title": "" }, { "docid": "b5d664d27b62720e446831d0a812a8fb", "score": "0.560766", "text": "def test_net():\n\n create_network()", "title": "" }, { "docid": "82062be531b8f0651ca1415b006d9c5e", "score": "0.5605858", "text": "def create_model():\n if config.EXPERIMENT_DETAILS['FEATURE_EXP'] == 'mel':\n model = CustomMel()\n elif config.EXPERIMENT_DETAILS['FEATURE_EXP'] == 'raw':\n model = CustomRaw()\n if cuda:\n model.cuda()\n return model", "title": "" }, { "docid": "ff2a355a23476163869bc2cf7c4fca8c", "score": "0.5598574", "text": "def get_model():\n try:\n model = config_model['MODEL_OBJ']\n print(\"Detected pretrain model!!\")\n # os.makedirs('Models', exist_ok = True)\n # save_path = os.path.join('Models', config_model['MODEL_NAME']+'.h5')\n # print(f'Model has been saved following directory : {save_path}')\n # model.save(save_path)\n return model\n\n except Exception as e:\n print(\"Something went wrong!!\", e)", "title": "" }, { "docid": "fd87aee94e532c2c267343375b258610", "score": "0.55802757", "text": "def test_ScalingModelfactories(default_params, mock_exp, test_reflections):\n\n KBmodel = KBSMFactory.create(default_params, [], [])\n assert isinstance(KBmodel, KBScalingModel)\n\n physicalmodel = PhysicalSMFactory.create(default_params, mock_exp, test_reflections)\n assert isinstance(physicalmodel, PhysicalScalingModel)\n\n arraymodel = ArraySMFactory.create(default_params, mock_exp, test_reflections)\n assert isinstance(arraymodel, ArrayScalingModel)\n\n # Add more rigorous tests to checl that the model has been set up correctly.?\n # Might be best to refactor scaling model factories first.", "title": "" }, { "docid": "90db25bf3aee04db7353058e4364d863", "score": "0.55733943", "text": "def test_load_model(self):\n self.assertTrue(self.__class__.model)\n self.assertEqual(self.iter, 4000)", "title": "" }, { "docid": "230d72fa6b283f720a6d32e08eff1055", "score": "0.5573038", "text": "async def init():\n await asyncio.sleep(2)\n print('aaa')\n global model\n\n # Loading the sodanet module\n print('Loading SodaNet model')\n model = SodaModel()", "title": "" }, { "docid": "d6d6cb4be5119a1fa16e3700d1820c33", "score": "0.5567481", "text": "def test_early_stopping_cpu_model():\n\n stopping = EarlyStopping(monitor='val_loss')\n trainer_options = dict(\n early_stop_callback=stopping,\n gradient_clip=1.0,\n overfit_pct=0.20,\n track_grad_norm=2,\n print_nan_grads=True,\n progress_bar=False,\n experiment=get_exp(),\n train_percent_check=0.1,\n val_percent_check=0.1\n )\n\n model, hparams = get_model()\n run_gpu_model_test(trainer_options, model, hparams, on_gpu=False)\n\n # test freeze on cpu\n model.freeze()\n model.unfreeze()", "title": "" }, { "docid": "c037ac14cd1907970713ca3c3786e0b2", "score": "0.5567216", "text": "def make_model(model, **kwargs):\n if(IS_GPU):\n prepare_gpu_model(model, **kwargs)\n else:\n prepare_cpu_model(model, **kwargs)\n return model", "title": "" }, { "docid": "cf49a6540ab7a5750273deed0cbcd25a", "score": "0.55631214", "text": "def test_start_gpu_card_model_without_model(self):\n args = default_args()\n args.model = \"\"\n with 
pytest.raises(SystemExit) as e:\n start_gpu_card_model(gpu_mode=False, port=args.port, args=args)\n assert str(e.value) == \"-1\"", "title": "" }, { "docid": "0608e8b938771ec9a59458bec944b1d3", "score": "0.55605197", "text": "def product_model():\n return TEST_PRODUCT_MODEL", "title": "" }, { "docid": "e4ab88d2762f12b638a92da0be032546", "score": "0.5559539", "text": "def test_generate_model_circuit():\n model_circuit = quantum_volume.generate_model_circuit(\n 3, 3, random_state=np.random.RandomState(1))\n\n assert len(model_circuit) == 3\n # Ensure there are no measurement gates.\n assert list(\n model_circuit.findall_operations_with_gate_type(\n cirq.MeasurementGate)) == []", "title": "" }, { "docid": "7598c3b29b94efc9c4f07b47f20abb62", "score": "0.5556976", "text": "def connectToModelTest(project, model):\n\n # project = 'Connect1'\n # model = 'rtdemo1'\n # connect to the local project\n try:\n modelState, realTimeMode = OpalApiPy.GetModelState()\n\n except:\n projectPath = 'C:/RT-LABv11_Workspace_New/'\n projectName = os.path.join(projectPath, str(project)+'/'+str(project) +'.llp')\n modelPath = os.path.join(projectPath, 'Simulink/')\n modelName = str(model) + '.mdl'\n\n #Connects to Project\n RtlabApi.OpenProject(projectName)\n print \"Now connected to '%s' project.\" % projectName\n\n #Connects to model\n #filename = os.path.join(modelPath,modelName)\n # OpalApiPy.SetCurrentModel(filename)\n modelState, realTimeMode = OpalApiPy.GetModelState()\n # print \"Model State Connected is %s.\" %modelStateList[modelState]\n print \"Now connected to %s model.\" %modelName\n #print \"Model state 1 is %s\" %modelStateList[modelState]\n\n # OpalApiPy.StartCompile2(((\"\",31),))\n # print\"compiling\"\n # OpalApiPy.RegisterDisplay(OpalApiPy.DISPLAY_REGISTER_ALL)\n # OpalApiPy.DisplayInformation(0)\n\n try:\n # Acquire Model State, Get system control\n modelState, realTimeMode = OpalApiPy.GetModelState()\n print \"Model state is %s'.\" %modelStateList[modelState]\n\n # Model Connection Parameters\n systemControl = 0\n # modelName = ''\n modelPath = ''\n exactMatch = 0\n returnOnAmbiquity = 0\n\n # Connect API to model if already running\n if modelState == OpalApiPy.MODEL_RUNNING:\n instanceId = OpalApiPy.ConnectByName(str(model))\n print \"Now connected to running model %s.\\n\" %model\n # print (\"instanceId is: \", str(instanceId))\n\n elif (modelState == OpalApiPy.MODEL_LOADABLE):\n\n\n # If model is not loaded,load and ask to execute\n\n realTimeMode = realTimeModeList['Software Sync']\n timeFactor = 1\n OpalApiPy.Load(realTimeMode, timeFactor)\n print \"RT Project %s is Loading.\" %project\n OpalApiPy.LoadConsole()\n print \"Model %s console is loading\" % model\n\n\n chooseExecute = raw_input(\"Loading Complete.Would you like to execute model now? 
y/n \")\n chooseExecute = 'y'\n if chooseExecute == 'y':\n try:\n print \"Now Executing Model\"\n systemControl = 1\n OpalApiPy.GetSystemControl(systemControl)\n print \"System Control Granted\"\n\n # OpalApiPy.LoadConsole()\n # print \"Model %s console is loading\" % model\n\n OpalApiPy.ExecuteConsole()\n print \"Model %s console is executed\" %model\n\n OpalApiPy.Execute(1)\n\n #sleep(10)\n modelState,realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" %modelStateList[modelState]\n if((modelState == OpalApiPy.MODEL_RUNNING) | (modelState == OpalApiPy.MODEL_LOADED)):\n print\"Model Running\"\n\n else:\n\n modelState, realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" % modelStateList[modelState]\n print\"Model Not executed\"\n transitionToPause()\n\n # systemControl = 1\n # OpalApiPy.GetSystemControl(systemControl)\n # OpalApiPy.ResetConsole()\n # print \"System Control Granted\"\n # print \"Console is now reset\"\n # # resets the model after loading\n # OpalApiPy.Pause()\n # print \"Model %s is now paused\" % model\n # # OpalApiPy.Reset()\n # # print \"Model %s is now reset.\" %model\n #\n # systemControl = 0\n # OpalApiPy.GetSystemControl(systemControl)\n # print \"System Control is released\"\n # # OpalApiPy.Disconnect()\n # # print \"Disconnected from %s model\" % modelName\n\n except:\n\n modelState, realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" % modelStateList[modelState]\n print\"Model Not executed\"\n transitionToPause()\n\n # print \"Module execution unsuccessful\"\n # systemControl = 1\n # OpalApiPy.GetSystemControl(systemControl)\n # OpalApiPy.ResetConsole()\n # print \"System Control Granted\"\n # print \"Console is now reset\"\n # # resets the model after loading\n # OpalApiPy.Pause()\n # print \"Model %s is now paused\" % model\n # # OpalApiPy.Reset()\n # # print \"Model %s is now reset.\" %model\n #\n # systemControl = 0\n # OpalApiPy.GetSystemControl(systemControl)\n # print \"System Control is released\"\n # # OpalApiPy.Disconnect()\n # # print \"Disconnected from %s model\" % modelName\n\n elif chooseExecute == 'n':\n\n print \"Model not executed\"\n\n # OpalApiPy.ExecuteConsole()\n # OpalApiPy.PauseConsole()\n\n\n elif (modelState == OpalApiPy.MODEL_LOADED):\n # If model is loaded but not running, execute the model\n try:\n print \"Now Executing Model\"\n systemControl = 1\n OpalApiPy.GetSystemControl(systemControl)\n print \"System Control Granted\"\n\n # Load Simulink and Matlab\n OpalApiPy.LoadConsole()\n print \"Model %s console is loaded\" % model\n OpalApiPy.ExecuteConsole()\n print \"Model %s console is executed\" % model\n OpalApiPy.Execute(1)\n\n modelState,realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" %modelStateList[modelState]\n if((modelState == OpalApiPy.MODEL_RUNNING) | (modelState == OpalApiPy.MODEL_LOADED)):\n print\"Model is running\"\n\n else:\n\n modelState, realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" % modelStateList[modelState]\n print\"Model Not executed\"\n transitionToPause()\n\n except:\n modelState,realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" %modelStateList[modelState]\n print\"Model Not executed\"\n fullDisconnect()\n\n elif(modelState == OpalApiPy.MODEL_PAUSED):\n #try:\n systemControl = 1\n #OpalApiPy.GetSystemControl(0)\n #OpalApiPy.Disconnect()\n #OpalApiPy.ConnectByName(str(model))\n OpalApiPy.GetSystemControl(systemControl)\n OpalApiPy.LoadConsole()\n print\"Console loaded\"\n 
OpalApiPy.ExecuteConsole()\n print \"Model %s console is executed\" % model\n OpalApiPy.Execute(1)\n print \"Model %s is executed\" % model\n modelState, realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" % modelStateList[modelState]\n if ((modelState == OpalApiPy.MODEL_RUNNING) | (modelState == OpalApiPy.MODEL_LOADED)):\n print\"Model is running\"\n\n #except:\n # logging.error('<Model Control Released>')\n #fullDisconnect()\n\n else:\n print \"Compile and Assign Model before Loading or Running\"\n\n finally:\n print \"Complete Connection Successful\"\n # Disconnect from Model after testing\n # OpalApiPy.Disconnect()\n # print \"Disconnected from %s model\" %modelName\n\n #MODEL IS ALREADY CONNECTED,\n else:\n print(\"MODEL ALREADY CONNECTED\")\n #OpalApiPy.Disconnect()\n connectToModel('IEEE39Acq', 'phasor01_IEEE39')\n modelState, realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" % modelStateList[modelState]\n systemControl = 1\n OpalApiPy.GetSystemControl(systemControl)\n print\"System Control Granted\"\n #systemControl = 1\n # OpalApiPy.GetSystemControl(0)\n # OpalApiPy.Disconnect()\n # OpalApiPy.ConnectByName(str(model))\n #OpalApiPy.GetSystemControl(systemControl)\n #OpalApiPy.LoadConsole()\n #print\"Console loaded\"\n #OpalApiPy.ExecuteConsole()\n #print \"Model %s console is executed\" % model\n OpalApiPy.Execute(1)\n print \"Model %s is executed\" % model\n modelState, realTimeMode = OpalApiPy.GetModelState()\n print\"Model State is %s\" % modelStateList[modelState]\n if ((modelState == OpalApiPy.MODEL_RUNNING) | (modelState == OpalApiPy.MODEL_LOADED)):\n print\"Model is running\"", "title": "" }, { "docid": "ac0edb1861c062ea91e1e0f97127275f", "score": "0.5556842", "text": "def test_device_creation(self):\n d1 = Devices(\"PC\", \"Dell\", \"xps\", \"2018\",\"3rd Floor\")\n self.assertEqual(d1.get_device_type(), \"PC\")", "title": "" }, { "docid": "73c0ab12783b8f2311eda57e52349ede", "score": "0.5556013", "text": "def setUp(self):\n self._service = Service()\n self._service.setUp()\n time.sleep(1)\n self._proxy = get_object(TOP_OBJECT)\n self._errors = StratisdErrorsGen.get_object()\n ((self._pool_object_path, _), _, _) = Manager.CreatePool(\n self._proxy,\n name=self._POOLNAME,\n redundancy=0,\n force=False,\n devices=_DEVICE_STRATEGY.example()\n )\n self._pool_object = get_object(self._pool_object_path)\n Manager.ConfigureSimulator(self._proxy, denominator=8)", "title": "" }, { "docid": "ca3e9edb210c82f529e7c0a402bf925b", "score": "0.5555661", "text": "def test_train_no_model(mock_checkpoint):\n dummy_net = DummyNet(INPUT_SHAPE, OUTPUT_SHAPE, USE_CUDA)\n with mock.patch(\n \"src.tasks.acoustic_scenes_spectral.model.SpectralSceneNet\"\n ) as net_cls:\n net_cls.return_value = dummy_net\n train(\n num_epochs=2,\n use_cuda=USE_CUDA,\n batch_size=1,\n wandb_name=None,\n subsample=4,\n checkpoint_epochs=None,\n )", "title": "" }, { "docid": "39c2d533c3a4cb115d9f8365ac002741", "score": "0.55545574", "text": "def test_get_non_existing_learning_model(self):\n self.assertIsNone(get_learning_model('notregistered'))", "title": "" }, { "docid": "0b739a0b34406c03a6642fa2eb3b6d6b", "score": "0.5553113", "text": "def _test_one_model_params(self, registry, name):\n if name.startswith(self._PREFIX):\n name = name[len(self._PREFIX):]\n\n model_params = registry.get_model(name)()\n\n task_p = model_params.task()\n task = task_p.Instantiate()\n self.assertIsInstance(task, base_task.BaseTask)\n\n dataset_splits = model_params.datasets()\n # Registered model 
configurations must have at least a dataset split.\n self.assertNotEmpty(dataset_splits)\n for s in dataset_splits:\n self.assertIsInstance(s, base_input.BaseInputParams)\n # Note: Creating the input generator may require data access.", "title": "" }, { "docid": "28c33b2002955cc50585d146c97c2140", "score": "0.55518025", "text": "def firearm_detection():\n model_1_path = 'model_3.hdf5'\n model_1 = load_model(model_1_path)\n return model_1", "title": "" } ]
71cad424af1cfed320da85bfeb9e1129
Can either use index (idx) or the direct path (img_path) to return image
[ { "docid": "6a31ecf2731166221430055dac13630f", "score": "0.7684441", "text": "def get_image(self, idx, img_path=None):\n if idx is not None:\n img_path = self.image_paths[idx]\n else:\n assert(img_path is not None)\n image = io.imread(img_path)\n if self.transform:\n image = self.transform(image)\n return image", "title": "" } ]
[ { "docid": "1857528a91e680dc985e3882fa6cac7c", "score": "0.78116846", "text": "def get_image(self,idx):\n return get_image_prepared(self.cfg, self.imdb.roidb[idx])", "title": "" }, { "docid": "0e3a0717ee1ff6c6a49001e595bdf651", "score": "0.7789988", "text": "def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'JPEGImages_'+self._image_set,\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n \"\"\"if not os.path.exists(image_path):\n f = open(os.path.join(self._devkit_path,'fall11_urls.txt'))\n find = re.compile('.+_'+re.sub(r'\\w+_\\w+_0*','',index)+'\\s+(http.+)')\n for x in f.readlines():\n m=find.match(x)\n if m:\n url=m.group(1)\n print 'getting '+image_path+' from '+url\n try:\n urllib.urlretrieve(url,image_path)\n except IOError:\n print 'load failed'\n return ''\n break;\n f.close()\n if not os.path.exists(image_path):\n return ''\n #assert os.path.exists(image_path), \\\n # 'Unable to download image for: {}'.format(image_path)\n \"\"\"\n return image_path", "title": "" }, { "docid": "30e3399fbce00f6d1ea37efd1ecc7e97", "score": "0.7708264", "text": "def image_path_from_index(self, index):\n \n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "8b67b31551371878e0ba32a93e220a98", "score": "0.7653553", "text": "def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'WIDER_' + self._image_set, \n 'images', index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "7232ed0b6acbfdc73bc618581b07113c", "score": "0.7646645", "text": "def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])", "title": "" }, { "docid": "d5b7fa38ad574777e4b57e8ba0f427e9", "score": "0.7629", "text": "def image_path_from_index(self, index):\r\n index = index.split('.')[0]\r\n image_file = os.path.join(self.data_path, 'images', self.image_set, index + '.jpg')\r\n assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)\r\n return image_file", "title": "" }, { "docid": "bd17b77757b55ad490747f92512bb492", "score": "0.75685376", "text": "def get_image(self, index):\n img_info = self._coco.loadImgs(self._img_ids[index])[0]\n return cv2.imread(os.path.join(self._img_path, img_info[\"file_name\"]))", "title": "" }, { "docid": "fb0bec299de6800e2616d2e0b37ec7f5", "score": "0.75640166", "text": "def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'image', index + self._image_ext)\n\n assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "107352c11428936e3af2c5385935b5b6", "score": "0.75638527", "text": "def image_path_at(self, i):\n #print 'at '+str(i)\n #print self._image_index[i]\n toreturn= self.image_path_from_index(self._image_index[i])\n if toreturn != '':\n return toreturn\n \n del self._image_index[i]\n return self.image_path_at(i)", "title": "" }, { "docid": "cf2b65a58a8429cad1128f460ccfbcb7", "score": "0.75405014", "text": "def image_path_at(self, i):\n return self._image_index[i] + self._image_ext", "title": "" }, { "docid": "e053b14014b12bbf5559fb693ffd0487", "score": "0.7483369", "text": "def pull_image(self, index):\n img_id = self.ids[index]\n img = 
load_image(self._imgpath / (img_id.stem + '.jpg'))\n return img", "title": "" }, { "docid": "14ceada06cf75f3a8f7233119c0c00b2", "score": "0.7479458", "text": "def __getitem__(self, index):\n image_path = self.image_paths[index]\n image = Image.open(image_path).convert('YCbCr')\n #grayscale_image = image.split()[0]\n #if self.transform is not None:\n # image = self.transform(image)\n return image", "title": "" }, { "docid": "3fc9f1df784b3fb59cd5bb2018e6bb0e", "score": "0.7399699", "text": "def __getitem__(self, index):\n\n try:\n img_file = self.image_files[index]\n # if img_file starts with a backslash, remove it\n # otherwise, os.path.join does not work\n if img_file[0] == \"/\" or img_file == \"\\\\\":\n img_file = img_file[1:]\n img_file = os.path.join(self.images_dir, img_file)\n img = Image.open(img_file).convert(\"RGB\")\n\n except Exception as e:\n logger.error(\"ERROR while loading image {}\".format(img_file))\n logger.error(\"{}\".format(e))\n return None\n\n # peacefully return the pair\n img = self.transform(img)\n label = self.labels[index]\n return img, label", "title": "" }, { "docid": "c545cec9ab86ddce516a832d40cf8ec1", "score": "0.73739064", "text": "def __getitem__(self, index):\n # read a image given a random integer index\n path = self.paths[index]\n img = Image.open(path).convert('RGB')\n\n # apply image transformation\n img = self.transform(img)\n\n return {'img': img,\n 'path': path\n }", "title": "" }, { "docid": "d7d5af65837bdc9b04ac46e5ed1a161c", "score": "0.73733526", "text": "def _get_image(self, idx: int) -> np.ndarray:\n\n img_idx = np.searchsorted(self._lens, idx, side='right')\n image = self._image_loader(self._samples[img_idx][0])\n patch_idx = idx - self._lens[img_idx]\n image = image[self._grids[patch_idx]]\n return image", "title": "" }, { "docid": "b61349a7e2d1f4e5506c5fc1b6cc923e", "score": "0.7359266", "text": "def __getitem__(self, index):\n img = self.data[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n #if self.target_transform is not None:\n # target = self.target_transform(target)\n img.to(torch.float32)\n\n return img, 1, -1, index", "title": "" }, { "docid": "e362e2c56f155de93668e35713ffe184", "score": "0.7352045", "text": "def image_path_from_index(self, index):\n\n image_path = os.path.join(self._data_path, index + '_scene' + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "fffa181c55b3dc11c357dcac5d279cb7", "score": "0.7346971", "text": "def __getitem__(self, index):\n img = self.data[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n \n img.to(torch.float32)\n\n\n return img, 1, -1, index", "title": "" }, { "docid": "3fb67ca7daa770bf55e672ba52938990", "score": "0.7342082", "text": "def _get_idx_file(self, idx):\n img_path, mask_path = self.imgs[idx], self.masks[idx]\n\n return img_path, mask_path", "title": "" }, { "docid": "d73b67b1893ee3d6ab0d2cb9d5166a6b", "score": "0.7335856", "text": "def image_path_at(self, i):\n return self.image_path_from_index(self.image_index[i])", "title": "" }, { "docid": "d73b67b1893ee3d6ab0d2cb9d5166a6b", "score": "0.7335856", "text": "def image_path_at(self, i):\n return 
self.image_path_from_index(self.image_index[i])", "title": "" }, { "docid": "460314027384499ecaf748b675a92e97", "score": "0.72802365", "text": "def __getitem__(self, index):\n img, target = self.data[index], int(self.targets[index])\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n #if self.target_transform is not None:\n # target = self.target_transform(target)\n \n img.to(torch.float32)\n\n return img, 1, -1, index\n #return img, target, -1, index", "title": "" }, { "docid": "460314027384499ecaf748b675a92e97", "score": "0.72802365", "text": "def __getitem__(self, index):\n img, target = self.data[index], int(self.targets[index])\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n #if self.target_transform is not None:\n # target = self.target_transform(target)\n \n img.to(torch.float32)\n\n return img, 1, -1, index\n #return img, target, -1, index", "title": "" }, { "docid": "07f551b7020420fa5f678cdd1b9298f3", "score": "0.7279649", "text": "def image_path_from_index(self, index):\n\n image_path = os.path.join(self._data_path, index + '-color' + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "d688f43a554c122ffb29d4f1e3ecc9c5", "score": "0.7214286", "text": "def __getitem__(self, index):\n name = self.im_names[index]\n im_name = os.path.join(self.im_dir, name)\n label = [\n parse_im_name(name, 'id'),\n parse_im_name(name, 'cam'),\n self.marks[index]\n ]\n img = Image.open(im_name).convert('RGB')\n img = np.asarray(img)\n\n return img, label", "title": "" }, { "docid": "add3d4538fce41aa0cf51af7747873df", "score": "0.7196116", "text": "def pull_image(self, idx):\n img_id = self.id_to_img_map[idx]\n\n try:\n file_name = self.coco.loadImgs(img_id)[0]['file_name']\n except:\n file_name = self.coco.loadImgs(img_id)[0]['coco_url'].split('.org/')[-1]\n\n return Image.open(os.path.join(self.root, file_name)).convert('RGB')", "title": "" }, { "docid": "e16a4376eb0c04b953996ce165c8b7db", "score": "0.71938044", "text": "def __getitem__(self, idx: int) -> Tuple[Any, int, str]:\n x = self.target_list[idx]\n path, label, _ = x.items()\n\n img = Image.open(path).convert(\"RGB\")\n\n if self.transform:\n img = self.transform(img)\n return img, label, path", "title": "" }, { "docid": "bbe89a272ebd5b2a0d8a2a957c039016", "score": "0.7187044", "text": "def image_path_from_index(self, index):\n\n # Example image path for index=119993:\n # images/train2014/COCO_train2014_000000119993.jpg\n if self._year == '2017':\n file_name = str(index).zfill(12) + '.jpg'\n elif self._year == '2014':\n file_name = ('COCO_' + self._data_name + '_' + str(index).zfill(12) + '.jpg')\n image_path = osp.join(self._data_path, 'images', self._data_name, file_name)\n assert osp.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "ff5dc7d5392551c6267df93576afa807", "score": "0.7177194", "text": "def __getitem__(self, index):\n image, _ = super().__getitem__(index)\n image_path, _ = self.imgs[index]\n image_width, image_height = Image.open(image_path).size\n\n image_name = image_path.strip().split('/')[-1].split('.')[0]\n if self.ground_truth_is_xml:\n n_id, _ = 
image_name.split('_')\n ground_truth_path = os.path.join(self.ground_truth_root,\n n_id,\n '%s.xml' % image_name)\n else:\n ground_truth_path = os.path.join(self.ground_truth_root,\n '%s_Segmentation.png' % (\n image_name))\n ground_truth_mask = self._get_ground_truth_mask(ground_truth_path,\n image_height,\n image_width)\n ground_truth_mask = self.ground_truth_transform(\n ground_truth_mask).squeeze(0)\n return image, ground_truth_mask, image_name", "title": "" }, { "docid": "8f74226e00a0359ab2e12fdb3f350e37", "score": "0.7171874", "text": "def __getitem__(self, index):\n \n img = self.img_arr[index]\n \n if 0:\n img[:2, :] = 0\n img[-2:, :] = 0\n img[:, :2] = 0\n img[:, -2:] = 0\n \n # some boundry cleanup\n \n if self.transform:\n img = self.transform(image=img)\n \n #if random.randint(0, 1):\n # if random.randint(0, 1):\n # img = cv2.dilate(img, self.k, iterations=1)\n # else:\n # img = cv2.erode(img, self.k, iterations=1)\n \n opt = random.randint(0, 1)\n if opt == 0:\n img = self.do_random_line(img)\n #elif opt == 1:\n # msk = self.grid_mask(img)\n # img[msk==1] = 0\n \n # scale\n img = img / 255.\n # norm\n img = (img - 0.05213458652450004) / 0.16489511099093002\n \n # make channel\n img = img[None, :, :]\n if self.RGB:\n img = np.repeat(img, 3, axis=0)\n \n lbl = self.labels[index].astype(int)\n if self.split_label:\n lbl = lbl.tolist()\n \n return img.astype(self.dtype), lbl", "title": "" }, { "docid": "f5f7313ec93c83acc78a3cb06ebfc1da", "score": "0.71220964", "text": "def __getitem__(self, index):\n\n path, target = self.data[index], int(self.targets[index])\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = self.loader(path)\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target", "title": "" }, { "docid": "1c67b0ce2cda2093e43a165f2aa684fa", "score": "0.7110501", "text": "def __getitem__(self, index):\n # Read image\n img = Image.open(os.path.join(self.data_folder, self.image_file_names[index]), mode='r')\n img = img.convert('RGB')\n if img.width <= 96 or img.height <= 96:\n print(self.image_file_names[index], img.width, img.height)\n lr_img, hr_img = self.transform(img)\n images = {'lr': lr_img, 'hr': hr_img, 'lr_names':self.image_file_names[index], 'hr_names':self.image_file_names[index]}\n return images", "title": "" }, { "docid": "cb17717afa91285baf41c34ffbe43fc6", "score": "0.7109505", "text": "def __getitem__(self, index):\n img_name = self.data.iloc[index, 0]\n img_name = os.path.join(self.root_dir, img_name)\n img = Image.open(img_name)\n\n label = self.data.iloc[index, 1].astype(np.float32)\n label = np.expand_dims(label, axis=0)\n\n if label == 1:\n mask = np.ones((1, self.map_size, self.map_size), dtype=np.float32) * self.label_weight\n else:\n mask = np.ones((1, self.map_size, self.map_size), dtype=np.float32) * (1.0 - self.label_weight)\n\n if self.transform:\n img = self.transform(img)\n\n return img, mask, label", "title": "" }, { "docid": "bc50a5a02b467c3b2df59385e284dedc", "score": "0.7100492", "text": "def get_image(self, index):\n x = self.x[index]\n if self.open_image:\n img = Image.open(x).convert(\"RGB\")\n else:\n img = Image.fromarray(x.astype(\"uint8\"))\n return img", "title": "" }, { "docid": "61795ba19b486ff8b81c63eff1a1d7e6", "score": "0.71000654", "text": "def __getitem__(self, index): \n \n image = Image.open(str(self.path_test + self.test_files[index]))\n ImageId = 
self.test_files[index]\n return self.transform(image), ImageId", "title": "" }, { "docid": "fcbcdbffe3be128ef643de416d713c3a", "score": "0.7088725", "text": "def image_path_at(self, i):\n return os.path.join(self._data_path, self._image_index[i])", "title": "" }, { "docid": "d2f3266fe141e1a4e98f5350b33040ab", "score": "0.70427316", "text": "def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])", "title": "" }, { "docid": "d2f3266fe141e1a4e98f5350b33040ab", "score": "0.70427316", "text": "def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])", "title": "" }, { "docid": "d2f3266fe141e1a4e98f5350b33040ab", "score": "0.70427316", "text": "def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])", "title": "" }, { "docid": "845eaafe7332cdb7933822686bbf07dc", "score": "0.70163643", "text": "def __getitem__(self, index):\n # a image and a corresponding label dictionary\n image_fn, image_tn = self.filenames[index]\n \n image = Image.open(image_fn)\n image = image.resize((448, 448), Image.BILINEAR)\n \n label = parse_gt(image_tn)\n label = parse_label_dic(label)\n \n if self.transform is not None:\n image = self.transform(image)\n\n return image, label", "title": "" }, { "docid": "c20d19ff97dd26721bb32a557d671cdb", "score": "0.70135003", "text": "def __getitem__(self, index):\n image_fn, label_val = self.img_filenames[index], self.labels[index]\n\n \"\"\" INPUT: image part \"\"\"\n image = Image.open(image_fn).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n #image.show()\n #print (image.format,image.size,image.mode)\n\n \"\"\" INPUT: label part \"\"\"\n return image, label_val", "title": "" }, { "docid": "de7d3af5d5bfe40dfb23c790337484fc", "score": "0.70000815", "text": "def __getitem__(self, index):\r\n X = [io.imread(path[0]) for path in self.image_paths_and_responses[index*self.batch_size:(index+1)*self.batch_size]]\r\n y = [response[1] for response in self.image_paths_and_responses[index*self.batch_size:(index+1)*self.batch_size]]\r\n \r\n return np.array(X), np.array(y)", "title": "" }, { "docid": "decf77bd2926e34cbb2b2bdc813cf8ff", "score": "0.69934684", "text": "def __getitem__(self, idx):\n\n # Load new file if index is out of range\n if (idx - self.index_offset) >= self.current_img_nib.shape[2]:\n self.index_offset += self.current_img_nib.shape[2]\n self.current_img_nib, self.current_label_nib = self.load_nibs(\n self.files[self.counter], self.labels[self.counter]\n )\n self.counter += 1\n\n # Extract image and label\n img = self.current_img_nib[:, :, idx - self.index_offset]\n label = self.current_label_nib[:, :, idx - self.index_offset]\n\n seed = np.random.randint(2147483647) # make a seed with numpy generator\n random.seed(seed) # apply this seed to img tranfsorms\n torch.manual_seed(seed) # needed for torchvision 0.7\n if self.transforms is not None:\n img = self.transforms(img)\n\n random.seed(seed) # apply this seed to target transforms\n torch.manual_seed(seed) # needed for torchvision 0.7\n if self.target_transforms is not None:\n label = self.target_transforms(label)\n\n return img, label[0]", "title": "" }, { "docid": "226f7f4dc2bc5328089c29f7b6e3c40d", "score": "0.69691646", "text": "def __getitem__(self, index): \n \n image = Image.open(str(self.path_train + self.filenames[index]))\n ImageId = self.filenames[index]\n label = self.get_label(ImageId)\n \n if self.aug:\n data = {\"image\": np.array(image)}\n transformed = self.transform(**data)\n image = 
transformed['image']/255\n image = np.transpose(image, (2, 0, 1))\n return image, label\n else:\n return self.transform(image), label", "title": "" }, { "docid": "5b602ad3b0f42889b8abe28ad85c6ffc", "score": "0.6939119", "text": "def __getitem__(self, index):\n if self.mode == \"train\":\n dataset = self.train_dataset\n elif self.mode == \"val\":\n dataset = self.val_dataset\n else:\n dataset = self.test_dataset\n\n filename, label = dataset[index]\n image = Image.open(os.path.join(self.image_dir, filename)).convert('RGB')\n\n return self.transform(image), label", "title": "" }, { "docid": "c5af4f8f7319a478853ed652ab26f1eb", "score": "0.6894365", "text": "def __getitem__(self, index): \n \n image = Image.open(str(self.path_train + self.filenames[index]))\n ImageId = self.filenames[index]\n label = self.get_label(ImageId)\n mask = self.get_mask(ImageId) \n if self.aug:\n data = {\"image\": np.array(image), \"mask\": mask}\n transformed = self.transform(**data)\n image = transformed['image']/255\n image = np.transpose(image, (2, 0, 1))\n return image, transformed['mask'][np.newaxis,:,:], label\n else:\n \n return self.transform(image), mask[np.newaxis,:,:], label", "title": "" }, { "docid": "d55ab6a71df5c316c0f80d90378e95a0", "score": "0.68825513", "text": "def image_csv_index(self: Dataset, _index: int) -> int:\n id = image_id(self, _index)\n mask = self.image_table[\"id\"] == f\"{id}_image\"\n indices = mask.index[mask]\n assert len(indices) == 1, f\"Missing or ambiguous image: {id}\"\n return int(indices[0])", "title": "" }, { "docid": "beb95f06ab05aa7e66b67b6686847771", "score": "0.6860399", "text": "def __getitem__(self, index):\n # Shuffle data on epoch start (dont work for distributed sampler)\n if index == 0:\n self.reinit_queue(shuffle=True)\n # Update index by rank\n index = self.index_shift + index\n # Get id index and image queue local index\n ind_id = index // self.ims_per_id\n ind_el = index % self.ims_per_id\n im_name = os.path.join(self.im_dir, self.image_queue[ind_id][ind_el])\n label = self.label_queue[ind_id][ind_el]\n\n img = Image.open(im_name).convert('RGB')\n img = np.asarray(img)\n\n return img, label", "title": "" }, { "docid": "c91edf1795f8ffa6c7c4fb948ec76e3a", "score": "0.6849109", "text": "def __getitem__(self, index):\n img, target, semi_target = self.data[index], int(self.targets[index]), int(self.semi_targets[index])\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n \n img.to(torch.float32)\n\n return img, target, semi_target, index", "title": "" }, { "docid": "ce9f3c0e6dfe03e024daf4a870223f1d", "score": "0.6813522", "text": "def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n if self.use_cache:\n image, label = self.cached_data[idx]\n sample = {'img': image,\n 'label': int(label)}\n return sample\n else:\n img_path = self.img_list[idx][0]\n image = Image.open(img_path).convert('RGB')\n\n sample = {'img': self.transform(image),\n 'label': int(self.img_list[idx][1])}\n return sample", "title": "" }, { "docid": "d9007971c02986d47784a205aad0fc8c", "score": "0.67544633", "text": "def __getitem__(self, index):\n dataset = self.train_dataset if self.mode == 'train' else self.test_dataset\n filename, label = dataset[index]\n image = Image.open(os.path.join(self.image_dir, filename))\n return 
self.transform(image), torch.FloatTensor(label)", "title": "" }, { "docid": "10c6d799a777298f0089a516904c3c52", "score": "0.6729647", "text": "def __getitem__(self, index):\n \n img = self.img_arr[index]\n \n if self.transform:\n \n opt = random.randint(0, 9)\n if opt == 1: img = self.perspective(image=img)\n elif opt == 3: img = self.scale(image=img)\n elif opt == 4: img = self.rotate(image=img)\n elif opt == 5: img = self.shearx(image=img)\n elif opt == 6: img = self.sheary(image=img)\n elif opt == 7: img = self.stretchx(image=img)\n elif opt == 8: img = self.stretchy(image=img)\n elif opt == 9: img = self.piecewise(image=img)\n \n opt = random.randint(0, 5)\n if opt == 1: img = cv2.erode(img, self.k, iterations=1)\n elif opt == 2: img = cv2.dilate(img, self.k, iterations=1)\n elif opt == 3: img = self.do_random_line(image=img)\n elif opt == 4: img = self.edgedetect(image=img)\n elif opt == 5: img = self.contrast(image=img)\n \n opt = random.randint(0, 1)\n if opt == 1: img = self.do_random_block_fade(image=img)\n \n if self.raw == False:\n # scale\n img = img / 255.\n # norm\n img = (img - 0.055373063765223995) / 0.17266245915644673\n \n # make channel\n img = img[None, :, :]\n if self.RGB:\n img = np.repeat(img, 3, axis=0)\n \n lbl = self.labels[index].astype(int)\n if self.split_label:\n lbl = lbl.tolist()\n \n return img.astype(self.dtype), lbl", "title": "" }, { "docid": "aa82523d4b3d1d72d37e8e616a69e403", "score": "0.67252547", "text": "def __getitem__(self, idx):\r\n image = Image.open(self.list_images[idx])\r\n image = self.transform(image).float()\r\n\r\n # In the utk face dataset, labels are contained in image names, for instance an image of age 23, black man\r\n # is typically named as `root_dir/23_0_1_158478565845.jpg`\r\n labels = self.list_images[idx].split('/')[-1].split('_')\r\n age, gender, race = torch.tensor(float(labels[0])).float(),\\\r\n torch.tensor(int(labels[1])).long(),\\\r\n torch.tensor(int(labels[2])).long()\r\n\r\n return image, age, gender, race", "title": "" }, { "docid": "2c9fef9b9160aadfc109e1e59887c09f", "score": "0.6718965", "text": "def __getitem__(self, index: int) -> Batch:\n # by default, the npz format stores arrays in a dict with keys 'arr_0', 'arr_1', ... 
\n image = np.load(os.path.join(self.input_root, self.input_list[index]))['arr_0']\n if self.transform is not None:\n image = self.transform(image)\n if self.train:\n patient_index = self.patient_index_from_dataset_index(index)\n target = self.get_target(patient_index)\n return image, target\n return image", "title": "" }, { "docid": "b6266993df6adb8610221a304c24aa27", "score": "0.6714811", "text": "def annotation_path_from_index(self, index):\r\n index = index.split('.')[0]\r\n image_file = os.path.join(self.data_path,'annotations', self.image_set, index+'.png')\r\n if self.image_set == 'testing':\r\n image_file = None\r\n else:\r\n assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)\r\n return image_file", "title": "" }, { "docid": "7f866b85dddf0e93b1aeb08061c3154a", "score": "0.6706578", "text": "def __getitem__(self, index):\n img = self.get_image(index)\n y = self.y[index]\n img = self.trsf(img)\n return img, y", "title": "" }, { "docid": "6f6cb6591a3bf2e3c7dac05a09cf5d99", "score": "0.6704857", "text": "def __getitem__(self, index):\n \n # Get item special method\n first_val = int(list(self.dataset_numbers.values())[0])\n if index < first_val:\n class_val = 'noncovid'\n label = torch.Tensor([1, 0])\n else:\n class_val = 'covid'\n index = index - first_val\n label = torch.Tensor([0, 1])\n im = self.open_img(self.groups, class_val, index)\n im = transforms.functional.to_tensor(np.array(im)).float()\n return im, label", "title": "" }, { "docid": "6f6cb6591a3bf2e3c7dac05a09cf5d99", "score": "0.6704857", "text": "def __getitem__(self, index):\n \n # Get item special method\n first_val = int(list(self.dataset_numbers.values())[0])\n if index < first_val:\n class_val = 'noncovid'\n label = torch.Tensor([1, 0])\n else:\n class_val = 'covid'\n index = index - first_val\n label = torch.Tensor([0, 1])\n im = self.open_img(self.groups, class_val, index)\n im = transforms.functional.to_tensor(np.array(im)).float()\n return im, label", "title": "" }, { "docid": "6f6cb6591a3bf2e3c7dac05a09cf5d99", "score": "0.6704857", "text": "def __getitem__(self, index):\n \n # Get item special method\n first_val = int(list(self.dataset_numbers.values())[0])\n if index < first_val:\n class_val = 'noncovid'\n label = torch.Tensor([1, 0])\n else:\n class_val = 'covid'\n index = index - first_val\n label = torch.Tensor([0, 1])\n im = self.open_img(self.groups, class_val, index)\n im = transforms.functional.to_tensor(np.array(im)).float()\n return im, label", "title": "" }, { "docid": "ee89992610dfbe3c2f16b407ad3b418d", "score": "0.6680627", "text": "def __getitem__(self, idx):\n #print(idx)\n i = idx * self.batch_size\n if i == 0:\n data_zip_list = list(zip(self.input_img_paths, self.target_img_paths))\n random.shuffle(data_zip_list)\n self.input_img_paths, self.target_img_paths = zip(*data_zip_list)\n batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]\n batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]\n x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype=\"float32\")\n for j, path in enumerate(batch_input_img_paths):\n img = cv2.imread(path, cv2.IMREAD_COLOR)\n n = np.random.randint(0, 3)\n if n == 0:\n img = cv2.blur(img, (3, 3)) / 255.\n elif n == 1:\n img = cv2.blur(img, (5, 5)) / 255.\n else:\n img = img / 255.\n x[j] = img\n y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype=\"float32\")\n for j, path in enumerate(batch_target_img_paths):\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE) * 1.\n y[j] = np.expand_dims(img, 
2)\n return x, y", "title": "" }, { "docid": "e99d3d618a55c8bcd93609e5ddf42343", "score": "0.6666778", "text": "def __getitem__(self, index):\n filename, label = self.dataset[index]\n image = Image.open(os.path.join(self.images, filename))\n return self.transform(image), torch.FloatTensor(label)", "title": "" }, { "docid": "c5f36094c81e8c1d220cb13781e8dbb4", "score": "0.6662379", "text": "def __getitem__(self, index):\n vocab = self.vocab\n root = self.root\n ann_id = self.ids[index]\n img_id = ann_id[0]\n caption = self.dataset[img_id]['sentences'][ann_id[1]]['raw']\n path = self.dataset[img_id]['filename']\n\n image = Image.open(os.path.join(root, path)).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n\n # Convert caption (string) to word ids.\n tokens = nltk.tokenize.word_tokenize(\n str(caption).lower().decode('utf-8'))\n caption = []\n caption.append(vocab('<start>'))\n caption.extend([vocab(token) for token in tokens])\n caption.append(vocab('<end>'))\n target = torch.Tensor(caption)\n return image, target, index, img_id", "title": "" }, { "docid": "d940aaf9ad70420e5d2413c756c9506c", "score": "0.6653799", "text": "def __getitem__(self, index):\n vocab = self.vocab\n root, caption, img_id, path, image = self.get_raw_item(index)\n\n if self.transform is not None:\n image = self.transform(image)\n\n # Convert caption (string) to word ids.\n tokens = nltk.tokenize.word_tokenize(\n str(caption).lower().decode('utf-8'))\n caption = []\n caption.append(vocab('<start>'))\n caption.extend([vocab(token) for token in tokens])\n caption.append(vocab('<end>'))\n target = torch.Tensor(caption)\n return image, target, index, img_id", "title": "" }, { "docid": "b5a33a7a4e8b7efb14eae32f512f14dd", "score": "0.66348493", "text": "def __getitem__(self, index):\n if self.train:\n img, target = self.train_data[index], self.train_labels[index]\n else:\n img, target = self.test_data[index], self.test_labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.squeeze().numpy(), mode='RGB')\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target", "title": "" }, { "docid": "b5c6d0a166d3dd374254916410d9cdf0", "score": "0.6632213", "text": "def get_image_path(image_lists, label_name, index, image_dir, category):\n label_lists = image_lists[label_name]\n category_list = label_lists[category]\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path", "title": "" }, { "docid": "fcaa140c7fda24f25716af5a282431b8", "score": "0.66306263", "text": "def get_image_data(gltf, img_idx):\n pyimage = gltf.data.images[img_idx]\n image_name = \"Image_\" + str(img_idx)\n\n assert(not (pyimage.uri is not None and pyimage.buffer_view is not None))\n\n if pyimage.uri is not None:\n data, file_name = gltf.load_uri(pyimage.uri)\n return data, file_name or image_name\n\n elif pyimage.buffer_view is not None:\n data = BinaryData.get_buffer_view(gltf, pyimage.buffer_view)\n return data, image_name\n\n return None, None", "title": "" }, { "docid": "f4f6b738d8de536a22ff5f974c424230", "score": "0.66237414", "text": "def __getitem__(self, idx):\n # load IUV image\n img_name = self.pose_images[idx]\n if img_name in self.invalid_images:\n idx = 0\n img_name = self.pose_images[idx]\n \n img_path = 
os.path.join(self.root_dir, img_name)\n img = Image.open(img_path).convert('RGB')\n img = self.tsfm(img).float()\n \n segs_name = img_name[:-4] + \"_IUV.mat\" \n segs_path = os.path.join(self.dp_root_dir, segs_name)\n segs = scipy.io.loadmat(segs_path)['segm']\n segs = smart_padding_depth(segs) # smart pad to make square\n segs = cv2.resize(segs, (224, 224)) # resize to 224\n segs = torch.tensor(segs).unsqueeze(0).float()\n \n joints = self.joints[idx].view(-1, 3) / 1000.0\n joints = joints - joints[6].unsqueeze(0)\n\n return img, segs, joints", "title": "" }, { "docid": "5a91d34b9dc41353cb7a54eb19b80675", "score": "0.66190946", "text": "def __getitem__(self, idx):\n\n if self.test_mode:\n return self.prepare_test_img(idx)\n else:\n return self.prepare_train_img(idx)", "title": "" }, { "docid": "a499feacbce0264b51ece05505db95f3", "score": "0.6613223", "text": "def __getitem__(self, idx):\r\n prev_imgpath, bbox_prev, curr_imgpath, bbox_curr = self._alov_imgpairs[idx]\r\n image_prev = image_io.load(prev_imgpath)\r\n image_prev = np.asarray(image_prev, dtype=np.uint8)\r\n image_curr = image_io.load(curr_imgpath)\r\n image_curr = np.asarray(image_curr, dtype=np.uint8)\r\n\r\n if self._dbg:\r\n viz, env = self._viz, self._env\r\n prev_img_bbox = draw.bbox(image_prev, bbox_prev)\r\n curr_img_bbox = draw.bbox(image_curr, bbox_curr)\r\n viz.plot_image_opencv(prev_img_bbox, 'prev', env=env)\r\n viz.plot_image_opencv(curr_img_bbox, 'current', env=env)\r\n\r\n del prev_img_bbox\r\n del curr_img_bbox\r\n\r\n return image_prev, bbox_prev, image_curr, bbox_curr", "title": "" }, { "docid": "d5133f06efcd1cbe34ba8346eb70c841", "score": "0.6612788", "text": "def __getitem__(self, index):\n dataset = self.train_dataset if self.mode == 'train' else self.test_dataset\n filename, cls, label = dataset[index]\n target_cls = 'img' if cls == 'd65' else 'd65'\n image = Image.open(os.path.join(self.image_dir, self.mode, cls, filename))\n filename = filename.replace('d65', 'img') if cls == 'd65' else filename.replace('img', 'd65')\n target = Image.open(os.path.join(self.image_dir, self.mode, target_cls, filename))\n image, target = self.get_transforms(image, target)\n if self.mode == 'train':\n return image, target, torch.LongTensor(label)\n else:\n return image, target, torch.LongTensor(label), filename", "title": "" }, { "docid": "faa829724632f632b2a9d7f5a9083153", "score": "0.6611", "text": "def __getitem__(self, idx):\n if self.test_mode:\n return self.prepare_test_img(idx)\n\n return self.prepare_train_img(idx)", "title": "" }, { "docid": "920ad78b4e06bdcf4b7f8775d6d12c0c", "score": "0.66107047", "text": "def __getitem__(self, index):\n data = self.df.iloc[index]\n\n # Get image\n if self.load_all_in_mem:\n img = self.img_dict[data['img_name']]\n else:\n img_filename = Path(os.path.join(self.cache_full_dir, data['img_path']))\n img = self.img_loader(img_filename)\n if self.transform:\n img = self.transform(img)\n elif self.return_type == \"pt\":\n img = transforms.ToTensor()(img)\n if (self.return_type == \"np\") and isinstance(img, PIL.Image.Image):\n img = np.array(img)\n # Get label with shape (1,)\n if self.return_type == \"np\":\n label = data[self.class_names].to_numpy().astype(\"float32\")\n else:\n label = torch.FloatTensor(data[self.class_names].to_numpy().astype(\"float32\"))\n return img, label", "title": "" }, { "docid": "e203c222cc16d8781fb9e7a9a88c898a", "score": "0.6600447", "text": "def __getitem__(self, idx):\n return next(self._image_generator)", "title": "" }, { "docid": 
"f247ea6014bfca4ebb539ea5c68394cd", "score": "0.65850765", "text": "def __getitem__(self, index):\n # if self.train:\n # img, target = self.train_data[index], self.train_labels[index]\n # else:\n # img, target = self.test_data[index], self.test_labels[index]\n img, target = self.data[index], self.labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n # print(type(img))\n if self.type == 'MNIST':\n img = Image.fromarray(img.numpy(), mode='L').convert('RGB')\n elif self.type == 'MNIST-M':\n img = Image.fromarray(img.squeeze().numpy(), mode='RGB')\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target", "title": "" }, { "docid": "c12ad3b68b9c152a92151ace8c7d9e69", "score": "0.65837467", "text": "def __getitem__(self, index):\n\n out_dict = dict()\n\n img_path_src = self.paths[index]\n # print(\"src img path:\", img_path_src)\n index_tgt =(index + random.randint(1, self.length-1)) % self.length\n img_path_tgt = self.paths[index_tgt]\n\n mask_idx = int(os.path.basename(img_path_src).split(\"_\")[0])\n\n # open and convert images\n img_src = Image.open(img_path_src).convert('RGB')\n img_tgt = Image.open(img_path_tgt).convert('RGB')\n out_dict['src'] = self.transform_img(img_src)\n out_dict['tgt'] = self.transform_img(img_tgt)\n\n # extract all src masks, skip the floor, sky and walls\n mask_paths = sorted(glob.glob(os.path.join(self.data_dir, f\"{mask_idx}_mask_*.jpg\")))[4:]\n\n # make sure that a new path is chosen every time\n random.shuffle(mask_paths)\n\n\n # find suitable mask\n for p in mask_paths:\n mask = self.transform_mask(Image.open(p).convert(\"1\"))\n mask_binary = (mask > 0).int()\n surface = mask_binary.sum().item()\n # print(\"surface:\", surface)\n if surface > self.opt.min_obj_surface:\n out_dict[\"mask\"] = mask\n return out_dict\n\n\n # never return None (just pick a new image)\n return self.__getitem__(random.randint(1, self.length-1))\n\n out_dict = None\n\n return out_dict", "title": "" }, { "docid": "9ae49fe67d9b9b2b0711dec7aad24bf5", "score": "0.65705895", "text": "def __getitem__(self, idx):\n img = None\n try:\n img = cv.imread(self.samples_info[idx]['path'], cv.IMREAD_COLOR)\n bbox = self.samples_info[idx]['bbox']\n landmarks = self.samples_info[idx]['landmarks']\n\n if bbox is not None or landmarks is not None:\n if landmarks is not None:\n landmarks = np.array(landmarks).reshape(5, -1)\n landmarks[:,0] = landmarks[:,0]*bbox[2] + bbox[0]\n landmarks[:,1] = landmarks[:,1]*bbox[3] + bbox[1]\n img = FivePointsAligner.align(img, landmarks.reshape(-1), d_size=(bbox[2], bbox[3]),\n normalized=False, show=False)\n if bbox is not None and landmarks is None:\n img = img[bbox[1]:bbox[1] + bbox[3], bbox[0]:bbox[0] + bbox[2]]\n except BaseException:\n print('Corrupted image!', self.samples_info[idx])\n img = np.zeros((128, 128, 3), dtype='uint8')\n\n if self.transform:\n img = self.transform(img)\n\n return {'img': img, 'idx': idx}", "title": "" }, { "docid": "e2f916592cf4f5e0beae13595e9724b6", "score": "0.65700185", "text": "def imageindex(self, imagename=None):\n # empty name -> return first image\n if imagename is None:\n return 0\n\n # check for valid name\n try:\n ind = self.imgnames.index(imagename.upper())\n except ValueError as error:\n msg = 'invalid image name (%s)' % imagename\n log.error('Imageindex: ' + msg)\n raise error\n\n # return index\n return ind", "title": "" }, { "docid": 
"c4d51e92ca19b18659ce019a01aefadc", "score": "0.65699834", "text": "def __getitem__(self, idx):\n\n\t\tlr_img = Image.fromarray(color.gray2rgb(io.imread(self.lr_list_files[idx])))\n\t\thr_img = Image.fromarray(color.gray2rgb(io.imread(self.hr_list_files[idx])))\n\t\timg_name = self.lr_list_files[idx].split('/')[-1]\n\t\tsample = {'lr': lr_img, 'hr': hr_img, 'name': img_name}\n\n\t\tif self.transform:\n\t\t\tsample['lr'] = self.transform[0](sample['lr'])\n\t\t\tsample['hr'] = self.transform[1](sample['hr'])\n\t\treturn sample", "title": "" }, { "docid": "644823bf448fb274b92b7d8211e7d585", "score": "0.6560773", "text": "def __getitem__(self, idx):\n # load IUV image\n img_name = self.pose_images[idx]\n if img_name in self.invalid_images:\n idx = 0\n img_name = self.pose_images[idx]\n \n ori_img_name = img_name\n ori_img_path = os.path.join(self.root_dir, ori_img_name)\n ori_img = Image.open(ori_img_path).convert('RGB')\n ori_img = self.tsfm(ori_img).float()\n \n img_name = img_name[:-4] + \"_IUV.mat\" \n img_path = os.path.join(self.dp_root_dir, img_name)\n img = scipy.io.loadmat(img_path)['segm']\n img = smart_padding_depth(img) # smart pad to make square\n img = cv2.resize(img, (224, 224)) # resize to 224\n img = torch.tensor(img).unsqueeze(0).float()\n \n combined_img = torch.cat((ori_img, img), dim=0)\n\n joints = self.joints[idx].view(-1, 3) / 1000.0\n joints = joints - joints[6].unsqueeze(0)\n\n return combined_img, joints", "title": "" }, { "docid": "99ed2e1247fb2d6d8c9c391d621bde0b", "score": "0.6560704", "text": "def image_path_from_index(self, index, type, check=True, cls_name=\"\"):\n if type == \"observed\":\n image_file = os.path.join(self.observed_data_path, index + \"-color.png\")\n elif type == \"gt_observed\":\n image_file = os.path.join(self.gt_observed_data_path, index + \"-color.png\")\n elif type == \"rendered\":\n image_file = os.path.join(self.rendered_data_path, index + \"-color.png\")\n if check:\n assert os.path.exists(\n image_file\n ), \"type: {}, path does not exist: {}, self.real_data_path:{}\".format(\n type, image_file, self.observed_data_path\n )\n\n return image_file", "title": "" }, { "docid": "d94c0c74df294abda1c33e47692e544f", "score": "0.6552304", "text": "def image(self, index):\n image = cv2.imread(os.path.join(self.path, self.patient[index] + '.jpg'), 1)\n\n ## apply resize for standartize image resolution\n if image.shape != self.size:\n image = cv2.resize(image, self.size, interpolation = cv2.INTER_AREA)\n \n return image", "title": "" }, { "docid": "cb0e8ab4c726630b340ebc0cb977c709", "score": "0.65421265", "text": "def __getitem__(self, index):\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n num_row, num_col = self.pairs.shape\n\n if num_col == 1:\n img_path1 = os.path.join(self.root_dir, self.data_dir_raw, str(self.pairs.iloc[index, 0]))\n img_path2 = os.path.join(self.root_dir, self.data_dir_exp, str(self.pairs.iloc[index, 0])) # paired high quality image\n image1 = Image.open(img_path1)\n # image1 = image1.convert(\"L\")\n image2 = Image.open(img_path2)\n # image2 = image2.convert(\"L\")\n name = str(self.pairs.iloc[index, 0])\n imgName, _ = name.split('.', 1)\n if self.transform:\n try:\n image1 = self.transform(image1)\n image2 = self.transform(image2)\n except:\n print(\"Cannot transform images: {} and {}\".format(img_path1, img_path2))\n return image1, image2, imgName\n\n elif num_col == 2:\n img_path1 = os.path.join(self.root_dir, self.data_dir_raw, str(self.pairs.iloc[index, 0])) # low-quality image\n img_path2 = os.path.join(self.root_dir, 
self.data_dir_exp, str(self.pairs.iloc[index, 1])) # unpaired high quality image\n #img_path3 = os.path.join(self.root_dir, self.data_dir_exp, str(self.pairs.iloc[index, 1])) # paired high quality image\n image1 = Image.open(img_path1)\n image2 = Image.open(img_path2)\n # print(len(image2.split()))\n # print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n #image2 = cv2.imread(img_path2,1)\n #image3 = Image.open(img_path3)\n\n\n #image3 = cv2.imread(img_path3,1)\n name = str(self.pairs.iloc[index, 0])\n imgName, _ = name.split('.', 1)\n if self.transform:\n try:\n image1 = self.transform(image1)\n image2 = self.transform(image2)\n #image3 = self.transform(image3)\n except:\n print(\"Cannot transform images: {}, {} and {}\".format(img_path1, img_path2))\n return image1, image2, imgName", "title": "" }, { "docid": "beb6510e65efaa1406b28698cbccf7be", "score": "0.6529464", "text": "def image_path_at(self, i):\n image_path = os.path.join(self._data_path, self._image_names[i])\n assert os.path.isfile(image_path), \"Path does not exist: {}\".format(image_path)\n return image_path", "title": "" }, { "docid": "2070d6f634388eff0016d21db863377f", "score": "0.6526482", "text": "def __getitem__(self, index):\n im, gt, h, w = self.pull_item(index)\n return im, gt", "title": "" }, { "docid": "60d1bf86d5bf83a572360f28bb78ea5d", "score": "0.65122247", "text": "def __getitem__(self, index):\n # read a image given a random integer index\n img_path_cg = self.img_paths_cg[index % self.A_size] # make sure index is within then range\n if self.opt.serial_batches: # make sure index is within then range\n index_B = index % self.B_size\n else: # randomize the index for domain B to avoid fixed pairs.\n index_B = random.randint(0, self.B_size - 1)\n img_path_iiw = self.img_paths_iiw[index_B]\n\n img_path_cg = self.dataroot + \"/intrinsics_final/images/\" + img_path_cg\n img_path_iiw = self.dataroot + \"/IIW/iiw-dataset/data/\" + img_path_iiw.split('/')[-1][:-3]\n img_cg = Image.open(img_path_cg).convert('RGB')\n img_iiw = Image.open(img_path_iiw).convert('RGB')\n \n # apply the same transform to both A and B\n img_cg = self.transform_A(img_cg)\n img_iiw = self.transform_B(img_iiw)\n\n # img_cg = torch.unsqueeze(img_cg, 0) # [1, 3, 256, 256]\n # img_cg = torch.unsqueeze(img_cg, 0) # [1, 3, 256, 256]\n \n return {'A': img_cg, 'B': img_iiw, 'A_paths': img_path_cg, 'B_paths': img_path_iiw}", "title": "" }, { "docid": "cbcf895202fc3238d517f9e12a8bd8e3", "score": "0.6505351", "text": "def __getitem__(self, index):\n return self.get_image(index), self.get_keypoints(index)", "title": "" }, { "docid": "891bd35397f9bb4f6b3e8f0bfd1f98ce", "score": "0.6493202", "text": "def _load_image_set_index(self):\n image_path_dict = ['0--Parade', '1--Handshaking', '10--People_Marching', '11--Meeting', '12--Group', '13--Interview', '14--Traffic', '15--Stock_Market', '16--Award_Ceremony', '17--Ceremony', '18--Concerts', '19--Couple', '2--Demonstration', '20--Family_Group', '21--Festival', '22--Picnic', '23--Shoppers', '24--Soldier_Firing', '25--Soldier_Patrol', '26--Soldier_Drilling', '27--Spa', '28--Sports_Fan', '29--Students_Schoolkids', '3--Riot', '30--Surgeons', '31--Waiter_Waitress', '32--Worker_Laborer', '33--Running', '34--Baseball', '35--Basketball', '36--Football', '37--Soccer', '38--Tennis', '39--Ice_Skating', '4--Dancing', '40--Gymnastics', '41--Swimming', '42--Car_Racing', '43--Row_Boat', '44--Aerobics', '45--Balloonist', '46--Jockey', 
'47--Matador_Bullfighter', '48--Parachutist_Paratrooper', '49--Greeting', '5--Car_Accident', '50--Celebration_Or_Party', '51--Dresses', '52--Photographers', '53--Raid', '54--Rescue', '55--Sports_Coach_Trainer', '56--Voter', '57--Angler', '58--Hockey', '59--people--driving--car', '6--Funeral', '61--Street_Battle', '7--Cheering', '8--Election_Campain', '9--Press_Conference']\n\n file_list = self._mat['file_list']\n image_index = []\n for idx, section in enumerate(file_list):\n for photo in section[0]:\n photo_path = os.path.join(image_path_dict[idx] , photo[0][0])\n image_index.append(photo_path)\n\n return image_index", "title": "" }, { "docid": "99347b4a0ae7687e6d759a1eed3a6c24", "score": "0.6487795", "text": "def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_path = os.path.join(self.img_dir, self.json_df.loc[idx, 'Image'])\n # image = read_image(img_path)\n image = Image.open(img_path)\n\n classes = torch.LongTensor(self.json_df.loc[idx, 'Class'])\n xcs = torch.FloatTensor(self.json_df.loc[idx, 'xc'])\n ycs = torch.FloatTensor(self.json_df.loc[idx, 'yc'])\n widths = torch.FloatTensor(self.json_df.loc[idx, 'width'])\n heights = torch.FloatTensor(self.json_df.loc[idx, 'height'])\n\n if self.transform:\n image = self.transform(image)\n\n else:\n image = np.array(image)\n image = torch.from_numpy(np.transpose(image,(-1,0,1)))\n\n return image, classes, xcs, ycs, widths, heights", "title": "" }, { "docid": "b74ac5df05ce4e337ad89be472fa847c", "score": "0.64840263", "text": "def __getitem__(self, index):\n # get filepath\n path = self.file_index[\"rgb_filename\"].iloc[index]\n\n # load and transform image\n X = Image.open(path)\n X = self.transform(X)\n\n # get class\n y = self.file_index[\"dance_id\"][index]\n\n return X, y", "title": "" }, { "docid": "b0b0df5aea823716fbc20eadde026c31", "score": "0.6480065", "text": "def __getitem__(self, idx):\n # load IUV image\n img_name = self.pose_images[idx]\n if img_name in self.invalid_images:\n idx = 0\n img_name = self.pose_images[idx]\n img_name = img_name[:-4] + \"_IUV.mat\"\n \n img_path = os.path.join(self.dp_root_dir, img_name)\n img = scipy.io.loadmat(img_path)['segm']\n \n img = smart_padding_depth(img) # smart pad to make square\n img = cv2.resize(img, (224, 224)) # resize to 224\n img = torch.tensor(img).unsqueeze(0)\n \n joints = self.joints[idx].view(-1, 3) / 1000.0\n joints = joints - joints[6].unsqueeze(0)\n \n return img.float(), joints", "title": "" }, { "docid": "9500e98699ab9a7778fc6a731d730c8c", "score": "0.64755577", "text": "def __getitem__(self, idx):\n if isinstance(idx, slice):\n return self._getitems(idx)\n if idx >= len(self.samples):\n raise AssertionError(\"sample index is out-of-range\")\n if idx < 0:\n idx = len(self.samples) + idx\n sample = self.samples[idx]\n image_path = sample[self.path_key]\n image = cv.imread(image_path)\n if image is None:\n raise AssertionError(\"invalid image at '%s'\" % image_path)\n if self.center_crop is not None:\n tl = (image.shape[1] // 2 - self.center_crop[0] // 2,\n image.shape[0] // 2 - self.center_crop[1] // 2)\n br = (tl[0] + self.center_crop[0], tl[1] + self.center_crop[1])\n image = thelper.draw.safe_crop(image, tl, br)\n scale = 1.0 / self.downscale_factor\n image_lowres = cv.resize(image, dsize=(0, 0), fx=scale, fy=scale)\n if self.rescale_lowres:\n image_lowres = cv.resize(image_lowres, dsize=(image.shape[1], image.shape[0]))\n sample = {\n self.lowres_image_key: image_lowres,\n self.highres_image_key: image,\n self.idx_key: idx,\n **sample\n }\n 
if self.transforms:\n sample = self.transforms(sample)\n return sample", "title": "" }, { "docid": "e0b684ed6fdc831a2ecb7ccd88ac79a1", "score": "0.64575636", "text": "def get_image_path(image_lists, label_name, index, image_dir, category):\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s', label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path", "title": "" }, { "docid": "5b524433e5267240aad4f14d6786f054", "score": "0.64570844", "text": "def getImage (self, index=None):\n wellsample = self.getWellSample(index)\n if wellsample:\n return wellsample.getImage()\n return None", "title": "" }, { "docid": "59a68dddbb4d8a52d3a4abba5e08f875", "score": "0.64439917", "text": "def image_id(self: Dataset, _index: int) -> str:\n return self.image_ids[_index]", "title": "" }, { "docid": "d19aefd8fa55eae48ce6817546881aab", "score": "0.64431906", "text": "def image(self, index):\n try:\n data = pydicom.read_file(os.path.join(\n self.path,'ID_'+self.patient[index]+ '.dcm'))\n image = data.pixel_array \n except:\n print(\"Oops! Something wrong with this file: {}\".format(self.patient[index]))\n raise ValueError\n data = self.correct_dcm(data)\n \n if self.window == 'histogram':\n image = self.historgam(data.pixel_array)\n elif self.window == 'windows':\n image = self.windows(data)\n else:\n image = self.mixed(data)\n\n ## apply resize for standartize image resolution\n if image.shape != self.size:\n image = cv2.resize(image, self.size, interpolation = cv2.INTER_AREA)\n \n return image", "title": "" }, { "docid": "f2ba1989c3a72d9536bffa40951f0e07", "score": "0.64424896", "text": "def __getitem__(self, index):\n coco = self.coco\n vocab = self.vocab\n ann_id = self.ids[index]\n caption = coco.anns[ann_id]['caption']\n img_id = coco.anns[ann_id]['image_id']\n path = coco.loadImgs(img_id)[0]['file_name']\n\n image = Image.open(os.path.join(self.root, path)).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n\n # Convert caption (string) to word ids.\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(vocab('<start>'))\n caption.extend([vocab(token) for token in tokens])\n caption.append(vocab('<end>'))\n target = torch.Tensor(caption)\n return image, target", "title": "" }, { "docid": "19070c0c3bc66f965bcb731efa1dec84", "score": "0.64405906", "text": "def __getitem__(self, index):\n vocab = self.vocab\n img_name = self.json[index][0]\n img_path = os.path.join(self.root, img_name)\n image = Image.open(img_path).convert(\"RGB\")\n\n\n if self.transform is not None:\n image = self.transform(image)\n\n tokens = jieba.cut(self.json[index][1],cut_all=False)\n caption_jieba = []\n caption_jieba.append(vocab('<start>'))\n caption_jieba.extend([vocab(c) for c in tokens])\n caption_jieba.append(vocab('<end>'))\n return image, torch.Tensor(caption_jieba)", "title": "" }, { "docid": "751777e5e34305682ed5913ec85d6c8e", "score": "0.64384925", "text": "def image_tensor(self: Dataset, _index: int) -> torch.Tensor:\n return self[_index][0]", "title": "" }, { "docid": "9464644ccb40e1ca59920f5c0e553a03", 
"score": "0.64371425", "text": "def get_image_path(image_lists, label_name, index, image_dir, category):\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path", "title": "" }, { "docid": "9464644ccb40e1ca59920f5c0e553a03", "score": "0.64371425", "text": "def get_image_path(image_lists, label_name, index, image_dir, category):\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path", "title": "" } ]
afcebe25fd3accfad4d0eea5b1a8e1b7
Delete property file(s) for the specified set of resources
[ { "docid": "9614b83b901b0dd7a9cfddd121b33f78", "score": "0.5541111", "text": "def delete(\n ctx,\n resource_type=None,\n select=None,\n models=None,\n exclude=None,\n selector=None,\n project_dir=None,\n profiles_dir=None,\n profile=None,\n target=None,\n vars=None,\n bypass_cache=None,\n state=None,\n log_level=None,\n):\n _, transformed_ls_results = _initiate_alterations(\n ctx,\n resource_type=resource_type,\n select=select,\n models=models,\n exclude=exclude,\n selector=selector,\n project_dir=project_dir,\n profiles_dir=profiles_dir,\n profile=profile,\n target=target,\n vars=vars,\n bypass_cache=bypass_cache,\n state=state,\n log_level=log_level,\n )\n _delete_all_property_files(ctx, transformed_ls_results)", "title": "" } ]
[ { "docid": "07091de612c92dcf179b0b7a0ce53403", "score": "0.7127628", "text": "def _delete_all_property_files(ctx, transformed_ls_results):\n resource_paths = [\n Path(ctx.config['project_path'], resource_location)\n for resource_location in transformed_ls_results\n ]\n property_paths = [\n rp.with_suffix('.yml')\n for rp in resource_paths\n if rp.with_suffix('.yml').exists()\n ]\n _LOGGER.info(\n f'{len(property_paths)} of {len(resource_paths)}'\n f' have existing property files'\n )\n # Delete the selected property paths\n if len(property_paths) > 0:\n deletion_message_yml_paths = '\\n'.join(\n [str(property_path) for property_path in property_paths]\n )\n deletion_message_prefix = '\\nThe following files will be deleted:\\n\\n'\n deletion_message_suffix = (\n f'\\n\\nAre you sure you want to delete these'\n f' {len(property_paths)} file(s) (answer: y/n)?\\n'\n )\n deletion_confirmation = input(\n f'{deletion_message_prefix}'\n f'{deletion_message_yml_paths}'\n f'{deletion_message_suffix}'\n )\n # User confirmation\n while deletion_confirmation.lower() not in ['y', 'n']:\n deletion_confirmation = input(\n '\\nPlease enter \"y\" to confirm deletion'\n ' or \"n\" to abort deletion.\\n'\n )\n if deletion_confirmation.lower() == 'y':\n for file in property_paths:\n os.remove(file)\n _LOGGER.info('Deletion confirmed.')\n else:\n _LOGGER.info('Deletion aborted.')\n else:\n _LOGGER.info('There are no files to delete.')", "title": "" }, { "docid": "e515bad4712866aaa63bd5e162b9b420", "score": "0.64168364", "text": "def clear(self):\r\n\t\tfor i in range(1, mxs.fileProperties.getNumProperties(self.customName)):\r\n\t\t\tmxs.fileProperties.deleteProperty(self.customName, i)", "title": "" }, { "docid": "df1d858cec751ae231b3f127d3b0699e", "score": "0.62939584", "text": "def cleanup(*files):\n for f in files:\n if hasattr(f, 'name'): f = f.name\n if os.path.exists(f): os.unlink(f)", "title": "" }, { "docid": "317cc25e443208b2310ff8c312f3a329", "score": "0.6243156", "text": "def cleanup(*files):\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "title": "" }, { "docid": "a9a6faed9831f0243096d9f636b994a9", "score": "0.60854363", "text": "def delete(self, resource_dict):\n for key, resource_id in resource_dict.items():\n scenario = self._delete_map.get(key)\n if not scenario:\n logger.warn(\n 'Unable to find scenario to clean {}:{}'\n .format(key, resource_id)\n )\n continue\n try:\n scenario.run(resource_dict)\n except (CinderNotFound,\n GlanceNotFound,\n NeutronNotFound,\n NovaNotFound):\n pass\n except Exception:\n logger.exception(\n \"Error garbage collecting {}:{}\"\n .format(key, resource_id)\n )", "title": "" }, { "docid": "07895b78ff0569e66b8b67f0f2664e21", "score": "0.5980383", "text": "def delete(self):\n self._delete_all_resources()", "title": "" }, { "docid": "5e9ccb7c9256d802956cbe7fece9ee95", "score": "0.59692633", "text": "def _delete_files(cls, files):\n storage = _bs_model_storage()\n list(map(storage.delete, files))", "title": "" }, { "docid": "0403e109f1ac5b040686ba65b146da53", "score": "0.5916951", "text": "def delete_objects(self, Delete: Dict):\n for obj in Delete.get('Objects'):\n filename = os.path.join(self.basedir, filekey(obj.get('Key')))\n os.remove(filename)", "title": "" }, { "docid": "6624877fbea7e499135887f2f798dc6a", "score": "0.5882895", "text": "def __del__(self):\n files = ['mlpack_dct_predict.csv']\n for f in files:\n if os.path.isfile(f):\n os.remove(f)", "title": "" }, { "docid": "2c4e068aa856dccd68b8c6126b4add7b", "score": "0.58787787", "text": "def 
delete_files(files):\n\n for file in files:\n if os.path.isfile(file):\n os.remove(file)", "title": "" }, { "docid": "d74eb5bac5df4c652bee1f1f4ef51aa4", "score": "0.5878653", "text": "def destroy_resources(self, include=[], exclude=[], wipe=False):\r\n\r\n with self._get_deployment_lock():\r\n for r in self.resources.itervalues():\r\n r._destroyed_event = threading.Event()\r\n r._errored = False\r\n for rev_dep in r.destroy_before(self.resources.itervalues()):\r\n try:\r\n rev_dep._wait_for.append(r)\r\n except AttributeError:\r\n rev_dep._wait_for = [ r ]\r\n\r\n def worker(m):\r\n try:\r\n if not should_do(m, include, exclude): return\r\n try:\r\n for dep in m._wait_for:\r\n dep._destroyed_event.wait()\r\n # !!! Should we print a message here?\r\n if dep._errored:\r\n m._errored = True\r\n return\r\n except AttributeError:\r\n pass\r\n if m.destroy(wipe=wipe): self.delete_resource(m)\r\n except:\r\n m._errored = True\r\n raise\r\n finally:\r\n m._destroyed_event.set()\r\n\r\n nixops.parallel.run_tasks(nr_workers=-1, tasks=self.resources.values(), worker_fun=worker)\r\n\r\n # Remove the destroyed machines from the rollback profile.\r\n # This way, a subsequent \"nix-env --delete-generations old\" or\r\n # \"nix-collect-garbage -d\" will get rid of the machine\r\n # configurations.\r\n if self.rollback_enabled: # and len(self.active) == 0:\r\n profile = self.create_profile()\r\n attrs = {m.name:\r\n Function(\"builtins.storePath\", m.cur_toplevel, call=True)\r\n for m in self.active.itervalues() if m.cur_toplevel}\r\n if subprocess.call(\r\n [\"nix-env\", \"-p\", profile, \"--set\", \"*\", \"-I\", \"nixops=\" + self.expr_path,\r\n \"-f\", \"<nixops/update-profile.nix>\",\r\n \"--arg\", \"machines\", py2nix(attrs, inline=True)]) != 0:\r\n raise Exception(\"cannot update profile ‘{0}’\".format(profile))", "title": "" }, { "docid": "f4391fb00b5ed7c3c2ef3a3c7adb5018", "score": "0.5875117", "text": "def delete_metadata(self, resource, keys, subresource=None):\n resource = getid(resource)\n if subresource:\n subresource = getid(subresource)\n resource = f\"{resource}{self.subresource_path}/{subresource}\"\n\n for key in keys:\n self._delete(f\"{self.resource_path}/{resource}/metadata/{key}\")", "title": "" }, { "docid": "541fbde6176533fb17050da2b1018999", "score": "0.58473", "text": "def rm_files(files):\n for each in files:\n os.remove(each)", "title": "" }, { "docid": "a42efc69fe0052d0e4908dc1bede152a", "score": "0.58456105", "text": "def destroy(self, request, pk=None):\n property = get_object_or_404(self.queryset, pk=pk)\n location = property.location\n contact = property.contact\n pictures = property.pictures\n\n # Don't use bulk deletion because it doesn't use overriden delete\n # on Picture Model, so with it picture files won't be deleted\n for picture in pictures.get_queryset():\n picture.delete()\n\n property.delete()\n location.delete()\n contact.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "3a0573c319d67062889f1f820e9b5090", "score": "0.5834067", "text": "def teardown(files):\n print('Removing created files')\n for f in files:\n print(f)\n os.remove(f)\n return", "title": "" }, { "docid": "01a5b9a7b1cc189ad822a0acd8368774", "score": "0.5806237", "text": "def _delete_all(self,resource):\n r = requests.delete(self.endpoint(resource))\n return r", "title": "" }, { "docid": "6a9d46395ad10073185ee84755ad5bff", "score": "0.5802474", "text": "def cleanup():\n for name in (\"results.json\", \"metrics.json\"):\n path = os.path.join(workspace, 
name)\n run(\"rm -f {}\".format(path))", "title": "" }, { "docid": "592a8de7370495570a59a9f793a51a63", "score": "0.57961226", "text": "def batchDeleteFiles(self, files):\n\t\tself._batchDeleteEquals(\"files\", \"file_path\", files)", "title": "" }, { "docid": "552565e14fa2922a99f3df4dfea5a71a", "score": "0.5795865", "text": "def delete_custom_properties():\n for object_type in CUSTOM_ECOMMERCE_PROPERTIES:\n for obj_property in CUSTOM_ECOMMERCE_PROPERTIES[object_type][\"properties\"]:\n if object_property_exists(object_type, obj_property):\n delete_object_property(object_type, obj_property)\n for group in CUSTOM_ECOMMERCE_PROPERTIES[object_type][\"groups\"]:\n if property_group_exists(object_type, group):\n delete_property_group(object_type, group)", "title": "" }, { "docid": "1db7a6278d6caecb616ffc205dd7c627", "score": "0.57701087", "text": "def clean_resource_json(resource_json):\n\n for a in ('parent_docname', 'parent', 'template', 'repr', 'series'):\n if a in resource_json:\n del resource_json[a]\n\n props = resource_json['props']\n for prop in (\n 'acquireds', 'style', 'in_nav', 'nav_title', 'weight',\n 'auto_excerpt'):\n if prop in props:\n del props[prop]\n\n return resource_json", "title": "" }, { "docid": "16e8d1f9e849b20f69710f3d49e6f401", "score": "0.57638747", "text": "def _clean_files(*files):\n for file_to_remove in files:\n LOGGER.info(f\"Removing: {file_to_remove}\")\n if os.path.exists(file_to_remove):\n os.remove(file_to_remove)", "title": "" }, { "docid": "d57832d3cc2b8a900602c5ff0acc00fb", "score": "0.5716858", "text": "def delete(list):\n for file in list:\n if os.path.isfile(file):\n os.unlink(file)\n return", "title": "" }, { "docid": "104c68767521566452bd0c17dc0a9265", "score": "0.5712357", "text": "def rm(*paths):\n for p in paths:\n sh(\"rm -rf {}\", p)\n return paths", "title": "" }, { "docid": "3392b4f8ac3df0809503e6292b8f5802", "score": "0.56879425", "text": "def unset(cls, client, resource, args) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tunsetresource = lsnappsprofile()\n\t\t\t\tif type(resource) != type(unsetresource):\n\t\t\t\t\tunsetresource.appsprofilename = resource\n\t\t\t\telse :\n\t\t\t\t\tunsetresource.appsprofilename = resource.appsprofilename\n\t\t\t\treturn unsetresource.unset_resource(client, args)\n\t\t\telse :\n\t\t\t\tif type(resource[0]) != cls :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ lsnappsprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].appsprofilename = resource[i]\n\t\t\t\telse :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ lsnappsprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].appsprofilename = resource[i].appsprofilename\n\t\t\t\tresult = cls.unset_bulk_request(client, unsetresources, args)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "9ca9a9db499347daabd3283c0592687d", "score": "0.56797224", "text": "def remove_files(self, paths):\n for p in paths:\n self._remove_record(p)", "title": "" }, { "docid": "f08e775b283996043880965d17bd18fe", "score": "0.5679049", "text": "def cleanup(data):\n for section, entries in data:\n for entry in entries:\n git_rm(entry.path)", "title": "" }, { "docid": "c759aed063185e8af73658869f6c14a9", "score": "0.56671757", "text": "def delete_files(self):\n self.files_names = glob.glob('img*.ppm')\n\n for img in self.files_names:\n 
try:\n os.remove(img)\n except OSError:\n print(\"Error: %s : %s\" % (img, OSError.strerror))", "title": "" }, { "docid": "7138d2209c83840595a36322f8c0c498", "score": "0.56655544", "text": "def clean():\n for name in [\"testfile\", \"testfile.gz\", \"testfile.bz2\"]:\n try:\n os.remove(name)\n except:\n continue", "title": "" }, { "docid": "d201f19aad83caffc93baef7a59d15a6", "score": "0.5660638", "text": "def deinitialize(ctx, **_):\n ctx = ctx or _ctx\n del ctx.instance.runtime_properties['resources']", "title": "" }, { "docid": "a2be65c9c76b451a461bf24ccf88e584", "score": "0.56343174", "text": "def delete_file_paths(file_paths):\n for file_path in file_paths:\n try:\n remove(file_path)\n except:\n pass", "title": "" }, { "docid": "3de35d4bc401202a8bf254c6d7b51d3e", "score": "0.5612071", "text": "def deleteMultiProxy( multiProxyDict ):\n if multiProxyDict[ 'tempFile' ]:\n try:\n os.unlink( multiProxyDict[ 'file' ] )\n except:\n pass", "title": "" }, { "docid": "682ed9dce38a271b5d46d9061693158b", "score": "0.5602438", "text": "def purge(files, replace=False):\n if len(files) > 0:\n for f in files:\n os.remove(f)\n if replace:\n touch(f)", "title": "" }, { "docid": "b82c411410ab3045a4426cd664c58864", "score": "0.5586783", "text": "def delete_files(self):\n raise NotImplementedError", "title": "" }, { "docid": "4a01fcbd2e69072f772e620d51edafa6", "score": "0.55831957", "text": "def delete_properties (self, uri, like=''):\n select = \"\"\"DELETE FROM property \n WHERE uri LIKE %s\n \"\"\" \n success = self._dbupdate(select, uri+like)\n return success", "title": "" }, { "docid": "5fe8fdb60956062f57421aafb50ff4b8", "score": "0.55795354", "text": "def clear_dictionaries():\n work_packages_path = os.path.join(PATH, \"work_packages\")\n for directory in [d for d in os.listdir(work_packages_path) if os.path.isdir(os.path.join(work_packages_path, d))]:\n r_files = glob.glob(os.path.join(work_packages_path, directory) + '/*')\n for f in r_files:\n print(f)\n os.remove(f)\n os.rmdir(os.path.join(work_packages_path, directory))\n r_files = glob.glob(os.path.join(work_packages_path, '*'))\n for f in r_files:\n os.remove(f)", "title": "" }, { "docid": "e955aaaefeb719438eb0819b04769e06", "score": "0.5576268", "text": "def delete(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tdeleteresource = lsnappsprofile()\n\t\t\t\tif type(resource) != type(deleteresource):\n\t\t\t\t\tdeleteresource.appsprofilename = resource\n\t\t\t\telse :\n\t\t\t\t\tdeleteresource.appsprofilename = resource.appsprofilename\n\t\t\t\treturn deleteresource.delete_resource(client)\n\t\t\telse :\n\t\t\t\tif type(resource[0]) != cls :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tdeleteresources = [ lsnappsprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tdeleteresources[i].appsprofilename = resource[i]\n\t\t\t\telse :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tdeleteresources = [ lsnappsprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tdeleteresources[i].appsprofilename = resource[i].appsprofilename\n\t\t\t\tresult = cls.delete_bulk_request(client, deleteresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "a703648a0dec2b5afbfc61cb024164e0", "score": "0.5568904", "text": "def test_1_clean():\n for this_file in (\n \"usb.iso\",\n \"usb.db\",\n \"catalogue.json\",\n \"new.iso\",\n \"new_0000.iso\",\n \"new_0001.iso\",\n 
\"new_0002.iso\",\n \"archiver.pickle\",\n \"archiver.dill\",\n \"archiver.json\",\n ):\n try:\n os.remove(Path(this_file))\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "7927289b611011a9646e45551da78540", "score": "0.5556534", "text": "def remove_files(self, *paths):\r\n for path in paths:\r\n self.remove_file(path)", "title": "" }, { "docid": "cb2d7b5c3b06d11b71481d61f903f3ac", "score": "0.55561626", "text": "def clear_files():\n if os.path.exists(FICHIER_NOMS):\n os.remove(FICHIER_NOMS)\n if os.path.exists(FICHIER_OPERATIONS):\n os.remove(FICHIER_OPERATIONS)", "title": "" }, { "docid": "162d52b6f392fceb06dd735f8607d7de", "score": "0.5555102", "text": "def delete_list_files(\n *,\n files: List[Path] | List[str]\n) -> NoReturn:\n paths = [Path(file) for file in files]\n for path in paths:\n path.unlink()", "title": "" }, { "docid": "4a64ac2600e92e3bf79306f1357a0ba0", "score": "0.5530811", "text": "def cleanup_temporary_files(list_of_files):\n for single_file in list_of_files:\n try:\n os.remove(single_file)\n except OSError:\n # Raises exception if this doesn't exist (never created or\n # already removed). Ignore.\n pass", "title": "" }, { "docid": "c84dbfe4351d6c4154a5bcbc65097719", "score": "0.5528499", "text": "def _delete_argument_values_that_must_be_files_or_dirs(to_unlink):\n for name in to_unlink:\n if os.path.isfile(name):\n os.unlink(name)\n else:\n os.rmdir(name)", "title": "" }, { "docid": "7570d9bb73a66666007f5eb3854123c4", "score": "0.5526439", "text": "def resource_delete_list(self) -> List[Command]:\n return [\n # desure machine mount\n self.mount_point_delete(),\n self.mount_dir_delete(),\n # desure runtime folders\n self.folder_delete(self.machine_store.base_dir()),\n ]", "title": "" }, { "docid": "6642e76fa6f25560e49903fcb213491b", "score": "0.5523906", "text": "def remove_files(self):\n if not self.is_loaded():\n return;\n\n for f in self.manifest[\"files-remove\"]:\n\n if os.path.isfile(f[\"path\"]):\n os.remove(f[\"path\"])", "title": "" }, { "docid": "be133fb524a874f69bd7dee9eafb0de2", "score": "0.5506843", "text": "def deleteSoilRasters(context, manifest):\n for entry in manifest.keys():\n if entry.find('soil_raster_') == 0:\n filePath = os.path.join( context.projectDir, manifest[entry] )\n deleteGeoTiff(filePath)", "title": "" }, { "docid": "eb8a65cabaaea087ce3ee48506ab827a", "score": "0.5503364", "text": "def DeleteMultiple(self, remote_files):\n files_str = \" \".join(remote_files)\n if files_str:\n self._Shell(\"rm -fr %s\" % files_str)", "title": "" }, { "docid": "e997bd692007e97ffe2f02fc54a1d907", "score": "0.5485528", "text": "def cleanUpFiles():\n\n candidates = [localconffile,\n localtpfile,\n localpackagesfile,\n localgroupsfile,\n unsignedconffile,\n unsignedtpfile,\n unsignedpackagesfile,\n unsignedgroupsfile,\n \"GetCookie\"]\n \n for file in candidates:\n removeFile(file)", "title": "" }, { "docid": "ad964b2c2915594e011930c7992df8ac", "score": "0.5485291", "text": "def delete(args):\n # walk down the line to the last specified key\n config = value = ConfigObj(args.file)\n for key in args.keys:\n section = value\n value = value[key]\n del section[key]\n if not args.keep_empty_sections:\n # remove the deleted key from the key list\n args.keys.pop()\n while section is not config:\n # walk up all sections and remove empty ones\n if section:\n break\n section = section.parent\n key = args.keys.pop()\n del section[key]\n if args.delete_file_if_empty and not config:\n # remove the file, if empty\n os.unlink(args.file)\n else:\n 
config.write()", "title": "" }, { "docid": "5cc0909c1ac61ae5246b101754bf5df9", "score": "0.5478214", "text": "def delete_test_files(self):\n\n for attachment in self.todo.attachments.all():\n attachment.delete()", "title": "" }, { "docid": "1e0a4155fb5e1753479fbfad4d6d42d4", "score": "0.54631287", "text": "def delete_files(path, files):\n for filename in files:\n file_path = os.path.join(path, filename)\n if os.path.isfile(file_path):\n os.remove(file_path)\n else:\n print(f'Cannot file file: {file_path}')", "title": "" }, { "docid": "cc413e43c10cf131e31ef3e8fc03a920", "score": "0.5462314", "text": "def delRessource():\n del var.eatingList[-1]\n del var.drinkingList[-1]\n del var.staminaList[-1]\n del var.staminaList[-1]", "title": "" }, { "docid": "afd7889cca21c8fb056c55b0fb8cd96b", "score": "0.5461991", "text": "def cleanup_fileserver(**_):\n ctx.logger.info('Cleaning up fileserver...')\n\n paths_to_remove = ctx.instance.runtime_properties['paths_to_remove']\n execute_and_log(['rm', '-rf'] + paths_to_remove, ignore_errors=True)", "title": "" }, { "docid": "3ed58a0f4b18a15d0127be98594ebc75", "score": "0.5458769", "text": "def delete_many(self, filepath, keys2delete):\n\n option = self.j_utils.danger_information()\n print('\\n option: ', option)\n if option == \"quit\":\n print('\\n chosen quit')\n return False\n if option == 'yes':\n pass \n \n if isfile(filepath):\n with open(filepath) as f:\n new_data = json.load(f)\n for i in range(len(keys2delete)):\n new_data.pop(keys2delete[i])\n \n with open(filepath, 'w') as f:\n json.dump(\n new_data, \n f, \n indent=4, \n sort_keys=True, \n separators=(',', ': ')\n )\n self.j_utils.success_information()\n return True\n else:\n self.j_utils.not_exists_information()\n return False", "title": "" }, { "docid": "0e75a8889ad204ea33150627871fc9c4", "score": "0.5454281", "text": "def _remove(self, filenames):\n # fmt: off\n for f in filenames:\n try:\n os.remove(f)\n log(f'cleanup [{os.path.basename(f)}]')\n except FileNotFoundError:\n pass\n # fmt: on", "title": "" }, { "docid": "7f7b95fdb78cc340ebd555ef4d6887f7", "score": "0.5451039", "text": "def tearDown(self):\n\n del self.p1\n del self.p2\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")", "title": "" }, { "docid": "69faf500d09f7d39c208d7dcd5e0f5d1", "score": "0.54480803", "text": "def logical_delete(self, user, resource=None, delete_res_files=True, delete_meta_files=True):\n\n from hs_core.hydroshare.resource import delete_resource_file\n\n parent_aggr = self.get_parent()\n if resource is None:\n resource = self.resource\n\n if delete_meta_files:\n # delete associated metadata and map xml documents\n istorage = resource.get_irods_storage()\n if istorage.exists(self.metadata_file_path):\n istorage.delete(self.metadata_file_path)\n if istorage.exists(self.map_file_path):\n istorage.delete(self.map_file_path)\n\n # delete schema json file if this a model aggregation\n if istorage.exists(self.schema_file_path):\n istorage.delete(self.schema_file_path)\n\n # delete all resource files associated with this instance of logical file\n if delete_res_files:\n for f in self.files.all():\n delete_resource_file(resource.short_id, f.id, user, delete_logical_file=False)\n else:\n # first need to set the aggregation for each of the associated resource files to None\n # so that deleting the aggregation (logical file) does not cascade to deleting of\n # resource files associated with the aggregation\n self.files.update(logical_file_object_id=None, logical_file_content_type=None)\n\n # delete logical 
file first then delete the associated metadata file object\n # deleting the logical file object will not automatically delete the associated\n # metadata file object\n metadata = self.metadata if self.has_metadata else None\n\n # if we are deleting a model program aggregation, then we need to set the\n # metadata of all the associated model instances to dirty\n if self.is_model_program:\n self.set_model_instances_dirty()\n self.delete()\n\n if metadata is not None:\n # this should also delete on all metadata elements that have generic relations with\n # the metadata object\n metadata.delete()\n\n # if this deleted aggregation has a parent aggregation - xml files for the parent\n # aggregation need to be regenerated at the time of download - so need to set metadata to dirty\n if parent_aggr is not None:\n parent_aggr.set_metadata_dirty()\n\n resource.cleanup_aggregations()", "title": "" }, { "docid": "a1a0c0c97bb5bc6b9b94b378e39124f5", "score": "0.54439086", "text": "def clean(self):\n os.popen('rm -rf pdf.txt')\n os.popen('rm -rf html.txt')\n os.popen('rm -rf /ci/data/autotest/tmp/srv')", "title": "" }, { "docid": "349d4f88df11e77be03a654c5fd4ad9d", "score": "0.54416585", "text": "def clean_up():\n if testing:\n print(\"Deleting all Files\")\n for i in range(len(file_names)):\n if os.path.isfile(file_names[i]):\n os.remove(file_names[i])", "title": "" }, { "docid": "0a480f57d654043e80a1cafc89ca1328", "score": "0.54405904", "text": "def remove(self):\n for pipeline in self._data_pipeline:\n self._data_pipeline[pipeline].remove_file()\n for pipeline in self._data_pipeline:\n self._data_pipeline[pipeline].remove_folder()", "title": "" }, { "docid": "241c6cd5d641c13f6d076f28c7833ab4", "score": "0.54307365", "text": "def delete_files(paths: List[str]) -> None:\n if any('husqvarna-datalake/raw/' in path for path in paths):\n raise PermissionError(\n 'Access Denied: Not possible to remove files from raw layer')\n\n client = boto3.client('s3')\n\n for path in paths:\n bucket, key = _extract_bucket_and_key(path)\n client.delete_object(Bucket=bucket, Key=key)", "title": "" }, { "docid": "9fe916df531b811878e39c48cdd6240f", "score": "0.5419099", "text": "def cleanup_files(self):\n logger.debug('Cleaning up...')\n with indent_log():\n for req in self.reqs_to_cleanup:\n req.remove_temporary_source()", "title": "" }, { "docid": "905a1bade72fad30e0265366301993fa", "score": "0.54107416", "text": "def delete(dict_keys={}, file=conf_main):\n try:\n config_dict = read(file)\n dict_keys = [s.strip() for s in dict_keys]\n for key in dict_keys[:-1]:\n if '_x' not in locals():\n if key not in config_dict:\n config_dict[key] = {}\n _x = config_dict.get(key)\n else:\n if key not in _x:\n _x[key] = {}\n _x = _x.get(key)\n del _x[dict_keys[-1]]\n\n tempfile = join(path_conf, str(uuid.uuid4()))\n with open(tempfile, 'w') as f:\n json.dump(config_dict, f, indent=4)\n\n # rename temporary file replacing old file\n os.rename(tempfile, join(path_conf, file))\n except Exception as err:\n print(f\"Could not delete key in the file '{file}': {err}\")", "title": "" }, { "docid": "2642275501af9ee5f9f7277508a45c44", "score": "0.53922004", "text": "def delete_property(self, en_name: str, fa_name: str):\n\n with MongoClient('mongodb://localhost:27017/') as client:\n products = client.shopping.products\n products.update_one({\n \"_id\": ObjectId(self.properties)\n },{\n '$unset': {\n f\"en.{en_name}\": 1,\n f\"fa.{fa_name}\": 1\n }\n })", "title": "" }, { "docid": "e43cf033de0ad82044a34dd57dbe727b", "score": "0.5390422", "text": 
"def delete(project_id):\n remove(f'{files_path}/{project_id}.json')", "title": "" }, { "docid": "23133c39d7fec756d6bfad27c342e632", "score": "0.5386653", "text": "def deleteResourceFromStorage(self, resource_name):\n self.deleteFile(resource_name)", "title": "" }, { "docid": "5937275d30928cc8ca2bffc24834d914", "score": "0.53844935", "text": "def delete_resources_on_service_clients(physical_line, logical_line, filename,\n line_number, lines):\n if not _common_service_clients_check(logical_line, physical_line,\n filename):\n return\n\n for line in lines[line_number:]:\n if METHOD.match(line) or CLASS.match(line):\n # the end of a method\n return\n\n if 'self.delete(' not in line and 'self.delete_resource(' not in line:\n continue\n\n if METHOD_DELETE_RESOURCE.match(logical_line):\n return\n\n msg = (\"T111: [DELETE /resources/<id>] methods should be \"\n \"delete_<resource name>\")\n yield (0, msg)", "title": "" }, { "docid": "486aa73e96bc178ab158fa4d0f9e0035", "score": "0.53812224", "text": "def purge(inp, *, images):\n global IMAGES\n IMAGES = [i for i in IMAGES if i not in images]\n save_images(images[0].category, 'records purged', inp.user)\n return lex.images.purge(count=len(images))", "title": "" }, { "docid": "d14f51f2d756870ac6c862f89949e143", "score": "0.53783303", "text": "def copy_and_cleanup(paths: List[Tuple[str]]) -> None:\n copy_keys(paths)\n delete_files([t[0] for t in paths])", "title": "" }, { "docid": "2418c66a30b9a344819fad740bd5af99", "score": "0.53667057", "text": "def resource_cleanup(cls):\n super(BaseTagsTest, cls).resource_cleanup()", "title": "" }, { "docid": "5bb01fdd5779b35aac08638bad7a1eff", "score": "0.53586966", "text": "def tearDown(self):\n try:\n list = os.listdir(os.path.join(settings.MEDIA_ROOT,\n 'uploads/images/galleries/'))\n except:\n return\n for file in list:\n os.remove(os.path.join(settings.MEDIA_ROOT,\n f'uploads/images/galleries/{file}'))", "title": "" }, { "docid": "c41d334ea88783d2a10cfe88c28e29e3", "score": "0.535661", "text": "def delete(uris):\n # if uri list is empty do nothing\n if not uris:\n return True\n\n # group uris by type\n grouped_uris = util.classify_uris(uris,\n as_dataframe=False,\n exclude=['services', 'publishers'],\n require_same_type=True)\n resource = list(grouped_uris)[0]\n uris = grouped_uris[resource]\n\n db = get_db()\n for uri in uris:\n if resource == 'collections':\n if uri not in get_collections():\n logger.error('Collection does not exist: %s', uri)\n raise ValueError('Collection does not exists')\n\n with db_session:\n datasets = db.Dataset.select(lambda d: d.collection.name == uri)\n if datasets.count() > 0:\n datasets.delete()\n db.Collection[uri].delete()\n\n path = _get_project_dir()\n path = os.path.join(path, uri)\n if os.path.exists(path):\n logger.info('deleting all data under path: %s' % path)\n shutil.rmtree(path)\n\n if resource == 'datasets':\n with db_session:\n dataset = db.Dataset[uri]\n\n if dataset.source == 'derived':\n catalog_entry_datasets = select_datasets(lambda d: d.catalog_entry == dataset.catalog_entry)\n\n if len(catalog_entry_datasets) == 1:\n _, _, catalog_id = util.parse_service_uri(dataset.catalog_entry)\n db.QuestCatalog[catalog_id].delete()\n\n try:\n os.remove(dataset.file_path)\n except (OSError, TypeError):\n pass\n\n dataset.delete()\n\n return True", "title": "" }, { "docid": "e60f4120547b781922f960e7d8a2296c", "score": "0.5353475", "text": "def remove_all_data():\n if os.path.exists(EMPLOYEE_PICKLE_FILE):\n os.remove(EMPLOYEE_PICKLE_FILE)\n if 
os.path.exists(PROJECT_PICKLE_FILE):\n os.remove(PROJECT_PICKLE_FILE)", "title": "" }, { "docid": "16a95a8049ebf60c5fd46226cc9ea03c", "score": "0.53496516", "text": "def clean(self):\n for d in self.files[\"locations\"]:\n shutil.rmtree(d)\n return", "title": "" }, { "docid": "dcf6b1a2eda02a0a6847d2c9b8906483", "score": "0.53417665", "text": "def Delete(self):\n\n for i in range(self._filetrs.GetSize()):\n p = self._filetrs.GetObject(i)\n p.Delete()", "title": "" }, { "docid": "7cc1449b906a51a7f73cb1c6439678d9", "score": "0.5336056", "text": "def clean_results(*args):\r\n for fp in args:\r\n if _path.isfile(fp) and _path.exists(fp):\r\n _remove(fp)", "title": "" }, { "docid": "07d5f565cf13f382c71218ddb023bd2f", "score": "0.5334752", "text": "def _delete_metadata_files(takeout_dir):\n for dirpath, _, filenames in os.walk(os.path.join(takeout_dir,\n *PHOTOS_SUBDIR)):\n metadata_files = [os.path.join(dirpath, name) for name in filenames if\n name.endswith('.json')]\n\n for metadata_file in metadata_files:\n os.remove(metadata_file)", "title": "" }, { "docid": "965c79112ddcace71fe14d6731c89bfb", "score": "0.5323365", "text": "def clean_cassettes(session):\n for dir in Path(__file__).parent.glob(\"**/cassettes/*\"):\n print(f\"Removing {dir}\")\n rmtree(dir, ignore_errors=True)", "title": "" }, { "docid": "43ca0d1a3ddc0afa6173c03c330902a9", "score": "0.53212154", "text": "def tearDown(self):\n\t\tfor format, file in self.outfiles.iteritems():\n\t\t\tif (file and os.path.isfile(file)):\n\t\t\t\tos.remove(file)", "title": "" }, { "docid": "6cd528a41e4f21f5b5668c0c90d5bba3", "score": "0.53141665", "text": "def delete(escaped, outfile, preserve_timestamp, file, key, encoding):\n if escaped:\n key = list(map(unescape, key))\n with click.open_file(file, encoding=encoding) as fpin:\n with click.open_file(outfile, 'w', encoding=encoding) as fpout:\n setproperties(fpin, fpout, dict.fromkeys(key), preserve_timestamp)", "title": "" }, { "docid": "e996ce4a90fbe516cadf496b604d07f1", "score": "0.531247", "text": "def rm(file_names, **kwargs):\n try:\n action = actions.Action(os.getcwd())\n init_logger(action.path)\n if not file_names and not ('all' in kwargs and kwargs['all']):\n logger.info(\"Usage: ltk rm [OPTIONS] FILE_NAMES...\")\n return\n action.rm_action(file_names, **kwargs)\n except (UninitializedError, ResourceNotFound, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return", "title": "" }, { "docid": "c61e60520b40a0d5463cd3c27bceb836", "score": "0.5311952", "text": "def clear_files(self):\n del self.files[:]", "title": "" }, { "docid": "44cc8f861a53acd90f8c35f32f58a054", "score": "0.5304881", "text": "def cleanDumpFiles(self):\n temp_dir_path = self.getDumpFileDirectory()\n list(\n map(os.remove,\n (os.path.join(temp_dir_path, file_path)\n for file_path in os.listdir(temp_dir_path)\n if os.path.exists(os.path.join(temp_dir_path, file_path)))))\n #for file_path in os.listdir(temp_dir_path):\n # full_path = os.path.join(temp_dir_path, file_path)\n # if os.path.exists(full_path):\n # os.remove(full_path)\n os.rmdir(temp_dir_path)\n logging.info('all dump records generated by this testcase are cleaned successfully')", "title": "" }, { "docid": "66447dcffe92b923b3173e77d7303bb6", "score": "0.5299639", "text": "def remove_files(*patterns):\n for pattern in patterns:\n for fname in glob.glob(pattern):\n os.remove(fname)", "title": "" }, { "docid": "c496d4ffbf4794651c724f871eee9632", "score": "0.52976924", "text": "def destroy_tf_resources():\n try:\n os.system(\"terraform destroy 
-auto-approve\")\n except Exception as error:\n logger.info(f\"Error in destroying tf resources: {error}\")", "title": "" }, { "docid": "0b45ee85c601f207bd4893b5cde9c480", "score": "0.5296543", "text": "def clean_store():\n non_python = set(glob(f'{os.path.dirname(__file__)}/*')) - set(glob(f'{os.path.dirname(__file__)}/*.py'))\n for path in non_python:\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)", "title": "" }, { "docid": "5e349c7a43c2b7d86dfd258cf2689b11", "score": "0.5296377", "text": "def bulk_delete(self, keys: Iterable[str]):\n for k in keys:\n try:\n del self[k]\n except KeyError:\n pass", "title": "" }, { "docid": "73b3a1254f6fd69431f2355df52405c8", "score": "0.5293572", "text": "def resource_set_delete(self, token, id):\n return self._realm.client.delete(\n '{}/{}'.format(\n self.well_known['resource_registration_endpoint'], id),\n headers=self.get_headers(token)\n )", "title": "" }, { "docid": "a91461d15b797aafa06eae786d0a9f3c", "score": "0.52857155", "text": "def remove_files(*filenames):\n for i in filenames:\n try:\n os.remove(i)\n except:\n pass", "title": "" }, { "docid": "52e8ffc1ad75b35ccf846b4d56e0ead6", "score": "0.52838475", "text": "def clear():\n OSUtility.removedirs(\"resources\\\\audios\")", "title": "" }, { "docid": "ecd14391829f4333cbad1e7ae7624a0a", "score": "0.5278885", "text": "def cleanup():\n for f in Internals.__created_temp_files:\n utils.silent_remove(f)\n Internals.__created_temp_files = []", "title": "" }, { "docid": "7470d8ee7559d3699220b984b5093174", "score": "0.52763844", "text": "def delete_model_store(model_store=None):\n model_store = model_store if model_store else MODEL_STORE\n for f in glob.glob(model_store + \"/*.mar\"):\n os.remove(f)", "title": "" }, { "docid": "945d4ec0b9f59f82eac98b63129a49f3", "score": "0.52759147", "text": "def _gc(self):\n to_remove = [self.chk_objects, self.chk_bkgrnd, self.catalog_file, self.path]\n for f in to_remove:\n if os.path.exists(f):\n os.remove(f)", "title": "" }, { "docid": "4f59ee1bb9e98762c378509c8994b264", "score": "0.5268117", "text": "def tearDown(self):\n for file_name in self.quizzes.keys():\n os.remove(file_name)", "title": "" }, { "docid": "2f89b5d188f1999326f25f6c31c0505d", "score": "0.5260265", "text": "def clean():\r\n import os\r\n import glob\r\n\r\n for p in ('bin', 'Scripts', 'build', 'dist', 'include', 'lib', 'man',\r\n 'share', 'FlexGet.egg-info', 'paver-minilib.zip', 'setup.py'):\r\n pth = path(p)\r\n if pth.isdir():\r\n pth.rmtree()\r\n elif pth.isfile():\r\n pth.remove()\r\n\r\n for pkg in set(options.setup.packages) | set(('tests',)):\r\n for filename in glob.glob(pkg.replace('.', os.sep) + \"/*.py[oc~]\"):\r\n path(filename).remove()", "title": "" }, { "docid": "6b96cc51ad677ccee2f1f8a105a16289", "score": "0.52583486", "text": "def _clean_resource_config(config, allowed_params):\n for key in list(config.keys()):\n if key not in allowed_params:\n config.pop(key)", "title": "" }, { "docid": "0d23fb8f5c0ebb5f8e66db7ece2fc657", "score": "0.52578616", "text": "def cleanup(self):\r\n\t\tself.cleanup_stale_items()\r\n\t\tself.cleanup_resources_directory()", "title": "" }, { "docid": "65f2972120ac340e5036363fd641a530", "score": "0.5245624", "text": "def delete_files(s3, bucket_name, file_list):\n\n cnt = 0 # for logging\n for file_name in file_list:\n s3.Object(bucket_name, file_name).delete()\n cnt += 1\n print(f'{cnt} files have been deleted')", "title": "" }, { "docid": "f47bf1d3cbe62c2fa94e3f7654b98789", "score": "0.5240175", "text": "def delete_content():\n 
all_videos = os.listdir('Videos')\n for video in all_videos:\n try:\n os.unlink('Videos/{}'.format(video))\n except Exception as e:\n log.console(e)\n pass\n all_photos = os.listdir('DCApp')\n for photo in all_photos:\n try:\n os.unlink('DCApp/{}'.format(photo))\n except Exception as e:\n log.console(e)\n pass", "title": "" }, { "docid": "2b27267c3ca197de8709df6e5889d70e", "score": "0.5233372", "text": "def delete_all(self, context, resource_class, for_update=False, **kwargs):\n self._validate_resource_class(resource_class)\n attr_val = {k: v for k, v in list(kwargs.items())\n if k in resource_class.attributes() +\n ['in_', 'notin_', 'order_by']}\n return self._delete_db(context.store, resource_class, **attr_val)", "title": "" }, { "docid": "c2e2b83274325ac6382c6c3ac6343bd7", "score": "0.52274853", "text": "def remove_props(props):\n def preprocessor(data=None):\n for prop in props:\n del data[prop]\n\n return preprocessor", "title": "" } ]
cb367b9c25cd0cb0be785c2e03d99dda
1. Scrape Wikipedia for the list of Illinois counties and store it in a list 2. Loop through the county list, including the county name in each request submission
[ { "docid": "e31debce864abfd1529b4c1bc7b14036", "score": "0.6705514", "text": "def get_response(self):\n\n # Scrape Illinois counties in order to pass county name to URL\n url_counties = \"https://en.wikipedia.org/wiki/List_of_counties_in_Illinois\"\n response_url_counties = requests.get(url_counties)\n soup = BeautifulSoup(response_url_counties.text, \"html.parser\")\n\n counties = []\n\n for county in soup.select(\"tbody > tr > th > a\"):\n counties.append(county.get_text())\n\n counties = [county.split(\" \")[0] for county in counties]\n\n for index, county in enumerate(counties, start=1):\n\n # Submit API request\n url = \"https://idph.illinois.gov/DPHPublicInformation/api/COVID/GetCountyHistorical?countyName={}\".format(county)\n response = requests.get(url)\n\n # Output to log\n log.logging.info(\"Request-Response {} submitted for {} with status code of {}\".format(index, type(self).__name__, response.status_code))\n\n for row in response.json()[\"values\"]:\n self.data_il.append(row)\n\n # Pause program before submitting next API request\n time.sleep(120)", "title": "" } ]
[ { "docid": "8ee020d5481124082ff88a983611dbf9", "score": "0.6404991", "text": "def _input_county_to_search_on_list_page(self):\n self.driver.get(secrets.SITE_URL + '/list/geography')\n time.sleep(2)\n previous_counties_to_remove = self.driver.find_elements_by_class_name('listed')\n for county in previous_counties_to_remove:\n county.click()\n\n county_input_field = self.driver.find_element_by_id('countysearch')\n county = self.county\n county_and_state = self.county + ' County, OH'\n county_input_field.send_keys(county)\n time.sleep(2)\n js_string = \"e=$('.ui-menu-item-wrapper:contains({})'); e.click()\".format(county_and_state)\n self.driver.execute_script(js_string)\n self.driver.execute_script(js_string) # need to do this twice because it clicks on the first element the first time", "title": "" }, { "docid": "1b562c89741518263fcf82ddf564d3dd", "score": "0.6060859", "text": "def _clean_up_county_search(self):\n self.driver.get(secrets.SITE_URL + '/list/geography')\n time.sleep(2)\n previous_counties_to_remove = self.driver.find_elements_by_class_name('listed')\n for county in previous_counties_to_remove:\n county.click()", "title": "" }, { "docid": "91b0187417b093c624ebcbbdad803df9", "score": "0.6010446", "text": "def scrape_stuff(cities):\n newdata = []\n for place in cities:\n req = requests.get(place[2], verify=False)\n soup = BeautifulSoup(req.content, 'html.parser')\n ovw = overview_parse(soup)\n newdata.append(place + ovw)\n\n return newdata", "title": "" }, { "docid": "6ce16824a0112b87c2e8cea71c2942c6", "score": "0.595009", "text": "def county_breakdown():\n\n\n\tdb = MySQLdb.connect(\"mysql-server\", \"root\", \"secret\", \"mydb\")\n\tcursor = db.cursor()\n\n\n\t# Getting a list of all the counties\n\tall_counties = cursor.execute(\"SELECT DISTINCT name FROM counties ORDER BY name\")\n\tall_counties_result = cursor.fetchall()\n\n\tcounty_list = []\n\tfor count, elem in enumerate(all_counties_result):\n\t\telem_to_add = all_counties_result[count][0]\n\t\tcounty_list.append(elem_to_add)\n\n\tdropdown_list = county_list \n\n\n\n\t# Getting HTML table\t\n\tspecific_county = 'ALAMANCE'\n\tspecific_county_input = request.form.get('county_select')\n\n\n\t# Default, in the case that none are selected \n\tif specific_county_input != None:\n\t\tspecific_county = specific_county_input\n\n\tcounty_answer = cursor.execute(\"\"\"\n\t\t\t\t\tWITH voted_table AS (\n\t\t\t\t\t\tSELECT voters.ncid, c.county_id, name, CASE WHEN voting_method IS NULL THEN 0 ELSE 1 END AS voted \n\t\t\t\t\t\tFROM voters \n\t\t\t\t\t\tLEFT JOIN vhist\n\t\t\t\t\t\t\tON voters.ncid=vhist.ncid\n\t\t\t\t\t\tJOIN counties c\n\t\t\t\t\t\t\tON voters.county_id = c.county_id\n\t\t\t\t\t\tWHERE name = %s)\n\n\t\t\t\t\tSELECT name AS county, AVG(voted) \n\t\t\t\t\tFROM voted_table\n\t\t\t\t\tGROUP BY name\n\t\t\t\t\tORDER BY name\"\"\", (specific_county,))\n\n\tcounty_answer_table = cursor.fetchall()\n\tcounty_formatted_answers = format_county_result(county_answer_table)\n\tcounty_html_table = json2html.convert(json=county_formatted_answers)\t\n\n\t\n\treturn render_template('countytemplate.html', county_table_input=county_html_table, dropdown_list=dropdown_list, \n\t\t\t\t\t\t\tselected_county = specific_county)", "title": "" }, { "docid": "b6f032db8aa2aa14536929670552702c", "score": "0.58618504", "text": "def get_county() -> Dict:\n url = 'https://socoemergency.org/emergency/novel-coronavirus/coronavirus-cases/'\n # need this to avoid 403 error ¯\\_(ツ)_/¯\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 
10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\n page = requests.get(url, headers=headers)\n page.raise_for_status()\n sonoma_soup = BeautifulSoup(page.content, 'html5lib')\n\n hist_cases, total_tests, cases_by_source, cases_by_age, cases_by_race = get_table_tags(sonoma_soup)\n\n # calculate total cases to compute values from percentages\n # we previously summed the cases across all genders, but with gender data unavailable,\n # now we calculate the the sum of the cases across all age groups\n total_cases = sum([int(group['raw_count']) for group in transform_age(cases_by_age)])\n\n meta_from_baypd = (\n \"On or about 2021-06-03, Sonoma County stopped providing case totals \"\n \"by gender. Null values are inserted as placeholders for consistency.\"\n )\n\n model = {\n 'name': 'Sonoma County',\n 'update_time': datetime.now(timezone.utc).isoformat(),\n 'source_url': url,\n 'meta_from_source': get_source_meta(sonoma_soup),\n 'meta_from_baypd': meta_from_baypd,\n 'series': transform_cases(hist_cases),\n 'case_totals': {\n 'transmission_cat': transform_transmission(cases_by_source, total_cases),\n 'transmission_cat_orig': transform_transmission(cases_by_source, total_cases, standardize=False),\n 'age_group': transform_age(cases_by_age),\n 'race_eth': transform_race_eth(cases_by_race),\n # 'gender': transform_gender(cases_by_gender) # Gender breakdown is no longer available\n 'gender': {'male': -1, 'female': -1} # Insert a placeholder for compatibility\n },\n 'tests_totals': {\n 'tests': transform_tests(total_tests),\n },\n }\n return model", "title": "" }, { "docid": "7a5e121a2267ef710963b697f45eee44", "score": "0.57477576", "text": "def browser_collection():\n url = 'https://campus.concordia.ca/psc/pscsprd/EMPLOYEE/SA/c/SA_LEARNER_SERVICES.SSR_SSENRL_LIST.GBL?Page=SSR_SSENRL_LIST&Action=A&TargetFrameName=None'\n browser.get(url)\n get_terms = lambda: browser.find_element_by_css_selector('.PSLEVEL2GRID').find_elements_by_tag_name('tr')[1:]\n terms = get_terms()\n num_terms = len(terms)\n for _ in range(num_terms):\n browser.get(url)\n terms = get_terms()\n get_term_info(terms[num_terms - 1])\n num_terms -= 1\n browser.close()", "title": "" }, { "docid": "c528b57932891643b4d0b8f2cc1b109b", "score": "0.57229704", "text": "def main(self):\n regions = self.ca_regions if self.params.regions.strip() == 'all' else [r.strip() for r in self.params.regions.split(\",\")]\n for region in regions:\n start = 0\n while True:\n url = \"http://%s.craigslist.org/search/mca?query=%s&srchType=A&s=%d\" % (region, self.params.keyword, start)\n num_of_results = self.getPage(region, url)\n start += 100\n if not num_of_results:\n break\n\n self.printReport()", "title": "" }, { "docid": "0e0fd6983bc884face27a8193466ca47", "score": "0.57002807", "text": "def get_nyc_geosearch_results(text):\n\n url = \"https://geosearch.planninglabs.nyc/v1/autocomplete\"\n payload = {\"text\": text}\n res = requests.get(url, params=payload)\n data = res.json()\n features = data[\"features\"]\n first_ten = features[0:9]\n labels_list = []\n\n for location in first_ten:\n labels_list.append(location[\"label\"])\n\n print(\"!!!!!!!!!!\")\n print(f\"labels_list = {labels_list}\")\n return labels_list", "title": "" }, { "docid": "cfab08bf47de98346ea81a530e8dbcda", "score": "0.56750166", "text": "def get_areas(url,payload):\n # Define the request headers and payload\n headers = {'Accept':'*/*',\n 'Accept-Encoding':'gzip, deflate, sdch',\n 'Accept-Language':'en-US,en;q=0.8',\n 'Cache-Control':'max-age=0',\n 
'Connection':'keep-alive',\n 'Content-Type':'application/x-www-form-urlencoded',\n 'Cookie':'JSESSIONID=DF1B195C91D3F94E1D5FC65EA4A63031; WT_FPC=id=217344d542160f164a81466367471337:lv=1466373814829:ss=1466372100422; msplva-gisappvip.appdmz.nycnet_80_COOKIE=R3993650177',\n 'Host':'maps.nyc.gov',\n 'Referer':'http://maps.nyc.gov/census/',\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',\n 'X-Requested-With':'XMLHttpRequest'}\n # Create a request and send it to the server with our specified arguments\n s = requests.Session()\n content = s.post(url,headers=headers,data=payload)\n response_string = content.content\n # Remove the first four characters of the content (they are '{}&&')\n response_string = response_string[4:]\n # Convert from string to dictionary\n locations = json.loads(response_string)\n # Make a DataFrame of the neighborhoods/census tracts\n locations_df = pd.DataFrame(locations['items'])\n # Return the DataFrame\n return locations_df", "title": "" }, { "docid": "2770d4876425751f762075d60ae4e3c2", "score": "0.5606722", "text": "def get_citations_needed_report(URL):\n report = request(URL)\n for i in report:\n citation_text = i.find_parent('p')\n print(citation_text.text)", "title": "" }, { "docid": "4550c0e95570f94b846800bace0013e0", "score": "0.5580026", "text": "def scrape_cities(cities):\n for city in cities:\n yield from scrape_city(city)", "title": "" }, { "docid": "ee0c880d0fbb9edb65bfaf92922c878b", "score": "0.55111593", "text": "def ccle_scrape():\n global client, testing\n mapping = client.Metis.MajorToAcronyms\n\n departments = []\n\n if testing:\n departments = [{'major': 'Computer Science', 'acronym': 'COM SCI'}]\n else:\n with open('major_list.json') as json_file:\n departments = json.load(json_file)[\"majors\"]\n\n departments = list(mapping.find({}))\n\n quarterSymbols = ['W', 'S', '1', 'F']\n\n for dept in departments:\n\n try:\n major = dept[\"acronym\"]\n\n # replace spaces with '%20'\n abbrev = major.replace(' ', '%20')\n\n # go through each quarter in a range of years\n for i in range(16,21):\n for q in quarterSymbols:\n quarter = str(i) + q\n ccle_professor_scraper(abbrev, quarter, major)\n except:\n print(\"No acronym for: \" + dept)", "title": "" }, { "docid": "836dd879c5d58c55db76ba95c774022a", "score": "0.5504289", "text": "def get_competitions(fltr = ['all']):\n\n\n # loads information of existing countries\n countries = get_starting_values('data/countries.json')\n\n # udpates countires filter if needed\n if fltr == ['all']:\n fltr = sorted(list(countries.keys()))\n\n # loads information of existing competitions in all countiries\n competitions = get_starting_values('data/competitions.json')\n\n count_countries = 0\n count_competitions = 0\n\n # iterates over countires\n for country in fltr:\n\n if country not in competitions:\n competitions[country] = {}\n\n soup = create_soup('https://int.soccerway.com' + countries[country])\n\n # iterates over competitions to find new competitions\n for div in soup.findAll(\"div\", {'class':'content plain '}):\n for a in div.findAll('a', {\"class\": re.compile('flag.*')}):\n\n url = a['href']\n name = url.split('/')[-3]\n\n if name not in competitions[country]:\n competitions[country][name] = url\n count_competitions += 1\n\n # saves file prints result\n with open('data/competitions.json', 'w') as fp:\n json.dump(competitions, fp, ensure_ascii=False)\n\n count_countries += 1\n\n print(count_competitions, 'competitions from', 
country, 'added\\n')\n time.sleep(3)", "title": "" }, { "docid": "87200d3f813494e7f748b46b91ccd0ec", "score": "0.54708004", "text": "def fill(self):\n url_citations = _CITATIONAUTH.format(self.id)\n url = '{0}&pagesize={1}'.format(url_citations, _PAGESIZE)\n soup = _get_soup(_HOST+url)\n self.name = soup.find('div', id='gsc_prf_in').text\n self.affiliation = soup.find('div', class_='gsc_prf_il').text\n self.interests = [i.text.strip() for i in soup.find_all('a', class_='gsc_prf_inta')]\n self.url_picture = soup.find('img')['src']\n\n # h-index, i10-index and h-index, i10-index in the last 5 years\n index = soup.find_all('td', class_='gsc_rsb_std')\n if index:\n self.hindex = int(index[2].text)\n self.hindex5y = int(index[3].text)\n self.i10index = int(index[4].text)\n self.i10index5y = int(index[5].text)\n else:\n self.hindex = self.hindex5y = self.i10index = self.i10index5y = 0\n\n # number of citations per year\n years = [int(y.text) for y in soup.find_all('span', class_='gsc_g_t')]\n cites = [int(c.text) for c in soup.find_all('span', class_='gsc_g_al')]\n self.cites_per_year = dict(zip(years, cites))\n\n self.publications = list()\n pubstart = 0\n while True:\n for row in soup.find_all('tr', class_='gsc_a_tr'):\n new_pub = Publication(row, 'citations')\n self.publications.append(new_pub)\n if 'disabled' not in soup.find('button', id='gsc_bpf_more').attrs:\n pubstart += _PAGESIZE\n url = '{0}&cstart={1}&pagesize={2}'.format(url_citations, pubstart, _PAGESIZE)\n soup = _get_soup(_HOST+url)\n else:\n break\n self._filled = True\n return self", "title": "" }, { "docid": "b43907c3a0d024af93414dfa28038c7c", "score": "0.545379", "text": "def get_data(pages):\n \n for page in pages:\n\n res = requests.get(page, headers=headers, verify=False)\n \n if res.status_code != 200:\n print('Web site does not exist') \n res.raise_for_status()\n \n soup = BeautifulSoup(res.text, 'html.parser')\n \n d = {}\n \n title = soup.findAll(\"h1\", {\"class\": \"title\"})\n title_text = BeautifulSoup.get_text(title[0]) \n d['title'] = title_text\n \n mydivs = soup.findAll(\"div\", {\"class\": \"album-info\"})\n \n fields = ['Language(s)', 'Instrument(s)', \n 'Culture Group(s)', 'Country(s)',\n 'Year(s) Released', 'Recording Location(s)']\n \n for div in mydivs:\n div_text = BeautifulSoup.get_text(div)\n for field in fields:\n if field in div_text:\n result = div.findAll(\"div\", {\"class\": \"copy\"})\n d[field] = BeautifulSoup.get_text(result[0]).strip()\n \n liner = soup.findAll(\"a\", {\"class\": \"last-sidebar-item\"})\n try:\n d['liner'] = liner[0]['href']\n except IndexError:\n d['liner'] = 'None'\n \n country = d.get('Country(s)')\n \n location = 'None'\n \n if country:\n country = country.split(\";\")[0]\n country = country.split(\" \")[0]\n location = random_point_in_country(\"World_Countries.shp\", country)\n \n \n return_d = {'ethnic_groups': (d.get('Country(s)', 'None') + ' - ' + d.get('Culture Group(s)') \n if d.get('Culture Group(s)') else d.get('Country(s)', 'None')),\n 'album_title': (d['title'] + ' (' + d.get('Year(s) Released') + ')'\n if d.get('Year(s) Released') else d['title']),\n 'languages': d.get('Language(s)', 'None'),\n 'instruments': d.get('Instrument(s)', 'None'),\n 'liner': d['liner'],\n 'location': location}\n \n yield return_d", "title": "" }, { "docid": "16d759165cf664ab537ed63e54d49005", "score": "0.54324424", "text": "def _counties(self):\n try:\n return self._cached_counties\n except AttributeError:\n county_ocd_re = re.compile(r'ocd-division/country:us/state:' +\n 
self.state.lower() + r'/county:[^/]+$')\n self._cached_counties = [m for m in self.jurisdiction_mappings()\n if county_ocd_re.match(m['ocd_id'])]\n return self._cached_counties", "title": "" }, { "docid": "e8994789849bd6c0f044a24adf5bcda2", "score": "0.5427158", "text": "def g_scrape(input_query):\r\n clear_results()\r\n for index in range(len(input_query)):\r\n search_query = input_query[index] + \" \" + keyword_entry.get()\r\n for i in search(query=search_query,tld='co.in',lang='en',num=1,stop=1,pause=2):\r\n url = i\r\n try:\r\n emails = set()\r\n response = requests.get(url)\r\n new_emails = set(re.findall(r\"[a-z0-9\\.\\-+_]+@[a-z0-9\\.\\-+_]+\\.[a-z]+\", response.text, re.I))\r\n emails.update(new_emails)\r\n result_display.insert('end', \"Emails for \"+ input_query[index] + \":\\n\")\r\n for j in emails:\r\n result_display.insert('end', \"\" + j + \"\\n\")\r\n result_display.insert('end', \"\\n\")\r\n except:\r\n result_display.insert('end', \"Webpage error for \"+ input_query[index] +\"! Please try again with different keywords\\n\")", "title": "" }, { "docid": "4c7f5f3287ed014500be2057255673f1", "score": "0.53940904", "text": "async def get_election_offices():\n # Define coroutine functions (context managers)\n async with CloudflareScraper() as session:\n async with session.get(BASE_URL) as s:\n # ClientResponse.read() is a coroutine function so it must be awaited\n text = await s.read()\n soup = bS(text.decode(\"utf-8\"), \"html.parser\")\n\n test_county_data = get_county_codes_and_names(soup)\n county_data = sorted(test_county_data, key=lambda k: k[\"countyName\"])\n num_scraped = 0\n master_list = []\n\n # Create list that will store asyncio tasks\n tasks: List[Task] = []\n for county in county_data:\n code = county[\"countyCode\"]\n name = county[\"countyName\"]\n # Create task for a future asynchronous operation and store it in task list\n tasks.append(asyncio.create_task(scrape_one_county(session, code, name)))\n\n # Run the coroutines and iterate over the yielded results as they complete\n # (out-of-order). Use asyncio.gather() with a couple code modifications to\n # preserve list order\n future: Future[Tuple[str, str, str, str]]\n for future in asyncio.as_completed(tasks):\n # Unpack awaited result of scrape_one_county()\n cleaned_string, protected_email, _, county_name = await future\n schema = format_data_into_schema(\n cleaned_string, protected_email, county_name\n )\n master_list.append(schema)\n num_scraped += 1\n print(\n f\"[Florida] Scraped {county_name} county: \"\n f\"#{num_scraped} of {len(county_data)} .... 
\"\n f\"[{round((num_scraped / len(county_data)) * 100, 2)}%]\"\n )\n\n with open(os.path.join(ROOT_DIR, \"scrapers\", \"florida\", \"florida.json\"), \"w\") as f:\n json.dump(master_list, f)\n return master_list", "title": "" }, { "docid": "6f057bb63a56ef4b7570202502130f21", "score": "0.5376974", "text": "def _get_county_candidates(zcta):\n if zcta in _COUNTY_CANDIDATES_CACHE:\n return _COUNTY_CANDIDATES_CACHE[zcta]\n candidate_lists = []\n for prop in ['containedInPlace', 'geoOverlaps']:\n resp = datacommons.get_property_values([zcta],\n prop,\n out=True,\n value_type='County')\n candidate_lists.append(sorted(resp[zcta]))\n _COUNTY_CANDIDATES_CACHE[zcta] = candidate_lists\n return candidate_lists", "title": "" }, { "docid": "db9af5176b994501bcdb8cdd686fa037", "score": "0.532855", "text": "def get_cities(self):\n url = \"https://m.ctrip.com/restapi/soa2/16593/json/getCityList?_fxpcqlniredt=09031157111978003970\" \\\n \"&__gw_appid=99999999&__gw_ver=1.0&__gw_from=10320666865&__gw_platform=H5\"\n headers = {'content-type': 'application/json'}\n payload = {\n \"args\": \"{\\\"parameter\\\":{\\\"version\\\":\\\"1\\\"}}\",\n \"head\": {\"cid\": \"09031157111978003970\", \"ctok\": \"\", \"cver\": \"1.0\", \"lang\": \"01\", \"sid\": \"8888\",\n \"syscode\": \"09\", \"auth\": None,\n \"extension\": [\n {\"name\": \"terminaltype\", \"value\": \"20\"},\n {\"name\": \"devicetype\", \"value\": \"Macintosh\"},\n {\"name\": \"devicebrand\", \"value\": \"undefined\"},\n {\"name\": \"devicephone\", \"value\": \"Mac\"},\n {\"name\": \"browsername\", \"value\": \"Safari\"},\n {\"name\": \"browserver\", \"value\": \"605.1.15\"},\n {\"name\": \"os\", \"value\": \"IOS\"},\n {\"name\": \"osver\", \"value\": \"10.146\"},\n {\"name\": \"channelid\", \"value\": \"2\"},\n {\"name\": \"page\", \"value\": \"10320666865\"},\n {\"name\": \"refpage\", \"value\": \"\"},\n {\"name\": \"currentpage\",\n \"value\": \"feb85487-ea6f-811d-20ae-1731ec2d34dc\"},\n {\"name\": \"pagename\", \"value\": \"citylist\"},\n {\"name\": \"refpagename\", \"value\": \"\"},\n {\"name\": \"refpageid\", \"value\": \"\"},\n {\"name\": \"vid\", \"value\": \"\"},\n {\"name\": \"la\", \"value\": \"\"},\n {\"name\": \"lo\", \"value\": \"\"},\n {\"name\": \"geoType\", \"value\": \"\"}, {\"name\": \"traceid\",\n \"value\": \"899371e7-ea7d-7c28-35dd-f263725e8d8b\"},\n {\"name\": \"protocal\", \"value\": \"https\"}]},\n \"contentType\": \"json\"\n }\n\n try:\n response = requests.post(url, data=json.dumps(payload), headers=headers)\n _body = json.loads(response.text)\n cities = []\n _result = _body['result']\n result = json.loads(_result)\n data = result['data']\n _cities = data['cityGroup']['cities']\n for cities_letter_group in _cities.values():\n for city in cities_letter_group:\n cities.append({'id': city['cityId'], 'name': city['cityName']})\n return cities\n except Exception as e:\n self.logger.exception(e)\n return None", "title": "" }, { "docid": "d45cccf236faa695ddceb554894d0029", "score": "0.53255963", "text": "def covid_records(grade_state):\n list_covid_records = []\n start_url = 'https://data.ca.gov/api/3/action/datastore_search?resource_id=926fd08f-cc91-4828-af38-bd45de97f8c3'\n records_json = get_records_json(start_url)\n # count calls to the API for realizing grade mode\n count = 1\n # make sure the first page of API can be downloaded \n if records_json != None:\n # find the records list from json\n covid_records_results = records_json['result']\n covid_records = covid_records_results['records']\n while covid_records != []:\n 
read_covid_records(covid_records, list_covid_records)\n # stop crawling when we reached the third call under grade mode\n if grade_state and count == 3:\n break\n # go to the next url \n next_url = 'https://data.ca.gov'+ covid_records_results['_links']['next']\n records_json = get_records_json(next_url)\n count += 1\n if records_json != None:\n covid_records_results = records_json['result']\n covid_records = covid_records_results['records']\n # when the url can't be downloaded, stop calling \n else:\n break\n # create a dataframe based on data retrieved from the API \n cols = ['county','date','new_cases', 'new_deaths'] \n covid_records_df = pd.DataFrame.from_records(list_covid_records,columns=cols)\n print('Successfully get data from source 4!')\n return covid_records_df", "title": "" }, { "docid": "ab883c26de4cb08472abe7ed563d81a4", "score": "0.52537733", "text": "def post_request(self):\r\n for i in range(1,self.endpage):\r\n if i == 1:\r\n url = 'http://stats.gd.gov.cn/zyhygyzjz/index' + '.html'\r\n else:\r\n page = i\r\n url = 'http://stats.gd.gov.cn/zyhygyzjz/index_' + str(i) + '.html'\r\n \r\n req = urllib.request.Request(url, headers=self.headers)\r\n data = urllib.request.urlopen(req).read().decode()\r\n print(url)\r\n soup = BeautifulSoup(data, 'lxml')\r\n urllist = soup.select('li a[target=\"_blank\"]')\r\n self.urllist = urllist\r\n return self.urllist", "title": "" }, { "docid": "6b0464bfd23d7dcb7618c82854e27904", "score": "0.52338004", "text": "def scrape_vehicles():\n \n # A link to Minneapolis craigslist auto section - 10 pages\n urls = ['http://minneapolis.craigslist.org/cto',\n 'http://minneapolis.craigslist.org/cto/index100.html',\n 'http://minneapolis.craigslist.org/cto/index200.html',\n 'http://minneapolis.craigslist.org/cto/index300.html',\n 'http://minneapolis.craigslist.org/cto/index400.html',\n 'http://minneapolis.craigslist.org/cto/index500.html',\n 'http://minneapolis.craigslist.org/cto/index600.html',\n 'http://minneapolis.craigslist.org/cto/index700.html',\n 'http://minneapolis.craigslist.org/cto/index800.html',\n 'http://minneapolis.craigslist.org/cto/index900.html'\n ]\n\n counter = 0\n \n\t# Create a results file to store all of our scraping findings\n file = open('scrape_results.json', 'w')\n\t\n for url_cl in urls:\n\n # Download the list of vehicles for sale by owner\n\n # 1. Here were using requests, a python library for accessing the web\n\n # we add \"cto/\" to the url to tell requests\n # to get the cars and truck by owner\n # on minneapolis.craigslist.org\n # response = requests.get(BASE_URL + \"cto/\")\n\t\t\n\t\t# Just use URL from our list\n response = requests.get(url_cl)\n\n # 2. 
Now we parse HTML using Beautiful Soup library\n\t\t\n # This returns a `soup` object which gives us convenience methods for parsing html\n soup = BeautifulSoup(response.content, \"html.parser\")\n #print(soup)\n\n # Find all the posts in the page.\n\n # Here we're telling BeautifulSoup to get us every\n # span tag that has a class that equals pl\n # these tags might look something like this:\n # <span class='pl'> {content} </span>\n # auto_links = soup.find_all('span', {'class':'pl'})\n\t\t\n\t\t# Realized that we need to go after the \"result-row\" instead\n\t\t# that gives us the link row in every page of results\n auto_links = soup.find_all('li', {'class':'result-row'})\n\n\t\t\n\t\t# Get all the links to auto pages:\n for auto_link in auto_links:\n \n # for each span list, find the \"a\" tag which \n # represents the link to the for sale auto page.\n link = auto_link.find('a').attrs['href']\n \n link_desc = auto_link.find('a')\n\n print(\"Auto Page Link:\")\n print(link)\n #print(\"'link_desc = \" + link_desc.string)\n\n #print counter\n\n # join this relative link with the \n # BASE_URL to create an absolute link\n\n url = urljoin(BASE_URL, link)\n \n # pass this url to a function (defined below) to scrape \n # info about that vehicle on auto page\n scrape_vehicle(url, counter, file)\n counter += 1", "title": "" }, { "docid": "a7a520bf6b8eea48ff8bce3a9e32f981", "score": "0.5228422", "text": "def return_cases(search_input):\n\n soup_strings = []\n for string in soup.strings:\n soup_strings.append(string)\n\n maxi = len(soup_strings)\n i = 0\n data_extract = []\n la_cases = []\n\n while (i <= maxi):\n if(soup_strings[i] == str(search_input)):\n length = len(soup_strings[i:])\n start = len(soup_strings) - length\n end = start + length\n stop = 'Total'\n placeholder=str()\n\n while ((start <= end) & (placeholder != stop)):\n placeholder = soup_strings[start]\n data_extract.append(soup_strings[start])\n start+=1\n\n data_start,data_end = search_start_end(data_extract)\n\n for i in range(len(data_extract)):\n if ((i >= data_start) & (i <= data_end)):\n la_cases.append(data_extract[i])\n break\n i+=1\n\n del data_extract, i, maxi, data_start, data_end, length\n\n la_cases = remove_tabs_and_obs(la_cases)\n\n return la_cases", "title": "" }, { "docid": "2c8353f24e0fa5aecc4fa12cd3186fcd", "score": "0.52007353", "text": "def scrape_population():\n rows = [['name', 'population'], ['Kosovo', '1920079'], ['Sao Tome and Principe', '211028']]\n URL = 'https://en.wikipedia.org/wiki/List_of_countries_by_population_(United_Nations)'\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n countries = soup.find('table', id='main').find('tbody').find_all('tr')[1:-1]\n\n for country in countries:\n fields = country.find_all('td')\n name = fields[0].find('a')['title']\n population = fields[-2].text.replace(',', '')\n\n # Rename countries\n name = name.replace('The Bahamas', 'Bahamas')\n name = name.replace('The Gambia', 'Gambia')\n name = name.replace('Georgia (country)', 'Georgia')\n name = name.replace('Republic of Ireland', 'Ireland')\n name = re.sub('^Republic of the Congo', 'Congo', name)\n name = name.replace('East Timor', 'Timor-Leste')\n name = name.replace('Falkland Islands', 'Falkland Islands (Malvinas)')\n\n if name == 'Serbia':\n # Remove Kosovo population\n population = '7078110'\n\n rows.append([name, population])\n \n with open('generated/population.csv', 'w', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerows(rows)", "title": "" }, { 
"docid": "49c60331fcf00ff55d9d746f888caf3f", "score": "0.5178847", "text": "def get_job_urls(self, url):\n \n sector = {}\n href = {}\n \n response = self._request_until_succeed(url)\n # certain regions have no job postings\n if response is None:\n return([sector, href]) \n soup = BeautifulSoup(response, 'html.parser')\n\n #get counts of number of jobs in different areas\n name_box = soup.find_all('div', attrs={'class': 'wrapper'})\n \n for name in name_box:\n #print(name)\n newnames = name.find_all('a', attrs={'class' : 'topLink tdnone '})\n if len(newnames) > 0:\n for i, n in enumerate(newnames):\n sect = n.find('span', attrs='link').get_text().strip()\n cnt = n.find('span', attrs='counter nowrap').get_text().strip().replace(',','')\n #export a tuple rather than dictionary\n sector[sect] = cnt\n href[sect] = n['href']\n\n return([sector,href])", "title": "" }, { "docid": "da922392b4d981cbc7d0c6031f3f9b5a", "score": "0.51712364", "text": "def scrape_all_pages(self, **filter_args):\n\n dropdown_ids = [\n (\n 'years',\n 'ctl00_ContentPlaceHolder1_lstYears_Input',\n 'ctl00_ContentPlaceHolder1_lstYears_DropDown'\n ),\n (\n 'bodies',\n 'ctl00_ContentPlaceHolder1_lstBodies_Input',\n 'ctl00_ContentPlaceHolder1_lstBodies_DropDown'\n )\n ]\n\n for field, input_id, dropdown_id in dropdown_ids:\n dropdown_xpath = \"//div[@id='{}']/div/ul/li\".format(dropdown_id)\n\n # click on the dropdown menu\n self._click(self.driver.find_element_by_id(input_id))\n\n # wait for first list item to populate\n parent_xpath = \"//div[@id='{}']..\".format(dropdown_id)\n waiting = True\n while waiting:\n time.sleep(0.1)\n dropdown_text = self.driver. \\\n find_element_by_xpath(dropdown_xpath).text\n waiting = dropdown_text == ''\n\n # select filter term \n if field in filter_args.keys():\n # if a particular filter is specified, use that\n elms = self.driver.find_elements_by_xpath(dropdown_xpath)\n filter_options = [elm.text for elm in elms]\n try:\n i = filter_options.index(filter_args[field])\n except ValueError:\n self._log('scraper: unable to find item {} in list {}, '\n 'aborting:'.format(\n filter_args[field], field), self.city_name)\n return []\n filter_elm = elms[i]\n else:\n # if not, select first option in dropdown\n filter_elm = self.driver.find_element_by_xpath(dropdown_xpath)\n self._click(filter_elm)\n\n # click search button\n search_button_id = 'ctl00_ContentPlaceHolder1_btnSearch'\n search_button = self.driver.find_element_by_id(search_button_id)\n self._click(search_button)\n\n # click through pages and save html\n c = 1\n page_data = []\n while True:\n # scrape the page data\n self._log('scraper: scraping page {}'.format(c))\n page_data.append(self.driver.page_source)\n\n # increase page count\n c += 1\n\n # get page links, if any \n try:\n pages, pagelinks = self._get_page_links(self.driver)\n page_signature = self._get_page_signature()\n except NoSuchElementException:\n self._log('scraper: could not find data table on page, '\n 'aborting:'\n ' {}'.format(self.city_name))\n return []\n\n # click through pages\n if pages:\n try:\n # click on the integer we want\n i = pages.index(str(c))\n link = pagelinks[i]\n except:\n # if it's not there and the list ends with '...',\n # then click on '...'\n if pages[-1] == '...':\n link = pagelinks[-1]\n # if it's not there and the list starts with '...',\n # then we are done.\n else:\n break\n self._click(link)\n else:\n break\n\n # wait for page to load\n timeout = self._wait_for_table_load(page_signature)\n if timeout == \"timeout\":\n break\n else:\n pass\n\n 
return page_data", "title": "" }, { "docid": "d14b09f736de5e033583d3c309f795d9", "score": "0.5161379", "text": "def scrape():\n URL = \"http://codeforces.com/api/contest.list?gym=false\"\n r = requests.get(URL).content\n contests = json.loads(r)['result']\n data = {}\n try:\n for contest in contests:\n if contest['phase'] != 'FINISHED':\n temp = {}\n temp['name'] = contest['name']\n temp['time'] = home.localize(\n datetime.datetime.fromtimestamp(contest['startTimeSeconds']))\n temp['duration'] = datetime.timedelta(\n seconds=contest['durationSeconds'])\n temp['link'] = 'http://codeforces.com/contests/' + \\\n str(contest['id'])\n data[contest['id']] = temp\n except Exception as e:\n print(\"Something went wrong in codeforces.scrape!\", e)\n return data\n # scrape_contests()", "title": "" }, { "docid": "780908e5eb2ff0e686dfc8e4c629e077", "score": "0.5159798", "text": "def thenumbers_scraper():\n movie_names = []\n release_dates = []\n production_budgets = []\n domestic_grosses = []\n worldwide_grosses = []\n\n start_time = time.time()\n requests = 0\n\n pages = ['/' + str(i) for i in np.arange(101, 6002, 100)] # 61 pages of results\n pages.insert(0, \"\") # insert \"\" in front of list\n\n for page in pages:\n\n # make a get request\n url = 'https://www.the-numbers.com/movie/budgets/all'\n response = get(url + page)\n \n # pause the loop\n time.sleep(random.randint(2,8))\n\n # monitor the requests\n requests += 1\n elapsed_time = time.time() - start_time\n print('Request:{}; Frequency: {} requests/s'.format(requests, requests/elapsed_time))\n clear_output(wait = True)\n\n # throw a warning for non-200 status codes\n if response.status_code != 200:\n warn('Request: {}; Status code: {}'.format(requests, response.status_code))\n\n # break the loop if the number of requests is greater than expected\n if requests > 200:\n warn('Number of requests was greater than expected.')\n break\n\n # parse the content of the request with BeautifulSoup\n soup = BeautifulSoup(response.text, 'lxml') \n movie_table = soup.find('table')\n rows = [row for row in movie_table.find_all('tr')]\n \n for row in rows[1:101]:\n \n items = row.find_all('td')\n\n # scrape the release date\n release_date = items[1].find('a')\n if release_date:\n release_dates.append(release_date.text)\n else:\n release_date = 'no release date'\n release_dates.append(release_date)\n\n # scrape the movie name\n movie_name = items[2].find('a')\n if movie_name:\n movie_names.append(movie_name.text)\n else:\n movie_name = 'no movie name'\n movie_names.append(movie_name)\n\n # scrape the production budget\n production_budget = items[3]\n if production_budget:\n production_budgets.append(production_budget.text)\n else:\n production_budget = 'no budget'\n production_budgets.append(production_budget)\n\n # scrape the domestic gross\n domestic_gross = items[4]\n if domestic_gross:\n domestic_grosses.append(domestic_gross.text)\n else:\n domestic_gross = 'no domestic gross'\n domestic_grosses.append(domestic_gross) \n\n # scrape the worldwide gross\n worldwide_gross = items[5]\n if worldwide_gross:\n worldwide_grosses.append(worldwide_gross.text)\n else:\n worldwide_gross = 'no worldwide gross'\n worldwide_grosses.append(worldwide_gross) \n\n print((time.time()-start_time)/60, \"minutes\")\n\n # store lists into dataframe\n thenumbers_data = pd.DataFrame({'movie': movie_names,\n 'release_date': release_dates,\n 'production_budget': production_budgets,\n 'domestic_gross': domestic_grosses,\n 'worldwide_gross': worldwide_grosses \n })\n\n return 
thenumbers_data", "title": "" }, { "docid": "ed2f0ee1830494ebe23c93a76377750f", "score": "0.5158248", "text": "def geocodeBibcodeList(listname):\n\tBibList=[row[0] for row in openCSVreader(listname)]\n\tLenBibList=len(BibList)\n\tcounter=1\n\tfor bibcode in BibList:\n\t\tgeoQueryContainer(bibcode)\n\t\tstrCounter=str(counter)\n\t\tstrLenBibList=str(LenBibList)\n\t\tprint \"{0} of {1} bibcodes processed.\".format(strCounter, strLenBibList)\n\t\tprint \"\"\n\t\tcounter+=1\n\tprint(\"Finished geocoding. {0} unique affiliations were geocoded. Now de-duplicating affiliations by address.\".format(str(len(ADDRESSES_DICT))))\n\treturn 0", "title": "" }, { "docid": "b3f4e81d193f985182b8fb470a2e20be", "score": "0.5150334", "text": "def collect_scopus_data_for_issn_list(issn_list, af_ids):\n # number of ISSNs packed into one API call\n number_per_call = 100\n\n if issn_list is None:\n return []\n\n # calculate the number of API calls based on the number of ISSNs and the number of calls\n number_of_calls = int(len(issn_list) / number_per_call)\n print('performing {} calls to the scopus API'.format(number_of_calls))\n # prepare empty list of eids\n eids = []\n af_ids_string = ''\n for af_id in af_ids:\n print(af_id)\n if af_ids_string is not '':\n af_ids_string += ' OR '\n af_ids_string += 'AF-ID(' + af_id + ')'\n print(af_ids_string)\n\n # general search string for affiliation ID and ISSN\n search_string = '({afids}) AND ISSN({issns})'\n for n in range(0, number_of_calls):\n # perform he individual API calls\n print('making call {} of {}'.format(n, number_of_calls))\n\n # prepare the search string for the ISSNs\n issn_string = ''.join(issn_list[n * number_per_call:(n + 1) * number_per_call])\n\n print(search_string.format(afids=af_ids_string, issns=issn_string[:-4]))\n\n # perform the scopus search and add the individual eids to the list\n search = ScopusSearch(search_string.format(afids=af_ids_string, issns=issn_string[:-4]))\n eids += search.get_eids()\n return eids", "title": "" }, { "docid": "b22e20a8b6c89b5e3b3fb11330c2546c", "score": "0.5125131", "text": "def scrape_wiki(abs_url, main_url, numpages, delay):\n\n if numpages < 0 or delay < 0:\n raise ValueError(\"numpages or delay cannot be less than zero.\")\n\n if numpages == 0:\n raise ValueError(\"numpages cannot be 0.\")\n\n # Too short of a delay may cause website to block us\n if delay < 1:\n raise ValueError(\"Scrape delay too short; delay must be at least \" \\\n \"one second.\")\n\n # List to contain abbreviations parsed from downloaded pages\n wiki_abbrevs = []\n\n for __ in range(numpages):\n\n # Fetch web page; check whether successful\n print(f\"Fetching {abs_url}...\")\n\n try:\n response = requests.get(abs_url)\n\n # No need to catch specific exceptions such as ConnectionError\n # or TimeoutError for this simple script: just catch the base class\n except requests.exceptions.RequestException as err:\n print(f\"\\nERROR:\\n{err}\", file=sys.stderr)\n sys.exit(\"\\nCheck that you are connected to the internet \" \\\n \"and that URL is correct.\\n\")\n\n # See whether we retrieved the page successfully or not\n stat_code = response.status_code\n\n if not _is200(stat_code):\n print(f\"Status code {stat_code} unexpected: exepecting 200.\",\n file=sys.stderr)\n sys.exit(\"Quitting script.\")\n\n print(f\"{response} => GET successful! 
Page retrieved.\")\n\n try:\n\n print(\"Parsing page for abbreviations...\", end=\"\")\n\n # Create BS object that will allow you to easily parse the webpage\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Get section on page that has all the abbreviations\n div = soup.find(id=\"mw-pages\")\n\n # Zero-in some more to get all the li tags in the div. Each li\n # contains a hperlink which in turns contains an abbreviation\n li_tags = div.findAll('li')\n\n # Collect the text from each hyperlink, i.e. the abbreviation\n for li_tag in li_tags:\n wiki_abbrevs.append(li_tag.a.string)\n\n # Get the hyperlink to the next page we want to download\n hyperlink = div.find('a', text='next page')\n\n # Get relative URL to the next page with abbreviations\n # Caution: program assumes only 21 pages need to be fetched,\n # but this could be changed at any time:\n # If program hits the last page, there will be no next page\n # hyperlink; the following should prevent any unwanted crashes\n # in such a case.\n if not hyperlink:\n break\n\n # Build the URL of the next page to be scraped\n rel_url = hyperlink['href']\n abs_url = main_url + rel_url\n\n # If we scrape site too quickly, it may block us\n time.sleep(delay)\n\n print(\"DONE!\")\n\n except AttributeError as err:\n # In case we get a page we can scrape but doesn't have the tags\n # we need to process (ie we'll be returned a None somewhere)\n print(\"AttributeError: {0}\".format(err), file=sys.stderr)\n sys.exit()\n\n return wiki_abbrevs", "title": "" }, { "docid": "92afe340cfbb1b27603400f6bc68cd36", "score": "0.5121955", "text": "def scrape(soup):\n search = soup.find_all(\"div\", {\"class\": \"jobsearch-SerpJobCard unifiedRow row result\"})\n print(\"Number of jobs on page: \", len(search))\n roleTitle = []\n company = []\n fullLocation = []\n locationState = []\n locationCity = []\n locationZipCode = []\n locationArea = []\n salary = []\n minSalary = []\n maxSalary = []\n salaryTimeUnits = []\n shortDesc = []\n expressApply = []\n sponsor = []\n indeedURL = []\n expLevel = []\n jobType = []\n responsiveEmployer = []\n jobID = []\n\n if search is None:\n return # might need to return all the lists\n else:\n for job in search:\n roleTitle.append(getRoleTitle(job))\n company.append(getCompany(job))\n locationState.append(getLocationState(job))\n locationCity.append(getLocationCity(job))\n locationZipCode.append(getLocationZipCode(job))\n locationArea.append(getLocationArea(getFullLocation(job)))\n minSalary.append(getMinimumSalary(getSalary(job)))\n maxSalary.append(getMaximumSalary(getSalary(job)))\n salaryTimeUnits.append(getSalaryTimeUnits(getSalary(job)))\n shortDesc.append(getShortDescription(job))\n expressApply.append(getHasExpressApply(job))\n sponsor.append(getIsSponsored(job))\n indeedURL.append(getIndeedJobListingURL(job))\n expLevel.append(getExperienceLevel(URL)) # uses url\n jobType.append(getJobType(URL)) # uses url\n responsiveEmployer.append(getResponsiveEmployer(job))\n jobID.append(getJobID(job))\n\n return roleTitle, company, fullLocation, locationState, \\\n locationCity, locationZipCode, locationArea, \\\n salary, minSalary, maxSalary, salaryTimeUnits, \\\n shortDesc, expressApply, sponsor, indeedURL, \\\n expLevel, jobType, responsiveEmployer, jobID", "title": "" }, { "docid": "29381432763471185f9b4ad876e5b6a4", "score": "0.51174444", "text": "def fetch_events_classical_studies(base_url='https://www.classics.upenn.edu'):\n events = []\n html_page = requests.get(urljoin(base_url, '/events'))\n page_soup = 
BeautifulSoup(html_page.content, 'html.parser')\n events_list = page_soup.find('div', attrs={'class': 'item-list'})\n\n for event in events_list.find_all('li'):\n title = event.find('h3')\n event_url = title.find('a')['href']\n event_url = urljoin(base_url, event_url)\n title = title.text.strip() if title is not None else ''\n date = event.find('span', attrs={'class': 'date-display-single'})\n date = date.get_text() if date is not None else ''\n starttime, endtime = find_startend_time(date)\n location = event.find('span', attrs={'class': 'event-location'})\n location = location.text.strip() if location is not None else ''\n\n event_page = requests.get(event_url)\n event_soup = BeautifulSoup(event_page.content, 'html.parser')\n if event_soup is not None:\n description = event_soup.find('div', attrs={\n 'class': 'field-type-text-with-summary'})\n description = description.get_text().strip() if description is not None else ''\n start_time = event_soup.find(\n 'span', attrs={'class': 'date-display-start'})\n end_time = event_soup.find(\n 'span', attrs={'class': 'date-display-end'})\n\n if start_time is not None and end_time is not None:\n starttime, endtime = start_time.text.strip(), end_time.text.strip()\n starttime = starttime.split(' - ')[-1]\n endtime = endtime.split(' - ')[-1]\n\n events.append({\n 'title': title,\n 'speaker': '',\n 'date': date,\n 'location': location,\n 'description': description,\n 'starttime': starttime,\n 'endtime': endtime,\n 'url': event_url,\n 'owner': 'Department of Classical Studies'\n })\n return events", "title": "" }, { "docid": "b61b1d1e7aca619cac144a929fd49de2", "score": "0.51075345", "text": "def scrape_each_whisky(get_all_whiskys_data: Dict[str, str], headers: Dict[str, str]) -> Dict[str, str]:\n\n # Initialise empty dicts for return\n scraped_data: Dict[str, str] = {}\n errors: Dict[str, str] = {}\n\n count: int = 1\n total: int = len(get_all_whiskys_data)\n\n # Begin loop over passed data\n for whisky in get_all_whiskys_data:\n if whisky in scraped_data:\n print(f\"{whisky} already exists\")\n else:\n try:\n print(f\"[{count}/{total}] - Scraping {whisky} info...\")\n req = requests.get(\n f\"{get_all_whiskys_data[whisky]}\", headers)\n soup: BeautifulSoup = BeautifulSoup(req.content, 'html.parser')\n\n title: str = soup.find(\n \"h1\", id='ContentPlaceHolder1_pageH1').get_text()\n\n initial_image = soup.find(\"div\", id='imgProductBigDiv').find(\n \"div\", class_='productImageWrap').find(\"img\").get(\"src\")\n\n image: str = \"\".join(initial_image[2:])\n\n # Attempt to get Varietal (Country)\n varietal: str = detailed_data(soup, \"Country\")\n\n # Attempt to get Region\n region: str = detailed_data(soup, \"Region\")\n\n # Attempt to get Brand\n brand: str = detailed_data(soup, \"Distillery\")\n\n # Attempt to get Age\n age: str = detailed_data(soup, \"YearsMatured\")\n\n # Attempt to get Style\n style: str = detailed_data(soup, \"Style\")\n\n # Attempt to get Alcohol Percentage\n alcohol_percentage: str = detailed_data(soup, \"Alcohol\")\n\n scraped_data[title] = {\n \"Country\": \"\",\n \"Image\": image,\n \"Varietal\": varietal,\n \"Region\": region,\n \"Whisky Style\": style,\n \"Brand Name\": brand,\n \"Name\": title,\n \"Age\": age,\n \"Alcohol Volume (%)\": alcohol_percentage,\n \"Price ($ per bottle)\": \"\",\n \"Peated (Y/N)\": \"\",\n \"Rating ( /10)\": \"\"}\n\n # print(data)\n print(f\"Scraped {whisky}!\")\n except AttributeError:\n print(f\"Error on: {whisky}\")\n errors[whisky] = get_all_whiskys_data[whisky]\n continue\n\n count += 1\n\n 
return {\"scraped_data\": scraped_data, \"errors\": errors}", "title": "" }, { "docid": "45c6600d6fe08b70a3d136bdde5d1b59", "score": "0.5105666", "text": "def fetch_events_classical_studies(base_url='https://www.classics.upenn.edu'):\n events = []\n html_page = requests.get(urljoin(base_url, '/events'))\n page_soup = BeautifulSoup(html_page.content, 'html.parser')\n events_list = page_soup.find('div', attrs={'class': 'item-list'})\n\n for event_url in events_list.find_all('a'):\n event_url = urljoin(base_url, event_url['href'])\n event_page = requests.get(event_url)\n event_soup = BeautifulSoup(event_page.content, 'html.parser')\n if event_soup is not None:\n title = event_soup.find('h1', attrs={'class': 'page-header'})\n title = title.text if title is not None else ''\n date = event_soup.find(\n 'span', attrs={'class': 'date-display-single'})\n date = date.text if date is not None else ''\n if event_soup.find('p', attrs={'class': 'MsoNormal'}) is not None:\n location = event_soup.find(\n 'p', attrs={'class': 'MsoNormal'}).text\n elif event_soup.find('p').text is not None:\n location = event_soup.find('p').text\n else:\n location = ''\n description = event_soup.find('div', attrs={\n 'class': 'field field-name-body field-type-text-with-summary field-label-hidden'})\n if description is not None:\n description = description.text\n else:\n description = ''\n try:\n event_time = event_soup.find(\n 'span', attrs={'class': 'date-display-single'}).text.strip()\n starttime, endtime = find_startend_time(event_time)\n except:\n starttime, endtime = '', ''\n\n events.append({\n 'title': title,\n 'date': date,\n 'location': location,\n 'description': description,\n 'url': event_url,\n 'starttime': starttime,\n 'endtime': endtime,\n 'owner': 'Department of Classical Studies'\n })\n return events", "title": "" }, { "docid": "349c74fc3d3f096a9964908c8826fa5d", "score": "0.5090672", "text": "def scrape(url):\n #create BeautifulSoup object from html file\n soup = make_soup(url)\n\n ############################SECTION: SCRAPE INFO############################\n #scrape person info under <section id=\"individual-info\" role=\"region\">\n info = soup.find(\"section\", id=\"individual-info\")\n #pull name and main title from webpage\n name_title = info.find(\"h2\")\n #get name by pulling text then strip whitespace\n name = ''.join(pull_only_text(name_title)).strip()\n #pull title from <div>\n title = name_title.find(\"div\", class_=\"display-title\").string\n #pull professor's department (easy lookup with display)\n department = info.find(\"a\", title=\"organization name\").string\n\n ########################SECTION: SCRAPE CONTACT INFO########################\n #pull all listed contact info from webpage\n contact_info = info.find(\"ul\", class_=\"contact_list\")\n #get the span containing phone number\n phone_span = contact_info.find(\"span\", class_=\"phone\")\n #declare empty phone variable\n phone = \"\"\n #if phone != None/ phone number exists\n #TODO: optimize this process by defaulting phone = None?????????????\n if phone_span != None:\n #pull phone text from nest\n phone = ''.join(pull_only_text(phone_span)).strip()\n #otherwise set phone to None\n else:\n phone = '-'\n #get a element containing email info\n email_a = contact_info.find(\"a\", title=\"email\")\n #declare empty variable for email\n email = \"\"\n #if email exists\n if email_a != None:\n #pull email text from nest\n email = ''.join(pull_only_text(email_a)).strip()\n #otherwise set email to None\n else:\n email = '-'\n #pull website 
contained in li\n website_li = contact_info.find(\"li\", class_=\"webpages\")\n #placeholder for website\n website = \"\"\n #check if it exists\n if website_li != None:\n #find the website\n website = website_li.find(\"a\").get(\"href\")\n #if it doesn't exist\n else:\n #set website to none\n website = '-'\n\n #########################SECTION: RETURN DF OBJECT##########################\n return pd.DataFrame({'name':name, 'title':title, 'department':department,\n 'phone':phone, 'email':email, 'website':website}, index=[0])", "title": "" }, { "docid": "8f84726830ae7f708c576bc0a516bff1", "score": "0.50712633", "text": "def tracts(request):\n county_fips = request.GET.get('county_fips', '')\n state_fips = request.GET.get('state_fips', '')\n\n tracts = StateCensusTract.objects.filter(\n countyfp=county_fips, statefp=state_fips).order_by('geoid')\n tracts = _paged(tracts, request)\n\n return HttpResponse(\n GeoJSONSerializer().serialize(tracts, use_natural_keys=True),\n content_type='application/json')", "title": "" }, { "docid": "14396764b9e159635a1bded0b96c6000", "score": "0.50669175", "text": "def scrapeCampus(url, dicts=0):\n\tmeals = ('Breakfast', 'Lunch', 'Dinner', 'Knight Room')\n\tif dicts:\n\t\treturn {meal: scrapeMeal(url + \"&mealName=\" + meal.replace(' ', '+')) for meal in meals}\n\telse:\n\t\treturn [{\n\t\t\t\"meal_name\" : meal,\n\t\t\t\"genres\" : scrapeMeal(url + \"&mealName=\" + meal.replace(' ', '+'), dicts=0)\n\t\t} for meal in meals]", "title": "" }, { "docid": "3726b9f0a9c975fc86b2cd944cc5224d", "score": "0.50613064", "text": "def getInfo():\n global docs\n with open('/Users/madisonthompson/Downloads/WEBPAGES_RAW/bookkeeping.json') as json_data:\n d = json.load(json_data)\n for urlfile in d:\n folder_num, file_num = urlfile.split('/')\n file_path = '/Users/madisonthompson/Downloads/WEBPAGES_RAW/'+ folder_num +'/' + file_num\n url = \"http://\" + d[urlfile]\n \n if is_valid(url) == True:\n docs += 1\n ## docIDs[urlfile] = url\n with open(file_path) as content:\n x = content.read()\n ##create beuatiful soup class object and parse\n soup = BeautifulSoup(x,\"html.parser\")\n ##return unicode text for html doc\n words= soup.get_text()\n ##tokenize the words\n tokens = nltk.word_tokenize(words)\n \n for token in tokens:\n if len(token) < 25:\n if token not in index and urlfile not in index[token]:\n index[token][urlfile] = 1\n if token in index and urlfile not in index[token]:\n index[token][urlfile] = 1\n else:\n index[token][urlfile] += 1\n print docs\n return index", "title": "" }, { "docid": "3206e7894c098bd342cd6740d021d8d4", "score": "0.5048306", "text": "def scrap_worldometers(country):\n # Sending a HTTP GET request to a URL and fetch raw HTML text\n html_text = requests.get(url).text\n\n # Parse the html content\n soup = BeautifulSoup(html_text, 'html.parser')\n\n # Get the table having the id main_table_countries_today\n table = soup.find('table', attrs={'id': 'main_table_countries_today'})\n\n # Get the table headers\n table_headers = table.find('thead').select('th')\n\n # keeping all the header columns in a list\n headers_list = []\n for h in table_headers:\n headers_list.append(h.text)\n\n # to hold countries for which data to be retrieved\n countries = []\n # if single and string add it to countries list\n if not isinstance(country, list):\n countries.append(country)\n else:\n # add list of countries\n countries.extend(country)\n\n # to hold final countries information\n country_data = []\n\n # Handle multiple countries\n for c in countries:\n # getting the row for 
specific country\n if soup.find(lambda t: t.text.strip().lower() == str(c).lower()) is not None:\n country_row_data = soup.find(lambda t: t.text.strip().lower() == str(c).lower()).parent.select('td')\n\n values = []\n for row in country_row_data:\n values.append(row.text)\n\n # create list of dictionary contains each country covid information\n country_data.append(filter_and_process(dict(zip(headers_list, values))))\n # No country matches with the passed country name\n else:\n country_data.append({c: 'No Data available for this country'})\n return country_data", "title": "" }, { "docid": "ec26d83fd412fadc5d1ed802a2ca13fc", "score": "0.50383437", "text": "def get_districts(request_header):\n states = requests.get(\"https://cdn-api.co-vin.in/api/v2/admin/location/states\", headers=request_header)\n\n if states.status_code == 200:\n states = states.json()[\"states\"]\n\n refined_states = []\n for state in states:\n tmp = {\"state\": state[\"state_name\"]}\n refined_states.append(tmp)\n\n display_table(refined_states)\n state = int(input(\"\\nEnter State index: \"))\n state_id = states[state - 1][\"state_id\"]\n\n districts = requests.get(f\"https://cdn-api.co-vin.in/api/v2/admin/location/districts/{state_id}\",\n headers=request_header)\n\n if districts.status_code == 200:\n districts = districts.json()[\"districts\"]\n\n refined_districts = []\n for district in districts:\n tmp = {\"district\": district[\"district_name\"]}\n refined_districts.append(tmp)\n\n display_table(refined_districts)\n reqd_districts = input(\"\\nEnter comma separated index numbers of districts to monitor : \")\n districts_idx = [int(idx) - 1 for idx in reqd_districts.split(\",\")]\n reqd_districts = [\n {\n \"district_id\": item[\"district_id\"],\n \"district_name\": item[\"district_name\"],\n \"alert_freq\": 440 + ((2 * idx) * 110),\n }\n for idx, item in enumerate(districts)\n if idx in districts_idx\n ]\n print(f\"Selected districts: \")\n display_table(reqd_districts)\n return reqd_districts\n else:\n print(\"Unable to fetch districts\")\n print(districts.status_code)\n print(districts.text)\n os.system(\"pause\")\n sys.exit(1)\n else:\n print(\"Unable to fetch states\")\n print(states.status_code)\n print(states.text)\n os.system(\"pause\")\n sys.exit(1)", "title": "" }, { "docid": "3e99e9650e851ba0c59150dfbe918ee5", "score": "0.50277036", "text": "def get_citations(**args):\n # create the queues\n tasks = JoinableQueue()\n results = JoinableQueue()\n # how many threads are there to be used\n if 'threads' in args:\n threads = args['threads']\n else:\n threads = cpu_count()\n # initialize the \"harvesters\" (each harvester get the citations for a bibcode)\n harvesters = [ CitationHarvester(tasks, results) for i in range(threads)]\n # start the harvesters\n for b in harvesters:\n b.start()\n # put the bibcodes in the tasks queue\n for bib in args['bibcodes']:\n tasks.put(bib)\n # add some 'None' values at the end of the tasks list, to faciliate proper closure\n for i in range(threads):\n tasks.put(None)\n\n tasks.join()\n for b in harvesters:\n b.join()\n\n return [item for sublist in cit_dict.values() for item in sublist]", "title": "" }, { "docid": "0668dc73981ccecb81f5fb4f227d00cd", "score": "0.50181735", "text": "def get_schools(county, year, grade):\r\n url = \"https://app.azdhs.gov/IDRReportStats/Home/GetSchoolTable?{0}\"\r\n query = {\r\n 'bRegex': 'false',\r\n 'bRegex_0': 'false',\r\n 'bRegex_1': 'false',\r\n 'bRegex_2': 'false',\r\n 'bRegex_3': 'false',\r\n 'bRegex_4': 'false',\r\n 'bRegex_5': 
'false',\r\n 'bRegex_6': 'false',\r\n 'bRegex_7': 'false',\r\n 'bRegex_8': 'false',\r\n 'bSearchable_0': 'false',\r\n 'bSearchable_1': 'true',\r\n 'bSearchable_2': 'false',\r\n 'bSearchable_3': 'false',\r\n 'bSearchable_4': 'false',\r\n 'bSearchable_5': 'false',\r\n 'bSearchable_6': 'true',\r\n 'bSearchable_7': 'true',\r\n 'bSearchable_8': 'false',\r\n 'iColumns': '9',\r\n 'iDisplayLength': '2000',\r\n 'iDisplayStart': '0',\r\n 'mDataProp_0': 'SCHOOL_YEAR',\r\n 'mDataProp_1': 'SCHOOL_NAME',\r\n 'mDataProp_2': 'SCHOOL_TYPE',\r\n 'mDataProp_3': 'SCHOOL_GRADE',\r\n 'mDataProp_4': 'ENROLLED',\r\n 'mDataProp_5': 'ADDRESS',\r\n 'mDataProp_6': 'CITY',\r\n 'mDataProp_7': 'ZIP',\r\n 'mDataProp_8': 'COUNTY',\r\n 'sColumns': ',,,,,,,,',\r\n 'sEcho': '1',\r\n 'selectedCounty': county,\r\n 'selectedGrade': grade,\r\n 'selectedYear': year,\r\n }\r\n command = ['curl', url.format(urllib.parse.urlencode(query))]\r\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\r\n schools = json.loads(proc.communicate()[0].decode())['aaData']\r\n\r\n return schools", "title": "" }, { "docid": "39cb8b20853fa4fecc9358b3c2bb4a23", "score": "0.5014434", "text": "def process_request(reqs, num_candidates=10):\n requests = []\n\n # create initial set of candidates\n for r in reqs:\n requests.extend(r.expand())\n\n for r in requests:\n process_special_properties(r)\n\n print('Received %d NEAT requests' % len(requests))\n for r in requests:\n logging.debug(str(r))\n\n pre_resolve = False\n for r in requests:\n try:\n if r['__request_type'].value == 'pre-resolve':\n pre_resolve = True\n except KeyError:\n pass\n\n if pre_resolve:\n # remove property from request\n del r['__request_type']\n\n if pre_resolve:\n logging.warning(\"skipping pre_resolve PM lookup\")\n return requests\n\n candidates = []\n\n # main lookup sequence\n # --------------------\n for i, request in enumerate(requests):\n print(policy.term_separator(\"processing request %d/%d\" % (i + 1, len(requests)), offset=0, line_char='─'))\n print(\"%s\" % request)\n\n print('Profile lookup...')\n updated_requests = profiles.lookup(request, tag='(profile)')\n\n print(' Profile lookup returned %d candidates:' % len(updated_requests))\n for ur in updated_requests:\n logging.debug(\"updated request %s\" % (ur))\n\n cib_candidates = []\n print('CIB lookup...')\n for ur in updated_requests:\n for c in cib.lookup(ur):\n if c in cib_candidates: continue\n cib_candidates.append(c)\n\n cib_candidates.sort(key=attrgetter('score'), reverse=True)\n print(' CIB lookup returned %d candidates:' % len(cib_candidates))\n for c in cib_candidates:\n logging.debug(' %s %.1f %.1f' % (c, *c.score))\n\n print('PIB lookup...')\n for j, candidate in enumerate(cib_candidates):\n cand_id = 'on CIB candidate %s' % (j + 1)\n for c in pib.lookup(candidate, tag=cand_id):\n if c in candidates: continue\n candidates.append(c)\n print(' Policy lookup returned %d candidates:' % len(candidates))\n\n # post process candidates\n\n # Each candidate must only contain a single transport protocol. 
Expand sets to individual candidates.\n tmp_list = []\n for candidate in candidates:\n transport = candidate.get(\"transport\").value\n if isinstance(transport, set):\n for t in transport:\n c = deepcopy(candidate)\n c[\"transport\"].value = t\n tmp_list.append(c)\n else:\n tmp_list.append(candidate)\n candidates = tmp_list\n\n # TODO handle 'to_controller' property, wait for controller response\n candidates.sort(key=attrgetter('score'), reverse=True)\n top_candidates = candidates[:num_candidates]\n\n for candidate in top_candidates:\n cleanup_special_properties(candidate)\n\n print(policy.term_separator(line_char='─'))\n print(\"%d candidates generated\" % (len(candidates)))\n print(policy.term_separator('Top %d' % num_candidates))\n for candidate in top_candidates:\n print(candidate, 'score: %d|%d' % candidate.score)\n\n # TODO check if candidates contain the minimum src/dst/transport tuple\n print(policy.term_separator())\n return top_candidates", "title": "" }, { "docid": "65133d30b5568b6903052e0fed24a5a3", "score": "0.50083476", "text": "def find_jobs_from(website, job_title, location, limit, desired_characs, filename=\"results.xls\"):\n \n if website == 'Indeed':\n url, page_final = urls_indeed_pages(job_title, location, limit)\n\n jobs_list_final = {}\n n_page = 0\n num_listings_final = 0\n\n while n_page < page_final:\n start = limit * n_page\n\n url_page = str(url)+'&start='+str(start)\n print(\"Working on page: \",n_page,\" with URL: \", url_page)\n\n job_soup = load_indeed_jobs_div(url_page)\n jobs_list, num_listings = extract_job_information_indeed(url_page, desired_characs, n_page)\n\n df2 = pd.DataFrame(jobs_list)\n print(df2.head())\n\n if n_page == 0:\n jobs_df = df2\n else:\n jobs_df = pd.concat([jobs_df, df2], ignore_index=True)\n\n print(jobs_df.head())\n num_listings_final += num_listings\n n_page += 1\n\n jobs_df.to_excel(filename)\n #save_jobs_to_excel(jobs_df, filename)\n \n print('{} new job postings retrieved from {}. 
Stored in {}.'.format(num_listings_final, \n website, filename))", "title": "" }, { "docid": "88af1fbe8dd68550e5d0e8e8a92e138e", "score": "0.50001734", "text": "def scrape(dicts=0):\n\tprefix = URL_PREFIX + \"pickmenu.asp?locationNum=0\"\n\t# There doesn't seem to be a hall #2\n\thalls = (('Brower Commons', '1'), ('Livingston Dining Commons', '3'),\n\t ('Busch Dining Hall', '4'), ('Neilson Dining Hall', '5'))\n\tif dicts:\n\t\treturn {hall[0]: scrapeCampus(prefix + hall[1]) for hall in halls}\n\telse:\n\t\treturn [{\n\t\t\t\"location_name\" : hall[0],\n\t\t\t\"meals\" : scrapeCampus(prefix + hall[1], dicts=0)\n\t\t} for hall in halls]", "title": "" }, { "docid": "878d2f91180b8c191e7ed3c97c48cd00", "score": "0.49964505", "text": "def web_scraper(self, table, countries_fetch_list):\n txt = self.html(self.url)\n country_history = {}\n\n try:\n # Fetching global data and printing to console\n if table == 'all' or table == 'world':\n # global corona info:\n self.world = self.parsing_main_data(txt)\n print(self.world)\n\n # Fetching countries data\n if table == 'all' or table == 'countries':\n self.countries = self.parsing_country_page(txt)\n\n # Printing countries data to console\n for country_dict in self.countries:\n # each country corona info\n print(country_dict)\n\n # the list of countries\n country_list = [country_dict['country'] for country_dict in self.countries]\n print(len(country_list), 'countries:', country_list)\n\n if table == 'all' or table == 'history':\n # get link for each country webpage:\n country_link_dict = self.get_countries_links(txt)\n if not countries_fetch_list:\n countries_fetch_list = country_list\n\n # for each country fetch its history\n for country in countries_fetch_list:\n if country in COUNTRIES_NAMES_TO_CODES.keys():\n txt = self.html(self.url + country_link_dict[country])\n country_history[country] = self.parsing_country_history(txt)\n print(f'{country}\\n{country_history[country]}')\n self.history = country_history\n\n except Exception as ex:\n self.logger.error(f'{ERR_MSG_FETCH}')\n raise ValueError(ERR_MSG_FETCH)\n return True", "title": "" }, { "docid": "9a090b3d5a859c9882ead03e1a547b38", "score": "0.49937212", "text": "def fetch_and_clean_tables_from_wikipedia():\n\tgini_url = \"https://en.wikipedia.org/wiki/List_of_U.S._states_by_Gini_coefficient\"\n\tpov_url = \"https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_poverty_rate\"\n\turb_url = \"https://en.wikipedia.org/wiki/Urbanization_in_the_United_States\"\n\tclimate_url = \"\" ####\n\n\turb_state_mapping = lambda x: x[:x.find('[')]\n\n\t#First we grab the dirty tables\n\n\tgini = pd.read_html(gini_url)\n\tgini = gini[2] # this gets correct table from wikipedia page\n\n\tpov = pd.read_html(pov_url)\n\tpov = pov[2]\n\n\turb = pd.read_html(urb_url)\n\turb = urb[-1]\n\turb = urb.droplevel(level= 0, axis = 1) #clean the unecessary multindex\n\n\t# climate = pd.read_html(climate_url) #TODO\n\t# data sourcing of climate not straightforward like others\n\n\t#Then we clean the tables such that the output is directly usable\n\n\tgini.columns = gini.columns.str.replace(' ', '_')\n\tpov.columns = pov.columns.str.replace(' ', '_')\n\turb.columns = urb.columns.str.replace(' ', '_')\n\n\n\tgini = gini.rename(columns={\n\t\t'State_or_federal_district': 'state',\n\t\t'Gini_Coefficient': 'gini_coef'\n\t})\n\tgini.drop(['Rank'], axis=1, inplace=True)\n\tgini.set_index('state', inplace=True)\n\tgini.columns = gini.columns.str.lower()\n\n\tpov = pov.rename(columns={\n\t\t'State': 
'state',\n\t\t'2019_Poverty_rate(percent_of_persons_in_poverty)[note_2][7]': 'pov_2019',\n\t\t'2014_Poverty_Rates_(includes_unrelated_children)': 'pov_2014'\n\t\t})\n\tpov.drop(['Rank', 'Supplemental_Poverty_Measure_(2017–2019_average)_(Geographically_Adjusted)'], axis=1, inplace=True)\n\tpov.set_index('state', inplace = True)\n\tpov.columns = pov.columns.str.lower()\n\n\n\turb = urb.rename(columns={'State/Territory': 'state',\n\t\t'2010': 'urb_2010',\n\t\t'2000': 'urb_2000' })\n\turb = urb[['state', 'urb_2010', 'urb_2000']].copy()\n\turb['state'] = urb['state'].apply(urb_state_mapping)\n\turb.set_index('state', inplace=True)\n\turb.columns = urb.columns.str.lower()\n\n\t#join them all\n\tmacro_df = gini.merge(pov, 'inner', 'state').merge(urb, 'inner', 'state')\n\treturn macro_df.dropna()", "title": "" }, { "docid": "7bae4e7ba3162f15792b42785c06939f", "score": "0.4990565", "text": "def get_rooms(classes, curs):\n\n for dept, num, name in classes:\n payload = {'p_term': 'FL', 'p_dept': dept, 'p_course': num, 'p_title': name, 'p_print_flag': 'N', 'p_list_all': 'N'}\n r = requests.post(\"http://osoc.berkeley.edu/OSOC/osoc\", params=payload)\n soup = BeautifulSoup(r.text, \"html.parser\")\n tables = soup.find_all('table')\n del tables[0] #remove header and footer\n del tables[-1]\n\n for table in tables:\n elems = table.find_all('td')\n location = elems[6].string\n if not location in CANCELLED:\n process_location(location, curs)", "title": "" }, { "docid": "2069b64f8a083f71911f0d929dccc6e9", "score": "0.49894997", "text": "def gsc_user_detail_works(soup: BeautifulSoup, user: User,research_rec) -> int:\r\n\r\n #print( user['citations_all'])\r\n if soup.find('tbody', id='gsc_a_b'):\r\n works_soup = soup.find_all('tr', class_='gsc_a_tr')\r\n\r\n\r\n researc_counter = 0\r\n #research_rec = 1\r\n print(len(works_soup))\r\n for work in works_soup:\r\n\r\n # Batch processing: Start to parse in the work (position) specified\r\n #if start_in_work is not None and record < start_in_work:\r\n #record += 1\r\n #continue\r\n\r\n \"\"\"\r\n w = Work()\r\n w['user_name'] = user['name']\r\n w['gsc_title'] = sub(r\"\\s\", ' ', sub(r\"\\s+\", ' ', work.find(class_='gsc_a_t').a.text)).strip()\r\n\r\n href = quote_plus(work.find(class_='gsc_a_t').a['data-href'].replace(\"&pagesize=100\", \"\"))\r\n w['url'] = f\"{user['page'].url}#d=gs_md_cita-d&p=&u={href}%26tzom%3D360\"\r\n\r\n extra_data = work.find_all(class_=\"gs_gray\")\r\n w['authors'] = extra_data[0].string\r\n\r\n try:\r\n w['citations_count'] = int(work.find(class_='gsc_a_c').a.string)\r\n except Exception:\r\n w['citations_count'] = 0\r\n\r\n try:\r\n citations_url = (work.find(class_='gsc_a_c').a['href']).strip()\r\n w['citations_url'] = citations_url if citations_url else None\r\n except Exception:\r\n w['citations_url'] = None\r\n\r\n try:\r\n w['id'] = search(r\"cites=(.*)$\", w['citations_url']).group(1)\r\n except Exception:\r\n w['id'] = None\r\n\r\n try:\r\n # TODO: Check if this condition works\r\n w['gsc_publication'] = extra_data[1].text if not extra_data[1].text else None\r\n except Exception:\r\n w['gsc_publication'] = None\r\n\r\n try:\r\n w['year'] = work.find(class_='gsc_a_y').span.string\r\n except Exception:\r\n w['year'] = None\r\n #gsc_work_details(work_details_request(browser, w['gsc_title']), w)\r\n\r\n #if config.crossref:\r\n #crf_work_details(w, user)\r\n print(int(research_rec))\r\n print(user[\"id\"])\r\n print(w['user_name'])\r\n print(w['gsc_title'])\r\n print(w['url'])\r\n print( w['authors'])\r\n print( 
w['citations_count'])\r\n print(w['citations_url'])\r\n print(w['id'])\r\n print(w['gsc_publication'])\r\n print(w['year'])\r\n #gsc_work_wos_citations(browser, w)\r\n\r\n # Printing and saving to file\r\n #print(f\"In work: {record} >>> {w.as_csv()}\\n\")\r\n #file.write((w.as_csv() + \"\\n\").encode())\r\n if w['year']!=None:\r\n sql = \"INSERT INTO research (research_id, acadmic_id,acadmic_name,cited_by,research_year) VALUES (%s,%s,%s,%s,%s)\"\r\n val = (int(research_rec),int(user[\"id\"]),str(w['gsc_title']),int(w['citations_count']),int(w['year']))\r\n else:\r\n sql = \"INSERT INTO research (research_id, acadmic_id,acadmic_name,cited_by) VALUES (%s,%s,%s,%s)\"\r\n val = (int(research_rec), int(user[\"id\"]), str(w['gsc_title']), int(w['citations_count']))\r\n cursor.execute(sql, val)\r\n connection.commit()\r\n \"\"\"\r\n researc_counter += 1\r\n research_rec += 1\r\n user['researc_n'] = researc_counter\r\n return research_rec", "title": "" }, { "docid": "3092183ca134cc9bb93bb50ced08c6c2", "score": "0.49848667", "text": "def get_state(state_init, state_name):\r\n state_name_link = re.sub(' ', '%20', state_name)\r\n url = (\r\n 'https://www.internationalstudent.com/schools_awarding_aid/'\r\n +state_init\r\n +'/'\r\n +state_name_link\r\n +'.html'\r\n )\r\n driver.get(url)\r\n html = driver.page_source\r\n soup = BeautifulSoup(html, 'lxml') \r\n table = soup.find('table')\r\n lines = table.find_all('tr')\r\n entries = []\r\n if len(lines) > 2:\r\n for line in lines[1:]:\r\n entries.append(get_entry(line))\r\n elif len(lines) == 2:\r\n if lines[1].text == 'No results found.':\r\n return(entries)\r\n else: # it means there is one school\r\n entries.append(get_entry(lines[1]))\r\n else:\r\n print('something went wrong', state_name)\r\n return(entries)", "title": "" }, { "docid": "5b0ec730ac2022eaad1c92190bd922ad", "score": "0.49762627", "text": "def crawl(browser, username, infile, outfile):\r\n\r\n # first check and read the input file\r\n all_names = collect_names(infile)\r\n\r\n fieldnames = ['Search name', 'Name', 'URL']\r\n # then check we can write the output file\r\n # we don't want to complete process and show error about not\r\n # able to write outputs\r\n with open(outfile, 'w', newline='') as csvfile:\r\n # just write headers now\r\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\r\n writer.writeheader()\r\n\r\n\r\n # now open the browser\r\n with WebBus(browser) as bus:\r\n\r\n bus.driver.get(LINKEDIN_URL)\r\n\r\n login_into_linkedin(bus.driver, username)\r\n time.sleep(random.uniform(30, 60))\r\n\r\n for name in all_names:\r\n links = []\r\n nametexts = []\r\n try:\r\n search_input = bus.driver.find_element_by_css_selector('.ember-view input')\r\n print('Found search box')\r\n time.sleep(random.uniform(2, 5))\r\n except NoSuchElementException:\r\n print('NoSuchElementException search_input')\r\n continue\r\n search_input.clear()\r\n search_input.send_keys(name)\r\n print('Input name: ', name)\r\n time.sleep(random.uniform(2, 5))\r\n try:\r\n bus.driver.find_element_by_css_selector('.search-typeahead-v2__button').click()\r\n print('Clicked search')\r\n time.sleep(random.uniform(5, 10))\r\n except NoSuchElementException:\r\n print('Click search button fails')\r\n\r\n profiles = []\r\n\r\n # collect the profile links - later I'll iterate through the experience to decide which is the right one\r\n results = None\r\n print('Current URL: ', bus.driver.current_url)\r\n \r\n try:\r\n links = bus.driver.find_elements_by_css_selector(\".search-result__info 
.search-result__result-link\")\r\n except NoSuchElementException:\r\n print('Links failed', NoSuchElementException)\r\n\r\n links = [link.get_attribute('href') for link in links]\r\n print('Links:', links)\r\n if links != []:\r\n i = 0\r\n try:\r\n nametexts = bus.driver.find_elements_by_css_selector(\"span.name.actor-name\")\r\n nametexts = [nametext.text for nametext in nametexts]\r\n except NoSuchElementException:\r\n\r\n print('Name texts failed', NoSuchElementException)\r\n while len(links)>len(nametexts):\r\n nametexts.append(\"No name found\")\r\n print('Appended name')\r\n \r\n print('Name texts:', nametexts[i])\r\n with open(outfile, 'a+', newline='') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\r\n for link in links:\r\n # every search result\r\n print('Link: ', link)\r\n print('Name text: ', nametexts[i])\r\n## time.sleep(random.uniform(0.2, 2))\r\n\r\n data = {'Search name': name.encode('ascii', 'ignore').decode('utf-8'), 'Name': nametexts[i].encode('ascii', 'ignore').decode('utf-8'), 'URL': link}\r\n print(data)\r\n profiles.append(data)\r\n i = i + 1\r\n writer.writerows(profiles)\r\n click.echo(\"Checked: \" + name)\r\n else:\r\n print(\"Not found: \" + name)\r\n time.sleep(random.uniform(2, 5))", "title": "" }, { "docid": "05fded35d6568520ecdd8d523762995b", "score": "0.49669993", "text": "def get_country_statistics():\n #URL to get all the data for countries\n url = 'https://api.covid19api.com/summary'\n req = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD))\n #print(str(req.json()))\n response = req.json()['Countries']\n lenght = len(response)\n for i in range(lenght):\n if response[i]['Country'] not in COUNTRIES:\n COUNTRIES.append(response[i]['Country'])\n\n return COUNTRIES", "title": "" }, { "docid": "0544f53a577dbd47905b39bd0ab947fd", "score": "0.49625093", "text": "def masterlist():\n \n query = dbquery('select page_title from page where page_namespace = 14 and (page_title like \"%-Class_%_articles\" or page_title like \"Unassessed_%_articles\" or page_title like \"WikiProject_%_articles\") and page_title not like \"%-importance_%\" and page_title not like \"Wikipedia_%\" and page_title not like \"Template-%\" and page_title not like \"Redirect-%\" and page_title not like \"Project-%\" and page_title not like \"Portal-%\" and page_title not like \"File-%\" and page_title not like \"FM-%\" and page_title not like \"Category-%\" and page_title not like \"Cat-%\" and page_title not like \"Book-%\" and page_title not like \"NA-%\" and page_title not like \"%_Operation_Majestic_Titan_%\" and page_title not like \"%_Version_%\" and page_title not like \"All_Wikipedia_%\" and page_title not like \"%_Wikipedia-Books_%\" and page_title not like \"Assessed-%\" and page_title not like \"%-Priority_%\" and page_title not like \"Unassessed_field_%\" and page_title not like \"Unassessed_importance_%\" and page_title not like \"Unassessed-Class_articles\" and page_title not like \"%_Article_quality_research_articles\" and page_title not like \"WikiProject_lists_of_encyclopedic_articles\";')\n categories = []\n for row in query:\n categories.append(row[0].decode('utf-8'))\n \n # Record in a dictionary of lists? wikiprojects = {'Military history': ['Category:A', 'Category:B']}\n buckets = {}\n \n for category in categories:\n projectname = category\n projectname = re.sub('WikiProject_', '', projectname) # Some categories include \"WikiProject\" in the category name.\n projectname = re.sub('-related', '', projectname) # e.g. 
\"Museum-related\" -> \"Museum\"\n projectname = re.sub('_quality', '', projectname) # e.g. \"Unassessed_quality\" -> \"Unassessed\"\n projectname = re.sub('_task_forces_by', '', projectname)\n projectname = re.sub('_task_force', '', projectname)\n projectname = re.sub('_taskforce', '', projectname)\n projectname = re.sub('_work_group', '', projectname)\n projectname = re.sub('_workgroup', '', projectname)\n projectname = re.sub('_subproject_selected_articles', '', projectname)\n projectname = re.sub('_automatically_assessed', '', projectname)\n projectname = re.sub(r'_articles$', '', projectname)\n projectname = re.sub(r'_newsletter$', '', projectname)\n projectname = re.sub(r'^((.*)-Class|Unassessed)_', '', projectname)\n projectname = projectname[0].upper() + projectname[1:] # Capitalize the first letter\n try:\n buckets[projectname].append(category)\n except KeyError:\n buckets[projectname] = []\n buckets[projectname].append(category)\n \n # For each key in buckets, try to match it to a real WikiProject or task force name\n # Checks against the redirect table so that it can follow redirects\n\n pagetitles = {}\n namespaces = {2: 'User:', 3: 'User_talk:', 4: 'Wikipedia:', 5: 'Wikipedia_talk:', 100: 'Portal:', 101: 'Portal_talk:'}\n # Heavens help me if WikiProjects end up in namespaces other than those.\n \n for key in buckets.keys():\n project_area = key\n query = dbquery('select page.page_title,redirect.rd_namespace,redirect.rd_title from page left join redirect on redirect.rd_from = page.page_id where page_title = \"WikiProject_' + key + '\" and page_namespace = 4;')\n if len(query) == 0:\n query = dbquery('select page.page_title,redirect.rd_namespace,redirect.rd_title from page left join redirect on redirect.rd_from = page.page_id where page_title = \"WikiProject_' + key + 's\" and page_namespace = 4;')\n if len(query) == 0:\n print('Warning: No project page found for key: ' + key)\n continue\n\n page_title = query[0][0]\n rd_namespace = query[0][1]\n rd_title = query[0][2]\n \n if rd_title is not None:\n pagetitles[key] = namespaces[rd_namespace] + rd_title.decode('utf-8')\n elif rd_title is None and page_title is not None:\n pagetitles[key] = namespaces[4] + page_title.decode('utf-8')\n \n # At this point, each key of buckets should be tied to an actual page name\n output = {}\n \n for key in buckets.keys():\n for category in buckets[key]:\n try:\n output[pagetitles[key]].append(category)\n except KeyError:\n output[pagetitles[key]] = []\n output[pagetitles[key]].append(category)\n \n return output", "title": "" }, { "docid": "c5d2624d074e6b65583c1a55977d0189", "score": "0.49576288", "text": "def searchpageparsing(page): # Note for initial Coldwell this was run seperately, for more managable errors\n if not page: # Failed webdl handling\n return None\n scrapelist = []\n\n soup = bs4.BeautifulSoup(page.text, 'lxml')\n parent_element = soup.find('a', {'id': 'resultsNext'})\n\n while parent_element:\n link = parent_element['href']\n scrapelist.append(link)\n page = webdl('https://www.cbcworldwide.com' + link)\n\n soup = bs4.BeautifulSoup(page.text, 'lxml')\n parent_element = soup.find('a', {'id': 'resultsNext'})\n\n return scrapelist", "title": "" }, { "docid": "69b79d07e4b518251b8aec565dd0dbc8", "score": "0.49540037", "text": "def scrape(self):\n\n for idx, main_url in enumerate(self.urls):\n self.driver.get(main_url)\n\n self.get_building_features(main_url)\n # Remove url that were scraped\n self.urls.pop(idx)\n \n \n '''self.wait('/html/body/script[2]/text()', 'XPATH')\n JSON_DATA 
= \"return JSON.stringify(dataLayer)\"\n\n try:\n bldg_data = self.driver.execute_script(JSON_DATA)\n bldg_data = json.loads(bldg_data)[0]\n\n self.get_building_features(main_url, bldg_data)\n except JavascriptException:\n print(self.driver.current_url)'''", "title": "" }, { "docid": "93993547646607922d85c932edae1942", "score": "0.49458018", "text": "def place_ua(city_cleaner):\n logging.info(\"Beginning UA lookup\")\n lookup = defaultdict(dict)\n download = requests.get(URL)\n reader = csv.reader(download.content.decode('latin-1').encode('utf-8').splitlines(), delimiter=',')\n not_designated = 0\n total = 0\n # skip header line\n next(reader)\n for row in reader:\n total += 1\n state_fips = row[2]\n ua = row[0]\n place_name = row[4]\n place_fips = row[3]\n\n if place_fips == '99999' or ua == '99999':\n not_designated += 1\n continue\n\n cleaned_place_name = re.sub(r'\\([^)]*\\)', '', place_name).rstrip()\n suffix_found = False\n for suffix in SUFFIXES:\n if cleaned_place_name.endswith(suffix):\n cleaned_place_name = cleaned_place_name.replace(suffix, '').rstrip()\n for delimiter in DELIMITERS:\n if delimiter in cleaned_place_name:\n places = cleaned_place_name.split(delimiter)\n for place in places:\n if place:\n lookup[ABBR_LOOKUP[state_fips]][city_cleaner(place)] = ua\n break\n lookup[ABBR_LOOKUP[state_fips]][city_cleaner(cleaned_place_name)] = ua\n suffix_found = True\n break\n if not suffix_found:\n lookup[ABBR_LOOKUP[state_fips]][cleaned_place_name] = ua\n\n logging.info(\n 'Done extracting urbanized areas and urban clusters. %s total rows, %s not designated, %s found',\n total,\n not_designated,\n total - not_designated\n )\n\n return lookup", "title": "" }, { "docid": "86298faa51e727f6f7936e7b6387b311", "score": "0.49440315", "text": "def download_users(keywords: str,uni_id,userId,n) -> Tuple[str, int]:\r\n\r\n # Page counter\r\n proxy = Proxy({\r\n 'proxyType': ProxyType.MANUAL,\r\n 'httpProxy': get_proxy(),\r\n 'ftpProxy': get_proxy(),\r\n 'sslProxy': get_proxy(),\r\n 'noProxy': ''})\r\n browser = webdriver.Firefox(executable_path=config.driver, proxy=proxy)\r\n #browser.set_window_position(-3000, 0)\r\n\r\n\r\n page = 1\r\n paramList = []\r\n # Page where is the list of authors\r\n citations_page = URLFactory(ScholarURLType.CITATIONS, keywords)\r\n #print(citations_page.generate())\r\n print(citations_page.generate())\r\n # HTML of the list of authors\r\n display_user_page_request(browser, citations_page.generate())\r\n users_soup = BeautifulSoup(browser.page_source, 'html.parser')\r\n #users_soup = beautifulsoup_request(citations_page.generate())\r\n print(users_soup)\r\n for i in range(n):\r\n display_user_page_request(browser, citations_page.next_url(users_soup))\r\n users_soup = BeautifulSoup(browser.page_source, 'html.parser')\r\n userId += 10\r\n #print(users_soup)\r\n\r\n #print(users_soup)\r\n # All the authors\r\n users_list = []\r\n\r\n while True:\r\n print(f\"--- PAGE {page} ---\")\r\n users = gsc_users(users_soup,userId)\r\n\r\n users_list.extend(users)\r\n print(users_list)\r\n if len(users_list) == 0:\r\n print(\"%s didn't match any user profiles\" % keywords)\r\n break\r\n for user in users:\r\n #print(user.as_csv() + \"\\n\")\r\n\r\n\r\n citations_user_page = user['page']\r\n #works_soup = beautifulsoup_request(citations_user_page.url)\r\n display_user_page_request(browser, citations_user_page.first_url())\r\n works_soup = BeautifulSoup(browser.page_source, 'html.parser')\r\n #str = works_soup.find('div',id=\"gsc_lwp\").text\r\n #str = str.split(\"Show\")[0]\r\n 
#str = str.split(\"Articles\")[1]\r\n #print(str[3:])\r\n #recearch_count = int(str[3:])\r\n #print(recearch_count)\r\n #user['researc_n'] = recearch_count\r\n try:\r\n citations_years = works_soup.find_all('td', class_='gsc_rsb_std')\r\n # print(citations_years)\r\n\r\n user['citations_all'] = citations_years[0].text\r\n user['citations_fiveyear'] = citations_years[1].text\r\n user['hindex_all'] = citations_years[2].text\r\n user['hindex_fiveyear'] = citations_years[3].text\r\n user['i10index_all'] = citations_years[4].text\r\n user['i10index_fiveyear'] = citations_years[5].text\r\n except:\r\n user['citations_all'] = 0\r\n user['citations_fiveyear'] = 0\r\n user['hindex_all'] = 0\r\n user['hindex_fiveyear'] = 0\r\n user['i10index_all'] = 0\r\n user['i10index_fiveyear'] = 0\r\n print(\"Citation info doesn;t exist\")\r\n\r\n # endregion\r\n #\r\n\r\n #researchId = gsc_user_detail_works(works_soup, user,researchId)\r\n user_google_url = user['page'].first_url()\r\n sql = \"INSERT INTO academic (uni_id,academic_id,academic_name,n_citations ,n_h_index_all ,n_h_index_5,n_i10_index_all,n_i10_index_5,academic_photo,academic_google_url) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\r\n val = (uni_id,int(user['id']), user['name'],int(user['citations_all']), int(user['hindex_all']) ,int(user['hindex_fiveyear']),int(user['i10index_all']),int(user['i10index_fiveyear']), user['avatar'], user_google_url)\r\n cursor.execute(sql, val)\r\n connection.commit()\r\n #browser.close()\r\n if users_soup.find('button', attrs={'aria-label': 'Next'}) is None or \\\r\n users_soup.find('button', attrs={'aria-label': 'Next'}).has_attr('onclick') is not True:\r\n break\r\n #users_soup = beautifulsoup_request(citations_page.next_url(users_soup))\r\n display_user_page_request(browser, citations_page.next_url(users_soup))\r\n users_soup = BeautifulSoup(browser.page_source, 'html.parser')\r\n userId += 10\r\n # request(citations_page.next_url(users_soup))\r\n page += 1\r\n paramList.append(userId)\r\n #paramList.append(researchId)\r\n #browser.close()\r\n return paramList", "title": "" }, { "docid": "40958cc9b422a8569b931130bdd97e77", "score": "0.49371248", "text": "def challenge_6(given_list):\n duplicate_list = []\n temp_list = []\n\n # 1 -- below for loop extracts duplicate names and their attributes into their own list\n for x in given_list:\n if x[0][\"name\"] not in temp_list:\n temp_list.append(x[0][\"name\"])\n else:\n duplicate_list.append(x)\n\n print(\"Duplicate names list: \", duplicate_list)\n\n # 2 -- alter the OG list and remove duplicate_list \n for x in duplicate_list:\n for y in given_list:\n if x == y:\n given_list.pop(given_list.index(y))\n\n print(\"Altered original list: \", given_list)\n\n # 3 -- calculate average age\n\n count = 0\n ages = 0\n today = date.today()\n\n N = 51\n younger = [] # 4 -- Find all the people with age < N\n\n for x in given_list:\n date_obj = datetime.strptime(x[0][\"dob\"], '%d/%m/%Y') # converting date string to date object\n difference = today - date_obj.date()\n ages += difference.days\n count +=1\n # 4 -- Find all the people with age < N\n if (difference.days)/365 < N:\n younger.append(x)\n\n average_age = (ages/count)/365 # 3 -- calculate average age\n avgfloat = \"{:.2f}\".format(average_age)\n\n print(\"The average age is: \", avgfloat)\n print(\"The people who have an age with less than 51 are: \", younger)\n \n \n # 5 -- Unique countries ---------- still needs work\n\n import requests\n import json\n\n unique_countries = []\n\n with open(\"countries.json\", \"r\") as 
data_file:\n raw_json = data_file.readline()\n countries = json.loads(raw_json)\n\n for person in given_list:\n for country in countries:\n if country[\"demonym\"] == person[0]['nationality']:\n unique_countries.append(country[\"name\"])\n else:\n pass\n\n print(unique_countries)\n\n return \"End of Testing\"", "title": "" }, { "docid": "efaa94b396c31b0f27731502123ddfd9", "score": "0.49352613", "text": "def request_terms() -> List[Dict[str, str]]:\n url = \"https://compassxe-ssb.tamu.edu/StudentRegistrationSsb/ssb/classSearch/getTerms?dataType=json&offset=1&max=500\"\n response: Response = requests.get(url)\n return json.loads(response.content)", "title": "" }, { "docid": "85ac983ea27da2b744f24e5308250965", "score": "0.49337077", "text": "def get_businesses(categories, location, name, reviews_limit):\n business_id = ''\n business_rating = ''\n uri_params = []\n url_params = {'categories': categories.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': BUSINESS_SEARCH_LIMIT #not currently being used - set to 0, no param passed in\n }\n \n #2 - gets token \n bearer_token = get_bearer_token(API_HOST, TOKEN_PATH)\n\n #3 - sends fully qualified request\n response = send_request(API_HOST, SEARCH_PATH, bearer_token, url_params)\n businesses = response.get('businesses')\n \n print(len(businesses)) #print lenght businesses object -- REMOVE AT WILL\n print('---------')\n name_found = 0\n for i in businesses:\n if i['name'] == name:\n name_found = 1\n business_id = i['id']\n business_name = i['name']\n business_rating = i['rating']\n review_count = i['review_count']\n page = i['url']\n print(u'ID: {0} NAME: {1} RATING: {2} REVIEW COUNT: {3} PAGE: {4}'.format(business_id, \\\n business_name, business_rating, review_count, page))\n break\n \n if name_found == 0:\n print(u'No businesses for {0} in {1} with the name {2} found.'.format(categories, location, name))\n return \n \n print('---------')\n print(u'Match found, querying for ratings for: \"{0}\" in {1}...'.format(business_name, location))\n print('---------')\n\n #4 - If business has reviews, get reviews using retrieved business_id\n if review_count > 0:\n if review_count < int(reviews_limit): #only retrieve the number of reviews specifed by criteria\n print('---------')\n print(u'actual review count: {0} vs. 
reviews limit you provided: {1}'.format(review_count, reviews_limit))\n print('---------')\n print(u'Less reviews than you requested were found for {0}'.format(name))\n \n #4 - gets a public version of the reviews \n uri_params.extend([categories, location, name, reviews_limit])\n final_reviews = {'name':'',\n 'uri':'',\n 'reviews':''}\n final_reviews['name'] = name\n final_reviews['uri'] = make_public_reviews(uri_params)\n \n #5 - gets reviews for the business based on limit passed\n reviews = get_reviews(name, business_id, page, reviews_limit)\n final_reviews['reviews'] = reviews\n pprint.pprint(final_reviews)\n return final_reviews\n else:\n print(u'No Reviews are available for {0}.'.format(name))\n return", "title": "" }, { "docid": "d94c80a644f50aed03fd4a8df22a503d", "score": "0.49248192", "text": "async def get_today_available_county_dataset():\n logging.info('Crawler will fetch daily tables of available countries.')\n # Get session to further requests\n async with ClientSession() as session:\n content = await search_site(session)\n countries = get_countries_available_today_with_urls(content)\n\n dataset = {}\n tasks = []\n for index, (country_name, path) in enumerate(countries):\n logging.debug('Performing query on {0} at index {1}'.format(country_name, index))\n task = get_country_data_set(session, country_name, path, dataset)\n tasks.append(task)\n\n await asyncio.gather(*tasks)\n\n # Export dataset\n export_dataset(dataset)\n\n return dataset", "title": "" }, { "docid": "420c5253fd3148bc5f3067e7c76dc8c0", "score": "0.49220392", "text": "def scraper(c, ch_url_idx):\n print(\"-\"*50)\n char_pages_urls = get_char_pages_url(c)\n print(f\"This character {c} has {len(char_pages_urls)} pages.\")\n if ch_url_idx:\n char_pages_urls = char_pages_urls[ch_url_idx-1:]\n print(f\"Starting scraping with URL {char_pages_urls[0]}\")\n for ch_idx, ch_url in enumerate(tqdm(char_pages_urls)):\n word_urls = get_word_urls(ch_url)\n urban_data = []\n\n threads = min(MAX_THREADS, len(word_urls))\n with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:\n res = executor.map(get_word_data, word_urls)\n\n for r in res:\n urban_data.append([c, ch_url, *r])\n\n write_to_file(c, urban_data)\n time.sleep(1.5)\n print(f\"Success! 
Character {c} complete.\")\n print(\"-\"*50)", "title": "" }, { "docid": "95f34a5915631b783fe700e5f8272c36", "score": "0.49162683", "text": "def process_nationality(_fields, _line_num):\n\n\taliases = _fields[headKB['nationality']['ADJECTIVAL FORM']].split(KB_MULTIVALUE_DELIM)\n\tfor t in aliases:\n\t\tadd_to_dictionary(t, _line_num, \"nationality\", _fields)", "title": "" }, { "docid": "b9e218c7f8fdb2eaa5cacc513563e8ae", "score": "0.49105117", "text": "def scrape_manager(state_name: str, county_name: str = None) -> None:\n logging.info(f\"Create raw data and config directory for state: {state_name} county: {county_name}\")\n if county_name is None:\n state_config_path = path.join('states', state_name, 'configs')\n raw_data_dir = path.join('states', state_name, 'raw_data')\n else:\n state_config_path = path.join('states', state_name, 'counties', county_name, 'configs')\n raw_data_dir = path.join('states', state_name, 'counties', county_name, 'raw_data')\n\n logging.info(f\"Get responses from text file\")\n state_response_list, state_data_type_names, request_type = get_responses_from_config_files_in_dir(\n config_dir=state_config_path)\n\n if not path.isdir(raw_data_dir):\n os.makedirs(raw_data_dir)\n utils_lib.save_raw_data(\n save_dir=raw_data_dir,\n response_list=state_response_list,\n data_type_names=state_data_type_names,\n request_type=request_type)", "title": "" }, { "docid": "2f3fc0ee64862412b48b89a0423cc903", "score": "0.49058798", "text": "def load_county_lookups(apps, schema_editor):\n country_lookup = apps.get_model('registry', 'CountryLookup')\n state_lookup = apps.get_model('registry', 'StateLookup')\n county_lookup = apps.get_model('registry', 'CountyLookup')\n data_src = os.path.join(INITIAL_DATA_DIR, 'county.csv')\n with default_storage.open(data_src, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n next(csvreader)\n for country_cd, state_cd, county_cd, county_nm in csvreader:\n try:\n country = country_lookup.objects.get(country_cd=country_cd)\n state = state_lookup.objects.get(state_cd=state_cd, country_cd=country_cd)\n except (country_lookup.DoesNotExist, state_lookup.DoesNotExist):\n continue\n else:\n county = county_lookup(\n country_cd=country,\n state_id=state,\n county_cd=county_cd,\n county_nm=county_nm\n )\n county.save()", "title": "" }, { "docid": "e6290e1af3566fcb73ca0934e63d1113", "score": "0.4905519", "text": "def scrape_city(city):\n browser = Chrome()\n browse_to_city(browser, city)\n rows = get_table_rows(browser, '.data_wide_table')\n yield from parse_table_rows(rows, city)\n browser.quit()", "title": "" }, { "docid": "badd4bf8449b471824816091c85748af", "score": "0.49033672", "text": "def get_all_salaries(city_urls, cities: List[str]) -> Iterable[Dict[str, Any]]:\n browser = Chrome()\n for url, city in zip(city_urls, cities):\n print(url)\n browser.get(url)\n try:\n sel_title = 'td.bar-chart-bootstrap-range-label'\n sel_title2 = 'div.pay-spotlight__pay-value'\n avg_salary = browser.find_element_by_css_selector(sel_title2).text.strip().strip('$').replace(',','')\n salary_range = browser.find_element_by_css_selector(sel_title).text\n min_salary, max_salary = parse_salary_range(salary_range)\n result = {'city': city,\n 'avg_salary': avg_salary,\n 'min_salary': min_salary,\n 'max_salary': max_salary}\n yield result\n except NoSuchElementException:\n sel_title = 'div.pay-spotlight__pay-value'\n avg_salary = browser.find_element_by_css_selector(sel_title).text.strip().strip('$')\n result = {'city': city,\n 'avg_salary': avg_salary}\n yield result", 
"title": "" }, { "docid": "798b06c43e7d2ca2d53f056cb587c300", "score": "0.49014366", "text": "def parse(self, response):\n \n # The class is being change several times [nb-quickfilter | nb2-quickfilter]\n all_countries_page = response.css('div.nb-quickfilter > select[name=usage] > option')\n \n # If all_countries_page is [], try other method\n if not all_countries_page:\n all_countries_page = response.css('div.nb2-quickfilter > select[name=usage] > option')\n \n DetailsExtractSpider.countries_name = all_countries_page.css(' ::text').extract()[2:] # This contains \n # all the countries name\n all_country = all_countries_page.css(' ::attr(value)').extract()[2:] \n \n for country in all_country:\n yield scrapy.Request(url=response.urljoin(country), callback=self.each_page)", "title": "" }, { "docid": "df0b751ffdc388099544ca60ed2d4eb8", "score": "0.48981005", "text": "def test_requests(request_classifier):\n requests = []\n\n requests.append((request.Request(['195.154.169.9', '-', '-',\n datetime(2016, 4, 10, 4, 46, 40, tzinfo=pytz.utc), 'GET', '/', '200',\n '42751', '-',\n 'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0']),\n 'OK'))\n\n print('')\n request_number = 0\n\n for request_item in requests:\n print('REQUEST #' + str(request_number) + ' ~')\n request_classifier.try_classify_request(request_item)\n request_number += 1\n print('')", "title": "" }, { "docid": "c38b7b426d1df4e8afac3da91b36eeec", "score": "0.48977834", "text": "def scrape_wiki(db):\n scraper = WikiScraper(db)\n scraper.scrape_items()\n update_selectors(db)\n print(\"Wiki Scraper has finished\")", "title": "" }, { "docid": "669dde82923d36819dcd7e8d35514d0a", "score": "0.48912916", "text": "def format_location_detail_county(data):\n df = pd.DataFrame([vars(o) for o in data]).explode('city').explode('zcta') \\\n .explode('cd').reset_index(drop=True)\n df.rename(columns={'fsid': 'fsid_placeholder', 'name': 'name_placeholder'}, inplace=True)\n\n if not df['city'].isna().values.all():\n df = pd.concat([df.drop(['city'], axis=1), df['city'].apply(pd.Series)], axis=1)\n df.rename(columns={'fsid': 'city_fips', 'name': 'city_name'}, inplace=True)\n else:\n df.drop(['city'], axis=1, inplace=True)\n df['city_fips'] = pd.NA\n df['city_name'] = pd.NA\n\n if not df['zcta'].isna().values.all():\n df = pd.concat([df.drop(['zcta'], axis=1), df['zcta'].apply(pd.Series)], axis=1)\n df.rename(columns={'fsid': 'zipCode', 'name': 'zcta_name'}, inplace=True)\n else:\n df.drop(['zcta'], axis=1, inplace=True)\n df['zipCode'] = pd.NA\n df['zcta_name'] = pd.NA\n\n if not df['cd'].isna().values.all():\n df = pd.concat([df.drop(['cd'], axis=1), df['cd'].apply(pd.Series)], axis=1)\n df.rename(columns={'fsid': 'cd_fips', 'name': 'cd_name'}, inplace=True)\n else:\n df.drop(['cd'], axis=1, inplace=True)\n df['cd_fips'] = pd.NA\n df['cd_name'] = pd.NA\n\n if not df['state'].isna().values.all():\n df = pd.concat([df.drop(['state'], axis=1), df['state'].apply(pd.Series)], axis=1)\n df.rename(columns={'fsid': 'state_fips', 'name': 'state_name'}, inplace=True)\n else:\n df.drop(['state'], axis=1, inplace=True)\n df['state_fips'] = pd.NA\n df['state_name'] = pd.NA\n\n df.rename(columns={'fsid_placeholder': 'fsid', 'name_placeholder': 'name'}, inplace=True)\n df['fsid'] = df['fsid'].apply(str)\n df['city_fips'] = df['city_fips'].astype('Int64').apply(str)\n df['zipCode'] = df['zipCode'].astype('Int64').apply(str)\n df['cd_fips'] = df['cd_fips'].astype('Int64').apply(str)\n df['state_fips'] = 
df['state_fips'].astype('Int64').apply(str).apply(lambda x: x.zfill(2))\n df['geometry'] = df['geometry'].apply(get_geom_center)\n df = pd.concat([df.drop(['geometry'], axis=1), df['geometry'].apply(pd.Series)], axis=1)\n return df[['fsid', 'valid_id', 'name', 'isCoastal', 'city_fips', 'city_name', 'zipCode', 'fips', 'cd_fips',\n 'cd_name', 'state_fips', 'state_name', 'latitude', 'longitude', 'error']]", "title": "" }, { "docid": "124b74a08788564fe5d6df1dd847ae31", "score": "0.4879476", "text": "def wikiscraper(search_list):\n # Created using https://muddoo.com/tutorials/how-to-\n # extract-data-from-a-website-using-python/\n\n for row in search_list:\n try:\n # Strip input of spaces and replace with underscore\n wikipage = find_exception(row[0])\n content = urllib.request.urlopen('https://en.wikipedia.org/wiki/' +\n wikipage.replace(\" \", \"_\"))\n soup = BeautifulSoup(content.read(), 'html.parser')\n row = keyword_matcher(soup, row)\n # This handles 404 errors\n except urllib.error.HTTPError:\n row.append(\"Wikipedia article not found\")\n return search_list", "title": "" }, { "docid": "95b1103c60acc00754f5255101437476", "score": "0.4877543", "text": "def fetch_events_italian_studies(base_url='https://www.sas.upenn.edu'):\n page_soup = BeautifulSoup(requests.get(\n urljoin(base_url, '/italians/center/events')).content, 'html.parser')\n\n events = []\n event_table = page_soup.find('div', attrs={'class': 'view-content'})\n all_events = event_table.find_all('div', attrs={'class': 'field-content'})\n for event in all_events:\n event_url = urljoin(base_url, event.find('a')['href'])\n event_soup = BeautifulSoup(requests.get(\n event_url).content, 'html.parser')\n title = event_soup.find('h1', attrs={'class': 'title'})\n title = title.text.strip() if title is not None else ''\n date = event_soup.find(\n 'span', attrs={'class': 'date-display-single'}).text.strip()\n\n starttime = event_soup.find(\n 'div', attrs={'class': 'field field-type-datetime field-field-event-time'})\n starttime = starttime.text.replace(\n 'Time:', '').strip() if starttime is not None else ''\n if starttime is '':\n starttime = event_soup.find(\n 'span', attrs={'class': 'date-display-start'}).text.strip()\n endtime = event_soup.find(\n 'span', attrs={'class': 'date-display-end'})\n endtime = endtime.text.strip() if endtime is not None else ''\n else:\n starttime, endtime = find_startend_time(starttime)\n\n page_details = [t.text.strip() for t in event_soup.find_all(\n 'div', attrs={'class': 'field-items'})]\n location, speaker = '', ''\n for detail in page_details:\n if 'Speaker' in detail:\n speaker = detail.replace('Speaker:', '').strip()\n if 'Location' in detail:\n location = detail.replace('Location:', '').strip()\n\n description = event_soup.find(\n 'div', attrs={'id': 'content-area'}).find('div', attrs={'class': 'content'})\n description = '\\n'.join([t.text for t in description.find_all(\n 'p')]) if description is not None else ''\n events.append({\n 'title': title,\n 'url': event_url,\n 'date': date,\n 'starttime': starttime,\n 'endtime': endtime,\n 'location': location,\n 'description': description,\n 'speaker': speaker,\n 'owner': 'Italian Studies'\n })\n return events", "title": "" }, { "docid": "48eaa211f9d63bad260dc3b27ab66d63", "score": "0.4877543", "text": "def fetch_events_italian_studies(base_url='https://www.sas.upenn.edu'):\n page_soup = BeautifulSoup(requests.get(\n urljoin(base_url, '/italians/center/events')).content, 'html.parser')\n\n events = []\n event_table = page_soup.find('div', 
attrs={'class': 'view-content'})\n all_events = event_table.find_all('div', attrs={'class': 'field-content'})\n for event in all_events:\n event_url = urljoin(base_url, event.find('a')['href'])\n event_soup = BeautifulSoup(requests.get(\n event_url).content, 'html.parser')\n title = event_soup.find('h1', attrs={'class': 'title'})\n title = title.text.strip() if title is not None else ''\n date = event_soup.find(\n 'span', attrs={'class': 'date-display-single'}).text.strip()\n\n starttime = event_soup.find(\n 'div', attrs={'class': 'field field-type-datetime field-field-event-time'})\n starttime = starttime.text.replace(\n 'Time:', '').strip() if starttime is not None else ''\n if starttime is '':\n starttime = event_soup.find(\n 'span', attrs={'class': 'date-display-start'}).text.strip()\n endtime = event_soup.find(\n 'span', attrs={'class': 'date-display-end'})\n endtime = endtime.text.strip() if endtime is not None else ''\n else:\n starttime, endtime = find_startend_time(starttime)\n\n page_details = [t.text.strip() for t in event_soup.find_all(\n 'div', attrs={'class': 'field-items'})]\n location, speaker = '', ''\n for detail in page_details:\n if 'Speaker' in detail:\n speaker = detail.replace('Speaker:', '').strip()\n if 'Location' in detail:\n location = detail.replace('Location:', '').strip()\n\n description = event_soup.find(\n 'div', attrs={'id': 'content-area'}).find('div', attrs={'class': 'content'})\n description = '\\n'.join([t.text for t in description.find_all(\n 'p')]) if description is not None else ''\n events.append({\n 'title': title,\n 'speaker': speaker,\n 'date': date,\n 'location': location,\n 'description': description,\n 'starttime': starttime,\n 'endtime': endtime,\n 'url': event_url,\n 'owner': 'Italian Studies'\n })\n return events", "title": "" }, { "docid": "6a7e51be90e91a2370747c3101234145", "score": "0.4874047", "text": "def test_county_limit_by_state__valid_arg(self):\n response_11 = self.client.get(self.url, {'state': 11})\n self.assertEqual(response_11.status_code, status.HTTP_200_OK)\n self.assertFalse(not response_11.data['data'])\n\n response_DC = self.client.get(self.url, {'state': 'DC'})\n self.assertEqual(len(response_11.data['data']), 2)\n self.assertTrue(response_11.data['data'] == response_DC.data['data'])\n\n response_VA = self.client.get(self.url, {'state': 'VA'})\n self.assertTrue(len(response_VA.data['data']) == 1)\n self.assertFalse(response_11.data['data'] == response_VA.data['data'])", "title": "" }, { "docid": "f94834a02f0cef1e1616697b026c0f0f", "score": "0.4867712", "text": "def soupify(self, occurance=\"first\"):\n\n driver = self.driver\n if occurance == \"first\":\n driver.get(\"https://www.sunbeltnetwork.com/business-search/business-results/\")\n country = driver.find_element_by_name(\"country\")\n country.send_keys(\"United States\")\n state = driver.find_element_by_name(\"state\")\n state.send_keys(self.state)\n\n if self.settings is not None:\n cats = self.settings[\"categories\"]\n\n if cats != [\"All Categories\"]:\n for cat in cats:\n x = f\"//option[text()='{cat}']\"\n element = driver.find_element_by_xpath(x)\n element.click()\n element.submit()\n\n if \"price_min\" in self.settings:\n price = self.settings[\"price_min\"]\n element = driver.find_element_by_name(\"price-min\")\n element.click()\n element.send_keys(price)\n element.submit()\n\n if \"price_max\" in self.settingg:\n price = self.settings[\"price_max\"]\n element = driver.find_element_by_name(\"price-max\")\n element.click()\n element.send_keys(price)\n 
element.submit()\n\n if \"revenue_min\" in self.settings:\n revenue = self.settings[\"revenue_min\"]\n element = driver.find_element_by_name(\"revenue-min\")\n element.click()\n element.send_keys(revenue)\n element.submit()\n\n if \"revenue_max\" in self.settingg:\n revenue = self.settings[\"revenue_max\"]\n element = driver.find_element_by_name(\"revenue-max\")\n element.click()\n element.send_keys(revenue)\n element.submit()\n\n if \"cf_min\" in self.settings:\n cf = self.settings[\"cf_min\"]\n element = driver.find_element_by_name(\"cf-min\")\n element.click()\n element.send_keys(cf)\n element.submit()\n\n if \"cf_max\" in self.settingg:\n cf = self.settings[\"cf_max\"]\n element = driver.find_element_by_name(\"cf-max\")\n element.click()\n element.send_keys(cf)\n element.submit()\n\n else:\n cf = driver.find_element_by_name(\"cf-min\")\n cf.send_keys(\"750000\")\n cf2 = driver.find_element_by_name(\"cf-max\")\n cf2.send_keys(\"3000000\")\n cf2.submit()\n\n time.sleep(1)\n\n elif occurance == \"next\":\n next_button = driver.find_element_by_partial_link_text(\"NEXT\")\n next_button.click()\n time.sleep(1)\n else:\n raise ValueError(\"Occurance must be 'first' or 'next'.\")\n\n soup = bs.BeautifulSoup(driver.page_source, \"lxml\")\n if \" 0 businesses and companies for sale in\" in str(soup):\n print(\"No businesses found.\")\n return soup", "title": "" }, { "docid": "f5a1fece748cb6bad7c6e405ead02427", "score": "0.48672614", "text": "def main(haystack, needles, output):\n\n cities = get_cities(needles)\n find_cities(haystack, cities, output)", "title": "" }, { "docid": "4abbfe4837afc1542ff6b69c57eb7f2e", "score": "0.48588356", "text": "def search_city(request):\n\n term = request.GET.get('term').strip()\n\n lang = get_language()\n results_per_page = int_arg(request.GET.get('more', 30))\n cursor = request.GET.get('cursor')\n limit = results_per_page\n\n manager = instance.geolocation_app.geolocation_manager\n\n if cursor:\n cursor = json.loads(cursor)\n cursor_name, cursor_id = cursor\n else:\n cursor_name = cursor_id = None\n\n cities = manager.get_cities(cursor_name=cursor_name, limit=limit, lang=lang, term=term)\n\n city_list = []\n\n for city_r in cities:\n country_r = manager.get_country(country_id=city_r['cou_id'], lang=lang)\n\n c = {\n 'id': city_r['cit_id'],\n 'name': city_r['name'],\n 'lat': city_r['lat'],\n 'long': city_r['long'],\n 'display_names': city_r['alt_name'],\n 'country': {\n 'id': country_r['cou_id'],\n 'name': country_r['name'],\n 'code3': country_r['code3'],\n 'lat': country_r['lat'],\n 'long': country_r['long'],\n 'display_names': country_r['alt_name'],\n 'preferred_names': country_r['preferred_name'],\n 'short_names': country_r['short_name']\n },\n 'preferred_names': city_r['preferred_name'],\n 'short_names': city_r['short_name']\n }\n city_list.append(c)\n\n if len(city_list) > 0:\n # prev cursor (asc order)\n entity = city_list[0]\n prev_cursor = (entity['name'], entity['id'])\n\n # next cursor (asc order)\n entity = city_list[-1]\n next_cursor = (entity['name'], entity['id'])\n else:\n prev_cursor = None\n next_cursor = None\n\n results = {\n 'perms': [],\n 'items': city_list,\n 'prev': prev_cursor,\n 'cursor': cursor,\n 'next': next_cursor,\n }\n\n return HttpResponseRest(request, results)", "title": "" }, { "docid": "cfeadfbc9ac341f777cdabb435b65cca", "score": "0.48557955", "text": "def geoQuery(loc, bibcode, count):\n\tQ={'address':loc, 'sensor':'false'}\n\tif loc not in ADDRESSES_DICT:\n\t\ttry:\n\t\t\tgeoRequest=requests.get(GEO_URL_BASE, 
params=Q)\n\t\t\tgeoDict=geoRequest.json()\n\t\t\tif geoDict['status'] == 'OK':\n\t\t\t\tlat=geoDict['results'][0]['geometry']['location']['lat']\n\t\t\t\tlng=geoDict['results'][0]['geometry']['location']['lng']\n\t\t\t\tcountry='NULL'\n\t\t\t\tstate='NULL'\n\t\t\t\ttrusted=False\n\t\t\t\tfor i in geoDict['results'][0]['address_components']:\n\t\t\t\t\tif 'country' in i['types']:\n\t\t\t\t\t\tcountry=i['long_name']\n\t\t\t\t\tif 'administrative_area_level_1' in i['types']:\n\t\t\t\t\t\tstate=i['long_name']\n\t\t\t\t\tif 'route' in i['types']:\n\t\t\t\t\t\ttrusted=True\n\t\t\t\taddress=geoDict['results'][0]['formatted_address']\n\t\t\t\tlat=str(lat).encode('utf-8')\n\t\t\t\tlng=str(lng).encode('utf-8')\n\t\t\t\tcountry=country.encode('utf-8')\n\t\t\t\tstate=state.encode('utf-8')\n\t\t\t\taddress=address.encode('utf-8')\n\t\t\t\tstringCount=str(count).encode('utf-8')\n\t\t\t\tstringBibcode=bibcode.encode('utf-8')\n\t\t\t\twriteList=[stringBibcode,loc,lat,lng,address,country,state,trusted,stringCount]\n\t\t\t\tADDRESSES_DICT[loc]={'location':(lat,lng,address,country,state,trusted),'count':count}\n\t\t\telse:\n\t\t\t\twriteList=[bibcode, loc, geoDict['status'],count,time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())]\n\t\t\t\tADDRESSES_DICT[loc]={'location':('unknown','unknown','unknown','unknown','unknown','unknown'),'count':count}\n\t\t\ttime.sleep(1)\n\t\t\treturn writeList\n\t\texcept requests.exceptions.ConnectionError, e:\n\t\t\tprint(\"Could not get geocoding information for {0}. Connection error:\".format(bibcode))\n\t\t\tprint(e)\n\t\t\twriteList=[bibcode, loc, \"ConnectionError\",count,time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())]\n\t\t\treturn writeList\n\telif ADDRESSES_DICT[loc]['location'][0]=='unknown':\n\t\tprint('\"{0}\" has already been sent to be geocoded, and no results were found.'.format(loc))\n\t\tstringBibcode=bibcode.encode('utf-8')\n\t\twriteList=[stringBibcode, loc, 'Zero results, duplicate query',count,time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())]\n\telse:\n\t\tprint('\"{0}\" has already been geocoded successfully. 
Using previous results...'.format(loc))\n\t\tlat=str(ADDRESSES_DICT[loc]['location'][0]).encode('utf-8')\n\t\tlng=str(ADDRESSES_DICT[loc]['location'][1]).encode('utf-8')\n\t\taddress=ADDRESSES_DICT[loc]['location'][2].encode('utf-8')\n\t\tcountry=ADDRESSES_DICT[loc]['location'][3].encode('utf-8')\n\t\tstate=ADDRESSES_DICT[loc]['location'][4].encode('utf-8')\n\t\ttrusted=ADDRESSES_DICT[loc]['location'][5]\n\t\tstringCount=str(count).encode('utf-8')\n\t\tstringBibcode=bibcode.encode('utf-8')\n\t\twriteList=[stringBibcode,loc,lat,lng,address,country,state,trusted,stringCount]\n\t\tADDRESSES_DICT[loc]['count']+=count\n\t\treturn writeList", "title": "" }, { "docid": "e1280f4e0053d449639c2da6dac5b86f", "score": "0.485401", "text": "def parse_company_page(url):\n html = scraperwiki.scrape(url)\n soup = BeautifulSoup(html)\n content = soup.find(\"div\", {\"id\": \"col1_content\"})\n date_scraped = datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d\")\n mailsrch = re.compile(r'[\\w\\-][\\w\\-\\.]+@[\\w\\-][\\w\\-\\.]+[a-zA-Z]{1,4}')\n data = {\n 'datescraped': date_scraped,\n 'emails': ', '.join(mailsrch.findall(content.text)),\n 'company_name': content.span.text,\n 'address': content.text.split('\\n')[4].strip(),\n 'city': content.text.split('\\n')[5].strip(),\n 'state': content.text.split('\\n')[6].strip(),\n 'zip': content.text.split('\\n')[7].strip(),\n 'country': content.text.split('\\n')[8].strip(),\n 'sourceurl': url\n }\n scraperwiki.sqlite.save(unique_keys=['company_name'], data=data)", "title": "" }, { "docid": "e1280f4e0053d449639c2da6dac5b86f", "score": "0.485401", "text": "def parse_company_page(url):\n html = scraperwiki.scrape(url)\n soup = BeautifulSoup(html)\n content = soup.find(\"div\", {\"id\": \"col1_content\"})\n date_scraped = datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d\")\n mailsrch = re.compile(r'[\\w\\-][\\w\\-\\.]+@[\\w\\-][\\w\\-\\.]+[a-zA-Z]{1,4}')\n data = {\n 'datescraped': date_scraped,\n 'emails': ', '.join(mailsrch.findall(content.text)),\n 'company_name': content.span.text,\n 'address': content.text.split('\\n')[4].strip(),\n 'city': content.text.split('\\n')[5].strip(),\n 'state': content.text.split('\\n')[6].strip(),\n 'zip': content.text.split('\\n')[7].strip(),\n 'country': content.text.split('\\n')[8].strip(),\n 'sourceurl': url\n }\n scraperwiki.sqlite.save(unique_keys=['company_name'], data=data)", "title": "" }, { "docid": "5f6d329326c0c5642f21ddb579ba2cf0", "score": "0.48516318", "text": "def gsc_users(soup: BeautifulSoup,userId) -> List[User]:\r\n\r\n users = []\r\n\r\n if soup.find('div', id='gsc_sa_ccl'):\r\n #print(\"aaa\")\r\n users_soup = soup.find_all('div', class_='gs_ai gs_scl gs_ai_chpr')\r\n #print(users_soup)\r\n user_id = 1\r\n for user in users_soup:\r\n u = User()\r\n u['avatar'] = ScholarURLType.BASE.value + user.find(class_='gs_ai_pho').img['src']\r\n print(u['avatar'])\r\n u['page'] = URLFactory(type_=ScholarURLType.CITATIONS_USER,\r\n url=ScholarURLType.BASE.value +\r\n user.find(class_='gs_ai_t').h3.a['href'])\r\n #print(u['page'])\r\n #u['id'] = search(r\"user=(.*)&hl\", u['page'].url).group(1)\r\n print(u['page'].first_url())\r\n u['id'] = userId\r\n print(userId)\r\n userId += 1\r\n try:\r\n u['name'] = user.find(class_='gs_ai_t').h3.a.string.title()\r\n print(u['name'])\r\n except AttributeError:\r\n markup = user.find(class_='gsc_oai_name')\r\n name_tag = None\r\n\r\n while markup.a.find(class_='gs_hlt') is not None:\r\n name_tag = markup.a\r\n name_tag.span.unwrap()\r\n u['name'] = 
name_tag.get_text()\r\n\r\n #u['affiliation'] = ' '.join(user.find(class_='gsc_oai_aff').find_all(text=True))\r\n\r\n #try:\r\n # Searching just fot the number in this string to get citations count\r\n #u['citations_count'] = int(findall(r\"\\d+\", user.find(class_='gsc_oai_cby').string)[0])\r\n #except TypeError:\r\n #u['citations_count'] = 0\r\n\r\n users.append(u)\r\n\r\n return users", "title": "" }, { "docid": "f1c8e01520793ccb7914604d9f7d938e", "score": "0.4838764", "text": "def get_test_data() -> dict:\n id_numbers = ['0000000001', '0000000002', '0000000003', '0000000004', '0000000005', '0000000006', '0000000007',\n '0000000008', '0000000009', '0000000010']\n first_names = ['Jane', 'John', 'Kathy', 'Kevin', 'Leslie', 'Lamar', 'Megan', 'Matthew', 'Nancy', 'Nick']\n last_names = ['Jones', 'James', 'King', 'Kelly', 'Lee', 'Lewis', 'Morgan', 'Martin', 'Nicholson', 'Newman']\n job_titles = ['Project Manager', 'Software Engineer', 'Financial Manger', 'Java Developer', 'Business Analyst',\n 'Systems Engineer', 'Network Engineer', 'Senior Software Engineer', 'Web Developer',\n 'Systems Administrator']\n company_names = ['Apple Inc.', 'Amazon.com', 'Alphabet Inc.', 'Microsoft', 'IBM', 'Dell Technologies', 'Intel',\n 'Hewlett Packard Enterprise', 'Oracle', 'Salesforce.com']\n company_city = ['Cupertino', 'Seattle', 'Mountain View', 'Redmond', 'Armonk', 'Austin', 'Santa Clara', 'Palo Alto',\n 'Redwood City', 'San Francisco']\n company_states = ['CA', 'WA', 'CA', 'WA', 'NY', 'TX', 'CA', 'CA', 'CA', 'CA']\n school1 = ['School of Engineering & Applied Science'] * len(id_numbers)\n school2 = ['School of Engineering & Applied Science', '', '', 'School of Engineering & Applied Science', '', '', '',\n 'School of Engineering & Applied Science', 'School of Engineering & Applied Science', '']\n school3 = [''] * len(id_numbers)\n major1 = ['Computer Science', 'Computer Engineering', 'Aerospace Engineering', 'Electrical Engineering',\n 'Aerospace Engineering', 'Electrical Engineering', 'Computer Science', 'Computer Engineering',\n 'Civil Engineering', 'Computer Science']\n major2 = ['Computer Engineering', '', '', 'Computer Science', '', '', '', 'Industrial & Systems Engineering',\n 'Computer Science', '']\n major3 = [''] * len(id_numbers)\n degree_code1 = ['Ph.D.', 'B.S.', 'B.S.', 'M.S.', 'B.S.', 'B.S.', 'B.S.', 'M.S.', 'M.S.', 'B.S.']\n degree_code2 = ['M.S.', '', '', 'B.S.', '', '', '', 'B.S.', 'B.S.', '']\n degree_code3 = [''] * len(id_numbers)\n degree_year1 = ['2003', '2016', '2016', '2016', '2016', '2016', '2007', '2010', '2002', '2009']\n degree_year2 = ['1998', '', '', '2012', '', '', '', '2008', '2000', '']\n degree_year3 = [''] * len(id_numbers)\n return {\n 'ID_NUMBER': id_numbers,\n 'FIRST_NAME': first_names,\n 'LAST_NAME': last_names,\n 'WORK_TITLE': job_titles,\n 'WORK_COMPANY_NAME1': company_names,\n 'WORK_CITY': company_city,\n 'WORK_STATE_CODE': company_states,\n 'SCHOOL1': school1,\n 'DEGREE_CODE1': degree_code1,\n 'DEGREE_YEAR1': degree_year1,\n 'MAJOR1': major1,\n 'SCHOOL2': school2,\n 'DEGREE_CODE2': degree_code2,\n 'DEGREE_YEAR2': degree_year2,\n 'MAJOR2': major2,\n 'SCHOOL3': school3,\n 'DEGREE_CODE3': degree_code3,\n 'DEGREE_YEAR3': degree_year3,\n 'MAJOR3': major3\n }", "title": "" }, { "docid": "344f493e5c0b021b957ac7e4cca0e5c5", "score": "0.48379374", "text": "def crawl_allpage_target(self):\n # calcus nbr need request count\n # each page at most ONE_AUTHOR_MAINPAGE_IMGCOUNT(20181003:48) images\n require_page_cnt = 0\n if self.max_cnt <= dataload.ONE_PAGE_COMMIT:\n 
require_page_cnt = 1\n else:\n require_page_cnt = int(self.max_cnt / dataload.ONE_PAGE_COMMIT)\n # remainder decision\n if self.max_cnt % dataload.ONE_PAGE_COMMIT != 0:\n require_page_cnt += 1\n\n # build request url of one page \n iid_string_tail = ''\n page_url_array = []\n for ix in range(require_page_cnt):\n # tail number limit\n tmp_tail_nbr = dataload.ONE_PAGE_COMMIT * (ix + 1)\n if tmp_tail_nbr > self.max_cnt:\n tmp_tail_nbr = self.max_cnt\n for index in self.pure_idlist[(dataload.ONE_PAGE_COMMIT * ix):tmp_tail_nbr]:\n iid_string_tail += dataload.IDS_UNIT(index)\n one_page_request_url = dataload.ALLREPOINFO_URL(self.user_input_id, iid_string_tail)\n iid_string_tail = '' # clear last cache\n page_url_array.append(one_page_request_url)\n \n # gather all data from response xhr page into a temp list\n tmp_receive_list = []\n for i in range(require_page_cnt):\n tmp_receive_list += self.crawl_onepage_data(i + 1, page_url_array[i])\n # handle url string\n repo_target_all_list = []\n for i in range(len(tmp_receive_list)):\n # tasnform title '\\\\uxxx' to unicode\n tmp_receive_list[i][1] = self.pvmx.unicode_escape(tmp_receive_list[i][1])\n # replace emoji string\n tmp_receive_list[i][1] = self.pvmx.replace_emoji(tmp_receive_list[i][1])\n # build original url without image format\n tmp = tmp_receive_list[i][2]\n tmp = tmp.replace('\\\\', '') # delete character '\\' \n tmp_receive_list[i][2] = dataload.ORIGINAL_IMAGE_HEAD + tmp[50:] + '.png'\n repo_target_all_list.append(tmp_receive_list[i]) # move original item to target list\n # use page count number build total url\n tmp_page_count_str = tmp_receive_list[i][3]\n if tmp_page_count_str.isdigit():\n index_page_count = int(tmp_page_count_str)\n if index_page_count != 1:\n # add others items into list\n for px in range(index_page_count - 1):\n insert_item = [tmp_receive_list[i][0], \n tmp_receive_list[i][1], \n tmp_receive_list[i][2][:-5] + str(px + 1) + '.png', \n tmp_receive_list[i][3]]\n repo_target_all_list.append(insert_item)\n else:\n log_context = 'Page count process error!'\n self.pvmx.logprowork(self.logpath, log_context)\n exit(-1)\n del tmp_receive_list # clear cache\n\n # collection target count\n alive_targetcnt = len(repo_target_all_list)\n require_img_nbr = 0\n if self.ir_mode == 1:\n require_img_str = dataload.logtime_input(\n 'Gather all repo %d, whole target(s): %d, enter you want count: '\n % (self.max_cnt, alive_targetcnt))\n # if user input isn't number\n while not require_img_str.isdigit():\n dataload.logtime_print(\n 'Input error, your input content was not a decimal number')\n require_img_str = dataload.logtime_input(\n 'Enter again(max is %d): ' % alive_targetcnt)\n require_img_nbr = int(require_img_str)\n # if user input number more than limit max, set it to max\n if require_img_nbr > alive_targetcnt:\n require_img_nbr = alive_targetcnt\n elif require_img_nbr <= 0:\n dataload.logtime_print('What the f**k is wrong with you?')\n exit(-1)\n # server mode directly catch all of alive targets\n elif self.ir_mode == 2:\n require_img_nbr = alive_targetcnt\n dataload.logtime_print('Server mode auto crawl all of alive targets')\n \n # download image number limit\n for k, i in enumerate(repo_target_all_list[:require_img_nbr]):\n self.target_capture.append(i[2]) # put url into target capture list\n self.basepages.append(dataload.BASEPAGE_URL + i[0]) # build basepage url\n \n # display author info\n log_context = ('Illustrator: ' + self.author_name + ' id: '\n + self.user_input_id + ' require image(s): ' \n + str(require_img_nbr) + 
', target table:')\n self.pvmx.logprowork(self.logpath, log_context)\n # use prettytable build a table save and print info list\n image_info_table = PrettyTable(\n [\"ImageNumber\", \"ImageID\", \"ImageTitle\", \"ImagePageName\"])\n for k, i in enumerate(repo_target_all_list[:require_img_nbr]):\n image_info_table.add_row([(k + 1), i[0], i[1], i[2][57:-4]]) \n # save with str format and no time word\n self.pvmx.logprowork(self.logpath, str(image_info_table), 'N')\n del repo_target_all_list # clear cache ", "title": "" }, { "docid": "da8e2e19cc61ba2c3a985724f03da856", "score": "0.48359364", "text": "def processData():\n\n\t#url of the page we want to scrape \n\turl = \"https://www.mohfw.gov.in/\"\n\n\t# Initiating the webdriver. \n\tdriver = webdriver.Chrome('./chromedriver') \n\tdriver.get(url) \n\t# To ensure that the page is loaded \n\ttime.sleep(5) \n\thtml = driver.page_source \n\n\t# Applying bs4 to html variable\n\tsoup = BeautifulSoup(html, \"html.parser\") \n\tall_divs = soup.find(class_=\"statetable table table-striped\") \n\tdriver.close() # closing the webdriver\n\n\tdata = all_divs.find_all(\"td\")\n\tstatelist = []\n\n\tfor row in data:\n\t\ta = row.get_text()\n\t\tstatelist.append(a)\n\n\t# Removing entries of Total cases and extra strings at end\n\tdel statelist[-11:]\n\n\tall_states = []\n\theaders = [\"Serial No.\", \"State\", \"Total Active Cases\", \"Change in Active Cases\", \"Total Cases\", \n\t\"Change in Total Cases\", \"Total Death\", \"Change in Total Death\"]\n\n\tfor i in range(35):\n\t\tstate = {}\n\t\tfor indx, entry in enumerate(headers):\n\t\t\ttmp = statelist[i*8 + indx].strip()\n\t\t\tif entry == \"State\":\n\t\t\t\tstate[entry] = tmp\n\t\t\telse:\n\t\t\t\tif tmp == \"\":\n\t\t\t\t\tstate[entry] = 0\n\t\t\t\telse:\n\t\t\t\t\tstate[entry] = int(tmp)\n\t\n\t\tall_states.append(state)\n\n\treturn all_states", "title": "" }, { "docid": "e939e099e384e1c801171f4151e84593", "score": "0.4834871", "text": "def main(url='http://sixty-north.com/c/t.txt'):\n words = fetch_words(url)\n print_items(words)", "title": "" }, { "docid": "ef45fef5e1f53030e0867481724c04f6", "score": "0.4834646", "text": "def main(url):\n items = fetch_words(url)\n print_items(items)", "title": "" }, { "docid": "3fe97062855047a520ba8447d2c57de9", "score": "0.48336115", "text": "def main():\n company_file = os.path.join(\"scrape\", \"missing_companies.txt\")\n with open(company_file, encoding=\"utf-8\") as f:\n companies = [_.replace(\"/review/\", \"\").replace(\"\\n\", \"\") for _ in f.readlines()]\n\n for company in companies:\n save_company(company)", "title": "" }, { "docid": "ada5cecad6ce0fe4459a60acd63473de", "score": "0.4829276", "text": "def get_cases_from_bulk(jurisdiction=\"Illinois\", data_format=\"json\"):\n utils.get_and_extract_from_bulk(jurisdiction=jurisdiction, data_format=data_format)", "title": "" }, { "docid": "ddc4beaf819f60b7d5843ef69a4b68e0", "score": "0.4826665", "text": "def scrape_all(url,limit=1000):\n soup = scrape_one(url)\n max_results = soup.select('#searchCount')[0].get_text().split(\" \")[-1].replace(\",\",\"\")\n results_start = 10\n url_list = []\n if int(max_results) > limit:\n max_results = limit\n else:\n pass\n while results_start < max_results:\n url_list.append(START+\"&start={}\".format(results_start))\n results_start +=10\n assign_bots(url_list)\n # for url in url_list:\n # scrape_one(url)", "title": "" }, { "docid": "ad4bcba361f1bfb7282c4cad2b2cf4b5", "score": "0.482475", "text": "def main(url):\n words = fetch_words(url)\n print_items(words)", "title": 
"" }, { "docid": "6f835a2633b65a66bd4c26c3849b4c45", "score": "0.48244542", "text": "def main():\n\tcompanies = [\"under armour\", \"apple\", \"go pro\", \"yahoo\"]\n\tnews_sources = [\"bloomberg\", \"seeking alpha\", \"market watch\"]\n\textra_params = [\"news\", \"stock\", \"investing\"]\n\tmax_depth = 1\n\tqueries = build_queries(companies, news_sources, extra_params) #build the google search\n\tweb_scraper(queries, max_depth) #get the raw data from the query to be passed into get_info()", "title": "" }, { "docid": "6b98ca3ea21ad9af33b0cb1d1225a1cc", "score": "0.48213533", "text": "def get_mncs_from_wikipedia(mccs):\n mnc_country_re = re.compile('^====\\s+(?P<country>.*?)(\\s+-\\s+(?P<cc>[^\\s]{2}))?\\s+====$')\n mnc_line_re = re.compile('^\\|\\s+(?P<mcc>[0-9]+)\\s+\\|\\|\\s+(?P<mnc>[0-9]+)' +\n '(\\s+\\|\\|\\s+(?P<brand>[^|]*)' +\n '(\\s+\\|\\|\\s+(?P<operator>[^|]*)' +\n '(\\s+\\|\\|\\s+(?P<status>[^|]*)' +\n '(\\s+\\|\\|\\s+(?P<bands>[^|]*)' + '))))')\n f = urllib.urlopen(mnc_list_url)\n country = cc = ''\n for line in f.readlines():\n line = line.strip()\n match = mnc_country_re.match(line)\n if match:\n country = match.group('country')\n cc = (match.group('cc') or '').lower()\n match = mnc_line_re.match(line)\n if match:\n update_mncs(mccs, match.group('mcc'), match.group('mnc'),\n country=country, cc=cc, brand=match.group('brand'),\n operator=match.group('operator'),\n status=match.group('status'),\n bands=match.group('bands'))", "title": "" }, { "docid": "f0c545d886f0d689a36441fe4cfdde60", "score": "0.48211825", "text": "def wikipedia_request_page_from_geocoding(flatitude, flongitude):\n\n places_list = []\n\n loc = \"{}|{}\".format(flatitude, flongitude)\n print(loc)\n\n parameters = {\n \"action\": \"query\",\n \"list\": \"geosearch\",\n \"gscoord\": loc,\n \"gsradius\": __RADIUS_DEFAULT__,\n \"gslimit\": __GS_LIMIT_DEFAULT__,\n \"format\": \"json\",\n }\n\n # API Request\n response = requests.get(url=__WIKIPEDiA_URL__, params=parameters)\n\n if response.status_code == 200:\n\n reply_dict = response.json()\n\n places_list = reply_dict['query']['geosearch']\n\n if places_list:\n\n for idx, place in enumerate(places_list):\n print(idx, \"W#{}\".format(place['pageid']), place['title'], place['dist'], \"m\")\n\n else:\n print('address not found')\n lg.warning('address not found')\n else:\n print('mediawiki reply error')\n lg.warning('mediawiki reply error')\n\n del response\n\n return places_list", "title": "" }, { "docid": "0cae8d6f375a6198c5c824d45566f1ca", "score": "0.48177102", "text": "def get_salaries(city_urls: List[str]) -> Iterable[Dict[str, Any]]:\n browser = Chrome()\n sel_title = 'td.bar-chart-bootstrap-range-label'\n for url in city_urls:\n browser.get(url)\n salary_range = browser.find_element_by_css_selector(sel_title).text\n min_salary, max_salary = parse_salary_range(salary_range)\n result = {'city': city,\n 'min_salary': min_salary,\n 'max_salary': max_salary}\n yield result\n browser.quit()", "title": "" }, { "docid": "b27458ddc4ca6d1c3c0b84bdbc6d6d42", "score": "0.4817586", "text": "def getCollegeWithURL():", "title": "" }, { "docid": "77d287620295b5e81dd440af27fcd901", "score": "0.48137093", "text": "def cities(self, count=5, **option):\n\n if 'coordinate' in option:\n if type(option['coordinate']) is str:\n lat, lon = option['coordinate'].split()\n else :\n lat, lon = option['coordinate']\n\n option['lat'] = lat\n option['lon'] = lon\n del option['coordinate']\n\n\n cities_url = self.baseurl + 'cities'\n response = requests.get(url=cities_url, params=option, 
headers=self.headers)\n\n if(self.debug):\n print(response.url)\n\n return response.json()", "title": "" }, { "docid": "22d99ece101bf46d5e6f61b493ae5ab0", "score": "0.4807431", "text": "def scrape_location_data(place, yr_1, m_1, d_1, h_1, min_1, yr_f, m_f, d_f, h_f, min_f):\n building_to_location_id_dic = {'john_jay': '125','jjs': '155', \"Butler_Library_4\":'132',\n \"Butler_Library_5\":'133', \"Butler_Library_6\":'134',\n \"Science_and_Engineering_Library\":'145', \"Uris\":'23',\n \"Butler_Library_2\":'130', \"Butler_Library_3\":'131'}\n t_1 = str(yr_1) + '-' + str(m_1) + '-' + str(d_1) + 'T' + str(h_1) + ':' + str(min_1)\n t_f = str(yr_f) + '-' + str(m_f) + '-' + str(d_f) + 'T' + str(h_f) + ':' + str(min_f)\n api_key = pd.read_pickle('../data/key.pkl').iloc[0][0]\n url = \"http://density.adicu.com/window/\" + t_1 + \"/\" + t_f + \"/group/\" + building_to_location_id_dic[place] + \"?auth_token=\" + api_key\n page = req.get(url) # get first page content\n content = json.loads(page.content)\n try:\n next_page = content['next_page'] # get url for next page\n except:\n next_page = None\n page_data = pd.DataFrame(content['data']).loc[:,['client_count','dump_time']] # get data for this page\n page_data.dump_time = pd.to_datetime(page_data.dump_time) # convert time on page to datetime\n location_df = page_data.set_index('dump_time') # set as index\n location_df.index = location_df.index.rename('t') # rename index\n location_df.columns = ['count'] # rename column\n page_count = 1\n location_df.sort_index(ascending = True, inplace=True)\n while next_page is not None: # do it until there is no url for a next page\n print 'scraping page ' + str(page_count) + ' for ' + place\n page_count += 1\n (next_df, next_page) = process_page_data(next_page)\n location_df = location_df.append(next_df) # add each page's data to the total\n return location_df", "title": "" } ]
a66c5d07085932ee8d1d62e0ee04be2d
New() > itkBayesianClassifierImageFilterVIUC2ULDD Create a new object of the class itkBayesianClassifierImageFilterVIUC2ULDD and set the input and the parameters if some named or nonnamed arguments are passed to that method. New() tries to assign all the non named parameters to the input of the new objects the first non named parameter in the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
[ { "docid": "7ca29369721e24c3f4d9c79cebb5103c", "score": "0.7953373", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC2ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" } ]
[ { "docid": "a4fda03e1794568db25e2955babdf8f2", "score": "0.7995358", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC2USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "ec85a42714f1f9c9af185b1ae1f33ae2", "score": "0.79571533", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC2UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "e6afbf1e2a63a114d1b175ea5e2de337", "score": "0.7955998", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL2UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "0488405fa1da7cd46d7807f4d97b167d", "score": "0.7945556", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS2UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "d0bc5473935213101ed4e86ee97b59b0", "score": "0.7858753", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL2USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "2f01aa4d8cacae600c78a6673bc06e1b", "score": "0.78035057", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS2USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "db95a84cb8add8626b909a5b09983b31", "score": "0.77982485", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS2ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "72f0797a5036dbcf3c4fac375fae5ae9", "score": "0.77321005", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC2USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "2846281cf625c38a94fea70b2dd67f3e", "score": "0.77295053", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL2UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "889612aad35e06cf9ec2210c1d85b6a0", "score": "0.7725783", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC2ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "8fb53bdb2390b9d5204e2ca06c0fae36", "score": "0.7723747", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC3UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "83d8bc656d7b61dcff48ce53ac5c3b83", "score": "0.7723221", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL2ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "60ae610d2c2cfa3bb2efe5178e99b2f4", "score": "0.7720666", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS2UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "1345976981482b2c1698211322520873", "score": "0.7716001", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC3USDD.__New_orig__()\n import itkTemplate\n 
itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "61235d1856095355ffd01614b3551529", "score": "0.769542", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC3ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "04070ab07f6fc317e727b152e18507c2", "score": "0.7671118", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS3UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "6c875e04da3c828b14009d4e4a682cf2", "score": "0.7670654", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL3UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "e302a1f42e0fe0a651bc1cd5e6e487c6", "score": "0.7670651", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC2UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "471168afd0d58f7d57de6cc3689986ee", "score": "0.76515263", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL2USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "1a1b21d39ea8e49c8eff2d8034c0b57f", "score": "0.7630464", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS2ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "f932e248743b6448d761559db0d31d6f", "score": "0.76264787", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS2USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "fce45268738bc407094b4ba0f72f362c", "score": "0.75661767", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL3USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "cbafb519f195625e314bc8108fe67341", "score": "0.75534755", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL2ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "328621693b9dc8a8aa1b70af21529bad", "score": "0.7546566", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID2UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "4718b1a4366e55e77b5f68cebebe044c", "score": "0.7544645", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS3USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "bb49ba0335af8d21c09ff1d958641843", "score": "0.7544257", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS3ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "0363e976bbd9d0fbb4e8cf4f9e4f241a", "score": "0.75077873", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL3ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "8444f12c49481fae61ab6f682ef4aedf", "score": "0.7447443", "text": "def New(*args, **kargs):\n obj = 
itkBayesianClassifierImageFilterVID2USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "bcae8524ea99928a3d98594222e320df", "score": "0.74453175", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF2UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "ef9cdb791ee2991c0e1148f8fb42adcb", "score": "0.7426203", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF2USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "6fbfdb26d44790850e6c810f0511a8a5", "score": "0.7423744", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC3ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "f87ad7dd179fb8b42bbfd1bcb3080a61", "score": "0.7420696", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC3USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "490e7a9488883a2584ef73823e58c638", "score": "0.74070853", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS3UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "d1bbe2645a2321246f85d2dd029dbbdc", "score": "0.7403823", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL3UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "f3ea98983025dbd5d3ba5ae97aa6ee54", "score": "0.7399764", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUC3UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "0c5fa1623464c2407f2bef6864b05e3d", "score": "0.7382866", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID2ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "5073539acf8a4fab92c1847199396d87", "score": "0.73824924", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID3UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "736c930c28bd94bc680b7ad7cdf4148f", "score": "0.7317098", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL3USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "f68939e41e9450af98764af8fa9b10dd", "score": "0.73149914", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS3ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "a7930d8cc82995d3f9d458a8bde8b5c7", "score": "0.7313974", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUS3USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "3a8a3463f32f743ee6e1d2468ec2077f", "score": "0.72727394", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIUL3ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "6226cfc55747a2f4420fc9964fa95b48", 
"score": "0.72699714", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID2UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "a9297812f218b47b918aaf52462fe762", "score": "0.7269808", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF2ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "915d00b4af497d0d61e2616275927403", "score": "0.7268236", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID3USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "1bf334228d573534bc4412392ff42633", "score": "0.7261335", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF3UCDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "028d281a909f24efc7c17fe5d8d9e7dd", "score": "0.7242595", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID2USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "e49e44a556bd95fe93fcf44b293778b1", "score": "0.7234716", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID3ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "01cffb87665e4ad18a91531dd59ce310", "score": "0.7226514", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID2ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "851deab0d77670ac774e7c9e255318bc", "score": "0.72264254", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF3USDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "b15a8c81415b40cadba273ba870a1ff1", "score": "0.71174324", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF3ULDD.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "371f5c61b49229fe45ba5f740ea7513b", "score": "0.71103114", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID3UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "e0700b62d507988b24e1f08d8f7bf20a", "score": "0.70821655", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF2USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "441ddb6d6570b2de6f57f2be024c8c11", "score": "0.7068355", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF2UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "6d82f0512176f9cf900f01e8924a27eb", "score": "0.70571226", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID3USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "8c307f8eefd32c9b4bf0965d0124deee", "score": "0.7050998", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVID3ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" 
}, { "docid": "fd3ddf18af54a9cfae931f875928a197", "score": "0.70092636", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF2ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "e8a3f0d97df06d811346410822dbdc27", "score": "0.68767196", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF3UCFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "3918971dfbb6989de6677bd8c13433f7", "score": "0.68662924", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF3USFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "20b08123ed495c9de143ddfd16db310a", "score": "0.67978776", "text": "def New(*args, **kargs):\n obj = itkBayesianClassifierImageFilterVIF3ULFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "f2815983413e9c4d1fc2b13dffd89c08", "score": "0.67574656", "text": "def New(*args, **kargs):\n obj = itkDiscreteGaussianDerivativeImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "e103d5f884315455fba1ea8fb8ff2aad", "score": "0.67153686", "text": "def New(*args, **kargs):\n obj = itkSigmoidImageFilterIUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "b00962aa2b0abb17d024b77b25bff65e", "score": "0.67058563", "text": "def New(*args, **kargs):\n obj = itkSigmoidImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "7fb907b695c02e637a0c085203a67296", "score": "0.6662912", "text": "def New(*args, **kargs):\n obj = itkNeighborhoodConnectedImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "437307cfaedcdba265c409d21d0a74cc", "score": "0.65683126", "text": "def New(*args, **kargs):\n obj = itkSigmoidImageFilterIUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "1105a5a8092982c15afd947551817cff", "score": "0.656809", "text": "def New(*args, **kargs):\n obj = itkAddConstantToImageFilterIUC2DIUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "d8dbb1217d1fc342d2bb4635f9db6a14", "score": "0.6531777", "text": "def New(*args, **kargs):\n obj = itkOtsuThresholdImageCalculatorIUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "b0f7357719ae9f6d95d45d40214807c0", "score": "0.65305144", "text": "def New(*args, **kargs):\n obj = itkNeighborhoodConnectedImageFilterIUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "26292fe385b8479b4f7486c3ebb86aaf", "score": "0.6511645", "text": "def New(*args, **kargs):\n obj = itkInvertIntensityImageFilterIUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "10c918694c3855682ab2422303ea9aea", "score": "0.6465982", "text": "def New(*args, **kargs):\n obj = itkImageFileWriterVIUC2.__New_orig__()\n import itkTemplate\n 
itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "aba3074dabd9e99daf222c253a618581", "score": "0.64597154", "text": "def New(*args, **kargs):\n obj = itkMultiplyByConstantImageFilterIUC2DIUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "3456a3bac492a84a89aa743913580000", "score": "0.64567536", "text": "def New(*args, **kargs):\n obj = itkValuedRegionalMinimaImageFilterIUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "eec523992fee83d3e34923f03a007a37", "score": "0.64536417", "text": "def New(*args, **kargs):\n obj = itkSigmoidImageFilterIUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "5a415eeddc2bbae2a2e0006694fb43c5", "score": "0.64461815", "text": "def New(*args, **kargs):\n obj = itkInvertIntensityImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "4e659d6a4cb678a7d34b621214beebc6", "score": "0.64370745", "text": "def New(*args, **kargs):\n obj = itkModulusImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "f546814a99c4e26d8f2fadccb145b516", "score": "0.6436917", "text": "def New(*args, **kargs):\n obj = itkAddConstantToImageFilterIUC2DIUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "5aae65cd80400faaee206d7073e6356e", "score": "0.6427618", "text": "def New(*args, **kargs):\n obj = itkBinaryStatisticsKeepNObjectsImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "2eaf800ea4ead83e3b4371306d7b7b87", "score": "0.64041996", "text": "def New(*args, **kargs):\n obj = itkAddConstantToImageFilterIUL2DIUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "daa2b581fc870922f2ae1ae39567deea", "score": "0.64032567", "text": "def New(*args, **kargs):\n obj = itkSigmoidImageFilterIUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "0239da138864f40b43df609161792832", "score": "0.6401049", "text": "def New(*args, **kargs):\n obj = itkDiscreteGaussianDerivativeImageFilterIUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "13fc88aac50c73e1f63f6925aaf9725f", "score": "0.6381585", "text": "def New(*args, **kargs):\n obj = itkLog10ImageFilterIUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "4baa39232b366e8823eda80768e30dcd", "score": "0.63585687", "text": "def New(*args, **kargs):\n obj = itkValuedRegionalMinimaImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "d733821e876ad829642551c3e836b82e", "score": "0.6355271", "text": "def New(*args, **kargs):\n obj = itkLog10ImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "89ed9582d114a3bc5f795c862bb6b4f8", "score": "0.63279814", "text": "def New(*args, **kargs):\n obj = 
itkValuedRegionalMinimaImageFilterIUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "827c368e4fd16bebe50eaed7108df5a8", "score": "0.6324634", "text": "def New(*args, **kargs):\n obj = itkOtsuThresholdImageCalculatorIUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "33f1311620cca6d683874351257b8fbf", "score": "0.6314792", "text": "def New(*args, **kargs):\n obj = itkLogImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "9de0322f37545896012aef2efaef566f", "score": "0.6309353", "text": "def New(*args, **kargs):\n obj = itkImageUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "7aac6b085929beb24041726e0d3153a1", "score": "0.6301018", "text": "def New(*args, **kargs):\n obj = itkMultiplyByConstantImageFilterIUC2DIUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "563f67348e314cd566f1afd620fa8de9", "score": "0.6299844", "text": "def New(*args, **kargs):\n obj = itkImageFileWriterVIUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "a866934ccc422f826e84133835d5d71c", "score": "0.6292229", "text": "def New(*args, **kargs):\n obj = itkNeighborhoodConnectedImageFilterIUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "6aa00f34cd9d2259262fa7854351cff9", "score": "0.6290564", "text": "def New(*args, **kargs):\n obj = itkSigmoidImageFilterIUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "0ed6dfb3f31bf4d87d6978c933b107a8", "score": "0.6279602", "text": "def New(*args, **kargs):\n obj = itkAddConstantToImageFilterIUL2DIUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "852a3fcfd751b76570795e0752b8d4fa", "score": "0.6266658", "text": "def New(*args, **kargs):\n obj = itkMultiplyByConstantImageFilterIUL2DIUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "5e99b73aed23a6d0041456cf2efbb788", "score": "0.62639564", "text": "def New(*args, **kargs):\n obj = itkSigmoidImageFilterIUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "009d575e82aef1e4b2420c257e05b0a2", "score": "0.62598175", "text": "def New(*args, **kargs):\n obj = itkSimilarityIndexImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "c71756bba8150d20fad9979441ed47c6", "score": "0.6254724", "text": "def New(*args, **kargs):\n obj = itkKernelImageFilterIUC2IUC2SE2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "a1f0d0be1801da422fade8ade5bb63f0", "score": "0.62414825", "text": "def New(*args, **kargs):\n obj = itkBinaryStatisticsKeepNObjectsImageFilterIUC2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "7126c015ff70685e6032458151844bfa", "score": "0.62385285", "text": "def New(*args, **kargs):\n obj = 
itkOtsuThresholdImageCalculatorIUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "c57d6126e481b0b305cb2cc54d77676b", "score": "0.6213577", "text": "def New(*args, **kargs):\n obj = itkESMDemonsRegistrationFunctionIUC2IUC2IVF22.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "4c0a85cdd3e73bf181b6ff08b7fc4a49", "score": "0.618922", "text": "def New(*args, **kargs):\n obj = itkComposeRGBImageFilterIUS2IRGBUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "edc7aace7468ae9d35092040805dd6aa", "score": "0.61892104", "text": "def New(*args, **kargs):\n obj = itkLog10ImageFilterIUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" } ]
1ae17b54df4db987d1f0422e0571a527
Update existing records to set live_attendance with timestamps keys.
[ { "docid": "bee43b2fcf1520aea12722da671c22df", "score": "0.7211444", "text": "def migrate_live_attendance(apps, schema_editor):\n try:\n livesessions = apps.get_model(\"core\", \"LiveSession\")\n except LookupError:\n return\n livesessions_to_migrate = livesessions.objects.filter(live_attendance__isnull=False)\n for livesession in livesessions_to_migrate.iterator():\n for key in list(livesession.live_attendance):\n try:\n datetime.fromtimestamp(int(key))\n except ValueError as error:\n try:\n new_key = int(key) // 1000\n datetime.fromtimestamp(new_key)\n livesession.live_attendance[\n str(new_key)\n ] = livesession.live_attendance.pop(key)\n except ValueError as error: # shouldn't happen\n livesession.live_attendance.pop(key)\n\n livesession.save()", "title": "" } ]
[ { "docid": "14d72cecd8716d4bdc3c82e4f041fb1e", "score": "0.5741195", "text": "def update_timestamp(self, pit_entry: PendingInterestTableEntry):", "title": "" }, { "docid": "df41953ba4f601f9b8fe31a11bf1886a", "score": "0.5737282", "text": "def _update_something_ts(data, key, value):\n if data.get(key) is not None:\n # Once verified_ts etc. is set, it should not be modified.\n raise UserDBValueError(\"Refusing to modify {!r} of element\".format(key))\n if value is None:\n return\n if value is True:\n value = datetime.datetime.utcnow()\n data[key] = value", "title": "" }, { "docid": "cb78f065e6988c67d4c0185029c300d1", "score": "0.5672488", "text": "def updateAccountTime(self, timeStamp):", "title": "" }, { "docid": "41194cc974ae1da65ef1027987b17066", "score": "0.5657332", "text": "def update_record_timestamp(self, rec):\n rec = self.record_manager.merge_from_dict({\n 'id':rec.id,\n 'import_date':datetime.datetime.utcnow()\n })\n msg = to_str(\"Updated record #{}\".format(rec.id))\n logger.debug(msg)\n return rec", "title": "" }, { "docid": "6575816a3bc0d3c4a100616267e4cbae", "score": "0.56264716", "text": "def update_timetrack(cls, data, master_data):\n staff = {}\n staff['s_id'] = data['from_employ']\n staff['s_role'] = data['from_employ_role']\n staff['status'] = data['status']\n staff['submitted_at'] = data['status_time']\n staff['note'] = data['note']\n staff['c_id'] = master_data['c_id']\n staff['c_name'] = master_data['c_name']\n staff['c_type'] = master_data['c_type']\n staff['case'] = master_data['case']\n staff['time_taken'] = data['time_taken']\n app.App.mongodb.db.timetrack.insert_one(staff)", "title": "" }, { "docid": "ea7dd3d03ab23c54544b33caf77e6d75", "score": "0.5538063", "text": "def update_records_interval(now):\n _update_route53(\n aws_access_key_id, aws_secret_access_key, zone, domain, records, ttl\n )", "title": "" }, { "docid": "5dda89664246ca6c5478f82efa50fabd", "score": "0.55330163", "text": "def save(self, *args, **kwargs):\n self.incident.updated = timezone.now()\n self.incident.save()\n super(IncidentUpdate, self).save(*args, **kwargs)", "title": "" }, { "docid": "29ec6adcaf5779e836dcc70487ba775d", "score": "0.5457963", "text": "def _update_timestamps(task_instance, status=None):\n # type: (TaskInstance, Optional[Text]) -> None\n # Ensure the datetime object is naive.\n # Refer to \"[#dt2]\" in docstring for\n # :py:class:`bson.son.SON`\n now = datetime.utcnow()\n\n timestamps =\\\n task_instance.metadata.setdefault(\n TaskInstance.META_TIMESTAMPS,\n {},\n )\n\n timestamps.setdefault(TaskInstance.TIMESTAMP_CREATED, now)\n timestamps[TaskInstance.TIMESTAMP_LAST_MODIFIED] = now\n\n if status:\n timestamps[status] = now", "title": "" }, { "docid": "090cd569515f464d58c405062b480993", "score": "0.54579526", "text": "def update_event_attendance_from_tasks(event):\n learners = event.task_set.filter(role__name='learner').count()\n Event.objects \\\n .filter(pk=event.pk) \\\n .filter(Q(attendance__lt=learners) | Q(attendance__isnull=True)) \\\n .update(attendance=learners)", "title": "" }, { "docid": "c9eaf172077c1d465e6581547622f6b4", "score": "0.5219134", "text": "def update_observation(self, update_time=datetime.datetime.utcnow()):", "title": "" }, { "docid": "9d27c1dad2eb82570b1a8b44d94e44df", "score": "0.5199305", "text": "def update_records(self, records):\n raise NotImplementedError('Method must be implemented by child class')", "title": "" }, { "docid": "17f469450e193f1f70946f2f7dcf0eef", "score": "0.51732844", "text": "def save(self, *args, **kwargs):\n 
self.updated_at = timezone.now()\n return super(Retailer, self).save(*args, **kwargs)", "title": "" }, { "docid": "fd2483d8079d1bc79433806e68713e14", "score": "0.5159574", "text": "def save(self, update_attr=True, *args, **kwargs):\n if self.pk is None:\n # Create bsp_uid\n pass\n\n if self.pk:\n self.modified_on = timezone.now()\n\n # --- Validate timings ---\n list_timings = []\n for day_tmng in self.timings:\n if day_tmng.closed:\n day_tmng.start_time = None\n day_tmng.end_time = None\n\n if day_tmng.full_day:\n self.start_time = 0000\n self.end_time = 2400\n\n list_timings.append(day_tmng)\n self.timings = list_timings\n\n if update_attr:\n list_attr = []\n for key, val in self.attributes.iteritems():\n if not isinstance(val, dict):\n list_attr.append(\n BusinessServicePoint.Attribute(\n name = key,\n value = val\n )\n )\n\n for key, val in self.custom_attributes.iteritems():\n if not isinstance(val, dict):\n list_attr.append(\n BusinessServicePoint.Attribute(\n name = key,\n value = val\n )\n )\n\n self.list_attributes = list_attr\n\n return super(BusinessServicePoint, self).save(*args, **kwargs)", "title": "" }, { "docid": "98026c987d54cce7ba003ca06bb3880d", "score": "0.5115177", "text": "def set_times_safe(self,update_dt):\n if self.update_dt is None:\n self.update_dt = update_dt\n if self.creation_dt is None:\n self.creation_dt = update_dt", "title": "" }, { "docid": "605b81f45fd35044506eb2623caf9f88", "score": "0.5112427", "text": "def update(self,dt):\n pass", "title": "" }, { "docid": "f9f557457ae4faf14451d6c547c4bb42", "score": "0.5068059", "text": "def set_updated(cls, usr):\n\n usr.updated = time.strftime('%Y-%m-%d %H:%M:%S')\n if not usr.created:\n usr.created = usr.updated\n usr.createdby = usr.updatedby", "title": "" }, { "docid": "477736a9dc1e8b128720bc3ba2ace56e", "score": "0.506014", "text": "def update_records_service(call: ServiceCall) -> None:\n _update_route53(\n aws_access_key_id, aws_secret_access_key, zone, domain, records, ttl\n )", "title": "" }, { "docid": "f0a6ac60479363ed38e8415d67f64704", "score": "0.5057515", "text": "def update(self):\n existing_table = self.metadata.point_table\n with self.staging_table as s_table:\n staging = s_table.table\n delete_absent_hashes(staging.name, existing_table.name)\n with Update(staging, self.dataset, existing_table) as new_records:\n new_records.insert()\n update_meta(self.metadata, existing_table)", "title": "" }, { "docid": "2c8151d047703a1926a21a71aa6d8bb7", "score": "0.5023003", "text": "def update(self,records,**kw):\r\n # ignore unknown fields\r\n kw = dict([(k,v) for (k,v) in kw.items() if k in self.fields])\r\n if isinstance(records,dict):\r\n records = [ records ]\r\n # update indices\r\n for indx in set(self.indices.keys()) & set (kw.keys()):\r\n for record in records:\r\n if record[indx] == kw[indx]:\r\n continue\r\n _id = record[\"__id__\"]\r\n # remove id for the old value\r\n old_pos = bisect.bisect(self.indices[indx][record[indx]],_id)-1\r\n del self.indices[indx][record[indx]][old_pos]\r\n if not self.indices[indx][record[indx]]:\r\n del self.indices[indx][record[indx]]\r\n # insert new value\r\n bisect.insort(self.indices[indx].setdefault(kw[indx],[]),_id)\r\n for record in records:\r\n # update record values\r\n record.update(kw)\r\n # increment version number\r\n record[\"__version__\"] += 1", "title": "" }, { "docid": "c46e9751385adf01a987e057480703de", "score": "0.5018219", "text": "def updateTimeStamp(self, timeStamp):\n self.TimeStamp[1] = timeStamp", "title": "" }, { "docid": 
"c4fe03cb16a539ddabc7f58718ec4f9e", "score": "0.49975818", "text": "def set_ts(self, ts):\n sql = u\"UPDATE %s SET ts = %s WHERE id = %s;\" % (\n self._meta.db_table, ts, self.pk\n )\n cursor = connection.cursor()\n cursor.execute(sql)\n transaction.commit_unless_managed()", "title": "" }, { "docid": "717acafc2c3fd6b07625c2fb0e5a25f5", "score": "0.49752098", "text": "def mark_all_as_inactive(self):\n _keys = self._keys\n _updated = []\n for k in _keys:\n k.inactive_since = time.time()\n _updated.append(k)\n self._keys = _updated", "title": "" }, { "docid": "b44a476a5fa2df6235b58f7b06789f03", "score": "0.49521095", "text": "def update(self, date, new_data):\n raise NotImplementedError(\"DB Should implement update method\")", "title": "" }, { "docid": "afef1294e0ce12ab0f0d456a670e3544", "score": "0.49319556", "text": "def save(self, update_modified=True, *args, **kwargs):\n if self.significant_fields_updated:\n if update_modified:\n self.modified = now()\n super(TimeTrackable, self).save(*args, **kwargs)\n self._update_field_state()", "title": "" }, { "docid": "a1d5d88ddb71831c14661f5392ffd663", "score": "0.49280113", "text": "def update_user_activity(user):\n OnlineUserActivity.objects.update_or_create(user=user, defaults={'last_activity': timezone.now()})", "title": "" }, { "docid": "8113140422cf3b29120bccf807110cd9", "score": "0.48982224", "text": "def update_appointment(access_token, appointments):\n require_waiting_time_status = [\"Checked In\", \"In Room\", \"Complete\", \"In Session\"]\n for a in appointments:\n patient_id = a.get('patient')\n scheduled_time = a.get('scheduled_time')\n appointment_id = a.get('id')\n appointment_saved = Appointment.objects.filter(appointment_id=appointment_id)\n if not appointment_saved:\n patient_info = Doctor.get_patient(access_token, patient_id)\n print \"Get new appointment!\"\n new_appointment = Appointment(appointment_id=str(appointment_id),\n duration=a.get('duration'),\n doctor_id=str(a.get('doctor')),\n scheduled_time=scheduled_time,\n patient_id=patient_id,\n patient_SSN=patient_info['social_security_number'],\n patient_first_name=patient_info['first_name'],\n patient_last_name=patient_info['last_name']\n )\n new_appointment.save()\n elif a.get('status') == \"Arrived\":\n try:\n a['arrived_time'] = Appointment.objects.get(appointment_id=appointment_id).arrived_time\n except ObjectDoesNotExist:\n a['arrived_time'] = None\n elif a.get('status') in require_waiting_time_status:\n try:\n a['waiting_time'] = int(Appointment.objects.get(appointment_id=appointment_id).waiting_time) / 60\n except TypeError as e:\n print \"type error, waiting time is %s\" % Appointment.objects.get(appointment_id=appointment_id).waiting_time\n a['waiting_time'] = None\n else:\n # Todo handle other status\n pass\n # print \"waiting time is \", Appointment.objects.get(appointment_id=69403988).waiting_time\n return appointments", "title": "" }, { "docid": "1960e6de986707b0655e06e93f58777e", "score": "0.4883799", "text": "def update(self):\n self.getDbRecord().update()", "title": "" }, { "docid": "de170ee03cbde692bde2229c14e05e01", "score": "0.4883585", "text": "def update_many(self, cursor, table, prim_key_columns, values, echo=False):\n # First we create a temporal table using the schema from the original one\n if echo:\n print('creating the temporal table')\n temp_table = self._create_temp_table_from_existent(\n cursor, table, columns=prim_key_columns, schema_only=True\n )\n # Then, we populate the temporal table with the values to update\n if echo:\n print('Inserting 
the values to update in the temporal table')\n self.insert_many(cursor, temp_table, prim_key_columns, values)\n # Finally, we update the table with the new values, using a join with the\n # temporal one, for being more efficient\n temp_alias = 'temp'\n prim_key = prim_key_columns[0]\n columns_to_ud = prim_key_columns[1:]\n set_str = ', '.join(map(lambda c: \"{} = {}.{}\".format(c, temp_alias, c),\n columns_to_ud))\n join_filter = '{}.{} = {}.{}'.format(table, prim_key, temp_alias, prim_key)\n ud_query = \"\"\"\n UPDATE {}\n SET {}\n FROM {} {} WHERE {}\n \"\"\".format(table, set_str, temp_table, temp_alias, join_filter)\n if echo:\n print('Updating the table with the specified values')\n cursor.execute(ud_query)", "title": "" }, { "docid": "c0541d3782d79c1d957003890b36c388", "score": "0.48816288", "text": "def update(self, raw):\n self.time = datetime.datetime.fromisoformat(raw.pop('time'))\n self.time = self.time.astimezone(datetime.timezone.utc)\n raw_end_time = raw.pop('end_time', None)\n if raw_end_time:\n self.end_time = datetime.datetime.fromisoformat(raw_end_time)\n self.end_time = self.end_time.astimezone(datetime.timezone.utc)\n # TODO use set?\n for field in ('name', 'description', 'location', 'lat', 'lng', 'address', 'image',\n 'open', 'capacity', 'transitive_invites', 'venmo', 'alcohol'):\n if field in raw:\n setattr(self, field, raw[field])", "title": "" }, { "docid": "c8368210c501b017fa21fb400c31dc2c", "score": "0.48573732", "text": "def updateTimeStamp(self, ts):\r\n self.timeStampGA = ts", "title": "" }, { "docid": "97ff0320ab022e31bbab0534f2c72c11", "score": "0.48559353", "text": "def save(self):\n from models import storage\n self.updated_at = datetime.today()\n storage.save()", "title": "" }, { "docid": "e7b47a48579a58695925795d98ebc9ac", "score": "0.48400417", "text": "def setAbsoluteTimeStamps(self, timestamps):\n\t\tself.absoluteTimestamps = timestamps", "title": "" }, { "docid": "da888cf30620f74ff1da3affe091e3dc", "score": "0.48381987", "text": "def _update_dynamic_data(self):\r\n for uuid, submission_time in self.uuid_to_submission_times.iteritems():\r\n xform_instances.update(\r\n {UUID: uuid}, {'$set': {SUBMISSION_TIME: submission_time}})", "title": "" }, { "docid": "fc60a6c6839bda98056a9a9b8132bcc8", "score": "0.48362747", "text": "def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_audit(self.uuid, updates)\n\n self.obj_reset_changes()", "title": "" }, { "docid": "d0a61cd2cb342614aa85d4d4ed50c6dd", "score": "0.48295665", "text": "def save(self, *args: Any, **kwargs: Any) -> None:\n super().save(*args, **kwargs)\n\n # If there are no attributes to update, return early.\n if not self._unsaved_changes:\n return\n\n # Upsert the record attributes.\n RecordAttribute = cast(Any, self._flexible_model_for(BaseRecordAttribute))\n attribute_map = {a.field.name: a for a in self.attributes.all()}\n\n value_fields: Set[str] = set()\n update: List[BaseRecordAttribute] = []\n insert: List[BaseRecordAttribute] = []\n for field_name, value in self._unsaved_changes.items():\n # Find the existing attribute object or create a new one.\n attribute = attribute_map.get(field_name) or RecordAttribute(\n record=self,\n field=self._fields[field_name],\n )\n\n # Set the value for the attribute.\n attribute.value = value\n\n # Add the attribute object to the appropriate operation set.\n operation = update if attribute.pk else insert\n operation.append(attribute)\n\n # If we're updating, track which value column should be updated.\n if operation is update:\n 
value_fields.add(attribute.value_field_name)\n\n # Perform bulk updates and inserts as necessary.\n if update:\n RecordAttribute._default_manager.bulk_update(\n update, fields=(*value_fields,)\n )\n if insert:\n RecordAttribute._default_manager.bulk_create(insert)\n\n # Invalidate the data cache.\n self._invalidate_caches(\"_data\")", "title": "" }, { "docid": "2fec19bbea95e4da35df8b8e82de99ad", "score": "0.482816", "text": "def attendance(bot, update):\n # Get chatID and user details based on chatID\n chatID = update.message.chat_id\n user_info = get_user_info(chatID)\n Student_ID = user_info['PID']\n password = user_info['password']\n\n if not rate_limited(bot, chatID, \"attendance\"):\n bot.send_chat_action(chat_id=chatID, action='upload_photo')\n scrape_attendance(Student_ID, password, chatID)", "title": "" }, { "docid": "ae8f194801a9458c48328d5521d8cc24", "score": "0.48265362", "text": "def update_db_record(db_conn, record_id):\n cur = db_conn.cursor()\n \n sql = f'UPDATE startrek SET tweet = 1 WHERE rowid == {record_id}'\n cur.execute(sql)\n \n # Commit the changes\n db_conn.commit()", "title": "" }, { "docid": "3be1f5e5ee22a78d1d082e310a3aa583", "score": "0.48245746", "text": "def _auto_fill_checkpoint_metrics(self, result: dict) -> dict:\n current_datetime = datetime.now()\n\n auto_filled_metrics = {\n TIMESTAMP: int(time.mktime(current_datetime.timetuple()))\n }\n result = result.copy()\n result.update(auto_filled_metrics)\n return result", "title": "" }, { "docid": "b658ec201836482317903dfc5704339a", "score": "0.48220554", "text": "def refresh():\n DB.drop_all()\n DB.create_all()\n utc_value = aqmeasurements(city='Los Angeles', parameter='pm25')\n for x in utc_value:\n db_record = Record(datetime=x[0], value=x[1])\n DB.session.add(db_record)\n\n DB.session.commit()\n return 'Data refreshed!'", "title": "" }, { "docid": "5d24eecdec94b847569e21dd8474a68f", "score": "0.48162192", "text": "def update(self, data, overwrite=Overwrite.PRIMARY, skip_duplicates=False):\n data = self._format_data(data)\n added_entries = []\n backups = []\n to_delete = {col: set() for col in self._columns[: self._key_columns]}\n with self._writable():\n for row in data:\n self._add_entry(\n row, added_entries, backups, to_delete, overwrite, skip_duplicates\n )\n for col, keys in to_delete.items():\n for key in keys:\n del self._key_dicts[col][key]", "title": "" }, { "docid": "acc4226ebf953482f8531b5eeebf8abc", "score": "0.4806309", "text": "def update_client_record(self, dict):", "title": "" }, { "docid": "2d587904f271b69929901e4917b2c524", "score": "0.48030695", "text": "def refresh():\n DB.drop_all()\n DB.create_all()\n results = dt_values()\n for i, val in enumerate(results):\n record = Record(id=i, datetime=val[0],\n value=val[1])\n DB.session.add(record)\n DB.session.commit()\n return 'Data refreshed!'", "title": "" }, { "docid": "2c8c3fc1a88ba8ba8ab281e856b037e1", "score": "0.4797856", "text": "def _update_onu_status(self, onus, timestamp):\n for onu_id, onu_status in onus.iteritems():\n if onu_id in self._onu_by_id:\n onu = self._onu_by_id[onu_id]\n onu.timestamp = timestamp\n onu.rssi = onu_status.rssi\n onu.equalization_delay = onu_status.equalization_delay\n onu.equalization_delay = onu_status.equalization_delay\n onu.fiber_length = onu_status.fiber_length\n onu.password = onu_status.reported_password", "title": "" }, { "docid": "0d7398d1b21639d6d8633b9ca627c483", "score": "0.4788021", "text": "def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):\n with self.get() as conn:\n 
old_status = self._is_deleted(conn)\n conn.execute('''\n UPDATE %s_stat SET created_at=MIN(?, created_at),\n put_timestamp=MAX(?, put_timestamp),\n delete_timestamp=MAX(?, delete_timestamp)\n ''' % self.db_type, (created_at, put_timestamp, delete_timestamp))\n if old_status != self._is_deleted(conn):\n timestamp = Timestamp.now()\n self._update_status_changed_at(conn, timestamp.internal)\n\n conn.commit()", "title": "" }, { "docid": "c5c05218e6711e5c53b6b33177df2ba3", "score": "0.4786019", "text": "def updateStamp(self, stamp):\n self._dictSet('stamp', stamp)", "title": "" }, { "docid": "618b9f0714e6ea933160a2f7072aa98f", "score": "0.47855195", "text": "def update(self, **kwargs):\n self.last_modified_at = dt.datetime.utcnow()\n super().update(**kwargs)", "title": "" }, { "docid": "53980b49959c64e7ee0c248c5107ff9a", "score": "0.47740236", "text": "def _set_timestamp(self, value):\n self.dt = self.dt.fromtimestamp(value)", "title": "" }, { "docid": "8f6d03ee9582064aa382cf1f0bda3ca7", "score": "0.4771267", "text": "def handle_tad_post_save(sender, instance, **kwargs):\n _update_user_time_account_by_id(instance.user_to_update)", "title": "" }, { "docid": "3100dc5f8f6f8cab5893893af8c01088", "score": "0.47598955", "text": "def set(self, start_time, end_time, weekdays=None, instance_ids=None):\n if start_time >= end_time:\n raise ValueError(\n 'Start time can\\'t be greater than or equal to end time'\n )\n start_time = start_time.isoformat()\n end_time = end_time.isoformat()\n updated = '{0}-{1}'.format(start_time, end_time)\n # integer representation of `weekdays`\n if weekdays:\n weekdays = [self.weekdays.index(weekday) for weekday in weekdays]\n else:\n # `weekdays` not set, assign times for entire week\n weekdays = range(len(self.weekdays))\n instances = self.conn.get_only_instances(instance_ids=instance_ids)\n for instance in instances:\n if instance.state == 'terminated':\n # Do not tag a terminated instance\n continue\n times = instance.tags.get('times')\n if times is None:\n # No `times` tag - set defaults\n times = ';'.join([str(None)] * 7)\n tags = {\n 'times': times,\n 'tz': self.tz,\n }\n try:\n instance.add_tags(tags)\n except self.conn.ResponseError as e:\n raise e\n times = times.split(';')\n if self.iso:\n # Need to take into consideration that the user may pass the\n # `iso` argument as True when instantiating the `Timely` class\n times.insert(0, None)\n for weekday in weekdays:\n try:\n # `weekday` already exists - perform in-place operation\n times[weekday] = updated\n except IndexError:\n # If the actual weekday index does not exist, create\n # default time tags set to `None` until the desired index\n # is met\n actual = len(times)\n desired = weekday\n while actual < desired:\n times.append(None)\n actual += 1\n # Append the `updated` weekday\n times.append(updated)\n finally:\n # If the length of the `times` list object is less than 7,\n # then extend the list object to include the remaining\n # times\n if len(times) < 7:\n diff = 7 - len(times)\n times.extend([None] * diff)\n if self.iso:\n # Remove first element `None` from `times` object\n times.pop(0)\n times = ';'.join([str(time) for time in times])\n tags = {\n 'times': times,\n 'tz': self.tz,\n }\n try:\n # Overwrite existing `times` tag with new value\n instance.add_tags(tags)\n except self.conn.ResponseError as e:\n raise e", "title": "" }, { "docid": "456a41b5739b43744c0891ad18b99861", "score": "0.47478387", "text": "def _update(self):\n try:\n forecasts = self._msw.get_future()\n self.currently = 
forecasts.data[0]\n for forecast in forecasts.data[:8]:\n hour = dt_util.utc_from_timestamp(forecast.localTimestamp).strftime(\n \"%-I%p\"\n )\n self.hourly[hour] = forecast\n except ConnectionError:\n _LOGGER.error(\"Unable to retrieve data from Magicseaweed\")", "title": "" }, { "docid": "2d7e4d45f7521476d6d58fd4459f5636", "score": "0.4733308", "text": "def mutateRowsTs(self, tableName, rowBatches, timestamp):\r\n pass", "title": "" }, { "docid": "abe304b6c78f0450aa0fcc1bdf077f8a", "score": "0.47250202", "text": "def update_timestamp( self ):\n self.ctl_fields[ '005' ] = datetime.now().strftime(\n '%Y%m%d%H%M%S'\n ) + '.0'", "title": "" }, { "docid": "8384ddf6889b4698d5d37629fe372db2", "score": "0.47206557", "text": "def update_data_with_timestamp(instance_id, database_id):\n spanner_client = spanner.Client()\n instance = spanner_client.instance(instance_id)\n\n database = instance.database(database_id)\n\n with database.batch() as batch:\n batch.update(\n table=\"Albums\",\n columns=(\"SingerId\", \"AlbumId\", \"MarketingBudget\", \"LastUpdateTime\"),\n values=[\n (1, 1, 1000000, spanner.COMMIT_TIMESTAMP),\n (2, 2, 750000, spanner.COMMIT_TIMESTAMP),\n ],\n )\n\n print(\"Updated data.\")", "title": "" }, { "docid": "8384ddf6889b4698d5d37629fe372db2", "score": "0.47206557", "text": "def update_data_with_timestamp(instance_id, database_id):\n spanner_client = spanner.Client()\n instance = spanner_client.instance(instance_id)\n\n database = instance.database(database_id)\n\n with database.batch() as batch:\n batch.update(\n table=\"Albums\",\n columns=(\"SingerId\", \"AlbumId\", \"MarketingBudget\", \"LastUpdateTime\"),\n values=[\n (1, 1, 1000000, spanner.COMMIT_TIMESTAMP),\n (2, 2, 750000, spanner.COMMIT_TIMESTAMP),\n ],\n )\n\n print(\"Updated data.\")", "title": "" }, { "docid": "22361b813dd52b59ba7d39fdd9781d8c", "score": "0.4713031", "text": "def update_put_timestamp(self, timestamp):\n with self.get() as conn:\n conn.execute(\n 'UPDATE %s_stat SET put_timestamp = ?'\n ' WHERE put_timestamp < ?' 
% self.db_type,\n (timestamp, timestamp))\n conn.commit()", "title": "" }, { "docid": "8929901f1b6ff9dbaa9d9154d4e7af70", "score": "0.47092578", "text": "def set_from_dict(self, **kwargs):\n for (k, v) in kwargs.items():\n if k in ('created_at', 'updated_at'):\n self.__dict__[k] = datetime.datetime\\\n .strptime(v,\n \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n self.__dict__[k] = v", "title": "" }, { "docid": "d2738a0e9ac08bb6d432d8f8d246da1b", "score": "0.47079244", "text": "def update_records(records_to_update):\n total_length = len(records_to_update)\n plog(\"Updating %s records\" % total_length)\n for i, record_to_update in enumerate(records_to_update):\n if i % PRINT_COEF == 0:\n plog(\"%d / %d\" % (i, total_length))\n\n # The object from the db to update\n obj_to_update = record_to_update['obj_to_update']\n # A dict containing values to update in the object\n update_values = record_to_update['update_values']\n\n # Set all values to update to the object\n for key, val in update_values.items():\n setattr(obj_to_update, key, val)\n obj_to_update.save()\n plog(\"Done updating records\")", "title": "" }, { "docid": "7e5f2a9f3c2b7cbfbfc65e4450d86bb4", "score": "0.47031114", "text": "def sync_to_database(self): \n\n cursor = self.connection.cursor()\n cursor.execute(\"TRUNCATE TABLE \" + self.table_name);\n for t, v in self.timeline.iteritems():\n v.save_to_database(t, cursor)\n self.connection.commit();", "title": "" }, { "docid": "453895aef0e232d809ef0e188c43e623", "score": "0.4695303", "text": "def update_records(redis, localFile=None):\n if localFile == None:\n siteListXML = xml_from_url(SITE_LIST_URL)\n else:\n siteListXML = localFile\n\n locations = site_list_urls(siteListXML)\n db_add_locations(redis, locations)\n\n for loc, urlInfo in locations.items():\n url = location_url(urlInfo=urlInfo)\n xml = xml_from_url(url)\n yesterday = yesterday_conditions(xml)\n\n if yesterday is not None:\n # yesterdays values are not provided for all areas\n insert_yesterday(redis, loc, yesterday)", "title": "" }, { "docid": "fcd3c3a8c3d3fefefb8b8b30c85aa4da", "score": "0.46914068", "text": "def updated_at(self, value):\r\n self.logger.warn(\"Setting values on updated_at will NOT update the remote Canvas instance.\")\r\n self._updated_at = value", "title": "" }, { "docid": "fcd3c3a8c3d3fefefb8b8b30c85aa4da", "score": "0.46914068", "text": "def updated_at(self, value):\r\n self.logger.warn(\"Setting values on updated_at will NOT update the remote Canvas instance.\")\r\n self._updated_at = value", "title": "" }, { "docid": "823676b5022b1a9b5cd69e1b534b57d4", "score": "0.46782014", "text": "def update(self, instance, validated_data):\n situation = self.context['situation']\n instance.hours = validated_data.get('hours', instance.hours)\n instance.shifts = validated_data.get('shifts', instance.shifts)\n instance.total = validated_data.get('total', instance.total)\n instance.taken_shifts = validated_data.get('taken_shifts', instance.taken_shifts)\n instance.given_shifts = validated_data.get('given_shifts', instance.given_shifts)\n if situation == 'giver':\n instance.given_shifts = instance.given_shifts + 1\n if situation == 'taker':\n instance.taken_shifts = instance.taken_shifts + 1\n instance.save()\n return instance", "title": "" }, { "docid": "984d5b206ec76340f1af87afbb3c3235", "score": "0.46728712", "text": "def update(self, dt):\n self.perlin_asteroid.update(dt)", "title": "" }, { "docid": "51c90992ddb27fa5766791116949ce1a", "score": "0.46722987", "text": "def async_update_records(self, zc: 'Zeroconf', now: 
float, records: List[RecordUpdate]) -> None:\r\n for record in records:\r\n self.update_record(zc, now, record[0])", "title": "" }, { "docid": "d50cd0ba7695cee77c975f51ef1a175f", "score": "0.46557885", "text": "def _set_timestamps(source_stat, dest):\n os.utime(dest, ns=(source_stat.st_atime_ns, source_stat.st_mtime_ns))", "title": "" }, { "docid": "137296fdf47379d674736b9ae3a79d9c", "score": "0.46539056", "text": "def set_existing_data(self, existing_records):\n self.existing_records = existing_records\n # the same without ids\n self.existing_records_without_id = {\n table: [{k: str(v) for k, v in nt.items() if k != 'id'} for nt in existing]\n for table, existing in existing_records.items()\n }", "title": "" }, { "docid": "f0fa1a0fd2e8e842f362765a9c5f4c92", "score": "0.46489805", "text": "def update_statistics(clock, duration):\n instance = AlertNotified.objects.create(date_pressed=clock,duration_Seconds=duration)", "title": "" }, { "docid": "16986ec326adb63266a1d96a81a81ab4", "score": "0.4641254", "text": "def _update_ts(a_ts_x, a_ts_y, a_tweet_toks,\n a_pos, a_neg, a_neut,\n a_pos_re=NONMATCH_RE, a_neg_re=NONMATCH_RE):\n if not a_tweet_toks:\n return\n tweet = ' '.join([w\n if isinstance(w, basestring)\n else ' '.join(w)\n for w in sorted(a_tweet_toks)])\n added = False\n if a_tweet_toks & a_pos or a_pos_re.search(tweet):\n a_ts_y.append(POSITIVE)\n added = True\n elif a_tweet_toks & a_neg or a_neg_re.search(tweet):\n a_ts_y.append(NEGATIVE)\n added = True\n elif a_tweet_toks & a_neut:\n a_ts_y.append(NEUTRAL)\n added = True\n if added:\n a_ts_x.append(_toks2feats(a_tweet_toks))\n a_tweet_toks.clear()", "title": "" }, { "docid": "b832d3c0bc0fec868407a1587b297fde", "score": "0.4636004", "text": "def _update_model_after_commit(self, model):\n model.uid = self.uid\n model.created_at = self.created_at\n model.updated_at = self.updated_at", "title": "" }, { "docid": "7a1fcdf88fd4023c64bb0ff97ec5dded", "score": "0.46283996", "text": "def updated(self) -> datetime:\n raise NotImplementedError", "title": "" }, { "docid": "b92f05d1bcbdc1fdecc39fd428e0688d", "score": "0.46261755", "text": "def set_update_table_base(self):\n now = int(time.time())\n self.update_time = now", "title": "" }, { "docid": "8eb7bcaf756dea03fabe47a92e12f1c0", "score": "0.46246383", "text": "def update_record(self, row_id, field, value):\n return", "title": "" }, { "docid": "0b0377bf9a5934df11db0a5c5965ee56", "score": "0.4619634", "text": "def save(self, **kwargs):\n\t\tself.updated = datetime.datetime.now()\n\t\tsuper(ReadStatus, self).save(**kwargs)", "title": "" }, { "docid": "193bde4e44153140ccfd5a2e9c2112a6", "score": "0.46140862", "text": "def update_attendance_matrix(i, j, value, attendance_matrix):\n\tprint 'updating attendance_matrix'\n\tattendance_matrix[i][j] = value\n\tsave_attendance_matrix(attendance_matrix)", "title": "" }, { "docid": "f1a531d9626462556ca87447f06bac6a", "score": "0.46066797", "text": "def update(self, instance, validated_data):\n instance.user = validated_data.get('user', instance.user)\n instance.meetup = validated_data.get('meetup', instance.meetup)\n instance.user_check_in = validated_data.get('user_check_in', instance.user_check_in)\n\n instance.save()\n return instance", "title": "" }, { "docid": "1a989dac767c39d010cb5a64518d1beb", "score": "0.4605577", "text": "def _updateTrafficData(self,t,isRealAcc=True):\n \n # Add empty attempts if there is more than one second gap between last \n # attmpt and new attempt.\n if self.listOfAccessInLast60Min.hasAcc():\n 
preT=self.listOfAccessInLast60Min.getLastAcc().t+LocalData.oneSecDelay\n while self.listOfAccessInLast60Min.hasAcc() and t-preT>LocalData.zeroDelay:\n totalNumOfAcc=self.listOfAccessInLast60Min.addAccess(preT,isRealAcc=False)\n self.dynamicMaxTenValRecorder.updateRecords(preT,totalNumOfAcc)\n preT=preT+LocalData.oneSecDelay\n \n # Add/Update list and the counter of the attempts in last 60 mins a\n totalNumOfAcc=self.listOfAccessInLast60Min.addAccess(t,isRealAcc)\n \n # Update the total history\n self.dynamicMaxTenValRecorder.updateRecords(t,totalNumOfAcc)", "title": "" }, { "docid": "29fd6913acedbc2f26042360c09d873f", "score": "0.45989418", "text": "async def update_user_action_skill_ts(self, user_id: int, current_ts: int) -> None:\n\n mycursor, db = await the_database()\n await mycursor.execute(\"UPDATE UserCurrency SET last_skill_ts = %s WHERE user_id = %s\", (current_ts, user_id))\n await db.commit()\n await mycursor.close()", "title": "" }, { "docid": "56d51606dad82c487f7bd3978ac450f4", "score": "0.45973355", "text": "def logs_update(self, logs):\n try: self.check_for_table_exits(table='logs')\n except Exception as e: print(e)\n\n try: self.check_for_table_exits(table='chat')\n except Exception as e: print(e)\n\n try:\n logs['login_user'] = current_user\n logs['log_time'] = str(current_date_to_day())[0:19]\n con.execute(self.insert_query(table='logs',\n columns=self.sqlite_queries['columns']['logs'][1:],\n values=logs\n ))\n except Exception as e: print(e)\n\n try:\n con.execute(self.insert_query(table='chat', columns=self.sqlite_queries['columns']['chat'][1:],\n values=self.info_logs_for_chat(logs['info'])))\n except Exception as e: print(e)", "title": "" }, { "docid": "2263d740b02be199681dfa8963933e85", "score": "0.45962694", "text": "def changeTimetoLive(self,hours_to_live):\n\t\tself.time_to_live = hours_to_live", "title": "" }, { "docid": "5e84e3f31d2e1e2a925186324a5a2ced", "score": "0.45939356", "text": "def update_migration_record(**instance_details):\n table_name = \"instances\"\n\n w_dict = OrderedDict([('src_uuid', instance_details[\"src_uuid\"]),\n ('src_cloud', instance_details[\"src_cloud\"]),\n ('dst_cloud', instance_details[\"dst_cloud\"])])\n\n update_table(table_name, instance_details, w_dict, True)", "title": "" }, { "docid": "4df170911a105601b90145bfe57e6e7b", "score": "0.45868564", "text": "def save(self):\n\t\tself.updated = datetime.datetime.now()\n\t\tsuper(Topic, self).save()", "title": "" }, { "docid": "e745fb39e716d6575641b88e5429e49a", "score": "0.45776975", "text": "def synchronize_timestamps(self):\n raise NotImplementedError", "title": "" }, { "docid": "aafcad89657cdb32e0791a89cd6e4745", "score": "0.45758414", "text": "def update(\n self,\n session: Session,\n changes: dict = None,\n commit: bool = True,\n validate_metadata: bool = True,\n ):\n # Since commit=False, this will only apply changes to the in-memory\n # TrialMetadata instance, not the corresponding db record\n super().update(session=session, changes=changes, commit=False)\n\n # metadata_json was possibly updated in above method call,\n # so check that it's still valid if validate_metadata=True\n if validate_metadata:\n self.validate_metadata_json(self.metadata_json)\n\n if commit:\n session.commit()", "title": "" }, { "docid": "59d3be310a1e33ab0b06aac5ba8e2c22", "score": "0.45708567", "text": "def insert_record(records, record):\r\n records.update(record)\r\n print(\"New record added!\\n\")", "title": "" }, { "docid": "e0d5c4212b378f2d79c798447f42a78e", "score": "0.45700285", "text": "def 
save(self):\n from models import storage\n self.updated_at = datetime.now()\n storage.save()", "title": "" }, { "docid": "9d0d955106d487bb59298bcd90c67832", "score": "0.4569637", "text": "def add_item_timestamps(status_dict, timestamp):\n s = dict(status_dict) # make a copy\n for device_type in s:\n if type(s[device_type]) != dict: continue\n for device_instance in s[device_type]:\n if type(s[device_type][device_instance]) != dict: continue\n for status_key in s[device_type][device_instance]:\n s[device_type][device_instance][status_key] = {\n \"val\": s[device_type][device_instance][status_key],\n \"timestamp\": timestamp\n }\n return s", "title": "" }, { "docid": "aa14a0855df514aebdc6198e2ac2dd99", "score": "0.45690656", "text": "def _set_tuple(self, value):\n self.dt = self.dt.fromtimestamp(int(time.mktime(value)))", "title": "" }, { "docid": "8d01600bb9b1d1f9b77818a1ea2f2ccc", "score": "0.45657778", "text": "def update_db(self) -> None:\n for calendar in self._calendars:\n if self._needs_update(calendar, remember=True):\n self._db_update(calendar)", "title": "" }, { "docid": "857c9723705c678a5121636fd48547de", "score": "0.4565692", "text": "def update_record(rec_id, record, vcf_object):\n index = vcf_object.df.index[vcf_object.df['ID'] == rec_id].to_list()\n\n for key in record.keys():\n vcf_object.df.loc[index, key] = record.get(key)\n vcf_object.write()", "title": "" }, { "docid": "04c83cd8d54f5d3c685b6fe47f993b1e", "score": "0.45654747", "text": "def _update_with(self, instance, data):\n for field, value in list(data.items()):\n setattr(instance, field, value)\n instance.save()", "title": "" }, { "docid": "a86bef1a0822d0cc3627a6e9de7f0851", "score": "0.45631707", "text": "def _update_object(self, v, timestamp):\n assert isinstance(v, Vehicle)\n assert isinstance(timestamp, int)\n\n # try to read state data for this timestamp\n state = self._data_handler.get_state(int(timestamp), v.id)\n\n if len(v.timestamps) == 0:\n v.timestamps.create_and_add(timestamp)\n # create a timestamp if it does not exist\n elif v.timestamps.latest().timestamp != timestamp:\n v.timestamps.create_and_add(timestamp)\n else:\n warnings.warn(\"Timestamp is already present in Timestamps!\")\n\n v.timestamps.latest().state = state", "title": "" }, { "docid": "563b58da055db6e73d4d43a0ab2e4ab9", "score": "0.45583063", "text": "def test_update_schedule_using_put(self):\n pass", "title": "" }, { "docid": "ffc765d0d8fcc39f8bebeebba6b8bfba", "score": "0.45550472", "text": "def update(self, dt):\n self.group.update(dt)", "title": "" }, { "docid": "d93d5a17eb0a11b40447a663e003cbc6", "score": "0.45395333", "text": "def save(self, *args, **kwargs):\n self.last_modified = datetime.now().replace(tzinfo=utc)\n super(Data, self).save(*args, **kwargs)", "title": "" }, { "docid": "2a3df503a3615599c7b168bc4665f063", "score": "0.45377424", "text": "def set_data(self, values, timestamp):\r\n pass", "title": "" }, { "docid": "8ede4efd8d19553b50bc3fd888ae46f6", "score": "0.4536256", "text": "def end_update(self):\n\t\tfor app_name in self._apps:\n\t\t\tif not self._apps[app_name][-1][1]:\n\t\t\t\tif app_name not in self._running_apps:\n\t\t\t\t\tself._apps[app_name][-1][1] = datetime.datetime.now()", "title": "" }, { "docid": "da18c1345805564d4d80724d8bb6d853", "score": "0.45338085", "text": "def update(self, dt):\n pass", "title": "" }, { "docid": "da18c1345805564d4d80724d8bb6d853", "score": "0.45338085", "text": "def update(self, dt):\n pass", "title": "" }, { "docid": "da18c1345805564d4d80724d8bb6d853", "score": "0.45338085", 
"text": "def update(self, dt):\n pass", "title": "" }, { "docid": "da18c1345805564d4d80724d8bb6d853", "score": "0.45338085", "text": "def update(self, dt):\n pass", "title": "" } ]
2557a78fa169d34f7413297f037539f7
Convert a naive datetime that uses the machine's timezone to a UTC one.
[ { "docid": "08004ba29b1d185fd4128e6bebd710b7", "score": "0.7784218", "text": "def to_utc(dt):\n # Don't modify it if it already has a timezone -- even if it's not UTC!\n # Yes, this is kinda limited, but should be enough for working with Taskwarrior.\n if dt.tzinfo is not None:\n return dt\n epoch = mktime(dt.timetuple())\n utc = datetime.utcfromtimestamp(epoch).replace(tzinfo=UTC())\n return utc", "title": "" } ]
[ { "docid": "f9217e57807189c69cdcecfdc048ff30", "score": "0.80217886", "text": "def to_naive_utc(d):\n assert isinstance(d, datetime)\n if not is_naive_datetime(d):\n d = d.astimezone(pytz.UTC).replace(tzinfo=None)\n return d", "title": "" }, { "docid": "e4409cefe9bdb32ff193e366d6fb7efd", "score": "0.79139006", "text": "def to_utc(localized_datetime: datetime) -> datetime:\n return to_timezone(localized_datetime, utc)", "title": "" }, { "docid": "ea14e98d9bb150749fba526d98d20978", "score": "0.78699976", "text": "def dt2utcdt(dt): #Wrapper\n assert isinstance(dt, datetime.datetime)\n return dt.astimezone(pytz.timezone('UTC'))", "title": "" }, { "docid": "763e86fc971a9da80c07d3ef19ccb17f", "score": "0.7800841", "text": "def get_utc_naive(dt):\n return dt.astimezone(timezone.utc).replace(tzinfo=None)", "title": "" }, { "docid": "1685173bf8ef9d512991fa665cdfc2a4", "score": "0.7690285", "text": "def datetime_naive_to_utc(dt: datetime.datetime) -> datetime.datetime:\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n # has timezone info\n raise ValueError(\n \"dt must be naive/timezone unaware: \"\n f\"{dt} has tzinfo {dt.tzinfo} and offset {dt.tzinfo.utcoffset(dt)}\"\n )\n\n return dt.replace(tzinfo=datetime.timezone.utc)", "title": "" }, { "docid": "2b5e2d64b1440ad61b179cfd4cae3a76", "score": "0.7594501", "text": "def local2utc(t):\n\n # I'm not sure but maybe I should just set tzinfo here too??\n # tzinfo = timezone.utc\n return t.astimezone(timezone.utc).replace(microsecond=0, second=0)", "title": "" }, { "docid": "bbf3381fc2a152db63033ce22e81c0da", "score": "0.7491393", "text": "def as_datetime_utc(datetime_string):\n return dateutil_parse(datetime_string).replace(tzinfo=utc)", "title": "" }, { "docid": "6bb3b2657ca0bc7665e603bf9cfc12d4", "score": "0.7470424", "text": "def to_utc(dt):\n cst = pytz.timezone(u'America/Regina')\n localized_datetime = cst.localize(dt)\n\n return localized_datetime.astimezone(pytz.UTC)", "title": "" }, { "docid": "372288d6a004d1366a692bc4b3a2ebe7", "score": "0.74220693", "text": "def as_utc_timezone(date_time: datetime) -> datetime:\n return date_time.astimezone(pytz.timezone('GMT'))", "title": "" }, { "docid": "41a17c8f28b8ba34b1113421973e357d", "score": "0.7352622", "text": "def convert_time_to_utc(d):\n d = datetime.datetime(\n 1900, 1, 1, d.hour, d.minute, d.second, d.microsecond, d.tzinfo\n )\n d -= d.utcoffset()\n return d.time()", "title": "" }, { "docid": "3b621f85b5ad61581f96c94b09ed4596", "score": "0.73322713", "text": "def forceutc(t):\n#%% polymorph to datetime\n if isinstance(t,str):\n t = parse(t)\n elif isinstance(t,datetime64):\n t=t.astype('M8[ms]').astype('O') #for Numpy 1.10 at least...\n elif isinstance(t,datetime):\n pass\n elif isinstance(t,(ndarray,list,tuple)):\n return asarray([forceutc(T) for T in t])\n else:\n raise TypeError('datetime only input')\n#%% enforce UTC on datetime\n if t.tzinfo == None: #datetime-naive\n t = t.replace(tzinfo = UTC)\n else: #datetime-aware\n t = t.astimezone(UTC) #changes timezone, preserving absolute time. E.g. 
noon EST = 5PM UTC\n return t", "title": "" }, { "docid": "8689c225aa61e31abe14fce33d87f44d", "score": "0.7327076", "text": "def datetime_tz_to_utc(dt: datetime.datetime) -> datetime.datetime:\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n return dt.replace(tzinfo=dt.tzinfo).astimezone(tz=datetime.timezone.utc)\n else:\n raise ValueError(\"dt does not have timezone info\")", "title": "" }, { "docid": "2b36144df80dc5dc6431feba811e7e83", "score": "0.73203063", "text": "def datetime_to_utc(dt, tz):\n if not (\n dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None\n ):\n # preserve tzinfo if it is there\n pass\n else:\n if tz:\n # If we get one from user, use tz to localize.\n dt = timezone(tz).localize(dt)\n else:\n # if noting else: local timezone\n dt = tzlocal.get_localzone().localize(dt)\n\n return dt.astimezone(timezone(\"UTC\"))", "title": "" }, { "docid": "c694f03fa1e7b41cd944a5b40f1857ae", "score": "0.7223939", "text": "def dt_tzinfo_utc(dt):\n return dt.replace(tzinfo=pytz.utc)", "title": "" }, { "docid": "678eab39a9d8e0c197d6c919f3e4df3f", "score": "0.71856344", "text": "def get_now_utc_notz() -> datetime.datetime:\n return get_now_utc().replace(tzinfo=None)", "title": "" }, { "docid": "50510fc9b0f27c38051d7b96969c4db1", "score": "0.7152577", "text": "def cast_naive_datetime_to_tz(dt, tz=UTC()):\n if has_tz(dt):\n return dt\n return dt.replace(tzinfo=tz)", "title": "" }, { "docid": "d9722037f775335442ac76b87e8a43be", "score": "0.71494293", "text": "def get_utc_time():\n return datetime.datetime.now(pytz.UTC)", "title": "" }, { "docid": "05465c934254d6ed28766abe2fdcbe7e", "score": "0.7143855", "text": "def normalize_datetime_to_utc(dt):\n # This forces a new object to be returned, which fixes an issue with\n # serialization to XML in PyXB. PyXB uses a mixin together with\n # datetime.datetime to handle the XML xs:dateTime. 
That type keeps track of\n # timezone information included in the original XML doc, which conflicts if we\n # return it here as part of a datetime mixin.\n\n return datetime.datetime(\n *dt.utctimetuple()[:6], microsecond=dt.microsecond,\n tzinfo=datetime.timezone.utc\n )\n\n # datetime.utctimetuple()\n # if has_tz(dt):\n # dt = dt.astimezone(datetime.timezone.utc)\n # return\n #\n # a = dt.replace(tzinfo=datetime.timezone.utc)\n # b = a.astimezone(datetime.timezone.utc)\n # return a", "title": "" }, { "docid": "c005d79d663ece8eca6f402ec082bcbf", "score": "0.7124563", "text": "def utc_dt():\r\n return datetime.now(pytz.utc)", "title": "" }, { "docid": "588d4ae802170d8505518fe489344aad", "score": "0.70933765", "text": "def as_utc_time(timestamp):\n return tzlocal.get_localzone().localize(timestamp).astimezone(pytz.utc)", "title": "" }, { "docid": "6f5bccdf77b7b232a8bee23135bd718a", "score": "0.7049374", "text": "def to_utc(dt, tz, is_dst=False):\n # TODO - cleanup docstring / is this the right spot for this function?\n return dt - pytz.timezone(tz).utcoffset(dt, is_dst=is_dst)", "title": "" }, { "docid": "6a89c023b758a9c7231bb844a67b9c3a", "score": "0.703538", "text": "def test_roundtrip_UTC(self):\n newdate = datetime.datetime(2010, 1, 15, 8, tzinfo=dateutil.tz.tzutc())\n newtzadt = tzaware_datetime.TZAwareDateTime(utcdt = newdate)\n self.assertEqual(newdate, newtzadt.realdate)\n self.assertEqual(newdate, newtzadt.utcdt)", "title": "" }, { "docid": "e474fdfb7111dd4a6d6b9c0901e59d8c", "score": "0.702795", "text": "def datetime_utc_to_local(dt: datetime.datetime) -> datetime.datetime:\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not datetime.timezone.utc:\n raise ValueError(f\"{dt} must be in UTC timezone: timezone = {dt.tzinfo}\")\n\n return dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)", "title": "" }, { "docid": "fa500aae9b8f4b34b1fb6a8d522e7d90", "score": "0.7010546", "text": "def convert_time_to_UTC_timezone(self, my_time):\n my_time_format = datetime.datetime.strptime(my_time, \"%Y-%m-%d %H:%M:%S.%f\")\n my_time_local = pytz.timezone(\"America/Los_Angeles\").localize(my_time_format, is_dst=None)\n\n my_time_utc = my_time_local.astimezone(pytz.utc)\n return datetime.datetime.strftime(my_time_utc, \"%Y-%m-%dT%H:%M:%S.%f\")[:-3] + \"Z\"", "title": "" }, { "docid": "2c734e457117fbb3996c066de2c6cba5", "score": "0.6994145", "text": "def test_to_utc_naive(self):\n for (naive_grdt, grdt) in [\n (\n GregorianDateTime(1970, 1, 1, 0, 0, 0, None),\n GregorianDateTime(1970, 1, 1, 9, 0, 0, \"+09:00\"),\n ),\n (\n GregorianDateTime(1970, 1, 1, 0, 0, 0, None),\n GregorianDateTime(1970, 1, 1, 9, 0, 0, \"Asia/Tokyo\"),\n ),\n (\n GregorianDateTime(1970, 1, 1, 0, 0, 0, None),\n GregorianDateTime(1969, 12, 31, 15, 0, 0, \"-09:00\"),\n ),\n ]:\n with self.subTest(grdt=grdt):\n self.assertEqual(naive_grdt, grdt.to_utc_naive())\n for (second, timezone) in [\n (second, timezone)\n for second in range(0, 60)\n for timezone in [\"+00:00\", \"-00:00\", \"UTC\"]\n ]:\n naive_grdt = GregorianDateTime(1970, 1, 1, 0, 0, second, None)\n grdt = GregorianDateTime(1970, 1, 1, 0, 0, second, timezone)\n with self.subTest(grdt=grdt):\n self.assertEqual(naive_grdt, grdt.to_utc_naive())", "title": "" }, { "docid": "186565d6df74ac69830b52ba517df12d", "score": "0.69939506", "text": "def _localize_to_utc(time, location):\n \n try:\n time_utc = time.tz_convert('UTC')\n pvl_logger.debug('tz_convert to UTC')\n except TypeError:\n 
time_utc = time.tz_localize(location.tz).tz_convert('UTC')\n pvl_logger.debug('tz_localize to {} and then tz_convert to UTC'.format(location.tz))\n \n return time_utc", "title": "" }, { "docid": "e0ad1d6d314a77304ecf75da86a4905d", "score": "0.69809145", "text": "def convert_input_to_utc(timedate, timezone):\n if not timezone or not timedate:\n return timedate\n if isinstance(timedate, basestring):\n timedate = datetime.datetime.strptime(timedate, \"%m/%d/%Y %H:%M\")\n #first make sure the date is represented as the user would see it\n try:\n timedate = timezone.localize(timedate)\n except ValueError:\n pass #this already has a tzz\n # convert that sucker\n timedate = timedate.astimezone(pytz.utc)\n #and finally strip off the timezone, sine mysql won't let us save it with it and we want to be db independent.\n timedate = timedate.replace(tzinfo=None)\n return timedate", "title": "" }, { "docid": "761a66d537382e6210a0fe12079dda91", "score": "0.69665575", "text": "def get_now_utc() -> datetime.datetime:\n return datetime.datetime.now(pytz.utc)", "title": "" }, { "docid": "32a29910e5ae75e30b77a387b99e64e6", "score": "0.69428885", "text": "def utc_now() -> datetime:\n return datetime.now(tz=timezone.utc)", "title": "" }, { "docid": "a701cf601bbd5b1cc7d1a17145170e08", "score": "0.6908304", "text": "def getUTCDateTime(timeSinceEpoch=None):\n return time.gmtime(timeSinceEpoch if timeSinceEpoch != None else getNow())", "title": "" }, { "docid": "822a0f51580637104e7d23a49e32d5da", "score": "0.6826588", "text": "def _localtime_to_utc(self, localtime: str):\r\n date = dt.datetime.strptime(localtime, \"%Y/%m/%d %H:%M:%S\")\r\n loc = tzl.get_localzone()\r\n as_utc = loc.localize(date).astimezone(pytz.utc)\r\n return as_utc", "title": "" }, { "docid": "0a768d0463091409d055ce1c57e2fbe4", "score": "0.6809506", "text": "def timestamp_to_utc_datetime(ts):\n return datetime.datetime.utcfromtimestamp(ts)", "title": "" }, { "docid": "758ea092c8d321bdcdd491dd41f73ee0", "score": "0.68046707", "text": "def tz_convert(dt, timezone):\n tz = pytz.timezone(timezone)\n return dt.astimezone(tz)", "title": "" }, { "docid": "76e53c4dd4ce23be79fa8f5181ac90b5", "score": "0.67974234", "text": "def _make_local(utc_datetime):\n return (\n utc_datetime.replace(tzinfo=dateutil.tz.tzutc())\n .astimezone(dateutil.tz.tzlocal())\n .replace(tzinfo=None)\n )", "title": "" }, { "docid": "645cef9d2358b5d1738c7243cf362820", "score": "0.6758482", "text": "def local_to_utc(local_dt):\n # get the utc_dt as if the local_dt is utc and calculate the timezone\n # difference and add it to the local dt object\n return local_dt - (utc_to_local(local_dt) - local_dt)", "title": "" }, { "docid": "3556db1fcd83096e72e30c29a65c3069", "score": "0.6743345", "text": "def get_utc_time(timezone_aware_ts, time_zone, is_date=False):\n tz = pytz.timezone(time_zone)\n if is_date:\n return tz.localize(parser.parse(timezone_aware_ts))\n return parser.parse(timezone_aware_ts)", "title": "" }, { "docid": "980937da45b23b518008004b447acd30", "score": "0.674157", "text": "def iso2utcdt(dstr): #Tested\n\n assert isinstance(dstr, str)\n dt_loc = dateutil.parser.parse(dstr)\n dt_utc = dt_loc.astimezone(pytz.timezone('UTC'))\n\n return dt_utc", "title": "" }, { "docid": "a786b807df9c7246a1e387176d642f7c", "score": "0.6732876", "text": "def get_observation_datetime_utc(self):\n if self.observation_time:\n obs = datetime(self.observation_date.year, self.observation_date.month, self.observation_date.day, self.observation_time.hour, self.observation_time.minute, 
tzinfo=settings.AWST)\n else:\n obs = datetime(self.observation_date.year, self.observation_date.month, self.observation_date.day, 0, 0, tzinfo=settings.AWST)\n return obs.astimezone(settings.UTC)", "title": "" }, { "docid": "9116d25897885a45ff69f6912bab3822", "score": "0.6732776", "text": "def _mts2utc(self, timestamp):\n return dt.utcfromtimestamp(timestamp/1000).replace(tzinfo=tz.utc)", "title": "" }, { "docid": "192534ed2bae7610e77a8c8937c3669c", "score": "0.66683906", "text": "def fromutc(self, arg0):\n return None", "title": "" }, { "docid": "6243d0b908aa8ccea492bfadfd59d91f", "score": "0.666188", "text": "def date_utc(dt):\n return dt.isoformat()", "title": "" }, { "docid": "5b42890fed43796517265d71585190a7", "score": "0.6655371", "text": "def _replace_timezone(dateutil_parser: object) -> datetime:\n try:\n # try to get the timezone from tzlocal\n tzinfo = pytz.timezone(get_localzone().zone)\n except pytz.exceptions.UnknownTimeZoneError: # pragma: no cover\n try:\n # try to get the timezone from python's time package\n tzinfo = pytz.timezone(time.tzname[0])\n except pytz.exceptions.UnknownTimeZoneError:\n # seeing as all else has failed: use UTC as the timezone\n tzinfo = pytz.timezone('UTC')\n return tzinfo.localize(dateutil_parser)", "title": "" }, { "docid": "7b2866a16bec50d5d53faea2bc02d880", "score": "0.6639499", "text": "def omniscient_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d", "title": "" }, { "docid": "8a9a7a179c32c8892cf7b11cb301dd2a", "score": "0.6616611", "text": "def make_tz_aware(the_datetime,time_zone=None):\n if the_datetime.tzinfo != None:\n return the_datetime # not needed\n \n if time_zone == None:\n time_zone = get_time_zone_setting()\n if time_zone == None:\n time_zone = 'UTC'\n \n tz = timezone(time_zone)\n return tz.localize(the_datetime)", "title": "" }, { "docid": "844ccc88a317a08a6adc34fcee2fe9af", "score": "0.6612576", "text": "def localize_datetime(dt, tz_name='UTC'):\n import pytz\n assert dt.tzinfo == None\n utc = pytz.timezone('UTC')\n aware = utc.localize(dt)\n timezone = pytz.timezone(tz_name)\n tz_aware_dt = aware.astimezone(timezone)\n return tz_aware_dt", "title": "" }, { "docid": "c6a14a33658635ecb6f3027081c595db", "score": "0.6612006", "text": "def set_timezone(dt, tz=\"UTC\"):\n tzinfo = dateutil.tz.gettz(tz)\n return dt.replace(tzinfo=tzinfo)", "title": "" }, { "docid": "e141247d7d7dd526b6dad6e577dcd1be", "score": "0.6594663", "text": "def strip_timezone(dt):\n return dt.replace(tzinfo=None)", "title": "" }, { "docid": "48aa92dc577c0df3518455704e5e9e7c", "score": "0.6592702", "text": "def aware_utcnow():\n\treturn datetime.datetime.utcnow().replace(tzinfo = pytz.UTC)", "title": "" }, { "docid": "b51818b32f4b71eca91deb7a6002124f", "score": "0.6585313", "text": "def convert_to_utc(self, value, from_server):\n\n localized_value = self.try_add_timezone(value, server=from_server)\n return self._normalize(localized_value, TimezoneEnum.UTC)", "title": "" }, { "docid": "9942375f132100792e9ad8ffde400bda", "score": "0.65806127", "text": "def aslocaltime(self, naive_date):\n utc_date = pytz.utc.localize(naive_date)\n\n user_tz = pytz.timezone(self.timezone)\n if user_tz == pytz.utc:\n return utc_date\n\n return utc_date.astimezone(user_tz)", "title": "" }, { "docid": "2ca85b835594283cecff907500effefa", "score": "0.65799445", "text": "def datetime_remove_tz(dt: datetime.datetime) -> datetime.datetime:\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type 
datetime.datetime, not {type(dt)}\")\n\n return dt.replace(tzinfo=None)", "title": "" }, { "docid": "7814aabab1caba82ec390e78db70e9a9", "score": "0.6563487", "text": "def convert_from_utc(self, value, to_server):\n\n localized_value = self._try_add_timezone(value, TimezoneEnum.UTC)\n return self.normalize(localized_value, to_server)", "title": "" }, { "docid": "71ce770df7be0109610607c4a455376b", "score": "0.6556642", "text": "def localize_datetime(value, zone_name='US/Eastern'):\n # print(tz.all_timezones)\n utc_dt = tz.utc.localize(value)\n return utc_dt.astimezone(tz.timezone(zone_name))", "title": "" }, { "docid": "f633ddbdafe88c5c55642cf5fbc580c8", "score": "0.653319", "text": "def _get_utc_now() -> datetime.datetime:\n return datetime.datetime.utcnow()", "title": "" }, { "docid": "fcbfa24b30ab55f533880b57a6badfd9", "score": "0.65330625", "text": "def localized_datetime(*args, **kwargs):\n if args or kwargs:\n dt = datetime(*args, **kwargs)\n else:\n dt = datetime.now()\n\n return as_utc(DEFAULT_TZ.localize(dt))", "title": "" }, { "docid": "2acb7bce9a1bd00341640856dfa4ce29", "score": "0.65277827", "text": "def localtime(input: datetime, tz) -> datetime:\n return tz.normalize(tz.localize(input))", "title": "" }, { "docid": "38220468b872a98975a9f6511b751fc6", "score": "0.6518873", "text": "def telematics_to_datetime(cls, datetime_string):\n return dateutil.parser.parse(datetime_string).astimezone(pytz.utc)", "title": "" }, { "docid": "db8bb5c7e532bd2b965683458ca761cd", "score": "0.65105027", "text": "def _zulu_datetime(date_object):\n if type(date_object) is datetime:\n date_object = date_object.replace(microsecond=0, tzinfo=utc)\n return date_object", "title": "" }, { "docid": "1d6e0298d6087ebc7ebc220c4f0d3cf2", "score": "0.6497329", "text": "def getUTCdatetime():\n\t\treturn datetime.utcnow()", "title": "" }, { "docid": "cf24f065d65dd1e1757e52fbd5f1be28", "score": "0.6485351", "text": "def utc_now():\n return datetime.now(timezone.utc).replace(microsecond=0)", "title": "" }, { "docid": "e92d2615f6b44281670eb555f3b6ad3a", "score": "0.6484282", "text": "def _get_utctz_dt(ts):\n return datetime.datetime.utcfromtimestamp(ts)", "title": "" }, { "docid": "3fd177ac08ae128d59f6f681daaba9b7", "score": "0.64747155", "text": "def get_utc_timestamp(dt):\n\n dt = pd.to_datetime(dt)\n try:\n dt = dt.tz_localize('UTC')\n except TypeError:\n dt = dt.tz_convert('UTC')\n return dt", "title": "" }, { "docid": "cbe97acbc02858181c62325938c2b711", "score": "0.64703554", "text": "def utcnow(with_timezone=False):\n if utcnow.override_time:\n try:\n return utcnow.override_time.pop(0)\n except AttributeError:\n return utcnow.override_time\n if with_timezone:\n return datetime.datetime.now(tz=iso8601.iso8601.UTC)\n return datetime.datetime.utcnow()", "title": "" }, { "docid": "f13d4953a00f0e7793bf91718973e3b2", "score": "0.64679223", "text": "def local_time_to_utc(cls, obj: Union[datetime, str], local_tz: str = 'US/Central',\n fmt: str = None, as_str: bool = False) -> Union[datetime, str]:\n dt_obj = cls._tz_convert(local_tz, 'UTC', obj, fmt)\n if as_str:\n return dt_obj.strftime('%F %T')\n else:\n return dt_obj", "title": "" }, { "docid": "cd559403c114a6f3ed5c2976e99eb291", "score": "0.6463221", "text": "def create_utc_datetime(*datetime_parts):\n return datetime.datetime(*datetime_parts, tzinfo=UTC())", "title": "" }, { "docid": "2317e6811871831778d7ce5aa4e29cdf", "score": "0.64575946", "text": "def datetime_naive_to_local(dt: datetime.datetime) -> datetime.datetime:\n\n if type(dt) != datetime.datetime:\n 
raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n # has timezone info\n raise ValueError(\n \"dt must be naive/timezone unaware: \"\n f\"{dt} has tzinfo {dt.tzinfo} and offset {dt.tzinfo.utcoffset(dt)}\"\n )\n\n return dt.replace(tzinfo=get_local_tz(dt))", "title": "" }, { "docid": "f42ba4a777e85fee0dab294cdc46a300", "score": "0.64551306", "text": "def get_with_timezone(\n datetime_object: datetime.datetime,\n to_timezone: str='',\n default_from_timezone: str='UTC',\n) -> datetime.datetime:\n # If no to_timezone, try to grab from current user\n if not to_timezone and tmpl_context.current_user:\n to_timezone = tmpl_context.current_user.timezone\n\n # If no to_timezone, return original datetime\n if not to_timezone:\n return datetime_object\n\n # If datetime_object have not timezone, set new from default_from_timezone\n if not datetime_object.tzinfo:\n from_tzinfo = pytz.timezone(default_from_timezone)\n datetime_object = from_tzinfo.localize(datetime_object)\n\n new_tzinfo = pytz.timezone(to_timezone)\n return datetime_object.astimezone(new_tzinfo)", "title": "" }, { "docid": "0d1aa72f4d5444dbd96ebfe47774eed1", "score": "0.64418066", "text": "def _make_naive(value, timezone):\n value = value.astimezone(timezone)\n if hasattr(timezone, 'normalize'):\n # available for pytz time zones\n value = timezone.normalize(value)\n return value.replace(tzinfo=None)", "title": "" }, { "docid": "ef64c579d1890ecc44e8609f2c31d591", "score": "0.64389837", "text": "def utcfromtimestamp(self, timestamp):\n return None", "title": "" }, { "docid": "a0854eb7d5db164032bff6c99565e394", "score": "0.64346635", "text": "def localize_datetime(dt, tz_name=\"UTC\"):\n # type: (datetime, Optional[str]) -> datetime\n tz_aware_dt = dt\n if dt.tzinfo is None:\n utc = pytz.timezone(\"UTC\")\n aware = utc.localize(dt)\n timezone = pytz.timezone(tz_name)\n tz_aware_dt = aware.astimezone(timezone)\n else:\n warnings.warn(\"tzinfo already set\", TimeZoneInfoAlreadySetWarning)\n return tz_aware_dt", "title": "" }, { "docid": "59f117075bc2f7914154667759ad600a", "score": "0.6429064", "text": "def _local_to_utc(self, timestamp, local_zone):\n\n # pacific = pytz.timezone('US/Pacific') # Setting timezone for data grab\n\n timestamp_new = pd.to_datetime(\n timestamp, format='%Y-%m-%d', errors='coerce')\n # end_new = pd.to_datetime(end, format='%Y-%m-%d', errors='coerce') #Changing to datetime format so can convert to local time\n\n timestamp_new = timestamp_new.tz_localize(\n local_zone) # .tz_convert(pacific)\n # end_new = end_new.tz_localize('America/Los_Angeles')# pytz.utc .tz_convert(pacific) # Localizing times so request reflects PT time and not utc\n\n #start_new = start_new.tz_localize(None)\n #end_new = end_new.tz_localize(None)\n\n timestamp_new = timestamp_new.strftime('%Y-%m-%d %H:%M:%S')\n # end_new = end_new.strftime('%Y-%m-%d %H:%M:%S') # Converting datetime back to string for get request\n\n return timestamp_new # , end_new", "title": "" }, { "docid": "3088b7915f1aa59fbb3a3c7fa131d050", "score": "0.640204", "text": "def test_from_utc_naive(self):\n for (naive_grdt, grdt) in [\n (\n GregorianDateTime(1970, 1, 1, 0, 0, 0, None),\n GregorianDateTime(1970, 1, 1, 9, 0, 0, \"+09:00\"),\n ),\n (\n GregorianDateTime(1970, 1, 1, 0, 0, 0, None),\n GregorianDateTime(1970, 1, 1, 9, 0, 0, \"Asia/Tokyo\"),\n ),\n (\n GregorianDateTime(1970, 1, 1, 0, 0, 0, None),\n GregorianDateTime(1969, 12, 31, 15, 0, 0, \"-09:00\"),\n ),\n ]:\n with 
self.subTest(grdt=grdt):\n self.assertEqual(\n grdt, GregorianDateTime.from_utc_naive(naive_grdt, grdt.timezone)\n )\n for (second, timezone) in [\n (second, timezone)\n for second in range(0, 60)\n for timezone in [\"+00:00\", \"-00:00\", \"UTC\"]\n ]:\n naive_grdt = GregorianDateTime(1970, 1, 1, 0, 0, second, None)\n grdt = GregorianDateTime(1970, 1, 1, 0, 0, second, timezone)\n with self.subTest(grdt=grdt):\n self.assertEqual(\n grdt, GregorianDateTime.from_utc_naive(naive_grdt, timezone)\n )", "title": "" }, { "docid": "e98edf4145ab2c88b599b05da8971ffb", "score": "0.63925093", "text": "def set_timezone(unlocalized_datetime: datetime, target_timezone: Union[BaseTzInfo, str]) -> datetime:\n if unlocalized_datetime.tzinfo is not None:\n # remove current tz info and call this function again\n return set_timezone(unlocalized_datetime.replace(tzinfo=None), target_timezone)\n\n if isinstance(target_timezone, str):\n return set_timezone(unlocalized_datetime, timezone(target_timezone))\n\n return target_timezone.localize(unlocalized_datetime)", "title": "" }, { "docid": "43104acd03466ce82db960175d375993", "score": "0.63858587", "text": "def utc_now():\n return datetime.datetime.now(datetime.timezone.utc)", "title": "" }, { "docid": "08b3eba6d025a3f5ed568a3c5b7e8e78", "score": "0.6367028", "text": "def DateToUTC(date):\n date += str(\" 00:00:00 UTC\")\n pattern = str(\"%m/%d/%Y %H:%M:%S %Z\")\n timestamp = int(calendar.timegm(time.strptime(date, pattern)))\n return timestamp", "title": "" }, { "docid": "767db49d51704809db550a7812d8c3e3", "score": "0.6354033", "text": "def datetime_to_local(created_at):\n d = datetime.datetime.strptime(created_at, \"%a %b %d %H:%M:%S +0000 %Y\")\n d = pytz.UTC.localize(d) # add timezone info\n europe_london = pytz.timezone(\"Europe/London\")\n\n return d.astimezone(europe_london)", "title": "" }, { "docid": "800897e2fa7cfb8b0a161f274e101ff1", "score": "0.6339644", "text": "def convert_to_localtime(dt):\n tz = pytz.timezone('Europe/Stockholm')\n dt = dt.replace(tzinfo=pytz.utc)\n dt = dt.astimezone(tz)\n return dt", "title": "" }, { "docid": "fd3e1002446d21f7ddf839d1abc28d5e", "score": "0.6338096", "text": "def _fromutc(self, dt):\n\n # Re-implement the algorithm from Python's datetime.py\n dtoff = dt.utcoffset()\n if dtoff is None:\n raise ValueError(\"fromutc() requires a non-None utcoffset() \"\n \"result\")\n\n # The original datetime.py code assumes that `dst()` defaults to\n # zero during ambiguous times. 
PEP 495 inverts this presumption, so\n # for pre-PEP 495 versions of python, we need to tweak the algorithm.\n dtdst = dt.dst()\n if dtdst is None:\n raise ValueError(\"fromutc() requires a non-None dst() result\")\n delta = dtoff - dtdst\n\n dt += delta\n # Set fold=1 so we can default to being in the fold for\n # ambiguous dates.\n dtdst = enfold(dt, fold=1).dst()\n if dtdst is None:\n raise ValueError(\"fromutc(): dt.dst gave inconsistent \"\n \"results; cannot convert\")\n return dt + dtdst", "title": "" }, { "docid": "ffa5912cf9f023d0e9bb69f3074aca83", "score": "0.6335905", "text": "def get_current_utc() -> datetime:\n return datetime.now(utc)", "title": "" }, { "docid": "becccdc286a4eefe84b7959eae0cfbac", "score": "0.6322598", "text": "def convert_initial_to_local(timedate, timezone):\n if not timezone or not timedate:\n return timedate\n if isinstance(timedate, basestring):\n timedate = datetime.datetime.strptime(timedate, \"%m/%d/%Y %H:%M\")\n timedate = timedate.replace(tzinfo=pytz.utc)\n timedate = timedate.astimezone(timezone)\n return timedate", "title": "" }, { "docid": "c635f4638a01008f93133e3898bd428e", "score": "0.6321041", "text": "def convert_timezone(dt, tz, tzin=None):\n # Ensure datetime object is timesone aware\n if dt.tzinfo is None:\n assert isinstance(tzin, str), \\\n \"\\n datetime object must either be timezone aware, OR, you should\"\\\n \"\\n provide original timezone as a string in the `tzin` argument\"\n tzinfo_in = dateutil.tz.tzlocal() if (tzin==\"local\") else dateutil.tz.gettz(tzin)\n dt = dt.replace(tzinfo=tzinfo_in)\n\n # Convert to new timesone\n tzinfo_out = dateutil.tz.tzlocal() if (tz==\"local\") else dateutil.tz.gettz(tz)\n return dt.astimezone(tzinfo_out)", "title": "" }, { "docid": "1c6e66862858a9a6decd8267d76f35d6", "score": "0.6300296", "text": "def convert_datetime_across_timezones(d, t1, t2):\n d_dt = t1.localize(d, is_dst=True)\n return d_dt.astimezone(t2).replace(tzinfo=None)", "title": "" }, { "docid": "3e9cab1228725337619c7ddc6e86954f", "score": "0.6297499", "text": "def datetime_helper(time):\n # utcdt = weather_json['dt'] # returns epoch integer\n # convert api epoch to datetime string using datetime.datetime\n new = datetime.datetime.fromtimestamp(time).strftime('%H:%M %d/%m/%Y')\n datetime_object = datetime.datetime.strptime(new, '%H:%M %d/%m/%Y')\n\n local_tz = pytz.timezone('Australia/Perth')\n local_time = datetime_object.replace(tzinfo=pytz.utc).astimezone(local_tz)\n return local_time", "title": "" }, { "docid": "e4aadb097491f3995538102b31a6bab8", "score": "0.6294159", "text": "def to_local_timezone(date_time_object):\n if date_time_object.tzinfo is None:\n return date_time_object\n\n return date_time_object.astimezone(dateutil.tz.tzlocal()).replace(tzinfo=None)", "title": "" }, { "docid": "2e41e1f7fbe03601b64be0f9b1c26b06", "score": "0.62862885", "text": "def utc_timestamp_to_datetime(timestamp):\n if timestamp is not None:\n return datetime.fromtimestamp(timestamp, utc)", "title": "" }, { "docid": "92e90025b020ba22499cd3b45973e4b7", "score": "0.62771845", "text": "def posix_dt_user(posix_tstamp: float, user_tz) -> datetime.datetime:\n dt = datetime.datetime.fromtimestamp(posix_tstamp)\n return dt.astimezone(user_tz)", "title": "" }, { "docid": "f55a36ecd7f59a4c9c77e8a2d4a3b449", "score": "0.6275422", "text": "def test_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d", "title": "" }, { "docid": "b0431009a2c1eb448d5f3c84ebd75087", "score": "0.62365085", 
"text": "def dt2utc(dt, decimals=3):\n if decimals > 6:\n raise ValueError('decimals must be integer in range: 0 <= decimals < 7')\n microsec = int( 1e6 * np.around(dt.microsecond / 1e6, decimals=decimals) )\n return UTCDateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, microsec)", "title": "" }, { "docid": "0e3d33077a762a3b01aca854aef6c61f", "score": "0.62357855", "text": "def to_datetime(e):\n return datetime.datetime.fromtimestamp(e, pytz.timezone('utc'))", "title": "" }, { "docid": "131d346f059ff6d22897792c2fcddfaf", "score": "0.6235669", "text": "def dt_tz(cr, date, return_str=False):\n # TODO: Database Management System (PostgreSQL) must have the same Timezone that Operating System\n query = \"SELECT to_date('%s', 'YYYY-MM-DD') AT TIME ZONE 'UTC'\" % (date,)\n cr.execute(query)\n res = cr.fetchone()\n dt = res and res[0] or False\n if dt:\n dt = datetime.strptime(dt, DEFAULT_SERVER_DATETIME_FORMAT)\n else:\n dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)\n if return_str:\n dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n return dt", "title": "" }, { "docid": "244225bfd1a305f0e1488118b7aa0c28", "score": "0.6225515", "text": "async def get_instrument_utc_date_time(self):\n\n date_data = await self._execute_command('#GetInstrumentUtcDateTime').content\n\n return datetime(*unpack('HHHHHH', date_data))", "title": "" }, { "docid": "45252c562e7ba5cd31b3add89a90ae2a", "score": "0.62248963", "text": "def str_to_datetime(str_date):\n date = datetime.datetime.strptime(str_date, TIME_FMT)\n return date.astimezone(pytz.utc)", "title": "" }, { "docid": "7ca13f09f46f8ef8d869473560bfc563", "score": "0.6212321", "text": "def test_roundtrip_NZST(self):\n newdate = datetime.datetime(2010, 1, 15, 8, tzinfo=dateutil.tz.tzstr('NZST'))\n newtzadt = tzaware_datetime.TZAwareDateTime(realdate=newdate)\n self.assertEqual(newdate, newtzadt.realdate)\n self.assertEqual(newdate.astimezone(dateutil.tz.tzutc()), newtzadt.utcdt)", "title": "" }, { "docid": "47c8f47c49fc70ade882cbeafb47a547", "score": "0.6205382", "text": "def to_timezone(localized_datetime: datetime, target_timezone: Union[BaseTzInfo, str]) -> datetime:\n if isinstance(target_timezone, str):\n tz = timezone(target_timezone)\n return to_timezone(localized_datetime, tz)\n\n return datetime.fromtimestamp(localized_datetime.timestamp(), tz=target_timezone)", "title": "" }, { "docid": "0c0eedfed4dc36929a9de9f080d0e54b", "score": "0.61902344", "text": "def to_python(self, value):\n if value in self.empty_values:\n return None\n if isinstance(value, datetime.datetime):\n return to_utc_timezone(value)\n if isinstance(value, datetime.date):\n result = datetime.datetime(value.year, value.month, value.day)\n return to_utc_timezone(result)\n result = super(DateTimeField, self).to_python(value)\n return to_utc_timezone(result)", "title": "" }, { "docid": "410655f4a2733901c49c5eefff77f79c", "score": "0.61897296", "text": "def to_local(dt):\n # Don't modify it if it's already naive.\n if dt.tzinfo is None:\n return dt\n epoch = calendar.timegm(dt.timetuple())\n local = datetime.fromtimestamp(epoch)\n return local", "title": "" }, { "docid": "fa76c820b6269f4971fa07d929944a82", "score": "0.6185447", "text": "def getUTCDateTimeString(timeSinceEpoch=None):\n return convertDateTimeToString(getUTCDateTime(timeSinceEpoch))", "title": "" }, { "docid": "efa4af44286e19e39de546a995634856", "score": "0.61798966", "text": "def parseUTC(s):\n return datetime.strptime(s[:19], \"%Y-%m-%dT%H:%M:%S\").replace(\n tzinfo=timezone.utc\n )", "title": "" }, { 
"docid": "9a01989e3ae8d2028fb2671b1d472523", "score": "0.6178298", "text": "def fix_datetime(value) -> Optional[datetime]:\n if value is None:\n return None\n\n if isinstance(value, datetime):\n if not value.tzinfo:\n value = _set_to_local_timezone(value)\n\n return value", "title": "" }, { "docid": "3443ecb2e7ec9f9c23ce18186dfc0b6c", "score": "0.6168673", "text": "def adjusted_datetime(dt, tz='US/Eastern'):\n zone = pytz.timezone(tz)\n return dt + zone.utcoffset(dt)", "title": "" } ]
2d932ff2982e42f300390ffbc62bb91c
Get the nth variable saved internally in the tuple (used for inhomogeneous variables)
[ { "docid": "38f8b634d70dc51636de80aa80500e02", "score": "0.60192716", "text": "def get_var(self, variable_idx):\n return self._vars[variable_idx]", "title": "" } ]
[ { "docid": "42e60083787ccb0582288c5fbc1a95cf", "score": "0.68985456", "text": "def get_variable_by_index(self, i):\n if not isinstance(i, int):\n raise TypeError('i must be int: %s' % i)\n var = None\n if i < self.get_num_variables():\n name, var = self._vars.items()[i]\n return var", "title": "" }, { "docid": "f75a4bb420c33aef7b51f229bb085798", "score": "0.6694196", "text": "def get_var_index(a):\n if z3_debug():\n _z3_assert(is_var(a), \"Z3 bound variable expected\")\n return int(Z3_get_index_value(a.ctx.ref(), a.as_ast()))", "title": "" }, { "docid": "76178256b5671e4f0314c1bc683493a9", "score": "0.6606815", "text": "def __getitem__(self, idx: Tuple):\n assert (1 <= idx[0] <= len(self._var_list)\n and 1 <= idx[1] <= len(self._var_list[0]))\n\n return self._var_list[idx[0]-1][idx[1]-1]", "title": "" }, { "docid": "0a515bc3a62288b33a494e5343de2801", "score": "0.64349264", "text": "def getVarIdx(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "2cd1a135f8d77585176794984c5baf64", "score": "0.6404753", "text": "def __getitem__(self, index):\n return self.variables_[index]", "title": "" }, { "docid": "cf704e4066d1ae3709a1232364d5ea7f", "score": "0.6334768", "text": "def get_var(self):\n var_lst = [self.Board1.get_var,\n self.Board2.get_var,\n self.Board3.get_var,\n self.Board4.get_var,\n self.Board5.get_var,\n self.Board6.get_var,\n self.Board7.get_var,\n self.Board8.get_var,\n self.Board9.get_var]\n var = [ x[1] for x in var_lst ]\n idx = var.index(max(var))\n return (idx + 1, var_lst[idx][0], max(var))", "title": "" }, { "docid": "ff42362fdad2049b7db7603bef3ca818", "score": "0.61664206", "text": "def _getvar(self,vname):\n return self.nc.variables[vname]", "title": "" }, { "docid": "a6e0d668fd8290e02131e2aa10759743", "score": "0.60928905", "text": "def __getitem__(n):", "title": "" }, { "docid": "0082f64e3525ee8cd215cdc88e19b13f", "score": "0.6031233", "text": "def nvar(self):\n return self._nvar", "title": "" }, { "docid": "2e9a3b48622b0b6f9a32895a833f46d1", "score": "0.6026095", "text": "def getNVars(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4f827ebab411cac77eab89ad4d13fb78", "score": "0.59315", "text": "def indices_of_var(v):\n name = v.varName\n indices = name[2:].split(',')\n i, j = int(indices[0]), int(indices[1])\n return i, j", "title": "" }, { "docid": "9756b44f318e71297e78bc9191ff16f4", "score": "0.5897514", "text": "def f(t: Tuple[int, ...], idx: int) -> int:\n return t[idx]", "title": "" }, { "docid": "57fc322d3499bede64c72f8cc03f7f31", "score": "0.5891767", "text": "def get_variable(self):\n return self._var_names", "title": "" }, { "docid": "9aa3078c1bf2557df055309e44bb4765", "score": "0.5849156", "text": "def var_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:\n pass", "title": "" }, { "docid": "9aa3078c1bf2557df055309e44bb4765", "score": "0.5849156", "text": "def var_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:\n pass", "title": "" }, { "docid": "9aa3078c1bf2557df055309e44bb4765", "score": "0.5849156", "text": "def var_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:\n pass", "title": "" }, { "docid": "9aa3078c1bf2557df055309e44bb4765", "score": "0.5849156", "text": "def var_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:\n pass", "title": "" }, { "docid": "9aa3078c1bf2557df055309e44bb4765", 
"score": "0.5849156", "text": "def var_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:\n pass", "title": "" }, { "docid": "9dcae023bd7cce65e7196bf62f1139d8", "score": "0.5817944", "text": "def getitem(v, n):\n try:\n return v[n]\n except TypeError:\n return v[int(n)]", "title": "" }, { "docid": "1e79db681fa9962eeebded3e17d5b7b3", "score": "0.5810613", "text": "def _get_tuple_n(x, n, tp):\n assert tp is not list\n if isinstance(x, tp):\n x = [x,] * n\n assert len(x) == n, 'Parameters should be {} or list of N elements.'.format(\n tp)\n for i in x:\n assert isinstance(i, tp), 'Elements of list should be {}.'.format(tp)\n return x", "title": "" }, { "docid": "5a2ef23998b1ffd21a1cf6c8e390c253", "score": "0.5798147", "text": "def __getitem__(self, pos): \n return self.variables[pos]", "title": "" }, { "docid": "8370d7a2f873a928b35ea9a7d0f7361d", "score": "0.5722829", "text": "def scan_variable(text, idx):\n var = check_variable(text, idx)\n if var:\n if var[-1] == \"$\":\n var = (Element.strvar, var)\n else:\n var = (Element.numvar, var)\n return var, idx + len(var[1])\n else:\n raise GBasicSyntaxError(idx, \"Variable name expected\")", "title": "" }, { "docid": "46ab7580d8ee67da1d98ba8342896577", "score": "0.568494", "text": "def vals_nod_var_named(self,*args):\r\n\t\tvariableIndex = self.find_var_index(self.name_nod_var(),len(self.name_nod_var()),args[0])\r\n\t\tname = \"vals_nod_var\"+str(variableIndex+1)\r\n\t\tif len(args) == 1:\r\n\t\t\treturn self.src.variables[name].getValue()\r\n\t\tif len(args) == 3:\r\n\t\t\treturn self.src.variables[name].getValue()[args[1],args[2]]\r\n\t\tif len(args)==2:\r\n\t\t\tif isinstance(args[1],ndarray):\r\n\t\t\t\tself.src.variables[name] = args[1]\r\n\t\t\telse:\r\n\t\t\t\treturn self.src.variables[name].getValue()[args[1],:]", "title": "" }, { "docid": "6b615899e6cfa79ba9ed8334c0f29b16", "score": "0.567275", "text": "def __getitem__(self, index):\r\n return OnnxVar(self, index=index, op=OnnxOperatorItem)", "title": "" }, { "docid": "cd76bdcb6eed81a72f75edfde20d724f", "score": "0.5659723", "text": "def getValue(self, n = None):\n\t\tif n is None: return self.data\n\t\treturn self.data[n]", "title": "" }, { "docid": "1c07da76e6c1cb22272260a6e15e6a3f", "score": "0.5647797", "text": "def __getitem__(self, n: int) -> Tuple[Tensor, Union[str, List[str]]]:\n fileid = self._Walker[n]\n if not isinstance(fileid, list):\n fileid = [fileid]\n return self.load_item(fileid, self._path)", "title": "" }, { "docid": "7a2c573926600ed1b2d3233a13dee297", "score": "0.5614701", "text": "def ttake_nth(n, seq):\n return tuple(take_nth(n, seq))", "title": "" }, { "docid": "9213144a96193d05d18756e34f237b7f", "score": "0.56141704", "text": "def var_name(self, idx):\n if z3_debug():\n _z3_assert(idx < self.num_vars(), \"Invalid variable idx\")\n return _symbol2py(self.ctx, Z3_get_quantifier_bound_name(self.ctx_ref(), self.ast, idx))", "title": "" }, { "docid": "2ae78f53f42de967fa260a23a243e234", "score": "0.56020737", "text": "def _call_ntuple(self, tx, args, kwargs, options):\n if self.value is torch.nn.modules.utils._ntuple:\n count = args[0].as_python_constant()\n else:\n count = self.value.__closure__[0].cell_contents\n assert isinstance(count, int)\n\n def handle_ntuple(value):\n if value.has_unpack_var_sequence(tx):\n return variables.TupleVariable(\n list(value.unpack_var_sequence(tx)),\n **VariableTracker.propagate(self, value, args, kwargs.values()),\n )\n elif value.is_python_constant():\n # constant prop through it\n return 
variables.ConstantVariable(\n torch.nn.modules.utils._ntuple(count)(value.as_python_constant()),\n **VariableTracker.propagate(self, value, args, kwargs.values()),\n )\n else:\n unimplemented(f\"torch.nn.modules.utils._ntuple({value})\")\n\n if self.value is torch.nn.modules.utils._ntuple:\n return variables.LambdaVariable(handle_ntuple, **options)\n else:\n return handle_ntuple(args[0])", "title": "" }, { "docid": "67361c629a630ee0e91fedc1ec861f31", "score": "0.5595162", "text": "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "title": "" }, { "docid": "276ebaf3be5ae6eed09fcea8fc26e025", "score": "0.55669194", "text": "def vars(self, name, nr, rc=None, e=None):\n gd = self.variable_dict()\n return tuple([gd[e] for e in self.varstrs(name, nr, rc, e)])", "title": "" }, { "docid": "d802990516d09d6a6f9e143112af131b", "score": "0.5559362", "text": "def __getitem__(self, n):\n return self.images[n], self.invvars[n], self.skies[n], self.lams[n], self.kernels[n]", "title": "" }, { "docid": "878f0b9fa83ec7cbb932106e1b756283", "score": "0.5556388", "text": "def extract_mip_idx(variables):\n def ravel_multi_index(multi_index, x, vert_offset):\n \"\"\"Ravel a multi-index and add a vertical offset to it.\n \"\"\"\n ravel_idx = np.ravel_multi_index(multi_index, max(x.shape, (1,)), order='F')\n return [(vert_offset + idx,) for idx in ravel_idx]\n boolean_idx = []\n integer_idx = []\n vert_offset = 0\n for x in variables:\n if x.boolean_idx:\n multi_index = list(zip(*x.boolean_idx))\n boolean_idx += ravel_multi_index(multi_index, x, vert_offset)\n if x.integer_idx:\n multi_index = list(zip(*x.integer_idx))\n integer_idx += ravel_multi_index(multi_index, x, vert_offset)\n vert_offset += x.size\n return boolean_idx, integer_idx", "title": "" }, { "docid": "d9763b15a90177d7091b2a0be945b4ad", "score": "0.55453736", "text": "def var(*args, **kwds):\n if len(args)==1:\n name = args[0]\n else:\n name = args\n G = salvus.namespace\n v = sage.all.SR.var(name, **kwds)\n if isinstance(v, tuple):\n for x in v:\n G[repr(x)] = x\n else:\n G[repr(v)] = v\n return v", "title": "" }, { "docid": "e399cca0dd9a2e098f7e64d45a4058b3", "score": "0.5515444", "text": "def variable_node(self, var):\n for n in self.nodes:\n if n.variable == var:\n return n\n raise Exception(\"No such variable: {}\".format(var))", "title": "" }, { "docid": "7f650cf8c933a084ef38b62eb1c17666", "score": "0.5507114", "text": "def __getitem__(self, n):\r\n return self.adj[n]", "title": "" }, { "docid": "c30d5f56a1a02b02296f5e48c03a14de", "score": "0.5502471", "text": "def __getitem__(number):", "title": "" }, { "docid": "c37d7f1e725252d3a2dd35bb57684cc2", "score": "0.5463807", "text": "def vals_elem_var_named(self,*args):\r\n\t\tvariableIndex = self.find_var_index(self.name_elem_var(),len(self.name_elem_var()),args[1])\r\n\t\tname = \"vals_elem_var\"+str(variableIndex+1)\r\n\t\tname = name+\"eb\"+str(args[0])\r\n\t\tif len(args) == 2:\r\n\t\t\treturn self.src.variables[name].getValue()\r\n\t\tif len(args) == 4:\r\n\t\t\treturn self.src.variables[name].getValue()[args[2],args[3]]\r\n\t\tif len(args)==3:\r\n\t\t\tif isinstance(args[2],ndarray):\r\n\t\t\t\tself.src.variables[name] = args[2]\r\n\t\t\telse:\r\n\t\t\t\treturn self.src.variables[name].getValue()[args[2],:]", "title": "" }, { "docid": "a9f5659457b640d1c04b7b4ae37b65b1", "score": "0.5460597", "text": "def nth(f, *N):\n if all(isinstance(n, int) for n in N):\n return dmp_ground_nth(f.rep, N, f.lev, f.dom)\n else:\n raise TypeError(\"a sequence of integers expected\")", "title": "" 
}, { "docid": "d59489691ddb619549124432a435943c", "score": "0.54532313", "text": "def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:\n fileid, line = self._filelist[n]\n return self._load_tedlium_item(fileid, line, self._path)", "title": "" }, { "docid": "e946aab47e3e0dd97db33d88c501cf38", "score": "0.54397917", "text": "def extract_index_nparray(nparray):\n index = None\n for num in nparray[0]:\n index = num\n break\n return index", "title": "" }, { "docid": "c9e59c492c0a3d3b9f11c5a80107a37d", "score": "0.5423054", "text": "def __getitem__(self, index):\r\n if isinstance(index, OnnxVar):\r\n # scenario 2\r\n return OnnxVar(self, index, op='filter')\r\n\r\n if isinstance(index, int):\r\n # Use Gather instead.\r\n return OnnxVar(\r\n self, numpy.array(index, dtype=numpy.int64),\r\n axis=0, op=OnnxGather)\r\n\r\n if not isinstance(index, tuple):\r\n index = (index, )\r\n\r\n # only one integer?\r\n ni = None\r\n ax = None\r\n for i, a in enumerate(index):\r\n if isinstance(a, int):\r\n if ni is None:\r\n ni = i\r\n ax = a\r\n else:\r\n ax = None\r\n ni = None\r\n break\r\n if (isinstance(a, slice) and a.start is None and\r\n a.stop is None and a.step is None):\r\n continue\r\n ax = None\r\n ni = None\r\n break\r\n if ni is not None and ax is not None:\r\n # Use Gather instead.\r\n return OnnxVar(\r\n self, numpy.array(ni, dtype=numpy.int64),\r\n axis=ax, op=OnnxGather)\r\n\r\n # scenario 1\r\n starts = []\r\n ends = []\r\n axes = []\r\n steps = []\r\n axis_squeeze = []\r\n needs_shape = []\r\n for i, ind in enumerate(index):\r\n if isinstance(ind, int):\r\n starts.append(ind)\r\n ends.append(ind + 1)\r\n axes.append(i)\r\n steps.append(1)\r\n axis_squeeze.append(i)\r\n continue\r\n if isinstance(ind, slice):\r\n if ind.start is None and ind.stop is None and ind.step is None:\r\n continue\r\n start = 0 if ind.start is None else ind.start\r\n end = (None, i) if ind.stop is None else ind.stop\r\n step = 1 if ind.step is None else ind.step\r\n starts.append(start)\r\n ends.append(end)\r\n axes.append(i)\r\n steps.append(step)\r\n if isinstance(end, tuple):\r\n needs_shape.append(len(ends) - 1)\r\n elif isinstance(end, OnnxVar):\r\n needs_shape.append(end)\r\n continue\r\n raise NotImplementedError( # pragma: no cover\r\n \"Not implemented for type %r.\" % type(ind))\r\n\r\n if max(steps) == min(steps) == 1:\r\n steps = None\r\n else:\r\n steps = numpy.array(steps, dtype=numpy.int64)\r\n\r\n starts = numpy.array(starts, dtype=numpy.int64)\r\n axes = numpy.array(axes, dtype=numpy.int64)\r\n\r\n if len(needs_shape) > 0:\r\n shape = self.shape\r\n conc = []\r\n for e in ends:\r\n if isinstance(e, tuple):\r\n conc.append(\r\n OnnxVar(shape, numpy.array([e[1]], numpy.int64),\r\n op=OnnxGather))\r\n elif isinstance(e, OnnxVar):\r\n conc.append(\r\n e.reshape(numpy.array([-1], dtype=numpy.int64)))\r\n else:\r\n conc.append(numpy.array([e], dtype=numpy.int64))\r\n if len(conc) > 1:\r\n ends = OnnxVar(*conc, op=OnnxConcat, axis=0)\r\n else:\r\n ends = conc[0]\r\n else:\r\n ends = numpy.array(ends, dtype=numpy.int64)\r\n\r\n if steps is None:\r\n sliced = OnnxVar(self, starts, ends, axes, op=OnnxSlice)\r\n else:\r\n sliced = OnnxVar(self, starts, ends, axes, steps, op=OnnxSlice)\r\n if len(axis_squeeze) > 0:\r\n return OnnxVar(\r\n sliced, numpy.array(axis_squeeze, dtype=numpy.int64),\r\n op=OnnxSqueeze)\r\n return sliced", "title": "" }, { "docid": "28c93ac77e72fb87a2092e85a25995b0", "score": "0.5419556", "text": "def RealVar(idx, ctx=None):\n return Var(idx, RealSort(ctx))", 
"title": "" }, { "docid": "0e09f504e68c45f451f0331d2073cc8e", "score": "0.53997", "text": "def repackage_var(h):\n return Variable(h.data) if type(h) == Variable else tuple(repackage_var(v) for v in h)", "title": "" }, { "docid": "0e09f504e68c45f451f0331d2073cc8e", "score": "0.53997", "text": "def repackage_var(h):\n return Variable(h.data) if type(h) == Variable else tuple(repackage_var(v) for v in h)", "title": "" }, { "docid": "6024b876e9461ae59b4e90b5d254e075", "score": "0.5383264", "text": "def __getitem__(self, pos: int) -> Tuple[Tensor]:\n return tuple([x[pos] for x in self._tensors.values()])", "title": "" }, { "docid": "ae8e1036e50c52087b3cc334a30b5a60", "score": "0.5361433", "text": "def find_nth_element_in_gp(int_a, int_r, int_n):\r\n nth_element = int_a*(int_r**int_n)\r\n return nth_element", "title": "" }, { "docid": "0a439f3d19dc9ec5ba16f5d8e7176b43", "score": "0.53553385", "text": "def get_nth_sexpr(self, n):\n if n < 0:\n raise ValueError(\"n must be greater than 0!\")\n\n if n==0:\n return self\n \n for arg in self.args:\n return arg.get_nth_sexpr(n-1)\n \n raise ValueError(\"n is too large!\")", "title": "" }, { "docid": "b04e19ec851462c7de32cbea05fd3843", "score": "0.53408957", "text": "def get_variable(x):\n if use_cuda:\n return x.cuda()\n return x", "title": "" }, { "docid": "0b6b66e39a1e064b3901ddd52bbc47f5", "score": "0.53398645", "text": "def __getitem__(self, index):\n if isinstance(index, tuple):\n a, b = index\n return self.state[a][b]\n else:\n return self.state[index]", "title": "" }, { "docid": "1113b37f60d692e34fee7d3159ea820c", "score": "0.53358346", "text": "def _GetVarSub(test, w):\n test.assertEqual(1, len(w.parts))\n return w.parts[0]", "title": "" }, { "docid": "6dd7aada89221a5eeec0e11c7270356d", "score": "0.53283995", "text": "def get_variablename(cursor, ReportVariableDataDictionaryIndex):\n sql = \"SELECT VariableName FROM ReportVariableDataDictionary WHERE ReportVariableDataDictionaryIndex = %s\" % (ReportVariableDataDictionaryIndex, )\n cursor.execute(sql)\n matrix = [row for row in cursor]\n return matrix[0][0]", "title": "" }, { "docid": "1dc3699731f0b596c00a65293176477e", "score": "0.5324723", "text": "def _tuple_get(tup, index, default=None):\n return (tup[index : index + 1] or [default])[0]", "title": "" }, { "docid": "657e0cea32e461dd7d311cf69e33809b", "score": "0.5313453", "text": "def get_vars(n):\n op = n[0]\n if op.startswith('_') and op.endswith('_'):\n op = op.strip('_')\n if op == 'var':\n return [n[1]]\n return []\n else:\n ret = []\n for c in n[1:]:\n vs = get_vars(c)\n if vs:\n ret.extend(vs)\n return ret", "title": "" }, { "docid": "0e88d6e311f70d602b5f817bd0935313", "score": "0.53107363", "text": "def getNthFloatingSpeciesId (self, index):\n return self.getListOfFloatingSpecies()[index]", "title": "" }, { "docid": "48847d03d6c68513c737b3f115a6e1e4", "score": "0.5309058", "text": "def tup(self):\n return (self.ndim,self.idx)", "title": "" }, { "docid": "f594f9fbbb087ecc7354cb2bad9da3ca", "score": "0.5279657", "text": "def nc_get_variable(self, construct, default=None):\n return construct.nc_get_variable(default=default)", "title": "" }, { "docid": "f1b58d9b5ee6aa660421c10889d9b03a", "score": "0.52716625", "text": "def _take_first(elem: tuple) -> int:\r\n return elem[0]", "title": "" }, { "docid": "9539d897b01a8628a7a5c8e034d5233f", "score": "0.52715605", "text": "def get_var(key,dim):\n global global_var, stack_var,vm, gen_code\n value = None\n found = False\n if len(stack_var) > 0:\n if key in stack_var[-1]:\n if len(dim) == 2:\n 
c_space = vm.request(\"local\",dereference(stack_var[-1][key][\"type\"]))#Assumes **datatype\n gen_code.append((_INDEX_,stack_var[-1][key][\"address\"],dim[0],c_space))\n gen_code.append((_INDEX_,c_space,dim[1],c_space))\n value = c_space\n found = True\n elif len(dim) == 1:\n c_space = vm.request(\"local\",stack_var[-1][key][\"type\"])#Assumes *datatype\n gen_code.append((_INDEX_,stack_var[-1][key][\"address\"],dim[0],c_space))\n value = c_space\n found = True\n else:\n value = stack_var[-1][key][\"address\"]\n found = True\n \n if key in global_var:\n if len(dim) == 2:\n c_space = vm.request(\"local\",dereference(global_var[key][\"type\"]))#Assumes **datatype\n gen_code.append((_INDEX_,global_var[key][\"address\"],dim[0],c_space))\n gen_code.append((_INDEX_,c_space,dim[1],c_space))\n value = c_space\n found = True\n elif len(dim) == 1:\n c_space = vm.request(\"local\",global_var[key][\"type\"])#Assumes *datatype\n gen_code.append((_INDEX_,global_var[key][\"address\"],dim[0],c_space))\n value = c_space\n found = True\n else:\n value = global_var[key][\"address\"]\n found = True\n \n if not found:\n print(\"Error: undeclared variable\",key, \"on get_var\")\n raise SyntaxError\n \n return value", "title": "" }, { "docid": "b2828e54521fa8176a9738f7cf8c10fe", "score": "0.52463555", "text": "def __getitem__(self, n):\n return self.stages[n]", "title": "" }, { "docid": "04541976d9ba07d0d713c5e90340943c", "score": "0.523593", "text": "def get(self, var_name:str, t:int, batch_dims:Optional[tuple(int,int)]=None) -> torch.Tensor:\n assert var_name in self.variables, \"Unknoanw variable '\" + var_name + \"'\"\n return self.variables[var_name].get(t, batch_dims=batch_dims)", "title": "" }, { "docid": "5955507564645ee347ea7aa93bff6e59", "score": "0.52342683", "text": "def __getitem__(self, i):\n return self.decomposition()[i]", "title": "" }, { "docid": "e8f6167a60bdbcc199be070615db5ee0", "score": "0.523398", "text": "def getEventVariable (self, event, assignmentIndex):\n myEvent = self.model.getEvent(event)\n eventAss = myEvent.getEventAssignment(assignmentIndex) \n return eventAss.getVariable()", "title": "" }, { "docid": "77ef2e83167106d0fe1e2a7e538840e6", "score": "0.52294725", "text": "def _get_tuple_element(instance, position):\n if isinstance(instance, tuple) and position < len(instance):\n return instance[position]\n return tuple()", "title": "" }, { "docid": "ea8555dc8a6bde60a692418d92205428", "score": "0.5215369", "text": "def num_elem_var(self,*args):\r\n\t\tif len(args) == 0:\r\n\t\t\treturn self.src.dimensions[\"num_elem_var\"]\r\n\t\tif len(args) == 1:\r\n\t\t\tself.src.dimensions[\"num_elem_var\"] = args[0]", "title": "" }, { "docid": "7a9689e4399103b1916c822c5c1df3c5", "score": "0.52001745", "text": "def __getitem__(self, idx):\n if _is_int(idx):\n if idx >= len(self):\n raise IndexError\n num_consts = Z3_model_get_num_consts(self.ctx.ref(), self.model)\n if (idx < num_consts):\n return FuncDeclRef(Z3_model_get_const_decl(self.ctx.ref(), self.model, idx), self.ctx)\n else:\n return FuncDeclRef(Z3_model_get_func_decl(self.ctx.ref(), self.model, idx - num_consts), self.ctx)\n if isinstance(idx, FuncDeclRef):\n return self.get_interp(idx)\n if is_const(idx):\n return self.get_interp(idx.decl())\n if isinstance(idx, SortRef):\n return self.get_universe(idx)\n if z3_debug():\n _z3_assert(False, \"Integer, Z3 declaration, or Z3 constant expected\")\n return None", "title": "" }, { "docid": "fe8406a254d2d05075a6af3052a9793a", "score": "0.5189003", "text": "def t(self, n):\n\n if n not in 
self.dataInv:\n return ()\n Crank = self.api.C.rank.data\n if self.doValues:\n return tuple(\n sorted(self.dataInv[n].items(), key=lambda mv: Crank[mv[0] - 1])\n )\n else:\n return tuple(sorted(self.dataInv[n], key=lambda m: Crank[m - 1]))", "title": "" }, { "docid": "9f4530e27cedb37fb64da71e8e5e7284", "score": "0.51885104", "text": "def __getitem__(self, i):\n return self.nodes[i][1]", "title": "" }, { "docid": "5400fd9ec17d73caa6eab5adb1672027", "score": "0.5179158", "text": "def level_of_var(self, var):", "title": "" }, { "docid": "35d5000ba45192587b894c494d23b16d", "score": "0.5156873", "text": "def Var(idx, s):\n if z3_debug():\n _z3_assert(is_sort(s), \"Z3 sort expected\")\n return _to_expr_ref(Z3_mk_bound(s.ctx_ref(), idx, s.ast), s.ctx)", "title": "" }, { "docid": "cef7ccc27ecaf9ac9e60d8b068ca02d7", "score": "0.5152383", "text": "def extract_variable(var_info, raw_info, out_dir, attrs):\n var = var_info.short_name\n with catch_warnings():\n filterwarnings(\n action='ignore',\n message='Ignoring netCDF variable .* invalid units .*',\n category=UserWarning,\n module='iris',\n )\n cubes = iris.load(raw_info['file'])\n rawvar = raw_info['name']\n for cube in cubes:\n if cube.var_name == rawvar:\n # Extracting a certain vegetation transition code\n itr = raw_info['iTr']\n itr_index = np.where(\n cube.coord('Vegetation transition code').points == itr)[0][0]\n cube = cube[itr_index, :, :, :]\n # Add the vegetation transition code as an attribute\n cube.attributes['Vegetation transition code'] = itr\n # Remove it as a coordinate, since otherwise it would\n # violate CMOR standards\n cube.remove_coord('Vegetation transition code')\n # Fix metadata\n fix_var_metadata(cube, var_info)\n # Fix coords\n fix_coords(cube)\n # Now set the time coordinate properly\n fix_time_coord_duveiller2018(cube)\n # Latitude has to be increasing so flip it\n # (this is not fixed in fix_coords)\n logger.info(\"Flipping dimensional coordinate latitude\")\n cube = cube[:, ::-1, :]\n # Global attributes\n set_global_atts(cube, attrs)\n save_variable(cube, var, out_dir, attrs, local_keys=['positive'])", "title": "" }, { "docid": "b2b3e3eae2ef0f59c4ed220f39f94d53", "score": "0.5149892", "text": "def fst(t: Tuple) -> Any:\n\n return t[0]", "title": "" }, { "docid": "2044771f6e57946ef718953c3d0dd267", "score": "0.5146585", "text": "def snd(t: Tuple) -> Any:\n\n return t[1]", "title": "" }, { "docid": "89d1a65bfc1e2dff9a2974295ae75c48", "score": "0.51395494", "text": "def _nth(n):\n t = 'th'\n a = {'11': t, '12': t, '13': t, '1': 'st', '2': 'nd', '3': 'rd'}\n n = str(int(n))\n return n + a.get(n[-2:], a.get(n[-1], t))", "title": "" }, { "docid": "cb71dd9bb18eaec6b26f85ed74ec59b4", "score": "0.51390857", "text": "def get_var_soln(self, label):\n\n i_label = self.var_labels.index(label)\n return self.var_array[:, i_label]", "title": "" }, { "docid": "e3bcd80c67b8bff8cacaabe927d819c6", "score": "0.5136781", "text": "def getcolumn(data, n):\n column = data[:,n]\n return column", "title": "" }, { "docid": "f6fffd894dcef96ba1f0f5fed9a97de6", "score": "0.5134121", "text": "def vals_nod_var(self,*args):\r\n\t\tif len(args) == 0:\r\n\t\t\treturn self.src.variables[\"vals_nod_var\"].getValue()\r\n\t\tif len(args) == 3:\r\n\t\t\treturn self.src.variables[\"vals_nod_var\"].getValue()[args[0],args[1],args[2]]\r\n\t\tif len(args) == 1:\r\n\t\t\tself.src.variables[\"vals_nod_var\"] = args[0]", "title": "" }, { "docid": "e1ae858ce4c465919b47626ffeb062d7", "score": "0.5126214", "text": "def get(self, name):\n\t\t\treturn 
self.variables[name].val", "title": "" }, { "docid": "9e11ccd13b2b25df8b7190a12231de86", "score": "0.5120599", "text": "def tuple_get_item(attrs, in_xlayers):\n # type: (str, List[XLayer]) -> XLayer\n\n assert len(in_xlayers) == 1\n assert isinstance(in_xlayers[0].shapes, TupleShape)\n\n index = attrs['index']\n\n shape = in_xlayers[0].shapes[index][:]\n\n return {'shape': shape}", "title": "" }, { "docid": "1f8e023d1bec0c81c4a194b5fa364acd", "score": "0.511645", "text": "def nth(f, n):\n if isinstance(n, int):\n return dup_nth(f.rep, n, f.dom)\n else:\n raise TypeError(\"`int` expected, got %s\" % type(n))", "title": "" }, { "docid": "fb074a494ed8979a4e0347fc356d7e7c", "score": "0.5114274", "text": "def find_value(inp, n):\n\tpass", "title": "" }, { "docid": "7e2baeb51d02801e11e55ab1a8b0384b", "score": "0.5113121", "text": "def second_part(self, input_list: list, keyword_index: int) -> tuple:\n first_type = input_list[1]\n first_variable = self.second_part_values(first_type, input_list[1: keyword_index])\n second_type = input_list[keyword_index + 1]\n second_variable = self.second_part_values(second_type, input_list[keyword_index + 1:])\n return first_variable, second_variable", "title": "" }, { "docid": "05880968e9cc7539ea4abec23f3da2cb", "score": "0.5112769", "text": "def getValue(self, n):\n ai, bp = self.getPosition(n)\n return self.array[ai] >> bp & 1", "title": "" }, { "docid": "09f126e15e60b4a3a98342f3608cb399", "score": "0.5103756", "text": "def loc( self, ind ):\n try:\n out = self.neurons[ self.ids()[ind] ]\n except TypeError:\n raise TypeError\n except IndexError:\n raise IndexError\n return out", "title": "" }, { "docid": "64d627be494ddf43fc5fd5c539d2d52f", "score": "0.5094776", "text": "def get_var_indices(tensor, element):\n vector_index = tensor.index_vectorized(*element)\n lin_dim = int(np.sqrt(tensor.size))\n return (vector_index // lin_dim, vector_index % lin_dim)", "title": "" }, { "docid": "ae6ab13ccaff8deceab1feaf5201bbb1", "score": "0.50778484", "text": "def get_variables_at(self, frame, vars_indexes):\n f = self.file['hook']\n endian = self.file['endian']\n ftype, fsize = self.file['float']\n if fsize == 4:\n z = np.zeros((len(vars_indexes), self.npoin3), dtype=np.float32)\n else:\n z = np.zeros((len(vars_indexes), self.npoin3), dtype=np.float64)\n # if tags has 31 frames, len(tags)=31 from 0 to 30,\n # then frame should be >= 0 and < len(tags)\n if frame < len(self.tags['cores']) and frame >= 0:\n f.seek(self.tags['cores'][frame])\n f.seek(4+fsize+4, 1)\n for ivar in range(self.nvar):\n f.seek(4, 1)\n if ivar in vars_indexes:\n z[vars_indexes.index(ivar)] = \\\n unpack(endian+str(self.npoin3)+ftype,\n f.read(fsize*self.npoin3))\n else:\n f.seek(fsize*self.npoin3, 1)\n f.seek(4, 1)\n return z", "title": "" }, { "docid": "213ff1f9634bf621b2816c502c8eb264", "score": "0.50738424", "text": "def lookup(self, n):\n if n > self.length-1:\n raise IndexError(\"index out of range\")\n else:\n dummy = self.head\n\n for i in range(n):\n dummy = dummy.next\n\n return dummy.val", "title": "" }, { "docid": "c5760b112bd414229edc25bcf19d3d7a", "score": "0.5072549", "text": "def __getitem__(self, i):\r\n items = tuple.__getitem__(self, i)\r\n if type(i) is slice:\r\n return Attrs(items)\r\n return items", "title": "" }, { "docid": "db177169209ffc1c0a000d0d5111b827", "score": "0.50714797", "text": "def get(self, no: int) -> T:\n return self.__stack[-no]", "title": "" }, { "docid": "1a12a34f4a709c549331b069a1ddace5", "score": "0.50700265", "text": "def lookup(self, n):\r\n if n > 
self.length-1:\r\n raise IndexError(\"index out of range\")\r\n else:\r\n dummy = self.head\r\n\r\n for i in range(n):\r\n dummy = dummy.next\r\n\r\n return dummy.val", "title": "" }, { "docid": "f3beb6fbbe900cd4901631f853346b8b", "score": "0.5065242", "text": "def variable(self):\n return self._variable", "title": "" }, { "docid": "f3beb6fbbe900cd4901631f853346b8b", "score": "0.5065242", "text": "def variable(self):\n return self._variable", "title": "" }, { "docid": "4dd82f8371732a87da9d9293c161d08b", "score": "0.5058048", "text": "def get_variable_name(read_variable_op):\n assert read_variable_op.type == 'ReadVariableOp'\n op = read_variable_op\n # Depending on whether we're on TPU or CPU, and whether control flow v2 is\n # enabled, the graph will have different structure. This loop is written to\n # support all known cases.\n while True:\n if op.type == 'VarHandleOp':\n return op.name\n if op.type == 'Placeholder':\n return op.name.split('/ReadVariableOp/')[1]\n assert len(op.inputs) == 1\n op = op.inputs[0].op", "title": "" }, { "docid": "144caa9b2ecb5bdba32d9070f12f7202", "score": "0.505112", "text": "def read_variablenumber(self):\n result = 1\n result = (result << 1) + self.read_bit()\n while self.read_bit():\n result = (result << 1) + self.read_bit()\n return result", "title": "" }, { "docid": "d54a938c370d55040395c6297490e483", "score": "0.5050429", "text": "def to_mathematica_index(*args):\n return tuple(i + 1 for i in args)", "title": "" }, { "docid": "6b2c33ef6e51c7610fab1b16d8ca09d0", "score": "0.5046238", "text": "def test_idx_to_var(self):\n f = [[-1, -2], [2], [-3, -4]]\n env = LocalSearchSAT(f)\n self.assertEqual(1, env.idx_to_var[0])\n self.assertEqual(2, env.idx_to_var[1])\n self.assertEqual(3, env.idx_to_var[2])\n self.assertEqual(4, env.idx_to_var[3])", "title": "" }, { "docid": "c254d624fa107ba24314b1fe93ec657d", "score": "0.50461596", "text": "def _nested_variable(init, name=None, trainable=False):\n if isinstance(init, list) or isinstance(init, tuple):\n result = [_nested_variable(i, name, trainable) for i in init]\n if isinstance(init, tuple):\n return tuple(result)\n return result\n else:\n return tf.Variable(init, name=name, trainable=trainable)", "title": "" }, { "docid": "4e9b399144a04db179d99ddc81d6df32", "score": "0.5038278", "text": "def visit_Variable(self, node):\n return Variable(node.name)", "title": "" }, { "docid": "d5112a160b99e4c25fa96165c5ef9c7a", "score": "0.50342244", "text": "def transform_variable(self):\n return self.args[2]", "title": "" }, { "docid": "bc1dc57768f20107295276f357d9c673", "score": "0.50328666", "text": "def __getitem__(self, index):\n if not self.init:\n L1Ana.log.error(\"L1Ntuple is not yet initialized! Aborting iteration.\")\n raise IndexError(\"L1Ntuple is not yet initialized!\")\n if not index < self.nevents:\n raise IndexError(\"Reached the end\")\n\n self.tree_main.GetEntry(index)\n return self.data", "title": "" } ]
4742e9667543d7672b73995437beb541
Returns source minus comments and docstrings.
[ { "docid": "8857504e72a705b5c341849824fc17b4", "score": "0.6218315", "text": "def remove_comments(source, docstrings=False):\n io_obj = io.StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif docstrings and token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n return out", "title": "" } ]
[ { "docid": "d3b9a8f99b5f4315f21e1380ddb89caa", "score": "0.7017617", "text": "def source(self):\r\n # return source only for that part of code\r\n return py.code.Source(self.raw)", "title": "" }, { "docid": "c41a2ee9aac8a62938784c9bb5443b37", "score": "0.7007371", "text": "def doctest_FuncSource_no_source_lines():", "title": "" }, { "docid": "8ce72cfb1f8cd96b5bf63ac343f84773", "score": "0.6892079", "text": "def fullsource(self):\r\n full, _ = source.findsource(self.raw)\r\n return full", "title": "" }, { "docid": "82091f51adc7e5988fd38df619d98871", "score": "0.6881105", "text": "def doctest_FuncSource_no_source_file():", "title": "" }, { "docid": "f59810c09ac479652d2f57bcb5e1e22f", "score": "0.6804334", "text": "def get_source_code(self):\n\t\treturn self.content", "title": "" }, { "docid": "0ab0e015905cbf16cdb0ba18f30b5185", "score": "0.6783657", "text": "def get_source(fullname):", "title": "" }, { "docid": "902a5658ad0ca4720e08f32b18e924a5", "score": "0.67235464", "text": "def parse_source(self):\n self._raw_parse()\n\n excluded_lines = self.first_lines(self.excluded)\n ignore = excluded_lines + list(self.docstrings)\n lines = self.first_lines(self.statement_starts, ignore)\n\n return lines, excluded_lines", "title": "" }, { "docid": "397b54729d08a8105ee1945d04039c21", "score": "0.660287", "text": "def get_doc(src) :\n pat = re.compile(r'((?:def|class)\\s+[^\\n]*\\s*)\"\"\"(.*?)\"\"\"',re.MULTILINE|re.DOTALL)\n return [gs for gs in pat.findall(src)]", "title": "" }, { "docid": "76e8cd3ed10437977af1d710ab16082e", "score": "0.65672874", "text": "def read_source_code() -> str:\n return this.browser.page_source", "title": "" }, { "docid": "d5370addbb11c09c6fde7cf328e8d5b7", "score": "0.653031", "text": "def __get_source(self, func):\n #get lines of the source and adjust indent\n sourcelines = inspect.getsourcelines(func)[0]\n #remove indentation from the first line\n sourcelines[0] = sourcelines[0].lstrip()\n return \"\".join(sourcelines)", "title": "" }, { "docid": "a535fc91abc630f374bb6a110085e391", "score": "0.65008384", "text": "def getSource(self):\n from zLOG import LOG, INFO\n source_code = getattr(self.getDocumentedObject(), \"src\")\n portal_transforms = getattr(self, 'portal_transforms', None)\n if portal_transforms is not None:\n REQUEST = getattr(self, 'REQUEST', None)\n if REQUEST is not None:\n if REQUEST.get('portal_skin', 'View' ) != 'View':\n return \"\"\n else:\n LOG('DCWorkflowScriptDocumentationHelper', INFO,\n 'Transformation Tool is not installed. 
No convertion of python script to html')\n return source_code\n src_mimetype='text/plain'\n mime_type = 'text/html'\n source_html = portal_transforms.convertTo(mime_type, source_code, mimetype = src_mimetype)\n return source_html.getData()", "title": "" }, { "docid": "188390240ba1cc1697320dc0f6164adb", "score": "0.642758", "text": "def remove_comments_and_docstrings(source):\n io_obj = io.StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n return out", "title": "" }, { "docid": "c5598b189539828cd187fb93f5d2cea1", "score": "0.6339551", "text": "def remove_comments_and_docstrings(source):\n io_obj = StringIO.StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4] # lint:ok\n # The following two conditionals preserve indentation.\n # This is necessary because we're not using tokenize.untokenize()\n # (because it spits out code with copious amounts of oddly-placed\n # whitespace).\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n flag = False\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. 
Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n flag = True\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n if not flag:\n # an empty string must be kept, for contructs like\n #\n # def f():\n # \"\"\"this function only has a docstring\"\"\"\n #\n # removing the docstring would result in an indentation error\n\n out += '\"\"'\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n return out", "title": "" }, { "docid": "44fec50bd19713c8a8fd27a6aed67c3f", "score": "0.6315168", "text": "def getsource(object):\n name = inspect.getfile(object)\n if hasattr(name, '__source__'):\n src = str(name.__source__)\n else:\n try:\n src = inspect.getsource(object)\n except Exception: # catch IOError, IndentationError, and also rarely\n return None # some other exceptions like IndexError\n if hasattr(name, \"__sourceargs__\"):\n return src % name.__sourceargs__\n return src", "title": "" }, { "docid": "b33b6d1fc585a53d1f065d721263ae5c", "score": "0.63150585", "text": "def source_contents(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"source_contents\")", "title": "" }, { "docid": "1d57fe79b79c72a4014908d4945410f0", "score": "0.6257052", "text": "def get_python_src(self, f):\n\n # get the source and remove the decorator line.\n src = inspect.getsource(f)\n src = re.sub(r'^\\s*@\\w+.*\\n', '', src)\n\n # Strip off indentation if the function is not defined at top\n # level.\n src = re.sub(r'^\\s*', '', src)\n return src", "title": "" }, { "docid": "632050eac292e27da2b7596979f395e7", "score": "0.6249193", "text": "def source(sid):\n f = open(sourceFiles[0])\n src = f.read() \n f.close()\n return src", "title": "" }, { "docid": "94046cb0ba8a92e789bb881c36891a1e", "score": "0.62187344", "text": "def _get_sources():\n sp = [\n SRC_DIR / \"_pyots\" / \"bindings.cpp\",\n ]\n\n # woff2 sources\n sp.append(SRC_SUB_DIR / f\"woff2-{WOFF2_TAG}\" / \"src\" / \"table_tags.cc\")\n sp.append(SRC_SUB_DIR / f\"woff2-{WOFF2_TAG}\" / \"src\" / \"variable_length.cc\") # noqa: E501\n sp.append(SRC_SUB_DIR / f\"woff2-{WOFF2_TAG}\" / \"src\" / \"woff2_common.cc\")\n sp.append(SRC_SUB_DIR / f\"woff2-{WOFF2_TAG}\" / \"src\" / \"woff2_dec.cc\")\n sp.append(SRC_SUB_DIR / f\"woff2-{WOFF2_TAG}\" / \"src\" / \"woff2_out.cc\")\n\n return [str(p) for p in sp]", "title": "" }, { "docid": "e440d221e101934433c26e052e254589", "score": "0.620918", "text": "def full_cleaning(source: Source) -> Source:\n result = []\n previous_token = INDENT\n (previous_end_row, previous_end_col) = (-1, 0)\n text = str(source)\n text = Cleanup.suppress_first_comments(text)\n text = Cleanup.suppress_main_guard(text)\n text = Cleanup.suppress_sys_path_injection(text)\n text = text.replace(\"\\t\", \" \")\n lines = iter(text.split(\"\\n\"))\n for token_info in generate_tokens(lambda: next(lines) + \"\\n\"):\n (token, string, (start_row, start_col), (end_row, end_col), _) = token_info\n if start_row > previous_end_row:\n previous_end_col = 0\n result.append(\" \" * max(0, start_col - previous_end_col))\n if token == COMMENT:\n 
(string, n) = Cleanup.normalize_paroxython_comments(string)\n if n == 0:\n continue\n result.append(string)\n elif token == STRING and previous_token in (INDENT, DEDENT, NEWLINE):\n result.append(\"pass\\n\") # replace the docstring by a pass statement\n else:\n result.append(string)\n if (previous_token, token) == (NEWLINE, NL):\n previous_token = NEWLINE\n else:\n previous_token = token\n (previous_end_row, previous_end_col) = (end_row, end_col)\n text = \"\".join(result).strip()\n text = Cleanup.suppress_blank_lines(text)\n text = Cleanup.suppress_useless_pass_statements(text)\n return Source(text)", "title": "" }, { "docid": "240dd5188fe13bcb9335560b49fa2880", "score": "0.61927736", "text": "def as_wiki_source(self):\n return self.__class__.as_wiki_source.__doc__", "title": "" }, { "docid": "240dd5188fe13bcb9335560b49fa2880", "score": "0.61927736", "text": "def as_wiki_source(self):\n return self.__class__.as_wiki_source.__doc__", "title": "" }, { "docid": "3818b203309644f663cb66e5c48dd2d7", "score": "0.61401033", "text": "def get_source_description(self, sourcefile):\n for item in self.schema:\n if item.get('source', None) == sourcefile:\n return item", "title": "" }, { "docid": "581846ad692d5eda4f31057e1aded15c", "score": "0.61307853", "text": "def _scanSource(self):\n source = self.getSource()\n sourceDescPat = re.compile(r\"/\\*.*?\\*/\",re.S)\n sourceDesc = sourceDescPat.search(source)\n\n if sourceDesc:\n sourceDesc = sourceDesc.group()\n else:\n raise Exception(\"Source description not founded\")\n \n temp = re.search(r\"@author\\s+(.+)\",sourceDesc)\n if temp:\n self.author = temp.groups()[0]\n\n temp = re.search(r\"@url\\s+(.+)\",sourceDesc)\n if temp:\n self.url = temp.groups()[0]\n\n temp = re.search(r\"@module\\s+(.+)\",sourceDesc)\n if temp:\n self.module = temp.groups()[0]\n\n temp = re.search(r\"@licence\\s+(.+)\",sourceDesc)\n if temp:\n self.licence = temp.groups()[0]", "title": "" }, { "docid": "9a239a731098759e3f556433c2d07e5f", "score": "0.60576487", "text": "def source(self) -> str:\n return SOURCE", "title": "" }, { "docid": "bab2de9ef97ed4665893ada151d54b93", "score": "0.6057572", "text": "def get_source_info(self):\n return self.source_info", "title": "" }, { "docid": "0b3fa9ec40a81553b9cfcb75fe2c82e5", "score": "0.6039459", "text": "def remove_comments(source):\n return re.sub(r\";.*\\n\", \"\\n\", source)", "title": "" }, { "docid": "8715f5f49e2af53cd893d6630c67e1d3", "score": "0.60383", "text": "def compile_src(source: str) -> Tuple[Any, List[Error]]:\n result_tuple = compile_source(source, get_preprocessor(), get_grammar(), get_transformer(),\n get_compiler())\n return result_tuple[:2] # drop the AST at the end of the result tuple", "title": "" }, { "docid": "321d891ec443597cb268e5fdecc5b0e4", "score": "0.60246676", "text": "def _scanSource(self):\n source = self.getSource()\n sourceDescPat = re.compile(r\"/\\*.*?\\*/\",re.S)\n requiresPat = re.compile(r\"@requires\\s?([^@]+)[@\\*]\",re.S)\n includesPat = re.compile(r\"@includes\\s?(.+)[@\\*]\",re.S)\n sourceDesc = sourceDescPat.search(source)\n\n if sourceDesc:\n sourceDesc = sourceDesc.group()\n else:\n raise Exception(\"Source description not founded\")\n \n temp = re.search(r\"@author\\s+(.+)\",sourceDesc)\n if temp:\n self.author = temp.groups()[0]\n\n temp = re.search(r\"@version\\s+(.+)\",sourceDesc)\n if temp:\n self.version = temp.groups()[0]\n\n temp = re.search(r\"@package\\s+(.+)\",sourceDesc)\n if temp:\n self.package = temp.groups()[0]\n\n temp = re.search(r\"@module\\s+(.+)\",sourceDesc)\n if 
temp:\n self.module = temp.groups()[0]\n\n temp = requiresPat.search(sourceDesc)\n\n if temp:\n for line in temp.groups()[0].splitlines():\n line = re.search(r\"(\\w.+)\",line) \n if line:\n self.requires.add(line.groups()[0])\n self._requires.add(line.groups()[0])\n\n #includes bind to requires\n temp = includesPat.search(sourceDesc)\n\n if temp:\n for line in temp.groups()[0].splitlines():\n line = re.search(r\"(\\w.+)\",line) \n if line:\n self.requires.add(line.groups()[0])\n self.includes.add(line.groups()[0])", "title": "" }, { "docid": "4a73ec03333016e7b2cd55c690711452", "score": "0.5993199", "text": "def source(self):\n try:\n return self.afficher(\n self.document.afficher_source()\n )\n except AttributeError as err:\n f.traiter_erreur(err)\n # Si le type de document est inconnu ou ne prévoit pas d'affichage\n # de la source, on essaie de le traiter comme un document texte.\n # Sinon, on abandonne.\n try:\n return self.afficher(\n TXT.Document(self.chemin).afficher_source()\n )\n except TXT.FichierIllisible as err:\n f.traiter_erreur(err)\n return self.afficher(\n _(\"Extension inconnue : {}.\").format(err)\n + h.BR()\n + _('Voici les données de la requète :')\n + h.BR()\n + '{}'.format(\n str(h.BR()).join(\n \"{!s}={!r}\".format(item[0], item[1])\n for item in rq.query.items()\n )\n )\n )\n except NameError as err:\n f.traiter_erreur(err)\n b.abort(404)\n except TXT.FichierIllisible as err:\n f.traiter_erreur(err)\n return self.afficher(_(\"Ce fichier est illisible.\"))", "title": "" }, { "docid": "932d5f1cf32f19f999f2e03f2aa57494", "score": "0.59713495", "text": "def getSrcString(self):\n srcString = 'c -------------------------- Source Defination ----------------------------\\n'\n srcString += 'c 1 nanogram Cf-252 source = 1E-9 grams = 6.623E-11 cc \\n'\n srcString += 'sdef pos=-200 0 108.85 cel=70 par=SF rad=d1 \\n'\n srcString += 'si1 0 2.510E-04 \\n'\n srcString += 'sp1 -21 1 \\n'\n return srcString", "title": "" }, { "docid": "176c1bab1079f433c73b37f787a55ed1", "score": "0.5967901", "text": "def get_source(self):\n return self.browser.source()", "title": "" }, { "docid": "35158041c2aa3127ed664ade0619d68a", "score": "0.5954709", "text": "def get_source(fun):\n return inspect.getsource(fun)", "title": "" }, { "docid": "9bd0b4a364bd4535488ad060b7d657c3", "score": "0.59508634", "text": "def source(self) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "a6b1fbb088d9f8633f09373f00ddd0a5", "score": "0.59390116", "text": "def _get_source(function):\n\n name = function.__name__\n file = getsourcefile(function)\n\n # Get lines\n linecache.checkcache(file)\n module = getmodule(function, file)\n lines = linecache.getlines(file, module.__dict__)\n\n # Parse lines\n regex = \"(def {}\\()\".format(name)\n pat = re.compile(regex)\n\n for lnum, line in enumerate(lines):\n if pat.match(line):\n break\n\n firstline = lnum\n src = getblock(lines[lnum:])\n src = \"\".join(src)\n\n return src", "title": "" }, { "docid": "c27348b1bd25e2a85b1c87e2bbc1c253", "score": "0.5938962", "text": "def source(self) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "88e675733d762350adfa3568b52488b7", "score": "0.59365237", "text": "def remove_comments(source, _, __):\n sanitized = []\n in_string = False\n for line in source:\n save_line = True\n if line[0] == '#':\n # don't add if it's a commentcontinue\n continue\n elif \"#'\" in line:\n # these lines are flagged to be ignored\n continue\n elif '#' in line:\n # don't add the latter half if line contains comment\n line = 
line.split('#')[0]\n continue\n # logic to check for docstrings\n if len(line) > 2:\n if line[0:3] == '\"\"\"' and not in_string:\n in_string = True\n save_line = False\n if len(line)>5:\n if line[-3:] == '\"\"\"':\n in_string = False\n elif line[-3:] == '\"\"\"' and in_string:\n in_string = False\n save_line = False\n if save_line and not in_string:\n sanitized.append(line)\n return sanitized", "title": "" }, { "docid": "243698a137c2de258278772842e9cdb4", "score": "0.5934301", "text": "def HelpSource(self) -> str:", "title": "" }, { "docid": "a373b7dbda66c7b7ed1acbe43576c4a3", "score": "0.5933849", "text": "def get_source(self, fullmodname):\n submodname, is_package, fullpath, source = self._get_source(fullmodname)\n return source", "title": "" }, { "docid": "6c8f084f21135522faf53e672fd8d5b4", "score": "0.5932881", "text": "def _pruned_source(func):\n try:\n lines = inspect.getsource(func).split(\"\\n\")\n leading_space = len(lines[0]) - len(lines[0].lstrip(\" \"))\n lines = [line[leading_space:] for line in lines]\n return \"\\n\".join(lines)\n except IOError as err:\n if sys.version_info[0] == 2 and str(err) == \"could not get source code\":\n logging.log(\n logging.CRITICAL,\n \"This module is not fully operated under Python2... \" \"Please move to Python3!\",\n )\n raise err", "title": "" }, { "docid": "f6e9803aa7d4d0ce08a2a8c84f3fe578", "score": "0.592557", "text": "def source(self):\n # Call separate method for polymorphism\n return self._get_source()", "title": "" }, { "docid": "2e8a3097e3fd4f2ab0f8ae8b94756bf4", "score": "0.59225076", "text": "def flattened_source_code(self) -> Optional[\"str\"]:\n return self.other_data[\"abi\"][\"source\"]", "title": "" }, { "docid": "9157c2dc9edbffeaa23d98c0b73a3e64", "score": "0.5912145", "text": "def _copy_doc(source):\n def deco(f):\n f.__doc__ = source.__doc__\n return f\n return deco", "title": "" }, { "docid": "53b30cd5661f728eecf0a086e65ad0d0", "score": "0.58990675", "text": "def get_source_files(self):\n return [item['source'] for item in self.schema if 'source' in item]", "title": "" }, { "docid": "c32fbea028093c979cbabfe16dfa8a45", "score": "0.58850145", "text": "def source(self) -> str:\n return", "title": "" }, { "docid": "009fbef9b46352bc0401901fdc5120a1", "score": "0.58815163", "text": "def module_source(self, string):\r\n (out, c) = self(string)\r\n iout = self.indent(out)\r\n result = module_source_template % iout\r\n return result", "title": "" }, { "docid": "f054ff20c7d2144ab6b05f0df37b719b", "score": "0.58530706", "text": "def testsource(module, name):\r\n module = _normalize_module(module)\r\n tests = DocTestFinder().find(module)\r\n test = [t for t in tests if t.name == name]\r\n if not test:\r\n raise ValueError(name, \"not found in tests\")\r\n test = test[0]\r\n testsrc = script_from_examples(test.docstring)\r\n return testsrc", "title": "" }, { "docid": "f054ff20c7d2144ab6b05f0df37b719b", "score": "0.58530706", "text": "def testsource(module, name):\r\n module = _normalize_module(module)\r\n tests = DocTestFinder().find(module)\r\n test = [t for t in tests if t.name == name]\r\n if not test:\r\n raise ValueError(name, \"not found in tests\")\r\n test = test[0]\r\n testsrc = script_from_examples(test.docstring)\r\n return testsrc", "title": "" }, { "docid": "c39c73773f67bc9cee8853292010dbd0", "score": "0.5849763", "text": "def source(self) -> str:\n return self._source", "title": "" }, { "docid": "c39c73773f67bc9cee8853292010dbd0", "score": "0.5849763", "text": "def source(self) -> str:\n return self._source", "title": 
"" }, { "docid": "4409032c7f31a551de85ecd811479b2c", "score": "0.5831922", "text": "def source(self):\n return \", \".join(self._source)", "title": "" }, { "docid": "893aae3037a0610ec6cabef2cf7bb633", "score": "0.5828887", "text": "def getsource_if_available(obj):\n import inspect\n try:\n return inspect.getsource(obj)\n except:\n return repr(obj)", "title": "" }, { "docid": "605fc7ee7b8853b985a194b31eae89ba", "score": "0.58058566", "text": "def split_py_source_file(text):\n found = []\n comm = False\n for line in text.splitlines(True):\n if line.strip():\n if line.startswith('#'):\n found.append(line)\n continue\n if line.startswith('\"\"\"') or line.startswith(\"'''\"):\n comm = not comm\n found.append(line)\n continue\n if not comm:\n break\n found.append(line)\n head = ''.join(found)\n return head, text[len(head):]", "title": "" }, { "docid": "7533d83a4938d5c1ec53cd3f5757538f", "score": "0.58015645", "text": "def source_contents(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"source_contents\")", "title": "" }, { "docid": "7533d83a4938d5c1ec53cd3f5757538f", "score": "0.58015645", "text": "def source_contents(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"source_contents\")", "title": "" }, { "docid": "4fac653903dd57fe92d0161c03319acb", "score": "0.58004814", "text": "def get_page_source(self):\n return self._get(\"source\")", "title": "" }, { "docid": "7cbc4f5ff31ced8e063c14a2925681f6", "score": "0.5785778", "text": "def source(self):\r\n if self.is_encrypted:\r\n raise TxeFileIsEncrypted('It is not possible to decode an encrypted source code list')\r\n\r\n source_list = self._read_variable_size_item(TxeDataItem.SOURCE).rstrip(b'\\x00')\r\n iterator = (entry.decode('utf-8') for entry in source_list.split(b'\\x00'))\r\n return (SourceElement(name, contents) for name, contents in zip(iterator, iterator))", "title": "" }, { "docid": "beb0716ead7cfa3e0c2f1ef6634202e0", "score": "0.577575", "text": "def get_source(self):\n raise NotImplementedError()", "title": "" }, { "docid": "20cd8b387bce3425539ce894cd6b9b63", "score": "0.57634395", "text": "def get_source_string(fname):\n with open(fname, \"r\") as source_file:\n source_string = source_file.read()\n return source_string", "title": "" }, { "docid": "3f2336f28ca14088c04bb35335269c0a", "score": "0.57570547", "text": "def get_page_source(self):\n return self.driver.page_source.replace('xmlns=\"http://www.w3.org/1999/xhtml\"', '')", "title": "" }, { "docid": "d506681df2f6e25c262ebcb2d216f6dd", "score": "0.5738536", "text": "def source(self):\n return self.__source", "title": "" }, { "docid": "ebdce218533cfb1bb12ecda870ac313d", "score": "0.5737059", "text": "def getSource(self):\n self.graphFooter()\n return self._text.getvalue()", "title": "" }, { "docid": "943291eed0ec042c05ee8fe8c8771078", "score": "0.57369244", "text": "def _summary_source(self):\n return \"\"", "title": "" }, { "docid": "2654839d298465785ea4f2b08dffa4fc", "score": "0.5730508", "text": "def sourcelinks_process_docstring(\n\t\tapp: Sphinx,\n\t\twhat,\n\t\tname: str,\n\t\tobj,\n\t\toptions,\n\t\tlines: List[str],\n\t\t):\n\n\tshow_sourcelink = options.get(\"sourcelink\", app.config.autodoc_show_sourcelink) # type: ignore\n\n\tif isinstance(obj, ModuleType) and what == \"module\" and obj.__file__.endswith(\".py\") and show_sourcelink:\n\t\tlines.insert(0, f\"**Source code:** :source:`{name.replace('.', '/')}.py`\")\n\t\tlines.insert(1, '')\n\t\tlines.insert(2, \"--------------------\")\n\t\tlines.insert(3, '')", "title": "" }, { "docid": 
"22b6e9b4e323caef3a2f147c78f7c250", "score": "0.5720324", "text": "def test_from_class_without_source() -> None:\n\n class WithoutSource:\n \"\"\"Short description\"\"\"\n\n attr_one: str\n \"\"\"Description for attr_one\"\"\"\n\n with patch(\n \"inspect.getsource\", side_effect=OSError(\"could not get source code\")\n ):\n docstring = parse_from_object(WithoutSource)\n\n assert docstring.short_description == \"Short description\"\n assert len(docstring.params) == 0", "title": "" }, { "docid": "3f93b2bd2ed6048932f39f560c49aecf", "score": "0.5714116", "text": "def get_source(self):\n return self.source", "title": "" }, { "docid": "3f93b2bd2ed6048932f39f560c49aecf", "score": "0.5714116", "text": "def get_source(self):\n return self.source", "title": "" }, { "docid": "47c1e6c6f8d6058e0ed452cfbca5ef5d", "score": "0.56891495", "text": "def source(self):\n return self._get_source()", "title": "" }, { "docid": "36c02f536e58778a005418a83e4884f4", "score": "0.56853396", "text": "def get_doxygen(self):\n\n # Assumption: This function is either called at the beginning of a\n # statement or at the end of a statement\n\n if self.comments:\n comments = self.comments\n else:\n comments = []\n # only look for comments until a newline (including lookahead)\n for tok in self.lookahead:\n if tok.type == \"NEWLINE\":\n return \"\"\n\n while True:\n tok = self.lex.token()\n comments.extend(self.comments)\n\n if tok is None:\n break\n\n tok.location = (self.filename, tok.lineno - self.line_offset)\n ttype = tok.type\n if ttype == \"NEWLINE\":\n self.lookahead.append(tok)\n break\n\n if ttype not in self._discard_types:\n self.lookahead.append(tok)\n\n if ttype == \"NAME\":\n break\n\n del self.comments[:]\n\n comments = \"\\n\".join(comments)\n del self.comments[:]\n return comments", "title": "" }, { "docid": "bc83643fd05f3ff8c7af5e63eb2c0eb9", "score": "0.5682249", "text": "def get_source(self) -> str:\n return self.driver.execute_script(\"return document.documentElement.outerHTML\")", "title": "" }, { "docid": "0f3002fc9e23737bc525bdad58fe8ed4", "score": "0.5681728", "text": "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "0f3002fc9e23737bc525bdad58fe8ed4", "score": "0.5681728", "text": "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "38fcd772369be05f986367264666d9bc", "score": "0.567823", "text": "def page_source(self):\n return self.execute(Command.GET_PAGE_SOURCE)['value']", "title": "" }, { "docid": "caa7e2e9f4df95389ab817ae9979a9c8", "score": "0.5657669", "text": "def rough_source(nb):\n\n if isinstance(nb, str):\n nb = json.loads(nb)\n\n return \"\\n\".join(\n textwrap.dedent(\"\".join(x[\"source\"]))\n for x in nb.get(\"cells\", [])\n if x[\"cell_type\"] == \"code\"\n )", "title": "" }, { "docid": "ed3a665b249b97d09bc139b399a2f15a", "score": "0.5646933", "text": "def get_source_files(self):\n return self.cmake.get_source_files()", "title": "" }, { "docid": "367386f80dd1b6f0a6d1de8262f5407d", "score": "0.56460655", "text": "def remove_comments(source_string):\n\n def is_comment(line):\n \"\"\" returns true if line is a single-line comment. 
(html or js) \"\"\"\n return ((line.startswith('<!--') and line.endswith('-->')) or\n (line.startswith('/*') and line.endswith('*/')))\n\n if ('\\n' in source_string):\n keeplines = []\n\n for sline in source_string.split('\\n'):\n strim = sline.replace('\\t', '').replace(' ', '')\n if not is_comment(strim):\n keeplines.append(sline)\n return '\\n'.join(keeplines)\n else:\n return source_string", "title": "" }, { "docid": "f121052996bf6430f2697b390c0fb3cb", "score": "0.5632072", "text": "def source(self) -> pulumi.Input['ContentSourceArgs']:\n return pulumi.get(self, \"source\")", "title": "" }, { "docid": "bfe94172b341029e12c504e0b0d9a5a8", "score": "0.5631311", "text": "def main():\n logger = logging.getLogger('generate_source_docs')\n\n sources = SourceBase.sources()\n logger.info('Registered sources: %s', sources)\n\n sources_list = ''\n full_docs = ''\n\n for source in sources:\n class_name = source.__name__ # e.g. ConstSource\n source_name = source.NAME # e.g. common/const\n\n if source_name is None:\n continue\n\n sources_list += \"* `%s`: %s\\n\" % (source_name, source.get_short_description())\n\n full_docs += \"\"\"\n### %s\n\nSource name: `%s`\n\n> %s\n\"\"\".strip() % (class_name, source_name, source.get_description()) + '\\n\\n'\n\n # print the list of sources\n print('## Available sources\\n')\n print(sources_list.strip())\n print()\n\n # full docs\n print(full_docs.strip())", "title": "" }, { "docid": "e75a897c563f88b95e62f82d26550430", "score": "0.56269765", "text": "def get_source(html):\n\n with urllib.request.urlopen(html) as response:\n source = response.read()\n\n return source", "title": "" }, { "docid": "10d7015d26cafff37fd76ed99d7bde22", "score": "0.56163174", "text": "def source(self):\n with open(self.filename) as f:\n data = f.read()\n return data", "title": "" }, { "docid": "3e63df198685cdc394517c97c0a8eb60", "score": "0.55968887", "text": "def ExtendedContentSource(self) -> str:", "title": "" }, { "docid": "f7e85f6afb1ca6ac397c9696db1a786f", "score": "0.5587861", "text": "def source(self, cutoff):\n with open(self.filename, \"r\") as f:\n return f.read()", "title": "" }, { "docid": "1cca18c1194d2604b17174ade7a06791", "score": "0.55858153", "text": "def source(self):\n return self.identifier.source", "title": "" }, { "docid": "6965c631fa8c6aad1cfb8e700f758c92", "score": "0.5578935", "text": "def comment_remove(source):\n return re.sub(r\"\\/\\/.*\\n\", \"\\n\", source)", "title": "" }, { "docid": "471edffc3e60a135bae1405596cd83e8", "score": "0.55685014", "text": "def sourcecode(obj):\n try:\n filename, source, firstlineno = _get_source(obj)\n print(colorize(f\"{filename}:{firstlineno}\", ColorScheme.SOURCEFILENAME),\n file=stderr)\n # TODO: No syntax highlighting for now, because we'd have to parse and unparse,\n # TODO: which loses the original formatting and comments.\n for line in source:\n print(line.rstrip(\"\\n\"))\n except NotImplementedError:\n print(colorize(\"<no source code available>\", ColorScheme.GREYEDOUT))", "title": "" }, { "docid": "7348ee3d9840108fa680289c2e8de26a", "score": "0.5563385", "text": "def SourceShowFile(self, node):\n if not node.directory:\n # TODO: any cases other than built-ins?\n return None\n if node.filename == '~':\n # TODO: look up C/Cython/whatever source???\n return None\n path = os.path.join(node.directory, node.filename)\n if self.sourceFileShown != path:\n try:\n data = open(path).read()\n except Exception, err:\n # TODO: load from zips/eggs? 
What about .pyc issues?\n return None\n else:\n self.sourceCodeControl.SetText(data)\n return path", "title": "" }, { "docid": "6358a4997de252d56edd524d18476746", "score": "0.55617464", "text": "def description(doc=__doc__):\n for line in doc.splitlines():\n return line.strip()", "title": "" }, { "docid": "e9ca420ee34c82c09f3ae9ec50857400", "score": "0.5560708", "text": "def __get_text(self):\r\n return self.lsource_node.text", "title": "" }, { "docid": "84cd6e4d6dc2f0c2090983192db8df64", "score": "0.55550295", "text": "def contents(self) -> Generator[str, None, None]:\n start = self.start - 1 if self.start is not None else 0\n end = self.end if self.end is not None else None\n if self.verbatim:\n lang = \"\" if self.lang is None else self.lang\n yield f\"```{lang}\\n\"\n\n with self.source.open(\"r\") as fin:\n yield from islice(fin, start, end)\n if self.verbatim:\n yield \"```\\n\"", "title": "" }, { "docid": "88b0dbad36ab2a0dc5b0dd28a2fd2cd9", "score": "0.55534583", "text": "def get_source(self, fmt=\"\"):\n return _GetSource(self, fmt)", "title": "" }, { "docid": "f398d614961e863fec68e013524d9b98", "score": "0.55490166", "text": "def _handle_source_file(line: str, path_file: Path) -> List[str]:\n key, path_rel = [*_parse_var_comment(line).items()][0]\n path_base = DG.meta.path_project if path_rel.startswith('/') else path_file.resolve().parent\n path_source = path_base / path_rel.lstrip('/')\n language = path_source.suffix.lstrip('.')\n lines_source = [f'```{language}', *read_lines(path_source), '```']\n if not path_source.is_file():\n logger.warning(f'Could not locate: {path_source}')\n\n line_start = f'<!-- {{cts}} {key}={path_rel}; -->'\n line_end = '<!-- {cte} -->'\n return [line_start] + lines_source + [line_end]", "title": "" }, { "docid": "ea934a824b07f4ea565b9185d9a5063e", "score": "0.5529507", "text": "def getSource(self):\n return _libsedml.SedModel_getSource(self)", "title": "" }, { "docid": "cdd9571957b1cc4b0354da1a1524ebfa", "score": "0.55226076", "text": "def source_config_text(self):\n return self.__source_config_text", "title": "" }, { "docid": "45546dfaaeb52980431e7bd3353d14dd", "score": "0.5517431", "text": "def sources():\n return gv.all_sources", "title": "" }, { "docid": "d17695f277a7d0028dda63076e58599a", "score": "0.55045277", "text": "def get_snippet(path):\n current_file_dir = os.path.dirname(__file__)\n absolute_path = os.path.join(current_file_dir, path)\n with open(absolute_path) as src:\n return src.read()", "title": "" }, { "docid": "70d35aa527a383b0f5853029817cadda", "score": "0.5492689", "text": "def source(self):\n\n return self.src", "title": "" }, { "docid": "d6830e8b53d2f087d4ce06916a1054d8", "score": "0.54916906", "text": "def generate_source(self, sourcefile=None):\n pass", "title": "" }, { "docid": "a86513997b05f6fe8b7f14ce2faff2f1", "score": "0.5483441", "text": "def _parse_source(source: str) -> str:\n return HTML_TAG_RE.sub(\"\", source)", "title": "" }, { "docid": "a12263c1e55b56f2e1847edc619f5fde", "score": "0.54809666", "text": "def get_code_content(cls, included):\n return dedent('''\n #include <iostream>\n //#include \"%s\"\n\n int main(int argc, char* argv[])\n {\n std::cout << \"Hello, world!\" << std::endl;\n return 0;\n }\n ''') % included", "title": "" }, { "docid": "5d71300f4cd0a8139bfb10607b42308e", "score": "0.5480432", "text": "def get_source_file_name(self):\n return self.file_dialog_open.getOpenFileName(\n self,\n 'Open source file',\n './')[0]", "title": "" }, { "docid": "84d1102dc1289f2d0def1d76ea6fe741", "score": 
"0.54762053", "text": "def get_user_source(source):\n lines = source.split(\"\\n\")\n user_source = []\n for line in lines:\n if line == \"// --- SKELCL END USER CODE ---\":\n return \"\\n\".join(user_source)\n user_source.append(line)\n\n raise FeatureExtractionError(\"Failed to find end of user code marker\")", "title": "" } ]
ad67e6976ca0a0dbf36c4e643c745b65
Return the curve with rounded corners Replaces two points with their midpoint while retaining the start and end points
[ { "docid": "30b1f13669be15452c8148cddd08e497", "score": "0.6989627", "text": "def round_corners(self, curve):\n round_weight = 3\n rounded_curve = [curve[0]] # retain the first point\n current_point = curve[0]\n for next_point in curve[1:]:\n mid_point = (\n (current_point[0] + next_point[0] * round_weight) / (1 + round_weight),\n (current_point[1] + next_point[1] * round_weight) / (1 + round_weight))\n rounded_curve.append(mid_point)\n current_point = next_point\n round_weight = 1 / round_weight\n rounded_curve.append(curve[-1]) # retain the last point\n return rounded_curve", "title": "" } ]
[ { "docid": "71e372926e3cc59de3c212a9607ba596", "score": "0.6365892", "text": "def roundJoin(p1, p2, x, y, dist):\n\n (x0, y0) = p1.atend()\n (x1, y1) = p2.atbegin()\n\n (dx0, dy0) = (pyx.unit.topt(x - x0), pyx.unit.topt(y - y0))\n (dx1, dy1) = (pyx.unit.topt(x - x1), pyx.unit.topt(y - y1))\n\n alpha = math.atan2(dy0, dx0)\n delta = math.atan2(dy1, dx1)\n\n angle = delta - alpha\n\n beta = (alpha + delta) * 0.5\n\n if (abs(angle) < math.pi / 2):\n\n p = []\n\n for stop in range(0, 4):\n\n beta = alpha + angle / 3.0 * stop\n\n (x1, y1) = (x - math.cos(beta) * abs(dist),\n y - math.sin(beta) * abs(dist))\n\n p.append((x1, y1))\n\n A = Splines.naturalSpline(p)\n\n return pyx.path.curve(*A)\n\n else:\n\n p = []\n\n for stop in range(0, 4):\n\n beta = alpha + angle / 6.0 * stop\n\n (x1, y1) = (x - math.cos(beta) * abs(dist),\n y - math.sin(beta) * abs(dist))\n\n p.append((x1, y1))\n\n A = Splines.naturalSpline(p)\n\n p = []\n\n for stop in range(3, 7):\n\n beta = alpha + angle / 6.0 * stop\n\n (x1, y1) = (x - math.cos(beta) * abs(dist),\n y - math.sin(beta) * abs(dist))\n\n p.append((x1, y1))\n\n B = Splines.naturalSpline(p)\n\n c1 = pyx.path.curve(*A)\n c2 = pyx.path.curve(*B)\n\n return c1 << c2", "title": "" }, { "docid": "11bfb66bb55658a00f32adbbf3618155", "score": "0.6192633", "text": "def create_round_rectangle(self, x1, y1, x2, y2, radius=25, **kwargs):\n points = [\n x1 + radius,\n y1,\n x1 + radius,\n y1,\n x2 - radius,\n y1,\n x2 - radius,\n y1,\n x2,\n y1,\n x2,\n y1 + radius,\n x2,\n y1 + radius,\n x2,\n y2 - radius,\n x2,\n y2 - radius,\n x2,\n y2,\n x2 - radius,\n y2,\n x2 - radius,\n y2,\n x1 + radius,\n y2,\n x1 + radius,\n y2,\n x1,\n y2,\n x1,\n y2 - radius,\n x1,\n y2 - radius,\n x1,\n y1 + radius,\n x1,\n y1 + radius,\n x1,\n y1,\n ]\n\n kwargs[\"smooth\"] = True\n return self._create(\"polygon\", points, kwargs)", "title": "" }, { "docid": "16d1bc4813e2822eff52c1a3c6ab5c38", "score": "0.6036745", "text": "def _circle_nurbscurve(c1, n2):", "title": "" }, { "docid": "399096a92b583810797ece771742bdbe", "score": "0.60202044", "text": "def draw_curve(start_point, end_point, missing_ind=-1):\r\n result = \"X\"+str(start_point[0]) +\"Y\"+str(start_point[1])+\"D02*\\n\"\r\n result += \"D14*\\n\"\r\n result += \"D03*\\n\"\r\n result += \"D22*\\n\"\r\n start_cir_loc = (int((end_point[0] - start_point[0]) / 4 ), start_point[1] ) # initial line\r\n result += \"X\"+str(start_cir_loc[0]) +\"Y\"+str(start_cir_loc[1])+\"D01*\\n\"\r\n mid_point = (int((end_point[0] + start_point[0]) / 2 ), int((end_point[1] + start_point[1]) / 2 ) ) # mid_point\r\n dist_to_mid_point = (mid_point[0] - start_cir_loc[0], mid_point[1] - start_cir_loc[1])\r\n #rising circ\r\n num_cir_lines = 120\r\n for ind in range(num_cir_lines):\r\n x = int(start_cir_loc[0] + ind * dist_to_mid_point[0] / num_cir_lines)\r\n x_normalized_dist = (x - start_cir_loc[0]) / dist_to_mid_point[0]\r\n y = mid_point[1] - int(dist_to_mid_point[1]* (1 - (x_normalized_dist)**2 ) ** 0.5)\r\n if ind == missing_ind:\r\n x_btoken = int(start_cir_loc[0] + (ind-0.225) * dist_to_mid_point[0] / num_cir_lines)\r\n result += \"X\"+str(x_btoken) +\"Y\"+str(y)+\"D01*\\n\"\r\n else:\r\n result += \"X\"+str(x) +\"Y\"+str(y)+\"D01*\\n\"\r\n result += \"X\"+str(x) +\"Y\"+str(y)+\"D02*\\n\"\r\n result += \"X\"+str(mid_point[0]) +\"Y\"+str(mid_point[1])+\"D01*\\n\"#draw to mid point\r\n \r\n \r\n end_cir_loc = (int(3 * (end_point[0] - start_point[0]) / 4 ), end_point[1] )\r\n dist_to_end_cir = (end_cir_loc[0] - mid_point[0], end_cir_loc[1] - 
mid_point[1])\r\n for ind in range(num_cir_lines):\r\n x = int(mid_point[0] + ind * dist_to_end_cir[0] / num_cir_lines)\r\n x_normalized_dist = (x - mid_point[0]) / dist_to_end_cir[0]\r\n y = mid_point[1] + int(dist_to_end_cir[1]* (1 - (1 - x_normalized_dist)**2 ) ** 0.5)\r\n result += \"X\"+str(x) +\"Y\"+str(y)+\"D01*\\n\"\r\n result += \"X\"+str(x) +\"Y\"+str(y)+\"D02*\\n\"\r\n result += \"X\"+str(end_point[0]) +\"Y\"+str(end_point[1])+\"D01*\\n\"\r\n result += \"D14*\\n\"\r\n result += \"D03*\\n\"\r\n return result", "title": "" }, { "docid": "9e32ddb88c7f57ff0400644f8b3f4616", "score": "0.6000487", "text": "def midpoint(pt1, pt2):\n return (0.5 * (pt1[0] + pt2[0]), 0.5 * (pt1[1] + pt2[1]))", "title": "" }, { "docid": "cbde95649531856c1f44f4232369bbb0", "score": "0.5994136", "text": "def midpoint2(x1: float, y1: float, x2: float, y2: float) -> complex:\n\n x = (x1 + x2) / 2\n y = (y1 + y2) / 2\n\n return complex(x, y)", "title": "" }, { "docid": "b02bfc629ba49a16f66b90cbbe95a288", "score": "0.5767674", "text": "def _geodesic_between_two_points(x1,y1,x2,y2):\n pi=RR.pi()\n #print \"z1=\",x1,y1\n #print \"z2=\",x2,y2\n if( abs(x1-x2)< 1E-10):\n # The line segment [x=x1, y0<= y <= y1]\n return line([[x1,y1],[x2,y2]]) #[0,0,x0,infinity]\n c=RR(y1**2 -y2**2 +x1**2 -x2**2 )/RR(2 *(x1-x2))\n r=RR(sqrt(y1**2 +(x1-c)**2 ))\n r1=RR(y1/r); r2=RR(y2/r)\n if(abs(r1-1 )< 1E-12 ):\n r1=RR(1.0)\n elif(abs(r2+1 )< 1E-12 ):\n r2=-RR(1.0)\n if(abs(r2-1 )< 1E-12 ):\n r2=RR(1.0)\n elif(abs(r2+1 )<1E-12 ):\n r2=-RR(1.0)\n if(x1>=c):\n t1 = RR(arcsin(r1))\n else:\n t1 = RR(pi)-RR(arcsin(r1))\n if(x2>=c):\n t2 = RR(arcsin(r2))\n else:\n t2 = RR(pi)-arcsin(r2)\n tmid = (t1+t2)*RR(0.5)\n a0=min(t1,t2)\n a1=max(t1,t2)\n #print \"c,r=\",c,r\n #print \"t1,t2=\",t1,t2\n return _circ_arc(t1,t2,c,r)", "title": "" }, { "docid": "454df344681b6db68112b40bbf329a69", "score": "0.57438695", "text": "def getCorrection(start, end, pos):\n (xs, ys) = start\n (xe, ye) = end\n (xp, yp) = pos\n\n # Discard edge cases with no sense\n assert(xs != xe or ys != ye)\n assert(xp != xe or yp != ye)\n assert(xs != xp or ys != yp)\n\n # First get the line equation from start to end points.\n # line equation follows the following pattern: y = m * x + b\n m = 0.0\n b = 0.0\n if abs(xe - xs) > PRECISION:\n m = (ye - ys) / (xe - xs)\n b = ys - m * xs\n else:\n m = 1\n b = - xs\n\n # Get the perpendicular line equation to the first line\n mp = 0.0\n bp = 0.0\n if abs(xe - xs) < PRECISION:\n bp = yp\n elif abs(m) < PRECISION:\n mp = 1\n bp = - xp\n else:\n mp = - 1 / m\n bp = yp - mp * xp\n\n # Get the point at the intersection of the two lines\n xi = 0.0\n yi = 0.0\n if abs(xe - xs) < PRECISION:\n xi = b\n yi = bp\n elif abs(m) < PRECISION:\n xi = bp\n yi = b\n else:\n xi = - (bp - b) / (mp - m)\n yi = m * xi + b\n\n # Get the distance between the tree points\n dist_pi = math.sqrt((xp - xi) * (xp - xi) + (yp - yi) * (yp - yi))\n dist_pe = math.sqrt((xp - xe) * (xp - xe) + (yp - ye) * (yp - ye))\n dist_sp = math.sqrt((xs - xp) * (xs - xp) + (ys - yp) * (ys - yp))\n\n # Get the offset angles alpha and beta\n alpha = math.asin(dist_pi / dist_pe)\n beta = math.asin(dist_pi / dist_sp)\n\n return - (alpha + beta)", "title": "" }, { "docid": "689afdbfd65017c9f3ecce09ffb99e5d", "score": "0.5736394", "text": "def rad_of_curvature(left_line, right_line):\n\n ploty = left_line.ally\n leftx, rightx = left_line.allx, right_line.allx\n\n leftx = leftx[::-1]\n rightx = rightx[::-1]\n\n width_lanes = abs(right_line.startx - left_line.startx)\n ym_per_pix = 30 
/ 720\n xm_per_pix = 3.7*(720/1280) / width_lanes\n\n y_eval = np.max(ploty)\n\n left_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)\n right_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)\n\n left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n 2 * left_fit_cr[0])\n right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n 2 * right_fit_cr[0])\n\n left_line.radius_of_curvature = left_curverad\n right_line.radius_of_curvature = right_curverad", "title": "" }, { "docid": "d08d746bc0ba36a946ca9a6592c9c15a", "score": "0.5683035", "text": "def midpoint(ptA, ptB): \n \n return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)", "title": "" }, { "docid": "7b7b0117a622b5f9b92a2307cba805ff", "score": "0.5608825", "text": "def corners(self, s, k_min, proximity, length):\n return define_corners(self.mid, s, k_min, proximity, length)", "title": "" }, { "docid": "73efb589155e1628f055b07d3803183d", "score": "0.56075686", "text": "def _point_nurbscurve(p1, n2):", "title": "" }, { "docid": "87986489350c29f930184369b9d8620a", "score": "0.5597784", "text": "def positionAlongCurve():\n\n pass", "title": "" }, { "docid": "199f1d837a39a79fb7d4be31accd1cff", "score": "0.55913544", "text": "def midpoint(p1, p2):\n return np.array([int((p1.x + p2.x) / 2), int((p1.y + p2.y) / 2)])", "title": "" }, { "docid": "68386dcd88582b043fff9383c0272af0", "score": "0.5589129", "text": "def midpoint(self, p1, p2):\n \n return ((p1[0] + p2[0]) * 0.5, (p1[1] + p2[1]) * 0.5)", "title": "" }, { "docid": "325b5134792b92be45a0e63c58d66ee7", "score": "0.55685306", "text": "def midpoint1(x1: float, y1: float) -> complex:\n\n x = x1 / 2\n y = y1 / 2\n\n return complex(x, y)", "title": "" }, { "docid": "ac8c02724121b56f720719a8fdf5e664", "score": "0.55669737", "text": "def _point_circle(p1, c2):", "title": "" }, { "docid": "aa425f3d03a5f0037bbd68835787f606", "score": "0.55107385", "text": "def createRational(self, controlPoints, degree, knots, weights, isPeriodic):\n return NurbsCurve2D()", "title": "" }, { "docid": "16f1ba5146c22ec972daddf380579743", "score": "0.55019015", "text": "def _circle_circle(c1, c2):", "title": "" }, { "docid": "cd5f2fd735b89d05deff24f3f84597e2", "score": "0.54760617", "text": "def find_points(self):\n\n radius = self.radius\n thickness = self.thickness\n bottom_outer_x, bottom_outer_y = self.outer_start_point\n top_outer_y = bottom_outer_y + (4 * radius)\n top_outer_x = bottom_outer_x\n inner_r = radius - thickness\n (bottom_outer_x, bottom_outer_y, thickness, radius, top_outer_x, top_outer_y, inner_r,) = (\n float(bottom_outer_x),\n float(bottom_outer_y),\n float(thickness),\n float(radius),\n float(top_outer_x),\n float(top_outer_y),\n float(inner_r),\n )\n\n point_1 = (bottom_outer_x, bottom_outer_y, \"circle\")\n point_3 = (point_1[0] + radius, point_1[1] + radius, \"straight\")\n point_4 = (point_3[0], point_3[1] + radius * 2, \"circle\")\n point_6 = (top_outer_x, top_outer_y, \"straight\")\n point_7 = (point_6[0], point_6[1] - thickness, \"circle\")\n point_9 = (point_4[0] - thickness, point_4[1], \"straight\")\n point_10 = (point_3[0] - thickness, point_3[1], \"circle\")\n point_12 = (point_1[0], point_1[1] + thickness, \"straight\")\n point_2 = (\n (point_1[0]) + (radius * math.cos((3 * math.pi) / 8)),\n (point_1[1] + radius) - (radius * math.sin((3 * math.pi) / 8)),\n \"circle\",\n )\n point_5 = (\n (point_6[0] + (radius * math.cos((2 * math.pi) / 
8))),\n (point_6[1] - radius) + (radius * math.sin((2 * math.pi) / 8)),\n \"circle\",\n )\n point_8 = (\n (point_7[0] + (inner_r * math.cos((2 * math.pi) / 8))),\n (point_7[1] - inner_r) + (inner_r * math.sin((2 * math.pi) / 8)),\n \"circle\",\n )\n point_11 = (\n (point_12[0]) + (inner_r * math.cos((3 * math.pi) / 8)),\n (point_12[1] + inner_r) - (inner_r * math.sin((3 * math.pi) / 8)),\n \"circle\",\n )\n\n self.points = [\n point_1,\n point_2,\n point_3,\n point_4,\n point_5,\n point_6,\n point_7,\n point_8,\n point_9,\n point_10,\n point_11,\n point_12,\n ]", "title": "" }, { "docid": "731d5c320ac4e9c4b4d9c99104f3d5a0", "score": "0.5465036", "text": "def midpoint(a, b):\n return (\n (a[0]+b[0])/2,\n (a[1]+b[1])/2,\n (a[2]+b[2])/2 )", "title": "" }, { "docid": "698e358c81e75c8964414baf45e7e2ee", "score": "0.54537404", "text": "def _line_circle(l1, c2):", "title": "" }, { "docid": "bbbe1476fde579696dbc99fa0f972f81", "score": "0.53872186", "text": "def update_end_points(self):\n \"\"\" Must do in pixel units because the scale doesn't allow line length to be converted, just points\"\"\"\n\n midx, midy = self.convert_arr_to_pixels(self.mid)\n #theta = m.atan(self.slope) ## in radians\n theta = m.radians(self.degrees) ## in radians\n\n ## Do trig\n opp = m.sin(theta) * self.hypoteneuse\n adj = m.cos(theta) * self.hypoteneuse\n\n ## Get start/end by applying deltas to mid coord\n start_x = int(midx - adj)\n start_y = int(midy + opp)\n end_x = int(midx + adj)\n end_y = int(midy - opp) # account for pygame negative stupidity\n\n self.start = self.convert_pixel_to_arr( (start_x, start_y) )\n self.end = self.convert_pixel_to_arr( (end_x, end_y) )", "title": "" }, { "docid": "5665891832b108af48c9de36eff0662a", "score": "0.53804934", "text": "def _line_nurbscurve(l1, n2):", "title": "" }, { "docid": "ebc7061641b942c5b9cfaac9e405bddd", "score": "0.53786194", "text": "def circle_segment(\n radius: float,\n start: np.ndarray,\n stop: np.ndarray,\n interval: float = 0.1,\n minimum_times: int = 10,\n times: Optional[int] = None,\n) -> np.ndarray:\n interval = np.arctan(float(interval) / radius)\n start, stop = np.radians(start), np.radians(stop)\n\n if times is None:\n times = max(int((stop - start) / interval), minimum_times)\n ramp = cached_linspace(start, stop, times)\n return radius * np.c_[np.cos(ramp), np.sin(ramp)]", "title": "" }, { "docid": "2022a798fe32d2c71f7b6ec46a861587", "score": "0.5376266", "text": "def midpoint(x1, y1, x2, y2):\n\n bracket1 = (x1 + x2)/2\n bracket2 = (y1 + y2)/2\n\n return \"x: %s, y: %s.\" % (bracket1, bracket2)", "title": "" }, { "docid": "fa537ecf7922ccd5f06aef0575098964", "score": "0.53521", "text": "def get_line_1(start, end):\n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = (y, x) if is_steep else (x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n 
points.reverse()\n return points", "title": "" }, { "docid": "ef7ba15884a622a0ce969e3075a20fa4", "score": "0.5349511", "text": "def __ComputeCurved(vpercent, w, vec, via, pts, segs):\n\n radius = via[1]/2.0\n\n #Compute the bezier middle points\n req_angle = asin(vpercent/100.0);\n oppside = tan(req_angle)*(radius-(w/sin(req_angle)))\n length = sqrt(radius*radius + oppside*oppside)\n d = req_angle - acos(radius/length)\n vecBC = [vec[0]*cos(d)+vec[1]*sin(d) , -vec[0]*sin(d)+vec[1]*cos(d)]\n pointBC = via[0] + wxPoint(int(vecBC[0] * length), int(vecBC[1] * length))\n d = -d\n vecAE = [vec[0]*cos(d)+vec[1]*sin(d) , -vec[0]*sin(d)+vec[1]*cos(d)]\n pointAE = via[0] + wxPoint(int(vecAE[0] * length), int(vecAE[1] * length))\n\n curve1 = __Bezier(pts[1], pointBC, pts[2], n=segs)\n curve2 = __Bezier(pts[4], pointAE, pts[0], n=segs)\n\n return curve1 + [pts[3]] + curve2", "title": "" }, { "docid": "1439b05af4a2d60c3324425d38789920", "score": "0.5339895", "text": "def getMidpoint(p1, p2):\r\n return (p1.x + p2.x) / 2, (p1.y + p2.y) / 2", "title": "" }, { "docid": "33ce0a13f0286a5bc14bf0b13501e05f", "score": "0.5336291", "text": "def concentric_xys(self, radius, start):", "title": "" }, { "docid": "b5680e8674264dd194df9a1475a17833", "score": "0.53350806", "text": "def test_convert_circle_to_spline(self):\n\n new_points = paramak.utils.convert_circle_to_spline(\n p_0=(200., 0.),\n p_1=(250., 50.),\n p_2=(200., 100.),\n tolerance=0.2\n )\n\n # these points can change from 200. to values like 200.00000000000009\n assert pytest.approx(new_points[0][0], abs=0.0000000000001) == 200\n assert pytest.approx(new_points[0][1], abs=0.0000000000001) == 0\n assert pytest.approx(new_points[-1][0], abs=0.0000000000001) == 200\n assert pytest.approx(new_points[-1][1], abs=0.0000000000001) == 100\n\n new_points_more_details = paramak.utils.convert_circle_to_spline(\n p_0=(200, 0),\n p_1=(250, 50),\n p_2=(200, 100),\n tolerance=0.1\n )\n\n assert len(new_points_more_details) > len(new_points)", "title": "" }, { "docid": "8edfb6edffa5684cd753573588c83b5a", "score": "0.53187805", "text": "def round_corner(self, radius):\n corner = Image.new(\"L\", (radius, radius), 0)\n draw = ImageDraw.Draw(corner)\n draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=255)\n return corner", "title": "" }, { "docid": "e3f40961cbf6248f1bab34a92b669973", "score": "0.52999246", "text": "def solve_curvature(p1, p2, n1, n2, fac, fallback):\r\n from mathutils.geometry import (intersect_line_line,\r\n )\r\n\r\n p1_a = p1 + n1\r\n p2_a = p2 - n2\r\n\r\n isect = intersect_line_line(p1,\r\n p1_a,\r\n p2,\r\n p2_a,\r\n )\r\n\r\n if isect:\r\n corner = isect[0].lerp(isect[1], 0.5)\r\n else:\r\n corner = None\r\n\r\n if corner:\r\n p1_first_order = p1.lerp(corner, fac)\r\n p2_first_order = corner.lerp(p2, fac)\r\n co = p1_first_order.lerp(p2_first_order, fac)\r\n\r\n return co\r\n else:\r\n # cant interpolate. 
just return interpolated value\r\n return fallback.copy() # p1.lerp(p2, fac)\r", "title": "" }, { "docid": "707f882ffd2946f04fab3b764106ef64", "score": "0.52709275", "text": "def find_line_start_end(m, b, w, h):\n intersect = []\n # print(\"m =\", m, \"b =\", b)\n if np.isnan(m) or np.isnan(b):\n return None\n\n if b >= 0 and b <= h:\n intersect.append((0, b))\n if (-b / m) >= 0 and (-b / m) <= w:\n if (-b / m, 0) not in intersect:\n intersect.append((-b / m, 0))\n if (m * w + b) >= 0 and (m * w + b) <= h:\n if (w, m * w + b) not in intersect:\n intersect.append((w, m * w + b))\n if (h - b) / m >= 0 and (h - b) / m <= w:\n if ((h - b) / m, h) not in intersect:\n intersect.append(((h - b) / m, h))\n if len(intersect) is not 2:\n raise RuntimeError(\n \"Unknown line end points calculation error. Intersect = \" +\n str(intersect) + \", m = \" + str(m) + \", b = \" + str(b))\n return [(int(round(i[0])), int(round(i[1]))) for i in intersect]", "title": "" }, { "docid": "bfc62035eb548cb637ef2e0338fa14e4", "score": "0.5266367", "text": "def interpolate_x(p1, p2, y):\n (x1, y1) = p1\n (x2, y2) = p2\n # In case result is undefined, midpoint is as good as any value.\n if y1 == y2:\n return (x1 + x2) / 2.\n x = x1 + (x2 - x1) * (y - y1) / float(y2 - y1)\n # print \"interpolate_x [%g,%g,%g][%g,%g,%g]\" % (x1,x,x2,y1,y,y2)\n return x", "title": "" }, { "docid": "07082ba430d5c92fd708cabf1172d166", "score": "0.5264112", "text": "def extract(self, startParam, endParam):\n return NurbsCurve2D()", "title": "" }, { "docid": "dc8b8a352981c39c337c60cb0de9df1a", "score": "0.5251435", "text": "def round_corners(self, radius, N):\n\n dpoly = super().round_corners(radius, radius, N)\n self.assign(dpoly)\n return self", "title": "" }, { "docid": "2f6eda4e1925810288c4b06b91f2682f", "score": "0.5242262", "text": "def round_rect(\n self, x: int, y: int, width: int, height: int, radius: int, color: int\n ) -> None:\n self._gfx_mode()\n self._curve_helper(x + radius, y + radius, radius, radius, 1, color, False)\n self._curve_helper(\n x + width - radius - 1, y + radius, radius, radius, 2, color, False\n )\n self._curve_helper(\n x + radius, y + height - radius, radius, radius, 0, color, False\n )\n self._curve_helper(\n x + width - radius - 1, y + height - radius, radius, radius, 3, color, False\n )\n self.hline(x + radius, y, width - (radius * 2) - 1, color)\n self.hline(x + radius, y + height, width - (radius * 2) - 1, color)\n self.vline(x, y + radius, height - (radius * 2), color)\n self.vline(x + width - 1, y + radius, height - (radius * 2), color)", "title": "" }, { "docid": "559998ef561a0e76e729e17e8ab78fe8", "score": "0.52398854", "text": "def _nurbscurve_nurbscurve(n1, n2):", "title": "" }, { "docid": "dac3aa474734f3356c82cc009eb06ca7", "score": "0.5228479", "text": "def find_points(self):\n\n # / p4\n # / /¦\n # / / ¦\n # / / ¦\n # / / ¦\n # p1/ ¦\n # ¦ ¦\n # x ¦ ¦\n # ¦ ¦\n # p2\\ ¦\n # \\ \\ ¦\n # \\ \\ ¦\n # \\ \\ ¦\n # \\ p3\n\n if self.radius_type == \"corner\":\n distance_to_inner_corner = self.inner_radius\n distance_to_rear_corner = self.outer_radius\n # this section calculates a new distance to the corners now that we\n # know the user provided the distance to the straight\n if self.radius_type == \"straight\":\n angle = 360 / (self.number_of_coils * 2)\n distance_to_inner_corner = self.inner_radius / math.cos(math.radians(angle))\n distance_to_rear_corner = self.outer_radius / math.cos(math.radians(angle))\n\n if self.gap_size * self.number_of_coils > 2 * math.pi * distance_to_inner_corner:\n msg = 
(\n \"Gap_size is too large. The gap_size * number of coils must \"\n \"be less than the circumference of the circle made by \"\n \"the inner_radius\"\n )\n raise ValueError(msg)\n\n if distance_to_inner_corner != 0.0:\n theta_inner = ((2 * math.pi * distance_to_inner_corner) - (self.gap_size * self.number_of_coils)) / (\n distance_to_inner_corner * self.number_of_coils\n )\n omega_inner = math.asin(self.gap_size / (2 * distance_to_inner_corner))\n\n # inner points\n point_1 = (\n (distance_to_inner_corner * math.cos(-omega_inner)),\n (-distance_to_inner_corner * math.sin(-omega_inner)),\n )\n point_2 = (\n (\n distance_to_inner_corner * math.cos(theta_inner) * math.cos(-omega_inner)\n + distance_to_inner_corner * math.sin(theta_inner) * math.sin(-omega_inner)\n ),\n (\n -distance_to_inner_corner * math.cos(theta_inner) * math.sin(-omega_inner)\n + distance_to_inner_corner * math.sin(theta_inner) * math.cos(-omega_inner)\n ),\n )\n points = [(point_1[0], point_1[1]), (point_2[0], point_2[1])]\n\n else:\n\n points = [(0, 0)]\n\n # print(point_1)\n # print(point_2)\n\n theta_outer = ((2 * math.pi * distance_to_rear_corner) - (self.gap_size * self.number_of_coils)) / (\n distance_to_rear_corner * self.number_of_coils\n )\n omega_outer = math.asin(self.gap_size / (2 * distance_to_rear_corner))\n\n # outer points\n point_4 = (\n (distance_to_rear_corner * math.cos(-omega_outer)),\n (-distance_to_rear_corner * math.sin(-omega_outer)),\n )\n point_6 = (\n (\n distance_to_rear_corner * math.cos(theta_outer) * math.cos(-omega_outer)\n + distance_to_rear_corner * math.sin(theta_outer) * math.sin(-omega_outer)\n ),\n (\n -distance_to_rear_corner * math.cos(theta_outer) * math.sin(-omega_outer)\n + distance_to_rear_corner * math.sin(theta_outer) * math.cos(-omega_outer)\n ),\n )\n points.append((point_6[0], point_6[1]))\n points.append((point_4[0], point_4[1]))\n\n self.points = points", "title": "" }, { "docid": "d1f5de42017ddfc9418a4519b1e4280a", "score": "0.5222381", "text": "def closest_point(p1, p2, s):\n \n #if the line is a single point, the closest point is the only point\n if p1==p2:\n return (0,p1)\n \n seg_vector = vector_diff(p2,p1)\n seg_mag = mag(seg_vector)\n #print( \"seg_vector, length\", seg_vector, seg_mag )\n seg_unit = vector_div( seg_vector, seg_mag )\n stop_vector = vector_diff(s,p1)\n #print( \"stop_vector\", stop_vector )\n \n #scalar projection of A onto B = (A dot B)/|B| = A dot unit(B)\n sp = dot_product( stop_vector, seg_unit )\n \n #print( \"scalar projection\", sp )\n \n if sp < 0:\n #closest point is startpoint\n #print( \"startpoint\" )\n return (0, p1)\n elif sp > seg_mag:\n #closest point is endpoint\n #print( \"endpoint\" )\n return (1, p2)\n else:\n #closest point is midspan\n #print( \"midpoint\" )\n return (sp/seg_mag, vector_sum(p1,vector_mult( seg_unit, sp )))", "title": "" }, { "docid": "e974735b7206c7393dfaa1bf8906fc18", "score": "0.5222275", "text": "def createByThreePoints(self, startPoint, point, endPoint):\n return Arc2D()", "title": "" }, { "docid": "565ab3ac57b73688b6f47209b32598f7", "score": "0.52110666", "text": "def curve():\n\n x1 = np.linspace(-2, .99, 100)\n x2 = np.linspace(1.01,6, 100)\n y1 = 1.0/(x1-1)\n y2 = 1.0/(x2-1)\n\n plt.plot(x1,y1, \"m--\", x2,y2, \"m--\", linewidth = 5)\n plt.ylim([-6,6])\n #plt.plot(x2,y2)\n plt.show()", "title": "" }, { "docid": "1240c234fe3e278b0e2a59a930558cd6", "score": "0.5203733", "text": "def get_line(start, end):\n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n\n # 
Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n\n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n\n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n\n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n\n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n\n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = (y, x) if is_steep else (x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n\n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return points", "title": "" }, { "docid": "1240c234fe3e278b0e2a59a930558cd6", "score": "0.5203733", "text": "def get_line(start, end):\n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n\n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n\n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n\n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n\n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n\n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n\n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = (y, x) if is_steep else (x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n\n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return points", "title": "" }, { "docid": "1240c234fe3e278b0e2a59a930558cd6", "score": "0.5203733", "text": "def get_line(start, end):\n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n\n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n\n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n\n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n\n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n\n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n\n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = (y, x) if is_steep else (x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n\n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return points", "title": "" }, { "docid": "9fa28213e1fa5da162094c9e21598b4d", "score": "0.52012336", "text": "def generate_quad_corners(indices, x, y):\n (i, j, k, l) = indices\n\n def gpi(index_1, index_2):\n return generate_point_indices(index_1, index_2, len(x))\n\n xis = np.empty(4)\n yis = np.empty(4)\n xis.fill(np.nan)\n yis.fill(np.nan)\n\n if j <= i or k <= j or l <= k:\n pass\n else:\n (xis[0], yis[0]) = line_intersection(x[gpi(i, j)],\n y[gpi(i, j)])\n (xis[1], yis[1]) = line_intersection(x[gpi(j, k)],\n y[gpi(j, k)])\n (xis[2], yis[2]) = line_intersection(x[gpi(k, l)],\n y[gpi(k, l)])\n (xis[3], yis[3]) = line_intersection(x[gpi(l, i)],\n y[gpi(l, i)])\n\n return (xis, yis)", "title": "" }, { "docid": "49e8510c501cd5ff0cd40f442c306c6b", "score": "0.5193536", "text": "def 
midpoint(self,other):\n return self.interpolant(other,0.5).toPoint()", "title": "" }, { "docid": "49e8510c501cd5ff0cd40f442c306c6b", "score": "0.5193536", "text": "def midpoint(self,other):\n return self.interpolant(other,0.5).toPoint()", "title": "" }, { "docid": "b3202b4aa5b4f8de759bca79ad134b91", "score": "0.51823646", "text": "def midpoint(self):\n x = (self.a.x + self.b.x) / 2.0\n y = (self.a.y + self.b.y) / 2.0\n\n return Point(x, y)", "title": "" }, { "docid": "d406af560861792730353d91f8e66443", "score": "0.5177173", "text": "def interpolate(pointImpl: 'PointImpl', pointImpl2: 'PointImpl', double: float) -> cern.accsoft.commons.value.Point:\n ...", "title": "" }, { "docid": "0d595ca085407f5800e0ac1a52bbe978", "score": "0.51695335", "text": "def arc(x, y, radius, start_angle, stop_angle, fill_color=\"\", stroke_color=\"\", stroke_width=-1):\n raise NotImplementedError(\"arc() not implemented\")", "title": "" }, { "docid": "8fa2f5dcf162cca0a32e0b0399d23c4a", "score": "0.5166751", "text": "def preparePixmap(self):\n start = self.startPixmap()\n end = self.endPixmap()\n painter = QPainter(self.outPixmap())\n painter.drawPixmap(0, 0, start)\n size = start.size().expandedTo(end.size())\n width = size.width()\n height = size.height()\n radius = int((width**2 + height**2) ** 0.5) / 2\n start_rect = QRect(width / 2, height / 2, 0, 0)\n end_rect = QRect(width / 2, height / 2, radius, radius)\n return start_rect, end_rect", "title": "" }, { "docid": "c4bfc1331af755077475b49d7955ffed", "score": "0.5150121", "text": "def circleCurve(h, k, t, r, v):\n x = h + r * cos(v * t)\n y = k + r * sin(v * t)\n return x, y", "title": "" }, { "docid": "08eba72e9f7b8a0ca1d557116dce5915", "score": "0.5129027", "text": "def bresenham_circle(center, curve):\n radius = pts.distance_between_points(center, curve)\n\n f = 1 - radius\n ddf_x = 1\n ddf_y = -2 * radius\n\n x = 0\n y = radius\n\n\n points = list()\n\n points.append(pts.Point(center.x, round(center.y + radius)))\n points.append(pts.Point(center.x, round(center.y - radius)))\n points.append(pts.Point(round(center.x + radius), center.y))\n points.append(pts.Point(round(center.x + radius), center.y))\n\n while x < y:\n if f >= 0: \n y -= 1\n ddf_y += 2\n f += ddf_y\n x += 1\n ddf_x += 2\n f += ddf_x \n points.append(pts.Point(round(center.x + x), round(center.y + y)))\n points.append(pts.Point(round(center.x - x), round(center.y + y)))\n points.append(pts.Point(round(center.x + x), round(center.y - y)))\n points.append(pts.Point(round(center.x - x), round(center.y - y)))\n points.append(pts.Point(round(center.x + y), round(center.y + x)))\n points.append(pts.Point(round(center.x - y), round(center.y + x)))\n points.append(pts.Point(round(center.x + y), round(center.y - x)))\n points.append(pts.Point(round(center.x - y), round(center.y - x)))\n\n\n return points", "title": "" }, { "docid": "39f67966efb822389c74f1600a60a4da", "score": "0.5126685", "text": "def createNonRational(self, controlPoints, degree, knots, isPeriodic):\n return NurbsCurve2D()", "title": "" }, { "docid": "06280bd0339e64197adc4a3a445da6f5", "score": "0.5114326", "text": "def get_curve_intersection(x1, y1, x2, y2, log=False):\n if log:\n x1 = np.log10(x1)\n y1 = np.log10(y1)\n x2 = np.log10(x2)\n y2 = np.log10(y2)\n\n itp = interp1d(x1, y1, kind='linear', bounds_error=False)\n x_root = []\n y_root = []\n for idx in range(len(y2)-1):\n x_l, x_r = x2[[idx, idx+1]]\n y2_l, y2_r = y2[[idx, idx+1]]\n a = (y2_r - y2_l)/(x_r-x_l)\n b = y2_l - a*x_l\n err = lambda x: itp(x) - (a*x+b)\n if 
err(x_l)*err(x_r)<0: # there is an intersection\n x_sol = brentq(err, x_l, x_r)\n y_sol = a*x_sol+b\n x_root.append(x_sol)\n y_root.append(y_sol)\n else:\n continue\n x_root = np.array(x_root)\n y_root = np.array(y_root)\n if log:\n x_root = np.power(10, x_root)\n y_root = np.power(10, y_root)\n return x_root, y_root", "title": "" }, { "docid": "3c279dc64cc327ad9341a1576678fc10", "score": "0.50966436", "text": "def get_line_equation(start_point: np.ndarray,\n end_point: np.ndarray) -> typing.Tuple[float, float, float]:\n [x1, y1] = start_point\n [x2, y2] = end_point\n\n x = x2 - x1\n y = y2 - y1\n\n if x == 0 and y == 0:\n raise ArithmeticError\n if x == 0:\n a = 1\n b = 0\n elif y == 0:\n a = 0\n b = 1\n else:\n b = 1\n a = -b * y / x\n\n c = -a * x1 - b * y1\n return a, b, c", "title": "" }, { "docid": "bb6cb74ac1f80e3e22b32d9e28d11be4", "score": "0.50926507", "text": "def get_intersection_pts(p1, r1, p2, r2):\n dist = np.linalg.norm(p1 - p2)\n if dist >= (r1 + r2):\n # circles are too far apart\n return [], CircleIntersectionType.TOO_FAR\n elif dist <= abs(r1 - r2):\n if r1 < r2:\n return [], CircleIntersectionType.CIRCLE1_IN_CIRCLE2\n else:\n return [], CircleIntersectionType.CIRCLE2_IN_CIRCLE1\n else:\n # two intersection pts\n r1_sq = r1**2\n r2_sq = r2**2\n d_sq = dist**2\n pt_base = 0.5 * (p1 + p2) + \\\n 0.5 * ((r1_sq - r2_sq) / d_sq) * (p2 - p1)\n \n delta_y = p2[1] - p1[1]\n delta_x = p1[0] - p2[0]\n plus_minus = 0.5 * np.sqrt((2 * (r1_sq + r2_sq) / d_sq) - ((r1_sq - r2_sq)/d_sq)**2 - 1) * np.array([delta_y, delta_x])\n intersect_pts = [pt_base+plus_minus, pt_base-plus_minus]\n\n\n d1 = (r2_sq - r1_sq - d_sq) / (2 * dist)\n if d1 > 0:\n return intersect_pts, CircleIntersectionType.RADIAL_AXIS_NONCENTERED_SMALL_CIRCLE1\n d2 = (r1_sq - r2_sq - d_sq) / (2 * dist)\n if d2 > 0:\n return intersect_pts, CircleIntersectionType.RADIAL_AXIS_NONCENTERED_SMALL_CIRCLE2\n return intersect_pts, CircleIntersectionType.RADIAL_AXIS_CENTERED", "title": "" }, { "docid": "2097afecdbc6f5ae740a84ee23b759e5", "score": "0.5071573", "text": "def curvature(left_fit, right_fit):\n ploty = np.linspace(0, 719, num=720) # to cover same y-range as image\n\n # For each y position generate random x position within +/-50 pix\n # of the line base position in each case (x=200 for left, and x=900 for right)\n leftx = np.array([200 + (y ** 2) * left_fit[0] + left_fit[1]\n for y in ploty])\n rightx = np.array([900 + (y ** 2) * right_fit[0] + right_fit[1]\n for y in ploty])\n\n leftx = leftx[::-1] # Reverse to match top-to-bottom in y\n rightx = rightx[::-1] # Reverse to match top-to-bottom in y\n\n y_eval = np.max(ploty)\n\n # Define conversions in x and y from pixels space to meters\n ym_per_pix = 30 / 720 # meters per pixel in y dimension\n xm_per_pix = 3.7 / 700 # meters per pixel in x dimension\n\n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)\n right_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n 2 * left_fit_cr[0])\n right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n 2 * right_fit_cr[0])\n\n return np.average([left_curverad, right_curverad])", "title": "" }, { "docid": "7db2dc99fdff2f02542de6f2f23a81c7", "score": "0.50624436", "text": "def make_circle(radius, an_points, rn_points, n_borders):\n if an_points < 
1:\n Logger.serror('You must give a \"an_points\" of at least 1.', stackoffset=3)\n if rn_points < 1:\n Logger.serror('You must give a \"rn_points\" of at least 1.', stackoffset=3)\n if isinstance(an_points, float):\n Logger.slog('You passed in a float \"an_points\" parameter: the value will be '\n 'truncated to an integer.', level='warning', stackoffset=3)\n if isinstance(rn_points, float):\n Logger.slog('You passed in a float \"rn_points\" parameter: the value will be '\n 'truncated to an integer.', level='warning', stackoffset=3)\n\n # create nodes\n radiuses = np.linspace(0, radius, rn_points+1)\n lx, ly = [0.], [0.]\n m = np.linspace(0,2*np.pi,an_points,endpoint=False)\n for r in radiuses[1:]:\n lx.extend(list(r*np.cos(m)))\n ly.extend(list(r*np.sin(m)))\n x, y = np.array(lx), np.array(ly)\n z = np.zeros((len(x), 1))\n nodes = np.column_stack((x,y,z))\n\n # create connectivity\n elts = []\n # .. create triangles (surface)\n tri = mtri.Triangulation(nodes[:,0], nodes[:,1])\n for elt in tri.triangles: elts.append([2,10,elt[0],elt[1],elt[2]])\n # .. create edges (border)\n if n_borders == 0: pass\n elif n_borders == 1:\n for n in range(len(m)-1):\n elts.append([1,1,len(x)-len(m)+n,len(x)-len(m)+n+1,-1])\n elts.append([1,1,len(x)-1,len(x)-len(m),-1])\n else:\n Logger.slog('A \"circle\" primitive can only be created with 0 or 1 '\n 'distinct borders.\\nInvalid value: {} (the domain will '\n 'be created with 1 border).'.format(n_borders),\n level='warning')\n for n in range(len(m)-1):\n elts.append([1,1,len(x)-len(m)+n,len(x)-len(m)+n+1,-1])\n elts.append([1,1,len(x)-1,len(x)-len(m),-1])\n elements = np.array(elts)\n \n return nodes, elements", "title": "" }, { "docid": "899821480d6d2ef30481dfe58860d716", "score": "0.50422966", "text": "def circle_from_two(points, p, q):\r\n circ = diameter(p, q)\r\n left = None\r\n right = None\r\n px, py = p\r\n qx, qy = q\r\n\r\n # For each point not in the two-point circle, form a circumcircle and\r\n # classify it on left or right side.\r\n for r in points:\r\n if in_circle(r, circ):\r\n continue\r\n\r\n cross = cross_prod(px, py, qx, qy, r[0], r[1])\r\n c = circumcircle(p, q, r)\r\n if c is None:\r\n continue\r\n elif cross > 0.0 and (left is None or cross_prod(px, py, qx, qy, c[0], c[1]) > cross_prod(px, py, qx, qy, left[0], left[1])):\r\n left = c\r\n elif cross < 0.0 and (right is None or cross_prod(px, py, qx, qy, c[0], c[1]) < cross_prod(px, py, qx, qy, right[0], right[1])):\r\n right = c\r\n\r\n # Select which circle to return.\r\n if left is None and right is None:\r\n return circ\r\n elif left is None:\r\n return right\r\n elif right is None:\r\n return left\r\n else:\r\n return left if (left[2] <= right[2]) else right", "title": "" }, { "docid": "8ad83ccecb2632a5d2c5375f8c58453a", "score": "0.50414443", "text": "def midpoint(self, p):\n return Point([simplify((a + b)*S.Half) for a, b in zip(self.args, p.args)])", "title": "" }, { "docid": "ddaa8fd7d68095f5a446b0c071da1103", "score": "0.502761", "text": "def __ComputePoints(track, via, hpercent, vpercent, segs):\n start = track.GetStart()\n end = track.GetEnd()\n\n if (segs>2) and (vpercent>70.0):\n #If curved via are selected, max angle is 45 degres --> 70%\n vpercent = 70.0\n\n # ensure that start is at the via/pad end\n d = end - via[0]\n if sqrt(d.x * d.x + d.y * d.y) < via[1]:\n start, end = end, start\n\n # get normalized track vector\n # it will be used a base vector pointing in the track direction\n pt = end - start\n norm = sqrt(pt.x * pt.x + pt.y * pt.y)\n vec = [t / norm for t in 
pt]\n\n # find point on the track, sharp end of the teardrop\n w = track.GetWidth()/2\n radius = via[1]/2\n n = radius*(1+hpercent/100.0)\n dist = sqrt(n*n + w*w)\n d = atan2(w, n)\n vecB = [vec[0]*cos(d)+vec[1]*sin(d) , -vec[0]*sin(d)+vec[1]*cos(d)]\n pointB = start + wxPoint(int(vecB[0] * dist), int(vecB[1] * dist))\n vecA = [vec[0]*cos(-d)+vec[1]*sin(-d) , -vec[0]*sin(-d)+vec[1]*cos(-d)]\n pointA = start + wxPoint(int(vecA[0] * dist), int(vecA[1] * dist))\n\n # via side points\n radius = via[1] / 2\n d = asin(vpercent/100.0);\n vecC = [vec[0]*cos(d)+vec[1]*sin(d) , -vec[0]*sin(d)+vec[1]*cos(d)]\n d = asin(-vpercent/100.0);\n vecE = [vec[0]*cos(d)+vec[1]*sin(d) , -vec[0]*sin(d)+vec[1]*cos(d)]\n pointC = via[0] + wxPoint(int(vecC[0] * radius), int(vecC[1] * radius))\n pointE = via[0] + wxPoint(int(vecE[0] * radius), int(vecE[1] * radius))\n\n # Introduce a last point in order to cover the via centre.\n # If not, the zone won't be filled\n vecD = [-vec[0], -vec[1]]\n radius = (via[1]/2)*0.5 #50% of via radius is enough to include\n pointD = via[0] + wxPoint(int(vecD[0] * radius), int(vecD[1] * radius))\n\n pts = [pointA, pointB, pointC, pointD, pointE]\n if segs > 2:\n pts = __ComputeCurved(vpercent, w, vec, via, pts, segs)\n\n return pts", "title": "" }, { "docid": "d6e6ea7668c9557bb9e80def97f65140", "score": "0.5022576", "text": "def curve_intersection_points(self, i1, i2):\n # Check for splits of this curve along axis i1\n ci_points = np.full((2,3), self.c[self.i0])\n cnt = 0\n # If the curve just touches a bound along i1, add a single point...\n if np.isclose(self.c[i1] + self.r, self.domain.L[i1]): # ...on the upper bound...\n ci_points[cnt,i1] = self.domain.L[i1]\n ci_points[cnt,i2] = self.c[i2]\n cnt += 1\n elif np.isclose(self.c[i1] - self.r, 0.): # ...or on the lower bound.\n ci_points[cnt,i1] = 0.\n ci_points[cnt,i2] = self.c[i2]\n cnt += 1\n # Otherwise, if the sphere is split along i1 the curve may cross the bounds.\n elif self.sphere.split_axis[i1]:\n # Add two points at upper bound along i1...\n if self.c[i1] + self.r > self.domain.L[i1]:\n ci_points[cnt:cnt+2,i1] = self.domain.L[i1]\n # ...or add two points at lower bound along i1...\n elif self.c[i1] - self.r < 0.:\n ci_points[cnt:cnt+2,i1] = 0.\n # ...or add no points at bounds along i1.\n else:\n return ci_points[:cnt]\n di1 = ci_points[cnt,i1] - self.c[i1]\n di2 = np.sqrt(self.r**2 - di1**2)\n ci_points[cnt,i2] = self.c[i2] + di2\n ci_points[cnt+1,i2] = self.c[i2] - di2\n cnt += 2\n return ci_points[:cnt] - self.c", "title": "" }, { "docid": "efa1e7eb944845077d42084af5df59ef", "score": "0.50172305", "text": "def boundDestination(pStart, pEnd):\n (xStart, yStart, zStart) = pStart\n zPenStart = zStart - PEN_DIST\n \n (xEnd, yEnd, zEnd) = pEnd\n zPenEnd = zEnd - PEN_DIST\n \n (xOut, yOut, zOut) = (0, 0, 0)\n \n if penInCylinder(pStart):\n if penInCylinder(pEnd):\n (xOut, yOut, zOut) = pEnd\n else: \n # Project xy onto outer circle:\n (xOut, yOut, zOut) = boundByCircle(pEnd)\n # Deal with z issues:\n if zEnd > Z_MAX: # Deal with z\n zOut = Z_MAX\n if not inRectangle(pStart):\n if zPenEnd < Z_CLAMP:\n zOut = Z_CLAMP + PEN_DIST\n else: # if pStart is in rectangle\n if zPenEnd < Z_CLAMP:\n if inRectangle(pEnd):\n if zPenEnd < Z_PAPER:\n zOut = Z_PAPER + PEN_DIST\n else: # if pEnd is not in rectangle\n zOut = Z_CLAMP + PEN_DIST\n elif penInPrism(pStart):\n if penInPrism(pEnd):\n (xOut, yOut, zOut) = pEnd\n else:\n # Project xy onto rectangle:\n (xOut, yOut, zOut) = boundByRectangle(pEnd)\n # Deal with z issues:\n if zPenEnd < 
Z_PAPER:\n zOut = Z_PAPER + PEN_DIST\n if zEnd > Z_MAX:\n zOut = Z_MAX\n else:\n # This should never be entered, because pStart should always be valid. If\n # it isn't, send the point to home.\n (xOut, yOut, zOut) = HOME\n \n return (xOut, yOut, zOut)", "title": "" }, { "docid": "90570ec9f1798a02adce5c215f6db31d", "score": "0.5015211", "text": "def _generate_ring(radius: int):\n ring = set()\n ring.add((radius, 0))\n if radius - 1 > 0:\n for x in range(1, radius):\n min_value = 1.0\n for y in range(1, radius + 1):\n value = abs(((x**2 + y**2) / radius**2) - 1)\n if min_value > value:\n min_value = value\n min_x, min_y = x, y\n ring.add((min_x, min_y))\n\n temp_ring = ring.copy()\n for x, y in temp_ring:\n ring.add((y, x))\n\n temp_ring = ring.copy()\n for x, y in temp_ring:\n ring.add((-x, y))\n\n temp_ring = ring.copy()\n for x, y in temp_ring:\n ring.add((x, -y))\n\n temp_ring = ring.copy()\n for x, y in temp_ring:\n ring.add((-x, -y))\n\n return ring", "title": "" }, { "docid": "04c23ad002952322f506445f090905f6", "score": "0.5014824", "text": "def _trim_path_ends_to_layer(vm, px, py, pz, pi=None):\n # get points for portion of segment that crosses layer\n if pi is None:\n pi = assign_points_to_layers(vm, px, py, pz)\n\n # trim start of line\n if pi[0] != pi[1]:\n iref = pi[1] - 1\n pt0 = [px[0], py[0], pz[0]]\n pt1 = [px[1], py[1], pz[1]]\n isect = find_line_intersection(vm, iref, pt0, pt1) \n\n assert isect is not None, 'Problem finding intersection with'\\\n ' iref = {:} between {:} and {:}.'.format(iref, pt0, pt1)\n\n px[0], py[0], pz[0] = isect\n\n # trim end of line\n if pi[-2] != pi[-1]:\n iref = pi[-1] - 1\n pt0 = [px[-2], py[-2], pz[-2]]\n pt1 = [px[-1], py[-1], pz[-1]]\n\n isect = find_line_intersection(vm, iref, pt0, pt1) \n\n assert isect is not None, 'Problem finding intersection with'\\\n ' iref = {:} between {:} and {:}.'.format(iref, pt0, pt1)\n\n px[-1], py[-1], pz[-1] = isect\n\n return px, py, pz", "title": "" }, { "docid": "1d3c8ab36725a7647e3df319555e52ee", "score": "0.49978265", "text": "def create_RLine(RRmin,Rmin,xD):\n global xR, yR\n R = RRmin * Rmin\n xR = np.linspace(0,1,num=1000)\n yR = ((R/(R+1))*xR) + ((1/(R+1))*xD)\n findintersect(yq, yR, xR)\n xR = np.array([xint, xD])\n yR = ((R/(R+1))*xR) + ((1/(R+1))*xD)\n return yR, xR", "title": "" }, { "docid": "7498e496d1c298564d48cadd698586ba", "score": "0.49734038", "text": "def snap(self):\n\n x_values = [self.vertices[0].x, self.vertices[1].x, self.vertices[2].x, self.vertices[3].x]\n y_values = [self.vertices[0].y, self.vertices[1].y, self.vertices[2].y, self.vertices[3].y]\n\n\n for i in range(4):\n if x_values[i] == 0 or isinstance(x_values[i], int):\n pass\n elif x_values[i] - math.floor(x_values[i]) < 0.5:\n x_values[i] = math.floor(x_values[i])\n else:\n x_values[i] = math.ceil(x_values[i])\n\n for i in range(4):\n if y_values[i] == 0 or isinstance(y_values[i], int):\n pass\n elif y_values[i] - math.floor(y_values[i]) < 0.5:\n y_values[i] = math.floor(y_values[i])\n else:\n y_values[i] = math.ceil(y_values[i])\n\n if x_values[0] == x_values[1] == x_values[2] == x_values[3] and \\\n y_values[0] == y_values[1] == y_values[2] == y_values[3]:\n return self\n\n # for i in range(4):\n # print(x_values[i], y_values[i])\n\n return Quadrilateral(x_values[0], y_values[0],\n x_values[1], y_values[1],\n x_values[2], y_values[2],\n x_values[3], y_values[3]) # TODO", "title": "" }, { "docid": "5a65f0cf79997f633cc7b1bf9c5938f5", "score": "0.49693984", "text": "def GetBendLineValues2(self, Up=defaultNamedNotOptArg, 
Angle=defaultNamedNotOptArg, Radius=defaultNamedNotOptArg, Points=defaultNamedNotOptArg):\n\t\treturn self._ApplyTypes_(115, 1, (11, 0), ((16395, 3), (16389, 3), (16389, 3), (16396, 3)), u'GetBendLineValues2', None,Up\n\t\t\t, Angle, Radius, Points)", "title": "" }, { "docid": "fa6ba1c1a7b5507703ee8abf22f39c29", "score": "0.49614617", "text": "def test_two_segments_join_on_boundary(self):\n # clang-format off\n # 3========2========5\n # | | |\n # | 0====1=============2\n # | | |\n # 0--------1--------4\n # clang-format on\n\n points = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0), (2, 0, 0), (2, 1, 0)]\n cells = [UGrid.cell_type_enum.QUAD, 4, 0, 1, 2, 3, UGrid.cell_type_enum.QUAD, 4, 1, 4, 5, 2]\n ugrid = UGrid(points, cells)\n extractor = UGrid2dPolylineDataExtractor(ugrid, 'points')\n\n point_scalars = [0, 2, 3, 1, 4, 5]\n extractor.set_grid_scalars(point_scalars, [], 'points')\n\n polyline = [(0.5, 0.5, 0.0), (1.0, 0.5, 0.0), (2.5, 0.5, 0.0)]\n extractor.set_polyline(polyline)\n extracted_locations = extractor.extract_locations\n extracted_data = extractor.extract_data()\n\n expected_data = [1.5, 2.5, 3.5, 4.5, float('nan')]\n np.testing.assert_array_equal(expected_data, extracted_data)\n expected_locations = [(0.5, 0.5, 0.0), (1.0, 0.5, 0.0), (1.5, 0.5, 0.0),\n (2.0, 0.5, 0.0), (2.5, 0.5, 0.0)]\n np.testing.assert_array_equal(expected_locations, extracted_locations)", "title": "" }, { "docid": "dfe1e20e8415052a22afa553b3feed7a", "score": "0.49564895", "text": "def quadratic_bezier_curve(p0,p1,p2,n=10) :\n curve_pts = []\n for t in numpy.arange(0,1+1./n,1./n) :\n curve_pts.append(quadratic_bezier_point(p0,p1,p2,t))\n return curve_pts", "title": "" }, { "docid": "9731039ecc73ee00207b907c06fe41a7", "score": "0.49543917", "text": "def fill_round_rect(\n self, x: int, y: int, width: int, height: int, radius: int, color: int\n ) -> None:\n self._gfx_mode()\n self._curve_helper(x + radius, y + radius, radius, radius, 1, color, True)\n self._curve_helper(\n x + width - radius - 1, y + radius, radius, radius, 2, color, True\n )\n self._curve_helper(\n x + radius, y + height - radius, radius, radius, 0, color, True\n )\n self._curve_helper(\n x + width - radius - 1, y + height - radius, radius, radius, 3, color, True\n )\n self._rect_helper(\n x + radius, y, x + width - radius - 1, y + height - 1, color, True\n )\n self._rect_helper(\n x, y + radius, x + width - 1, y + height - radius - 1, color, True\n )", "title": "" }, { "docid": "a4ef37f4be6791b12df37b164fb4b90a", "score": "0.49534315", "text": "def _circular_radii(self):\n shift = self.radius_step / 2\n min_radius = self.min_radius - shift\n max_radius = self.max_radius + shift\n nsteps = int(math.floor((max_radius - min_radius)\n / self.radius_step))\n max_radius = min_radius + (nsteps * self.radius_step)\n return np.linspace(min_radius, max_radius, nsteps + 1)", "title": "" }, { "docid": "0dc0ea9e247be4cdac98570e634236a3", "score": "0.4951872", "text": "def get_subrectangle(x1,y1,x2,y2):\r\n diff_x = x2 - x1\r\n diff_y = y2 - y1\r\n x1_a = x1 + diff_x/3\r\n x2_a = x1 + 2*diff_x/3\r\n y1_a = y1 + diff_y/3\r\n y2_a = y1 + 2*diff_y/3\r\n \r\n return x1_a,y1_a,x2_a,y2_a", "title": "" }, { "docid": "bd56c63aaf48b4575293d5ace0c8a8b5", "score": "0.4944744", "text": "def _fast_fit_circle(x,y,use_median=False) :\n # init\n nn=len(x)\n i1=np.arange(nn)\n ### i2=(i1+1)%nn\n i2=(i1+nn//2-1)%nn\n\n # midpoints\n mx=((x[i1]+x[i2])/2.)\n my=((y[i1]+y[i2])/2.)\n nx=(y[i2]-y[i1])\n ny=-(x[i2]-x[i1])\n\n # solve for intersection of perpendicular bisectors\n 
# with s1,s2 are affine parameters of 2 adjacent perpendicular bisectors\n # 2 equations:\n # mx1 + nx1*s1 = mx2 + nx2*s2\n # my1 + ny1*s1 = my2 + ny2*s2\n num = (ny[i2]*mx[i2]-nx[i2]*my[i2]-ny[i2]*mx[i1]+nx[i2]*my[i1])\n denom = (ny[i2]*nx[i1]-nx[i2]*ny[i1])\n ok = (denom!=0)\n s1 = np.zeros(nn)\n s1[ok] = num[ok]/denom[ok]\n\n # coordinates of intersections are estimates of center of circle\n xc=mx[i1]+nx[i1]*s1[i1]\n yc=my[i1]+ny[i1]*s1[i1]\n\n # first estimate of center is mean of all intersections\n if use_median :\n xc=np.median(xc)\n yc=np.median(yc)\n r=np.median(np.hypot(x-xc, y-yc))\n else :\n xc=np.mean(xc)\n yc=np.mean(yc)\n r=np.mean(np.hypot(x-xc, y-yc))\n\n return xc,yc,r", "title": "" }, { "docid": "59df700aa7779ebf780d35869f92aa1e", "score": "0.49429443", "text": "def line_between_points(self, x1, y1, x2, y2, n_values=100):\n x1_, y1_ = self.image_coords_as_array_coords(x1, y1)\n x2_, y2_ = self.image_coords_as_array_coords(x2, y2)\n n_values = ifloor(math.sqrt((x2_-x1_)**2 + (y2_-y1_)**2))\n delta_x = (x2_ - x1_) / (n_values - 1)\n delta_y = (y2_ - y1_) / (n_values - 1)\n vals = []\n img_coords = []\n d = self._raw.linearintdata\n # TODO remarkably, this is reasonably fast in Python, but it would\n # probably be more at home in scitbx.math\n for n in range(n_values):\n x = x1_ + (n * delta_x)\n y = y1_ + (n * delta_y)\n xd, yd = self.array_coords_as_detector_coords(x, y)\n img_coords.append((xd,yd))\n x_1 = ifloor(x)\n x_2 = iceil(x)\n y_1 = ifloor(y)\n y_2 = iceil(y)\n v11 = d[(x_1, y_1)]\n v12 = d[(x_1, y_2)]\n v21 = d[(x_2, y_1)]\n v22 = d[(x_2, y_2)]\n if (x_2 == x_1):\n if (y_2 == y_1):\n vxy = v11\n else :\n vxy = ((v12 * (y - y_1)) + (v11 * (y_2 - y))) / (y_2 - y_1)\n elif (y_2 == y_1):\n vxy = ((v21 * (x - x_1)) + (v11 * (x_2 - x))) / (x_2 - x_1)\n else :\n dxdy = (y_2 - y_1) * (x_2 - x_1)\n vxy = ((v11 / dxdy) * (x_2 - x) * (y_2 - y)) + \\\n ((v21 / dxdy) * (x - x_1) * (y_2 - y)) + \\\n ((v12 / dxdy) * (x_2 - x) * (y - y_1)) + \\\n ((v22 / dxdy) * (x - x_1) * (y - y_1))\n vals.append(vxy)\n lattice_length = None\n if (len(vals) > 5):\n # first find peaks in the profile\n peaks = []\n avg = sum(vals) / len(vals)\n filtered_vals = []\n for x in vals :\n if (x <= avg*3):\n filtered_vals.append(x)\n background = sum(filtered_vals) / len(filtered_vals)\n i = 2\n while (i < len(vals) - 2):\n x = vals[i]\n if (x <= background):\n pass\n elif ((x > vals[i-1]) and (x > vals[i-2]) and\n (x > vals[i+1]) and (x > vals[i+2])):\n peaks.append(i)\n i += 1\n if (len(peaks) > 0):\n # calculate the average lattice length\n center_x, center_y = self.get_beam_center_mm()\n distances = []\n i = 1\n while (i < len(peaks)):\n x1,y1 = img_coords[peaks[i-1]]\n x2,y2 = img_coords[peaks[i]]\n rs_distance = rstbx.utils.reciprocal_space_distance(x1, y1, x2, y2,\n wavelength=self.get_wavelength(),\n center_x=center_x,\n center_y=center_y,\n distance=self.get_detector_distance(),\n detector_two_theta=self.get_detector_2theta(),\n distance_is_corrected=True)\n assert (rs_distance > 0)\n distances.append(1 / rs_distance)\n i += 1\n lattice_length = sum(distances) / len(distances)\n distance = self.distance_between_points(x1, y1, x2, y2)\n return line_profile(vals, distance, lattice_length)", "title": "" }, { "docid": "eea6d8fd47305207da2df37e671a3a3b", "score": "0.49427333", "text": "def curvature(self):\n return 2.0 / (self.left.radius() + self.right.radius())", "title": "" }, { "docid": "65cebad806a124548ebb3351039aac06", "score": "0.49414554", "text": "def __add__(self, other):\n\n # X9.62 
B.3:\n\n if not isinstance(other, Point):\n return NotImplemented\n if other == INFINITY:\n return self\n if self == INFINITY:\n return other\n assert self.__curve == other.__curve\n if self.__x == other.__x:\n if (self.__y + other.__y) % self.__curve.p() == 0:\n return INFINITY\n else:\n return self.double()\n\n p = self.__curve.p()\n\n l = ((other.__y - self.__y) * \\\n numbertheory.inverse_mod(other.__x - self.__x, p)) % p\n\n x3 = (l * l - self.__x - other.__x) % p\n y3 = (l * (self.__x - x3) - self.__y) % p\n\n return Point(self.__curve, x3, y3)", "title": "" }, { "docid": "5108a828f93ce340172759443fd52792", "score": "0.49352363", "text": "def quadratic_bezier_point(p0,p1,p2,t) :\n return (1-t)**2*p0 + 2*(1-t)*t*p1 + t**2*p2", "title": "" }, { "docid": "62727ab26e250091d1d51520e7499374", "score": "0.4930479", "text": "def _bezier_math(t: float, p1: Tuple[float, float], p2: Tuple[float, float]) -> Tuple[float, float]:\n\tx0, y0 = 0, 0\n\tx1, y1 = p1\n\tx2, y2 = p2\n\tx3, y3 = 1, 1\n\tx = (1 - t) ** 3 * x0 + 3 * (1 - t) ** 2 * t * x1 + 3 * (1 - t) * t ** 2 * x2 + t ** 3 * x3\n\ty = (1 - t) ** 3 * y0 + 3 * (1 - t) ** 2 * t * y1 + 3 * (1 - t) * t ** 2 * y2 + t ** 3 * y3\n\treturn x, y", "title": "" }, { "docid": "325bcbfcf6527df6e16d03eb78adcea6", "score": "0.49298626", "text": "def __newRoundCorners( self ):\n\n for l in self.newLines:\n l.roundLowerRight = False\n l.roundUpperRight = False\n\n lines = self.newLines\n for i in range( len(lines)-1 ):\n if lines[i+1].ragWidth < lines[i].ragWidth:\n lines[i].roundLowerRight = True\n\n for i in range( len(lines)-1 ):\n if lines[i].ragWidth < lines[i+1].ragWidth:\n lines[i+1].roundUpperRight = True\n\n lines[-1].roundLowerRight = True\n self.newLines = lines", "title": "" }, { "docid": "9b6681c19f0ea78102a0939e1fed13c8", "score": "0.4919724", "text": "def __intersecting_circles(self,R1,R2,d):\n # ensure standard order R1>=R2\n if R1<R2: (R1,R2) = (R2,R1);\n \n # CASE 1: circles do not intersect\n if R1+R2 <= d: return 0;\n \n # CASE 2: circle 2 completely in circle 1\n if d <= R1-R2: return np.pi*R2**2;\n \n # CASE 3: intersecting circles\n # [see e.g. http://mathworld.wolfram.com/Circle-CircleIntersection.html]\n r1 = (R1**2-R2**2+d**2)/2/d; \n r2 = (R2**2-R1**2+d**2)/2/d; # distance of origin from radical line\n \n C1 = R1**2 * np.arccos(r1/R1); # area of circle segments \n C2 = R2**2 * np.arccos(r2/R2); # defined by radical line\n \n T = d*np.sqrt(R1**2-r1**2); # area of the rhombus spanned by circle \n # origins and intersection points\n return C1+C2-T;", "title": "" }, { "docid": "e64a1d89db2fc15ae03b37095d7133ee", "score": "0.49084508", "text": "def get_curve(start, end, peak, peak_max):\n points = np.array([(start, 0), (peak, peak_max), (end, 0)])\n # get x and y vectors\n x = points[:,0]\n y = points[:,1]\n\n # calculate polynomial\n z = np.polyfit(x, y, 2)\n f = np.poly1d(z)\n\n # calculate new x's and y's\n x_new = np.linspace(x[0], x[-1], 100)\n y_new = f(x_new)\n\n x_new = map(int, x_new)\n xy = []\n curr = None\n for x,y in zip(x_new, y_new):\n if x == curr:\n continue\n curr = x\n xy.append((x,y))\n x,y = zip(*xy)\n y = [abs(round(y_hat/peak_max,3)) for y_hat in y]\n return y", "title": "" }, { "docid": "778ab236acfa7588c22d5e3acf81c4e3", "score": "0.48993328", "text": "def find_center(self, point1, point2):\r\n self. point1 = point1\r\n self. 
point2 = point2\r\n x1 = self.point1[0]\r\n x2 = self.point2[0]\r\n y1 = self.point1[1]\r\n y2 = self.point2[1]\r\n# (self.xc, self.yc) = (0, 0)\r\n self.xm = (x1+x2)/2\r\n self.ym = (y1+y2)/2\r\n \r\n secant = np.sqrt((x1-x2)**2 + (y1-y2)**2)\r\n A = (1/4)*(secant**2/(math.tan((point1[2]-point2[2])/2))**2)\r\n B = (A - self.R**2 - (self.xm**2+self.ym**2-x1**2-y1**2))/(2*y1 - 2*self.ym)\r\n k = (self.xm-x1)/(y1 - self.ym)\r\n criteria = (2*(k*B - k*y1 -x1))**2 - 4*(1+k**2)*((B-y1)**2 +x1**2-self.R**2)\r\n print(criteria)\r\n xc1 = (-2*(k*B-k*y1-x1) + np.sqrt(criteria))/(2*(1+k**2)) \r\n xc2 = (-2*(k*B-k*y1-x1) - np.sqrt(criteria))/(2*(1+k**2)) \r\n yc1 = k*xc1 + B\r\n yc2 = k*xc2 + B\r\n# self.point = point\r\n \r\n self.center1 = (xc1, yc1)\r\n self.center2 = (xc2, yc2)\r\n \r\n return (self.center1, self.center2)", "title": "" }, { "docid": "e6b57bd2d15bcd3fb679797aa0bb4e87", "score": "0.48988172", "text": "def _intesectLines(pt1, pt2):\n (pt1, pt2), (pt3, pt4) = pt1, pt2\n denom = (pt1[0] - pt2[0]) * (pt3[1] - pt4[1]) - (pt1[1] - pt2[1]) * (pt3[0] - pt4[0])\n if _roundFloat(denom) == 0:\n return None\n x = (pt1[0] * pt2[1] - pt1[1] * pt2[0]) * (pt3[0] - pt4[0]) - (pt1[0] - pt2[0]) * (pt3[0] * pt4[1] - pt3[1] * pt4[0])\n x /= denom\n y = (pt1[0] * pt2[1] - pt1[1] * pt2[0]) * (pt3[1] - pt4[1]) - (pt1[1] - pt2[1]) * (pt3[0] * pt4[1] - pt3[1] * pt4[0])\n y /= denom\n return (x, y)", "title": "" }, { "docid": "92975a9ccf2e4be63b02f1ab3ceb688c", "score": "0.4889182", "text": "def _ellipse(self,a2,b,r):\n return a2 * np.sqrt(1 - (r/b)**2)", "title": "" }, { "docid": "0cfe92aa6244b616018dac0b97e8d24a", "score": "0.4886405", "text": "def get_r(Data, start=0):\n # pace IS A PROBLEM\n # construct the line\n Len = len(Data)\n a = (Data[0] - Data[-1]) / (1 - Len)\n b = Data[0] - a * start\n\n # calculate r2\n AveData = sum(Data[1:-1]) / Len - 2\n SStot = 0\n SSres = 0\n for i in range(1, Len - 1):\n SStot += abs(Data[i] - AveData)\n SSres += abs(Data[i] - (a * (start + i) + b))\n try:\n return - SSres / SStot\n except ZeroDivisionError:\n # this means that the dot are all (or nearly all because of rounding on the computer) on a horizontal straight line\n return 0 # the highest possible r.", "title": "" }, { "docid": "4eee34b06cb4e1c2a2a390979b61d01a", "score": "0.48860067", "text": "def roundness2(self):\n\n if np.isnan(self.hx) or np.isnan(self.hy):\n return np.nan\n else:\n return 2.0 * (self.hx - self.hy) / (self.hx + self.hy)", "title": "" }, { "docid": "76ede3ddbcc1598c7845364b5b6c322b", "score": "0.4879624", "text": "def PointBetweenPoints(pt1, pt2, t = .5):\n line = rg.Line(pt1, pt2)\n return line.PointAt(t)", "title": "" }, { "docid": "2b794e4210faa6ef6d32f59adba573d6", "score": "0.48759794", "text": "def end_pose(start_pose, curvature, length):\r\n x, y, theta = start_pose\r\n if curvature == 0.0:\r\n # Linear movement.\r\n x += length * cos(theta)\r\n y += length * sin(theta)\r\n return (x, y, theta)\r\n else:\r\n # Curve segment of radius 1/curvature.\r\n tx = cos(theta)\r\n ty = sin(theta)\r\n radius = 1.0/curvature\r\n xc = x - radius * ty # Center of circle.\r\n yc = y + radius * tx\r\n angle = length / radius\r\n cosa = cos(angle)\r\n sina = sin(angle)\r\n nx = xc + radius * (cosa * ty + sina * tx)\r\n ny = yc + radius * (sina * ty - cosa * tx)\r\n ntheta = (theta + angle + pi) % (2*pi) - pi\r\n return (nx, ny, ntheta)", "title": "" }, { "docid": "e7bdc5139df753d53f7a74aa8d03e219", "score": "0.4874375", "text": "def approximate(self, points, using=None, scheme=None, vb=True):\n # 
First, reset the interpolation scheme if one is passed\n # explicitly:\n if scheme is not None:\n self.scheme = scheme\n\n # Now make the interpolation, using the current scheme:\n self.interpolator = self.interpolate(using=using, vb=vb)\n if vb: print('interpolating between '+str(min(points))+' and '+str(max(points)))\n interpolated = self.interpolator(points)\n interpolated = qp.utils.normalize_gridded((points, interpolated), vb=False)\n # interpolated[interpolated<0.] = 0.\n\n return interpolated#(points, interpolated)", "title": "" }, { "docid": "8ba729f4d7a40ca42e6114448566272c", "score": "0.4874125", "text": "def test_intersect_line_circle2(self):\n self.assertFalse(intersect_line_circle(0, 0, 10, -10, 10, 10, 11))", "title": "" }, { "docid": "fd1f5e9cd067dce41939181bf47f3b0e", "score": "0.48682445", "text": "def bisect_rocs(rocpoints, predicate, start=0, end=None):\n if end is None:\n end = len(rocpoints)\n if end <= start:\n raise ValueError(\n 'Start (%d) must be less than end (%d).' % (start, end))\n new_index = (start + end) / 2\n if start == new_index:\n return end # We narrowed down the range completely\n else:\n if predicate(rocpoints[new_index]):\n # Look in lower half\n return bisect_rocs(rocpoints, predicate, start, new_index)\n else:\n # Look in upper half\n return bisect_rocs(rocpoints, predicate, new_index, end)", "title": "" }, { "docid": "36e9e53bb8880aa266a58b582869a451", "score": "0.48619306", "text": "def test_point_in_circle2(self):\n pos = (5, -9)\n start = (0, 0)\n radius = 10\n self.assertFalse(point_in_circle(pos, start, radius))", "title": "" }, { "docid": "19709577605e175bb9ea170076c5d567", "score": "0.48590878", "text": "def midpoint_2d(points):\n \n n = len(points)\n if n is 0:\n return None\n\n x_sum = 0\n y_sum = 0\n # center of set of finite points is average of those points\n for point in points:\n x_sum += point.x\n y_sum += point.y\n\n return (x_sum / n, y_sum / n)", "title": "" }, { "docid": "72ad19cab377b0a6e5a019277d1579f8", "score": "0.48504642", "text": "def corners(self):\r\n\r\n return [(self.x_1, self.y_1), (self.x_1, self.y_2),\r\n (self.x_2, self.y_1), (self.x_2, self.y_2)]", "title": "" }, { "docid": "b113c5c3ac7d2c60576fdb0ad191a7d1", "score": "0.4845425", "text": "def midpoint_to(self,obj):\n if isinstance(obj,Point):\n return obj.midpoint_to(obj.projected_on(self))\n elif isinstance(obj,Line):\n d = obj.r - self.r\n t1t2 = dot(self.t,obj.t)\n if abs(abs(t1t2)-1) < 1e-12: #parallel case \n d = orthogonalized_to(d,self.t)\n return Point(self.r + 0.5*d)\n else:\n t1d = dot(d,self.t)\n t2d = dot(d,obj.t)\n s = (t1t2*t2d - t1d)/(t1t2**2-1)\n u = (t1t2*t1d - t2d)/(t1t2**2-1)\n return Point(0.5*(obj.r + u*obj.t + self.r + s*self.t)) \n else:\n return obj.midpoint_to(self)", "title": "" }, { "docid": "10e1ef73fb92d31ad2ae4306de42e096", "score": "0.4842004", "text": "def linear_interpolate(pt, corners, values):\n N = len(pt)\n\n if N==1:\n xd = (pt[0] - corners[0]) / (corners[1] - corners[0])\n return values[0]*(1 - xd) + values[1]*xd\n\n #interpolate out the first dimension\n new_values = np.empty(2**(N-1), dtype=np.float64)\n x_corners = np.array([corners[0,0], corners[0,1]])\n jump = 2**(N-1)\n for i in range(2**(N-1)):\n new_values[i] = linear_interpolate(np.array([pt[0]]), x_corners, \n np.array([values[i], values[i+jump]]))\n \n new_corners = np.empty((N-1, 2), dtype=np.float64)\n new_pt = np.empty(N-1, dtype=np.float64)\n for i in range(N-1):\n new_corners[i,0] = corners[i+1, 0]\n new_corners[i,1] = corners[i+1, 1]\n new_pt = pt[i+1]\n\n 
return linear_interpolate(new_pt, new_corners, new_values)", "title": "" } ]
ecb0ee01b6ccb11e75677aa7d9ee8dfb
Plot the strain data in the frequency domain
[ { "docid": "d9bd7a000ba278376e63a5fe428de478", "score": "0.6397094", "text": "def _frequency_domain_plot(self, *args, **kwargs):\n from pesummary.gw.plots.detchar import frequency_domain_strain_data\n\n return frequency_domain_strain_data(*args, **kwargs)[self.IFO]", "title": "" } ]
[ { "docid": "9a1712a9bdcd3cfbf3521c02ffd920db", "score": "0.6559505", "text": "def plot(self):\n x_range = np.arange(0, 1.0, 0.01)\n y_vals = [self.get_freq(x) for x in x_range]\n plt.plot(x_range, y_vals)\n plt.show()", "title": "" }, { "docid": "32df4677233cf5ac08efc6bfb875f532", "score": "0.6464078", "text": "def plot_strain_rate(self, strain):\r\n self.acq_rate\r\n pass", "title": "" }, { "docid": "10d0240a7b2c318a70684ee5aea6f0a1", "score": "0.64597017", "text": "def plotWaveandFrequencyRange(self):\n\n\n times = -np.arange(WWProperties.signalSamples) * WWProperties.signalTimestep + WWProperties.signalStart\n signal = self.waveMaker.makeWave(WWProperties.m1, WWProperties.m2, WWProperties.phic, WWProperties.tc, times) \n frequency = self.waveMaker.omega / (2 * np.pi)\n\n plt.figure(1)\n plt.subplot(211)\n plt.title(\"Inspiral for (10 M${}_\\odot$, 1.4M${}_\\odot$) non-spining system (WW fig 8)\")\n plt.plot(times, frequency)\n plt.ylabel('Orbital Frequency')\n\n plt.subplot(212)\n plt.plot(times, signal)\n plt.xlabel('Time (s)')\n plt.ylabel('$R h_+$')\n\n plt.savefig(self.figFolderName + \"/\" + Params + \" \" + GandC0 + \" units signal signalStart %f signalSamples %d signalTimestep %f (WW fig 8).pdf\" % (WWProperties.signalStart, WWProperties.signalSamples, WWProperties.signalTimestep), bbox_inches='tight')\n plt.cla()\n\n print (\"frequency hight\", frequency[0], \"frequency low\", frequency[-1], \"\\n\")", "title": "" }, { "docid": "7cd38308831333d2194321b987d5e54a", "score": "0.64455694", "text": "def plotSpectrum(y,Fs):\n \n n = len(Data) # length of the signal\n k = arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n\n Y = fft(y)/n # fft computing and normalization\n Y = Y[range(n/2)]\n \n plot(frq,abs(Y),'r') \n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')", "title": "" }, { "docid": "1d146498a3afe5dbeb0b5200a35b7813", "score": "0.6393334", "text": "def plot(self, *args, **kw):\n return self.frequency.plot(*args, **kw)", "title": "" }, { "docid": "43dc6d572557709820fdf37ce6d96e46", "score": "0.6373058", "text": "def plot(self):\n fig, (ax0,ax1) = plt.subplots(2,figsize=(14,14))\n ax0.plot(SineSignal.t,self.injected_signal)\n ax1.plot(self.inj_freq[:SineSignal.n_t//2],self.freq_prob)\n ax0.set(title='injected signal',ylabel='signal value',xlabel='signal time chunk')\n ax1.set(title='discrete FFT',ylabel='freq strength in signal',xlabel='frequency')\n plt.savefig('example_signal.pdf',bbox_inches='tight')\n plt.clf()", "title": "" }, { "docid": "19aaefa1345a282c92b5900b512f451e", "score": "0.6189974", "text": "def plotSpectrum(y,Fs):\n n = len(y) # length of the signal\n k = arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n \n Y = fft(y)/n # fft computing and normalization\n Y = Y[range(n/2)]\n plot(frq,abs(Y),'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|IR_Spectrum(freq)|')\n rospy.loginfo('[sharp_listener] DONE PLOTING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n show()", "title": "" }, { "docid": "01cc1e93736eff282d7d80f527ae3d74", "score": "0.61449003", "text": "def plot_wavelength(spectrum, wavelength):\n #print(len(spectrum[0]))\n #print(len(wavelength[0]))\n for index, element in enumerate(spectrum):\n plt.plot(wavelength[index], element)\n plt.xlabel(\"Angstroms\")\n plt.ylabel(\"Count\")\n plt.title(\"No Interpolation\")\n plt.show()", "title": "" }, { "docid": "be1260d4ed484e7a2db67cbcc8b6dcf5", "score": "0.60464585", "text": "def 
plotSpectrum(self, ax, signal, color):\n n = len(signal) # length of the signal\n k = arange(n)\n T = n/self.sample_rate\n frq = k/T # two sides frequency range\n frq = frq[range(int(n/2))] # one side frequency range\n\n Y = fft(signal)/n # fft computing and normalization\n Y = Y[range(int(n/2))]\n\n ax.plot(frq, abs(Y), color) # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('Gain')", "title": "" }, { "docid": "1e21d7a7639204bf306e85af3f9fef3e", "score": "0.60293746", "text": "def plot_sfrd_data(ax):\n\n ObsSFRdensity = np.array([\n [0, 0.0158489, 0, 0, 0.0251189, 0.01000000],\n [0.150000, 0.0173780, 0, 0.300000, 0.0181970, 0.0165959],\n [0.0425000, 0.0239883, 0.0425000, 0.0425000, 0.0269153, 0.0213796],\n [0.200000, 0.0295121, 0.100000, 0.300000, 0.0323594, 0.0269154],\n [0.350000, 0.0147911, 0.200000, 0.500000, 0.0173780, 0.0125893],\n [0.625000, 0.0275423, 0.500000, 0.750000, 0.0331131, 0.0229087],\n [0.825000, 0.0549541, 0.750000, 1.00000, 0.0776247, 0.0389045],\n [0.625000, 0.0794328, 0.500000, 0.750000, 0.0954993, 0.0660693],\n [0.700000, 0.0323594, 0.575000, 0.825000, 0.0371535, 0.0281838],\n [1.25000, 0.0467735, 1.50000, 1.00000, 0.0660693, 0.0331131],\n [0.750000, 0.0549541, 0.500000, 1.00000, 0.0389045, 0.0776247],\n [1.25000, 0.0741310, 1.00000, 1.50000, 0.0524807, 0.104713],\n [1.75000, 0.0562341, 1.50000, 2.00000, 0.0398107, 0.0794328],\n [2.75000, 0.0794328, 2.00000, 3.50000, 0.0562341, 0.112202],\n [4.00000, 0.0309030, 3.50000, 4.50000, 0.0489779, 0.0194984],\n [0.250000, 0.0398107, 0.00000, 0.500000, 0.0239883, 0.0812831],\n [0.750000, 0.0446684, 0.500000, 1.00000, 0.0323594, 0.0776247],\n [1.25000, 0.0630957, 1.00000, 1.50000, 0.0478630, 0.109648],\n [1.75000, 0.0645654, 1.50000, 2.00000, 0.0489779, 0.112202],\n [2.50000, 0.0831764, 2.00000, 3.00000, 0.0512861, 0.158489],\n [3.50000, 0.0776247, 3.00000, 4.00000, 0.0416869, 0.169824],\n [4.50000, 0.0977237, 4.00000, 5.00000, 0.0416869, 0.269153],\n [5.50000, 0.0426580, 5.00000, 6.00000, 0.0177828, 0.165959],\n [3.00000, 0.120226, 2.00000, 4.00000, 0.173780, 0.0831764],\n [3.04000, 0.128825, 2.69000, 3.39000, 0.151356, 0.109648],\n [4.13000, 0.114815, 3.78000, 4.48000, 0.144544, 0.0912011],\n [0.350000, 0.0346737, 0.200000, 0.500000, 0.0537032, 0.0165959],\n [0.750000, 0.0512861, 0.500000, 1.00000, 0.0575440, 0.0436516],\n [1.50000, 0.0691831, 1.00000, 2.00000, 0.0758578, 0.0630957],\n [2.50000, 0.147911, 2.00000, 3.00000, 0.169824, 0.128825],\n [3.50000, 0.0645654, 3.00000, 4.00000, 0.0776247, 0.0512861],\n ], dtype=np.float32)\n\n ObsRedshift = ObsSFRdensity[:, 0]\n xErrLo = ObsSFRdensity[:, 0]-ObsSFRdensity[:, 2]\n xErrHi = ObsSFRdensity[:, 3]-ObsSFRdensity[:, 0]\n\n ObsSFR = np.log10(ObsSFRdensity[:, 1])\n yErrLo = np.log10(ObsSFRdensity[:, 1])-np.log10(ObsSFRdensity[:, 4])\n yErrHi = np.log10(ObsSFRdensity[:, 5])-np.log10(ObsSFRdensity[:, 1])\n\n ax.errorbar(\n ObsRedshift,\n ObsSFR,\n yerr=[yErrLo, yErrHi],\n xerr=[xErrLo, xErrHi],\n color='g',\n lw=1.0,\n alpha=0.3,\n marker='o',\n ls='none',\n label='Observations'\n )\n\n return ax", "title": "" }, { "docid": "4eddb8873c3833a0aab5857e3cccbae8", "score": "0.6019912", "text": "def plot_ssd(self, condition='all', counts=False, bw=.01, xlims=None):\n cidx=self._get_cidx(condition)\n a=stats.itemfreq(self.SSD[(cidx) & np.isfinite(self.SSD)])#.astype(np.int)\n if counts==False:\n a[:,1]/=np.sum(a[:,1])\n pl.bar(a[:,0]-bw/2.0, a[:,1], width=bw)\n pl.xlabel('SSD')\n pl.ylabel('freq')\n pl.title('data=%s, condition=%s'%(self.name, 'all' if condition=='all' else 
\":\".join(self.design.condidx(condition))))\n if xlims!=None:\n pl.xlim(*xlims)\n if not counts:\n pl.ylim(0,1) # probs ", "title": "" }, { "docid": "764745d5cc8079a761da3f6bce628b39", "score": "0.60031587", "text": "def plot(self):\n timebin = np.linspace(0, self.total_duration, self.specx.shape[0])\n freq_vector = [self.fc - (self.fs/2), (self.fs/2) + self.fc]\n \n plt.figure(figsize=(12,8)) \n plt.imshow(self.specx[::self.jump], extent=freq_vector + self.rightlim, origin='lower', aspect='auto') \n plt.plot(self.track_center[0], self.time_bins[0], color = 'k')\n plt.plot(self.raw_center[:, 0], timebin, color = 'white', marker='.', alpha=0.5)\n plt.xlabel('Frequency (Hz) \\n'+'\\nFile:'+ self.filename + '\\nRuntime:' + str(self.t_fs))\n plt.ylabel('Time (s)')\n plt.title('Waterfall')\n plt.xlim([self.f_chan[0] - self.BW[0]/2, self.f_chan[0] + self.BW[0]/2])\n plt.colorbar()\n plt.savefig('waterfal_plot.png', dpi=400, transparent=False)\n plt.show()", "title": "" }, { "docid": "b38d8bba19a1986cf110f47f667f66fb", "score": "0.59973717", "text": "def plot_tsys(self, S):\n plot_scale = 0.\n nscale = 0\n for ipix in range(S.npix):\n indx_fin = np.where(np.isfinite(S.roach[ipix].tsys_spectrum))\n indx_inf = np.where(np.isinf(S.roach[ipix].tsys_spectrum))\n indx_nan = np.where(np.isnan(S.roach[ipix].tsys_spectrum))\n print(ipix, 'fin------------', indx_fin[0])\n print(ipix, 'inf------------', indx_inf[0])\n print(ipix, 'nan------------', indx_nan[0])\n l_fin = len(indx_fin[0])\n if l_fin > 0 and S.roach[ipix].tsys > 0 and \\\n S.roach[ipix].tsys < 500:\n plot_scale = plot_scale + S.roach[ipix].tsys\n nscale = nscale + 1\n if nscale > 0:\n plot_scale = plot_scale / nscale\n plot_order = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16];\n for ipix in range(S.npix):\n pixel_id = S.roach_pixel_ids[ipix]\n ax = pl.subplot(4, 4, plot_order[pixel_id])\n ax.tick_params(axis='both', which='major', labelsize=6)\n ax.tick_params(axis='both', which='minor', labelsize=6)\n indx_fin = np.where(np.isfinite(S.roach[ipix].tsys_spectrum))\n l_fin = len(indx_fin[0])\n if l_fin > 0:\n pl.plot(S.roach[ipix].tsys_spectrum)\n pl.text(S.nchan / 2, 10, '%d %6.0fK'%(pixel_id, \n S.roach[ipix].tsys), \n horizontalalignment='center')\n if plot_scale != 0:\n pl.axis([0, S.nchan, 0, plot_scale * 1.5])\n else:\n pl.text(0.1, 0.5, '%d NaN'%(pixel_id))\n pl.suptitle('TSys: ObsNum %d\\n%s %s GHz'%(S.obsnum, S.receiver, \n S.line_rest_frequency))", "title": "" }, { "docid": "6861c4bf1e79fc5f1cb4ab9f65f58ddc", "score": "0.5983932", "text": "def plot(self, noised=False):\n\n if noised and self.ndata is None:\n raise ValueError(\n 'Can not display noised data when snr has been set to None.')\n\n # Show data\n hs_tmp = hs.signals.Signal2D(self.data if not noised else self.ndata)\n hs_tmp.axes_manager = self.hsdata.axes_manager\n hs_tmp.metadata = self.hsdata.metadata\n hs_tmp.plot()", "title": "" }, { "docid": "3acd03f47ed6284b9846c2ff82dca9ac", "score": "0.5945903", "text": "def plotFourier(self, widgetName, data, pen):\n try:\n N = len(data)\n # Normalization\n yplot = 2* np.abs(data[: N//2]) / N\n widgetName.plotItem.plot(self.signalFourier['dataFrequencies'][: N//2], yplot, pen= pen)\n except:\n pass", "title": "" }, { "docid": "91a1bfa74dc58b5ae7b3fe03e08df65e", "score": "0.5928069", "text": "def plot_data(self\n ) -> None:\n self.simulate_data()\n\n plt.plot(self.t, self.spleen_signal, color='red')\n plt.plot(self.t, self.liver_signal, color='blue')\n plt.plot(self.spleen_sampling_times,\n self.spleen_data, marker=\"x\", color='red', 
linewidth=0)\n plt.plot(self.liver_sampling_times, self.liver_data,\n marker=\"x\", color='blue', linewidth=0)\n plt.show()", "title": "" }, { "docid": "020261c8d490d9b2375b232dd988f26f", "score": "0.59164333", "text": "def draw_latent_frequency(encoder=None):\n \n color_id = {1: 'red', 2: 'green', 3:'blue', 4: 'orange', 5: 'purple'}\n\n if encoder is None:\n encoder = load_model('encoder.h5')\n linear_1, amp_1 = sin_signal_freq()\n \n sin_latents_1 = encoder.predict(linear_1) \n plt.cla()\n plt.clf()\n clr = [color_id[i] for i in amp_1]\n plt.scatter(sin_latents_1[:, 0], sin_latents_1[:, 1], s=3.5, color=clr)\n #plt.savefig('frequency_with_phase_latents.png', dpi=200)\n plt.show()", "title": "" }, { "docid": "b62a559972d796680c393dc4d7c9736e", "score": "0.58984387", "text": "def plot(self):\n from matplotlib import pyplot\n import numpy as np\n\n x = unit.Quantity(np.arange(0, np.pi, 0.1), unit=unit.radians)\n pyplot.plot(x,\n self.k * (1 + np.cos(self.periodicity * x - self.phase)),\n 'r--')\n\n pyplot.show()", "title": "" }, { "docid": "b21b21cfb8950b17961e02645fbed45a", "score": "0.58799034", "text": "def plot_rx_f(data_z):\n freq, z_fft = fft_rx(data_z)\n plt.plot(freq, z_fft)\n plt.show()", "title": "" }, { "docid": "3c0726ec78152ae796254111f38ed0a3", "score": "0.58260477", "text": "def simpleFreqPlot(x, y, plot=plt, xscale=False, yscale=False):\n plot.scatter(x, y)\n\n if xscale:\n plt.xscale('log') \n if yscale:\n plt.yscale('log')", "title": "" }, { "docid": "60fa5d6ed28306c4d2c22893009d51b4", "score": "0.5802264", "text": "def draw(self):\n\t\tts = pd.Series(self.R[2:], index=xrange(2,self.N+2))\n\t\tts.plot()\n\t\tplt.show()", "title": "" }, { "docid": "72f4c34f5e39fcdc2c4c161c2570dc46", "score": "0.5779021", "text": "def plot_as1D(self, noised=False):\n\n if noised and self.ndata is None:\n raise ValueError(\n 'Can not display noised data when snr has been set to None.')\n\n if noised:\n hs_data = hs.signals.Signal1D(self.ndata)\n else:\n hs_data = hs.signals.Signal1D(self.data)\n\n hs_data.axes_manager = self.hsdata.axes_manager\n hs_data.metadata = self.hsdata.metadata\n\n hs_data.plot()\n hs_data._plot.navigator_plot.ax.images[-1].set_cmap(\"viridis\")", "title": "" }, { "docid": "e3899bf53a12913b0f5c1d2d78c61dce", "score": "0.57586116", "text": "def plotFrequency(b,a=1):\n w,h = signal.freqz(b,a)\n h_dB = abs(h);#20 * np.log(abs(h))/np.log(10)\n subplot(211)\n plot(w/max(w),h_dB)\n #plt.ylim(-150, 5)\n ylabel('Magnitude (db)')\n xlabel(r'Normalized Frequency (x$\\pi$rad/sample)')\n title(r'Frequency response')\n subplot(212)\n h_Phase = np.unwrap(np.arctan2(np.imag(h),np.real(h)))\n plot(w/max(w),h_Phase)\n ylabel('Phase (radians)')\n xlabel(r'Normalized Frequency (x$\\pi$rad/sample)')\n title(r'Phase response')\n plt.subplots_adjust(hspace=0.5)\n return w;", "title": "" }, { "docid": "b3508db5d40f46209add10843063a723", "score": "0.57523036", "text": "def stability_plotter(df, waves):\n stability = []\n for header in df:\n array = df[header].to_numpy()\n\n std_dev = np.std(array)\n average = np.average(array)\n stability.append(std_dev/average) \n \n # nice plot for stability, how it changes per wavelength\n plt.plot(waves, stability) \n plt.title(\"Stability per wavelength\")\n plt.xlabel(\"Wavelength (nm)\")\n plt.ylabel(\"Stability\")\n plt.savefig(\"stability.png\")\n plt.show()\n plt.close() \n \n return stability", "title": "" }, { "docid": "e81ae6b25f4095438c8c6ae76050a372", "score": "0.5750488", "text": "def plot_seabird(netcdffile):\n\n f = 
netcdf.netcdf_file(netcdffile, 'r')\n\n depth = f.variables['DEPTH'].data\n salinity = f.variables['PSAL'].data\n temp = f.variables['TEMP'].data\n\n f.close()\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ln1 = ax1.plot(salinity, depth, 'r.', label='Salinity')\n ax1.invert_yaxis()\n\n ax2 = ax1.twiny()\n ln2 = ax2.plot(temp, depth, 'g.', label='Temperature')\n\n ax1.grid(True)\n ax1.set_ylabel('Depth (m)')\n ax1.set_xlabel('Salinity (PSU)')\n ax2.set_xlabel('Temperature ($^\\circ$C)')\n\n lns = ln1+ln2\n labs = [l.get_label() for l in lns]\n ax2.legend(lns, labs, loc=0)\n\n return fig", "title": "" }, { "docid": "d7bac4221056958e0be52693fb1669b1", "score": "0.5747702", "text": "def plot_temporal_distribution(self, fname=None):\n fname = self._fname(fname)\n \n figname = 'temps1.Simulation.plot_temporal_distribution.' + fname.replace(\" \", \"_\")\n fig, axs = plt.subplots(2, 1, num=figname, clear=True, figsize=[6.4, 7.19])\n axs[0].set_title(f'{fname.capitalize()} filter\\nTemporal distribution of S1 candidates')\n \n ax = axs[0]\n ax.set_xlabel('Time relative to true S1 location [ns]')\n ax.set_ylabel('Inverse of neighbor temporal gap [ns$^{-1}$]')\n \n for k in ['all', 'dcr']:\n time = self.times[fname][k]\n time = np.sort(time)\n ddecdf = 1 / np.diff(time)\n x = time - self.s1loc\n y = np.concatenate([ddecdf, ddecdf[-1:]])\n ax.plot(x, y, drawstyle='steps-post', **self.plotkw[k])\n \n ax.axvspan(-self.deadradius, self.deadradius, color='#eee', zorder=-9, label='$\\\\pm$ dead radius')\n\n ax.legend(loc='upper right')\n ax.set_xlim(1.5 * self.deadradius * np.array([-1, 1]))\n ax.set_yscale('log')\n ax.minorticks_on()\n ax.grid(True, which='major', linestyle='--')\n ax.grid(True, which='minor', linestyle=':')\n \n \n ax = axs[1]\n ax.set_xlabel('Time relative to true S1 location [ns]')\n ax.set_ylabel('Histogram bin density [ns$^{-1}$]')\n \n times1 = self.hits1.reshape(-1) - self.s1loc \n time = self.times[fname]['all'] - self.s1loc\n signal = self.signal[fname]['all']\n time_match = time[signal]\n sigma = qsigma.qsigma(time_match)\n \n # t = np.linspace(..., ..., 1000)\n # ax.plot(t, pS1.p_S1_gauss(t, self.VL, self.tauV, self.tauL, self.tres), label='S1 pdf')\n histkw = dict(bins='auto', density=True, histtype='step', zorder=10)\n ax.hist(times1, label=f'S1 photons ({len(times1)})', linestyle=':', **histkw)\n ax.hist(time_match, label=f'matching candidates ($\\\\sigma_q$={sigma:.3g} ns)', **histkw)\n \n ax.axvspan(0, self.deadradius, color='#eee', zorder=-9, label=f'dead radius ({self.deadradius} ns)')\n\n textbox.textbox(ax, self.infotext(), loc='upper left', zorder=11)\n \n ax.legend(loc='upper right', fontsize='small')\n ax.set_yscale('log')\n linthreshx = 100 #10 ** np.ceil(np.log10(15 * sigma))\n ax.set_xscale('symlog', linthreshx=linthreshx)\n ax.minorticks_on()\n ax.xaxis.set_minor_locator(symloglocator.MinorSymLogLocator(linthreshx))\n ax.grid(True, which='major', linestyle='--')\n ax.grid(True, which='minor', linestyle=':')\n\n fig.tight_layout()\n \n return fig", "title": "" }, { "docid": "42942931756a0fae0105213e21666ea5", "score": "0.5739101", "text": "def plot(self):\n from matplotlib import pyplot\n import numpy as np\n\n x = unit.Quantity(np.arange(0, np.pi, 0.1), unit=unit.radians)\n pyplot.plot(x, self.constant * (1 + self.prefactor\n * np.cos(self.periodicity * x\n + self.phase)),\n 'r--')\n\n pyplot.show()", "title": "" }, { "docid": "97916c7a9496d26b389c541e02f8a532", "score": "0.573355", "text": "def do_stability_plotting() -> None:\n # data = 
gather_stability_data('500runs_lowerL')\n data = pickle_load('500runs_lowerL')\n plt.plot_stability(data)", "title": "" }, { "docid": "4f1f1b272d5c909d338e7f4486fb3d7d", "score": "0.5661861", "text": "def plot(y, si):\n x = si.timer.series.magnitude\n plot_power(x, y, si)\n plot_reactivity(x, si)\n plot_power_w_reactivity(x=x, y=y, si=si)\n plot_zetas(x, y, si)\n if si.ne._ndg > 0:\n plot_omegas(x, y, si)\n plot_temps_together(x, y, si)\n plot_temps_separately(x, y, si)", "title": "" }, { "docid": "f8cb646c3958e42160b2bfa23202d86f", "score": "0.56223905", "text": "def plot_data(self, cutoff=200):\n\n # If data not loaded, call load_data function\n if len(self.data) == 0:\n self.load_data()\n\n ix1 = index_of(self.energy, - min(self.energy))\n ix2 = index_of(self.energy, cutoff)\n yHeight = max(self.nor_counts[ix2:])\n\n x = self.energy[:ix1]\n y = self.counts[:ix1]\n model = GaussianModel()\n pars = model.guess(y, x=x)\n result = model.fit(y, pars, x=x)\n fwhm = result.params['fwhm'].value\n\n # Plot\n plt.figure(figsize=(10, 3))\n gs = gridspec.GridSpec(1, 3)\n ax0 = plt.subplot(gs[0, 0])\n ax0.plot(x, y, 'bo', markersize=4)\n ax0.plot(x, result.best_fit, 'r-')\n ax0.set_xlabel('wavenumber (cm-1)', fontsize=8)\n ax0.set_ylabel('counts', color='k', fontsize=8)\n plt.title('FWHM ='+\"{0:.0f}\".format(fwhm), fontsize=10)\n plt.xlim(-200, 200)\n ax0.locator_params(axis='x', nbins=5)\n ax0.locator_params(axis='y', nbins=8)\n\n ax1 = plt.subplot(gs[0, 1:3])\n ax1.plot(self.energy, self.nor_counts, 'k-')\n ax1.set_xlabel('wavenumber (cm-1)', fontsize=8)\n # Make the y-axis label and tick labels match the line color.\n for tl in ax1.get_yticklabels():\n tl.set_color('k')\n\n ax2 = ax1.twinx()\n ax2.plot(self.energy, self.nor_counts, 'r-')\n ax2.set_ylabel('counts', color='r', fontsize=8)\n for tl in ax2.get_yticklabels():\n tl.set_color('r')\n\n plt.ylim(0, yHeight)\n plt.xlim(min(self.energy), max(self.energy))\n plt.title(self.filename, fontsize=10)\n ax2.locator_params(axis='x', nbins=15)\n ax2.locator_params(axis='y', nbins=8)\n plt.show()", "title": "" }, { "docid": "7184530a51eb0ca2b3baf9fd9e7ecb36", "score": "0.56194925", "text": "def plot(t, y, oversampling=5, figsize=(12, 20)):\n freqs, mag, phase, Pn, proba = extract(t, y, hifac=1, oversampling=oversampling)\n fig, ax = plt.subplots(6, 1, figsize=figsize)\n ax[0].scatter(t, y, color='black')\n ax[0].set_title('Flux')\n ax[0].set_xlabel('time [$mjd$]')\n \n ax[1].plot(freqs, mag, color='black')\n ax[1].set_title('Spectrum magnitude')\n ax[1].set_xlabel('Freq [$mjd^{-1}$]')\n \n ax[2].plot(freqs, phase, color='black')\n ax[2].set_title('Spectrum phase')\n ax[2].set_xlabel('Freq [$mjd^{-1}$]')\n \n ax[3].plot(1/freqs, Pn, color='black')\n ax[3].set_title('LombScargle periodogram')\n ax[3].set_xlabel('Period [$mjd$]')\n \n ax[4].plot(1/freqs, proba, color='black')\n ax[4].set_title('Periodogram true alarm probability')\n ax[4].set_xlabel('Period [$mjd$]')\n # Estimated phase\n h = 1/freqs[np.argmax(Pn)]\n phase_hat = (t/h)%1\n ax[5].scatter(phase_hat, y, color='black')\n ax[5].set_title('Phase from estimated period, where h=%.4f'%h)\n fig.tight_layout()\n return freqs, mag, phase, Pn, proba", "title": "" }, { "docid": "68ce11b2bc075e32138591177c931ee5", "score": "0.5619293", "text": "def plot(self, iwidth=None):\n if iwidth is None:\n snr = self.snrs.max(axis=1)\n else:\n snr = self.snrs[:, iwidth]\n\n plt.plot(self.periods, snr, marker='o', markersize=2, alpha=0.5)\n plt.xlim(self.periods.min(), self.periods.max())\n plt.xlabel('Trial 
Period (s)', fontsize=16)\n plt.ylabel('S/N', fontsize=16)\n\n if iwidth is None:\n plt.title('Best S/N at any trial width', fontsize=18)\n else:\n width_bins = self.widths[iwidth]\n plt.title('S/N at trial width = %d' % width_bins, fontsize=18)\n\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.grid(linestyle=':')\n plt.tight_layout()", "title": "" }, { "docid": "e6c72ac3c0c6b364b049b8287a00d8ca", "score": "0.5612675", "text": "def plot_freq_tau(self):\n plt.plot(self.frequency, self.tau)\n plt.xlabel(\"Frequency (MHz)\")\n plt.ylabel(r\"$\\tau (\\mu s)$\")\n plt.show()", "title": "" }, { "docid": "ecbe146e7ef623fe833d3baac843b889", "score": "0.5577341", "text": "def plot_as2D(self, noised=False):\n\n if noised and self.ndata is None:\n raise ValueError(\n 'Can not display noised data when snr has been set to None.')\n\n if noised:\n hs_data = hs.signals.Signal2D(\n np.moveaxis(self.ndata, [0, 1, 2], [1, 2, 0]))\n else:\n hs_data = hs.signals.Signal2D(\n np.moveaxis(self.data, [0, 1, 2], [1, 2, 0]))\n\n hs_data.axes_manager = self.hsdata.axes_manager\n hs_data.metadata = self.hsdata.metadata\n\n hs_data.plot(cmap='viridis')", "title": "" }, { "docid": "2a46e6e350f17c7266ca0fb74d298350", "score": "0.55732363", "text": "def plot_signals(self\n ) -> None:\n self.calculate_signals()\n\n plt.plot(self.t, self.spleen_signal, color='red')\n plt.plot(self.t, self.liver_signal, color='blue')\n plt.show()", "title": "" }, { "docid": "e6b0d0ac14c98bef106090cdeba380f9", "score": "0.5559552", "text": "def plot_ant_gain(self):\n import pylab\n pylab.plot(self.config['freqs']/1e6,self.config['antenna_bandpass'])\n pylab.title('Antenna gain %s'%self.config['antenna_bandpass_calfile'])\n pylab.xlabel('Frequency (MHz)')\n pylab.ylabel('Relative response (dBi)')", "title": "" }, { "docid": "e7744e0d9ac9a0ab4dc9b64addef70b9", "score": "0.55551803", "text": "def plot_converted(spectrum):\n for index, element in enumerate(spectrum):\n plt.plot(element[0][0], element[0][1])\n plt.xlabel(\"Angstroms\")\n plt.ylabel(\"Count\")\n plt.title(\"Interpolated\")\n plt.show()", "title": "" }, { "docid": "5feb152157af5a94623b346dcc2104bb", "score": "0.5549776", "text": "def plot_waterfall(d, freqs, lsts, t_unit='hr', f_unit='MHz',\n raster=True):\n\n plt.imshow(d, aspect='auto', interpolation='none',\n rasterized=raster,\n extent=(freqs[0], freqs[-1], lsts[0], lsts[-1]))\n plt.xlabel(\"Frequency [%s]\" % f_unit)\n plt.ylabel(\"Time [%s]\" % t_unit)", "title": "" }, { "docid": "4633c1fe42fab5ba5f5baaa1f8632282", "score": "0.55475754", "text": "def plot_spe_freq(res, points=None, record=-1, time=None, fig_name=''):\n # If time is given searched for record\n if time is not None:\n record = res.get_closest_record(time)\n time = res.times[record]\n else:\n time = res.times[record]\n\n if points is None:\n points = res.get_list_spectrum_points()\n\n fig, axe = plt.subplots()\n\n for point in points:\n # Getting list of frequencies and spectrum value\n freq, spectrum = res.get_spectrum(point, record)\n # Plotting it\n plot1d(axe, freq, spectrum, plot_label='point {:06d}'.format(point),\n x_label='Frequencies', y_label='Spectrum')\n\n axe.legend()\n\n axe.set_title(\"At time {}\".format(time))\n\n if fig_name != '':\n print(\" \"*8+\"~> Plotting {}\".format(fig_name))\n fig.savefig(fig_name)\n else:\n plt.show()\n\n plt.close(fig)", "title": "" }, { "docid": "9d2121dbb71c965bf21c71ee0f085cd5", "score": "0.5524251", "text": "def plot(self):\r\n test_domain = np.linspace(0, 1, 100, dtype = intprim.constants.DTYPE)\r\n 
test_range = self.get_basis_functions(test_domain)\r\n\r\n fig = plt.figure()\r\n\r\n for basis_func in test_range:\r\n plt.plot(test_domain, basis_func)\r\n\r\n fig.suptitle('Basis Functions')\r\n\r\n plt.show(block = False)", "title": "" }, { "docid": "321bcdea5c2ecb4b5d058d69d46fb14b", "score": "0.55236715", "text": "def plot_sxx(self, bloc, chan, num_samples=None):\n\n pol0, pol1 = self.get_data(bloc, chan, num_samples)\n sxx0, sxx1 = np.fft.fftshift(np.abs(np.fft.fft(pol0)) ** 2), np.fft.fftshift(np.abs(np.fft.fft(pol1)) ** 2)\n plt_title = \"SPD of bloc: {}, channel {}\".format(str(bloc), str(chan))\n lfreq, ufreq = self._calc_chan_freq_range(chan) # get lower bound and upper bound frequency of the channel\n freq_axis = np.linspace(lfreq, ufreq, len(sxx0))\n self._plot(sxx0, sxx1, plt_title, x=freq_axis)\n return sxx0, sxx1", "title": "" }, { "docid": "2877ac82537c408c7259bbc2c2e06982", "score": "0.5521294", "text": "def generateSinewave(nrCycles, nrdataPointsPerCycles = 2048):\n cycles = np.linspace(0,nrCycles * 2 * np.pi, nrdataPointsPerCycles)\n cyclesArray = np.array(np.sin(cycles))\n #plt.plot(cyclesArray)\n #plt.xlabel('angle [rad]')\n #plt.ylabel('sin (x)')\n #plt.axis('tight')\n #plt.show()\n \n wave = int16scale(cyclesArray)\n plt.plot(wave)\n plt.xlabel('angle [rad]')\n plt.ylabel('sin (x)')\n plt.axis('tight')\n plt.show()", "title": "" }, { "docid": "d8a7cb7b48c07db5456c3a268448f9ba", "score": "0.55171424", "text": "def display_stft(X,\n fs,\n len_sig,\n low_freq=0,\n high_freq=3000,\n min_db=-10,\n max_db=0,\n normalize=True):\n # normalize : largest coefficient magnitude is unity\n X_temp = X.copy()\n if normalize:\n X_temp /= np.amax(abs(X_temp))\n\n # compute frequencies array\n Freqs = np.array([low_freq, high_freq])\n Fd = (Freqs * X_temp.shape[0] / fs).astype(int)\n\n # compute values matrix\n Z = X_temp[Fd[1]:Fd[0]:-1, :]\n Z = np.clip(np.log(np.abs(Z) + 1e-50), min_db, max_db)\n Z = 255 * (Z - min_db) / (max_db - min_db)\n\n # compute duration\n time = float(len_sig) / float(fs)\n\n # plotting\n plt.imshow(Z,\n extent=[0, time, low_freq / 1000, high_freq / 1000],\n aspect=\"auto\")\n plt.ylabel('Frequency (Khz)')\n plt.xlabel('Time (sec)')\n plt.show()", "title": "" }, { "docid": "1e633f063f5d07dbb8d1057a1cb1afda", "score": "0.55149215", "text": "def plot_restfreq_spectrum(SSC, band):\n\n def get_spectrum(SSC, band):\n spectrum = spectra[str(SSC['no'])][band]\n frequency = spectrum['frequency'].to(u.MHz)\n intensity = spectrum['spectrum'].to(u.K)\n\n # shift spectrum to rest frequency\n velshift = SSC['velshift']\n frequency = [(-vsys-velshift).to(u.GHz, equivalencies=u.doppler_optical(f)).value for f in frequency]*u.GHz\n\n # remove NaNs\n frequency, intensity = crossmatch(frequency.to(u.GHz).value, intensity.to(u.K).value)\n return frequency, intensity\n\n def set_up_figure(SSC, specie, band):\n fig,ax = plt.subplots(nrows=1, ncols=1, squeeze=True, sharex='col', sharey='row', figsize=(10,8))\n ax.text(0.05, 0.9, 'SSC '+str(SSC['no'])+': '+band, color='k', transform=ax.transAxes, ha='left', va='top', weight='bold', fontsize=16, bbox=props)\n return fig,ax\n\n def plot_spectrum(ax, frequency, spectrum):\n ax.plot(frequency, spectrum, ls='-', color='k', zorder=3)\n ax.fill_between(frequency, spectrum, [0. 
for f in frequency], color='grey', alpha=0.5, zorder=2)\n\n def get_detected_lines(band=None):\n # get detected species\n all_species = []\n for SSC in SSCs:\n for specie in detected_species[str(SSC['no'])]:\n if not specie in all_species:\n all_species.append(specie)\n # get all lines of the detected species\n all_lines = []\n for specie in all_species:\n slines = [l for l in lines if l['XCLASS']==specie]\n for sl in slines:\n all_lines.append(sl)\n # keep only lines of given band\n if not band==None:\n bandlines = []\n for line in all_lines:\n if band=='LSB':\n if line['restfreq']<350*u.GHz:\n bandlines.append(line)\n elif band=='USB':\n if line['restfreq']>350*u.GHz:\n bandlines.append(line)\n return sorted(bandlines, key=lambda k: k['restfreq'])\n else:\n return sorted(all_lines, key=lambda k: k['restfreq'])\n\n def label_lines(ax, spectrum, band):\n detected_lines = get_detected_lines(band=band)\n for idx,line in enumerate(detected_lines):\n restfreq = line['restfreq'].to(u.GHz).value\n if (restfreq>frequency[0] and restfreq<frequency[-1]):\n if band=='LSB':\n xlim = [342.4, 346.2]\n elif band=='USB':\n xlim = [354.3, 358.1]\n xloc = xlim[0] +((idx+0.5)/len(detected_lines))*(xlim[1]-xlim[0])\n ax.axvline(x=restfreq, ymin=0, ymax=1, color='dimgrey', ls='--', lw=0.5, zorder=1)\n ax.plot([restfreq,xloc], [1.05*np.nanmax(spectrum), 1.05*1.05*np.nanmax(spectrum)], color='dimgrey', ls='--', lw=0.5, zorder=1, clip_on=False)\n ax.text(xloc, 1.06*1.05*np.nanmax(spectrum), line_tex(line), color='dimgrey', fontsize=10, rotation=90, ha='center', va='bottom')\n\n\n def velocity_indicator(ax, frequency, spectrum):\n x = np.nanmedian(frequency)\n y = 0.95*np.nanmax(spectrum)\n ax.plot([x, x+0.05836398],[y,y], c='k', ls='-', lw='3', zorder=11)\n ax.text(x, y, r'- $\\sim 50$\\,km\\,s$^{-1}$', color='k', ha='left', va='center', weight='bold', bbox=props, zorder=10)\n\n def format_figure(ax, frequency, spectrum):\n ax.set_xlim([frequency[0], frequency[-1]])\n ax.set_ylim(-0.05*np.nanmax(spectrum), 1.05*np.nanmax(spectrum))\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.1))\n ax.yaxis.set_major_locator(MultipleLocator(10))\n ax.yaxis.set_minor_locator(MultipleLocator(2))\n ax.tick_params(axis='both', which='major', labelsize=12)\n ax.set_axisbelow(True)\n ax.grid(ls=':', c='grey')\n ax.set_xlabel(r'$\\nu_\\mathrm{rest}$ [GHz]', fontsize=12)\n ax.set_ylabel(r'T$_\\mathrm{b}$ [K]', fontsize=12)\n fig.set_tight_layout(True)\n\n def save_figure(fig, specie, band):\n savepath = escape_fname(os.path.join(plotdir, '03.XCLASS_fit', 'restfreq_spectra', str(SSC['no'])+'.'+band+'.restfreq.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_inches='tight')\n\n\n frequency, spectrum = get_spectrum(SSC, band)\n fig,ax = set_up_figure(SSC, specie, band)\n plot_spectrum(ax, frequency, spectrum)\n label_lines(ax, spectrum, band)\n velocity_indicator(ax, frequency, spectrum)\n format_figure(ax, frequency, spectrum)\n save_figure(fig, specie, band)", "title": "" }, { "docid": "5ed08779bec6b098fe75436b5dcbf85f", "score": "0.54813236", "text": "def control_sinwave(num,filename=\"control_sinwave.txt\",display=False):\n filename = datadir + filename\n N = num\n min_period = 2\n max_period = 20\n n_changepoints = N/200\n rng = np.random.RandomState(42)\n\n changepoints = np.insert(np.sort(rng.randint(0,N,n_changepoints)),[0,n_changepoints],[0,N])\n const_intervals = zip(changepoints,np.roll(changepoints,-1))[:-1]\n frequency_control = 
np.zeros((N,1))\n for (t0,t1) in const_intervals:\n frequency_control[t0:t1] = rng.rand()\n periods = frequency_control * (max_period - min_period) + max_period\n frequency_output = np.zeros((N,1))\n z = 0\n for i in range(N):\n z = z + 2 * np.pi / periods[i]\n frequency_output[i] = (np.sin(z) + 1)/2\n input,output = map(lambda x:x[0],frequency_control),map(lambda x:x[0],frequency_output)\n pd.DataFrame(zip(input,output),columns=['input','output']).to_csv(filename,index=False)\n if display:\n display_range = 1000\n plt.figure(figsize =(12,1.5))\n plt.plot(range(display_range),input[:display_range])\n plt.plot(range(display_range),output[:display_range]) # output[:display_range]ではうまくいかない。なぜ?\n plt.show()", "title": "" }, { "docid": "e1cda1f5c61d48cc3a8e5f6e447b11e7", "score": "0.54711914", "text": "def draw_figure(self, data):\n count_data = data[:, 1:len(self.get_channels())+1]\n time_data = data[:, 0]\n\n # Scale count values using SI prefix\n prefix = ['', 'k', 'M', 'G']\n prefix_index = 0\n while np.max(count_data) > 1000:\n count_data = count_data / 1000\n prefix_index = prefix_index + 1\n counts_prefix = prefix[prefix_index]\n\n # Use qudi style\n plt.style.use(self._save_logic.mpl_qd_style)\n\n # Create figure\n fig, ax = plt.subplots()\n ax.plot(time_data, count_data, linestyle=':', linewidth=0.5)\n ax.set_xlabel('Time (s)')\n ax.set_ylabel('Fluorescence (' + counts_prefix + 'c/s)')\n return fig", "title": "" }, { "docid": "3fe8f2e42a540b5737fd6f01c8b9f78b", "score": "0.5427506", "text": "def plot_series(rr, hist=True):\r\n x1, x2 = cl.beats_to_lists(rr)\r\n return [go.Scatter(x=x1, y=x2)]", "title": "" }, { "docid": "8dc8698e2675f02af917d8ee0af41af3", "score": "0.5418549", "text": "def SignalSeries():\r\n pass", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.5413463", "text": "def plot(self):\n pass", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.5413463", "text": "def plot(self):\n pass", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.5413463", "text": "def plot(self):\n pass", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.5413463", "text": "def plot(self):\n pass", "title": "" }, { "docid": "325c82d73d4b3299aa9d72d0e620f80a", "score": "0.5408115", "text": "def plot_sine_wave(start_x, stop_x, amplitude):\n x_array = np.linspace(start_x, stop_x, 1000)\n print(x_array)\n y_array = amplitude * np.sin(x_array)\n plt.plot(x_array, y_array,\":\",color=\"red\")", "title": "" }, { "docid": "e06bc26f07f27d7a19d7e80c20b15559", "score": "0.54054606", "text": "def graph(data, title=\"Graph\"):\n plt.figure(figsize=(7, 6))\n plt.title(title)\n plt.plot(data.keys(), data.values())\n plt.ylabel(\"Sound Level\")\n plt.xlabel(\"Time (s)\")\n plt.show()", "title": "" }, { "docid": "76f87307f248dc083c3f817bc812cdf9", "score": "0.54031557", "text": "def plot_fits(direction_rates,fit_curve,title):", "title": "" }, { "docid": "7bd4e06e92975f2fccc3c57ae66f54fc", "score": "0.5398139", "text": "def plot_trust_func(x):\r\n plt.plot(x, trust_value, 'ro')\r\n plt.xlabel('Variance value')\r\n plt.ylabel('Trust value')\r\n plt.title('Trust function (%s elements)' % (trust_value.shape[0]))\r\n plt.show()", "title": "" }, { "docid": "20f6f18a7cd137d69fec0682a6e9826c", "score": "0.5390575", "text": "def plot_spectra(inst):\n for i in range(3):\n for j in range(4):\n if (i < 2 or j < 2):\n index = i*4 + j\n else:\n break\n lmda, flux = inst.data[index]\n plt.subplot(4, 1, j+1)\n plt.title('Day 
{}'.format(inst.days[index]))\n plt.plot(lmda, flux)\n plt.xlabel('Wavelength, [nm]')\n plt.ylabel('Relative flux')\n plt.suptitle('Flux relative to the continuum flux around spectral line')\n plt.show()", "title": "" }, { "docid": "b436ce02d66b805de84dce1cea9dac0d", "score": "0.5388317", "text": "def fourierGraph(ivalueData=None, N=10):\n valueData = ivalueData\n #### Create a plot ####\n origPlot = figure(title=\"Fourier curve fitting\", plot_width=400, plot_height=200, responsive=True)\n origPlot.toolbar.logo = None\n origPlot.xaxis.minor_tick_line_color = None # turn off x-axis minor ticks\n origPlot.yaxis.minor_tick_line_color = None # turn off x-axis minor ticks\n\n # call the fourierTrigSeries() to generate lambda func series and the list of tuples trigTerms (coefs, trigFuncs)\n series, trigTerms = ta.fourierTrigSeries(valueData, n=N)\n # Original Line\n xline = [i for i in range(len(valueData))]\n origPlot.line(xline, valueData, legend=\"Original data\", color='green', line_width=3, line_dash='dashed')\n # Fourier Line\n xs = np.arange(0, len(xline), .3)\n origPlot.line(xs, series(xs), legend=\"Fourier Trig Series n={}\".format(N), line_width=3)\n # Legend color setup\n origPlot.legend.background_fill_color = \"LightGray\"\n\n #### Amplitude plot ####\n coef, trigFuncs = zip(*trigTerms)\n trigFuncsString = [str(trigfunc) for trigfunc in trigFuncs[2:]]\n ampPlot = figure(title=\"Frequency Amp\", plot_width=400, plot_height=200,x_range=trigFuncsString,responsive=True)\n ampPlot.toolbar.logo = None\n ampPlot.yaxis.minor_tick_line_color = None\n ampPlot.circle(trigFuncsString, coef[2:], line_width=3, size=6)\n ampPlot.xaxis.major_label_orientation = math.pi/4\n # Create Stem graph\n x0 = [i + 1 for i in range(len(coef)-2)]\n x1 = coef[2:]\n ampPlot.ray(x=0, y=0, angle=0, length=(N*2 + 2), line_width=3, line_color=\"purple\", line_alpha=0.5)\n ampPlot.segment(x0=x0, y0=0, x1=x0, y1=coef[2:], line_width=3)\n\n #### Frequency plot ####\n freqPlot = figure(title=\"Frequency decomposition\", plot_width=400, plot_height=400, responsive=True)\n freqPlot.toolbar.logo = None\n freqPlot.xaxis.minor_tick_line_color = None\n freqPlot.yaxis.minor_tick_line_color = None\n\n\n x = sp.Symbol('x')\n numLines = len(trigFuncs)\n color_pallete = Spectral11*numLines\n\n for i in range(numLines):\n # ignore the first two terms as they are not trig functions but numbers\n if i < 2:\n continue\n f = sp.lambdify(x, trigFuncs[i], modules=['numpy'])\n # plot the functions\n freqPlot.line(xs,coef[i]*f(xs), color=color_pallete[i])\n\n #### Convolve begin #####\n trendPlot = figure(title=\"Trend Plot\",plot_width=400, plot_height=200,responsive=True)\n\n trendPlot.line(xs, series(xs), line_width=3)\n\n origScript, origDiv = components(origPlot,INLINE)\n ampScript, ampDiv = components(ampPlot, INLINE)\n freqScript, freqDiv = components(freqPlot, INLINE)\n trendScript, trendDiv = components(trendPlot, INLINE)\n return (origScript, origDiv,ampScript, ampDiv,freqScript, freqDiv, trendScript, trendDiv)", "title": "" }, { "docid": "f7460b939220ce4fbf6728465186e014", "score": "0.53771937", "text": "def plot(self):\n plt.figure(figsize=(14,7))\n plt.plot(SignalGrid.bin_time,\n SignalGrid.meander(SignalGrid.bin_time)+\n SignalGrid.initial_frequency,'.',label='meandering frequency') \n plt.plot(SignalGrid.bin_time,self.post_freq,'r.',label='highest strength frequency')\n plt.legend()\n plt.title('highest strength frequency and actual meandering frequency of signal')\n plt.ylabel('signal frequency')\n plt.xlabel('bin time')\n 
plt.savefig('meandering_frequency.pdf',bbox_inches='tight')\n plt.clf()\n\n plt.figure(figsize=(7,14))\n plt.imshow(self.ngrid, cmap='viridis')\n plt.gca().xaxis.tick_top()\n plt.gca().xaxis.set_label_position('top')\n cbar = plt.colorbar() \n cbar.set_label('frequency probability distribution')\n plt.title('grid of signals in frequency domain as frequency changes')\n plt.ylabel('signal frequency bins')\n plt.xlabel('long time bins')\n plt.savefig('signal_grid_raw.pdf',bbox_inches='tight')\n plt.clf()\n\n plt.figure(figsize=(7,14))\n plt.imshow(self.lngrid, cmap='viridis')\n plt.gca().xaxis.tick_top()\n plt.gca().xaxis.set_label_position('top') \n cbar = plt.colorbar() \n cbar.set_label('log(frequency) probability distribution')\n plt.title('grid of signals in log(frequency) domain as frequency changes')\n plt.ylabel('signal frequency bins')\n plt.xlabel('long time bins')\n plt.savefig('lnwandering.pdf',bbox_inches='tight')\n plt.clf()\n\n plt.figure(figsize=(7,14))\n plt.imshow(self.path_grid, cmap='viridis')\n plt.gca().xaxis.tick_top()\n plt.gca().xaxis.set_label_position('top') \n plt.title('viterbi path through signal frequency grid')\n plt.ylabel('signal frequency bins')\n plt.xlabel('long time bins')\n plt.savefig('viterbi_path.pdf',bbox_inches='tight')\n plt.clf()", "title": "" }, { "docid": "6de07e4f38a23a0e036128fc9a64c3b9", "score": "0.5373745", "text": "def initializeSpectrumGraph(self):\r\n\r\n # X axis definition\r\n self.f = self.RATE * np.arange(self.N/2) / self.N\r\n\r\n # Initializing plot\r\n self.fig, axis = plt.subplots(1, 1)\r\n self.linef, = axis.plot(self.f, np.zeros(len(self.f)))\r\n\r\n # Setting axis labels\r\n axis.set_xlabel(\"Freq [Hz]\")\r\n axis.set_ylabel(\"X(f)\")\r\n\r\n # Setting axis limits\r\n axis.set_xlim(20, self.RATE / 2) # Nyquist\r\n axis.set_ylim(0, 200)\r\n plt.show(block = False)", "title": "" }, { "docid": "3a597102eac7b1851de3951c85c28f66", "score": "0.53708905", "text": "def plot_v_frequency(self, y: NumberLike, *args, **kwargs):\n\n try:\n if len(npy.shape(y)) > 2:\n # perhaps the dimensions are empty, try to squeeze it down\n y = y.squeeze()\n if len(npy.shape(y)) > 2:\n # the dimensions are full, so lets loop and plot each\n for m in range(npy.shape(y)[1]):\n for n in range(npy.shape(y)[2]):\n self.plot(y[:, m, n], *args, **kwargs)\n return\n if len(y) == len(self):\n pass\n else:\n\n raise IndexError(['thing to plot doesn\\'t have same'\n ' number of points as f'])\n except(TypeError):\n y = y * npy.ones(len(self))\n\n # plt.plot(self.f_scaled, y, *args, **kwargs)\n plt.plot(self.f, y, *args, **kwargs)\n ax = plt.gca()\n scale_frequency_ticks(ax, self.unit)\n plt.autoscale(axis='x', tight=True)\n self.labelXAxis()", "title": "" }, { "docid": "071ccad6af0331bddf513c0c2c2dd045", "score": "0.5365488", "text": "def plot_filter_response(\n frequency,\n sample_rate,\n filter_type,\n filter_order=2,\n show_grid=True,\n fig_size=(10, 5),\n):\n\n b, a = sensormotion.signal.build_filter(\n frequency, sample_rate, filter_type, filter_order\n )\n\n # Plot the frequency response\n w, h = freqz(b, a, worN=8000)\n f, axarr = plt.subplots(figsize=fig_size)\n axarr.plot(0.5 * sample_rate * w / np.pi, np.abs(h), \"b\")\n axarr.set_xlim(0, 0.5 * sample_rate)\n axarr.set_xlabel(\"Frequency (Hz)\")\n axarr.grid(show_grid)\n\n # Add lines and markers at the cutoff frequency/frequencies\n if filter_type == \"bandpass\":\n for i in range(len(frequency)):\n axarr.axvline(frequency[i], color=\"k\")\n axarr.plot(frequency[i], 0.5 * np.sqrt(2), \"ko\")\n else:\n 
axarr.axvline(frequency, color=\"k\")\n axarr.plot(frequency, 0.5 * np.sqrt(2), \"ko\")\n\n plt.suptitle(\"Filter Frequency Response\", size=16)\n plt.show()", "title": "" }, { "docid": "17b033440e6d4eb79b494049bf1885e0", "score": "0.5365277", "text": "def plot_data(self):\n pass", "title": "" }, { "docid": "82e83e8f050f1745066a1238f3180589", "score": "0.5361473", "text": "def FW_trend_plot(quant, empty=False): # Fig. 9c\n assert quant in ['FW', 'SALT']\n if quant=='FW': q, Q, Q1 = 0, 'F', 'W'\n elif quant=='SALT': q, Q, Q1 = 1, 'S', 'S'\n lat_bounds = ['90N', '60N', '45N', '10N', '10S', '34S']\n\n f = plt.figure(figsize=(6.4,2.5))\n hap = dict(x=0, dy=0, width=.04, length_includes_head=True, head_width=.06, head_length=.02, ec=None, lw=0, clip_on=False) # horizontal arrow properties\n vap = dict(y=0, dx=0, width=.06, length_includes_head=True, head_width=.08, head_length=.03, ec=None, lw=0, clip_on=False) # vertical arrow properties\n\n for i, (latS, latN) in enumerate(lat_bands):\n #region: define axes\n if i==0: # Atlantic 34S-60N\n ax = f.add_axes([13.5/16,.2,1/8,.55])\n ax.xaxis.set_ticks([])\n ax.yaxis.set_ticks([])\n\n axb = f.add_axes([13.5/16,-.05,1/8 ,.5 ])\n axt = f.add_axes([13.5/16,.5 ,1/8 ,.5 ])\n axl = f.add_axes([12 /16,.2 ,3/16,.55])\n axr = f.add_axes([14 /16,.2 ,3/16,.55])\n vert_lim = [(-.3,.3),(-8e7,8e7)][q]\n hor_lim = [(-.2,.2),(-3e8,3e8)][q]\n draw_scales(f=f,left=False,vlim=vert_lim[1],hlim=hor_lim[1])\n draw_labels(f,'Sv/century')\n\n else:\n ax = f.add_axes([(2*i+1)/16-1.5/16,.2,1/8,.55])\n ax.xaxis.set_ticks([])\n ax.yaxis.set_ticks([])\n\n axb = f.add_axes([(2*i+1)/16-1.5/16,-.05,1/8 ,.5 ])\n axt = f.add_axes([(2*i+1)/16-1.5/16,.5 ,1/8 ,.5 ])\n axl = f.add_axes([(2*i+1)/16-3/16 ,.2 ,3/16,.55])\n axr = f.add_axes([(2*i+1)/16-1/16 ,.2 ,3/16,.55])\n\n vert_lim = [(-.25,.25),(-2e7,2e7)][q]\n hor_lim = [(-.2,.2),(-.7e8,.7e8)][q]\n if i==1:\n draw_scales(f=f,left=True,vlim=vert_lim[1],hlim=hor_lim[1])\n for ax in [axl, axr]:\n ax.set_xlim(hor_lim)\n ax.set_ylim((-.35,1.3))\n\n for ax in [axb, axt, axl, axr]:\n # ax.patch.set_alpha(0)\n ax.axis('off')\n ax.xaxis.set_ticks([])\n\n for ax in [axt, axb]:\n ax.set_ylim(vert_lim)\n ax.set_xlim((-.25,1.45))\n\n #endregion\n #region: plotting\n for s, sim in enumerate(['HIGH', 'LOW']):\n if empty: continue\n nlat_ = nlat_at(sim, latS)\n run, rcp = ['ctrl','lpd'][s], ['rcp', 'lr1'][s]\n\n #region: d/dt [axb]\n if quant=='SALT': fac = 1\n elif quant=='FW': fac = -1e-6/S0\n ddtS_ctl, ddtS_rcp = get_ddt_SALT(sim=sim, latS=latS, latN=latN)\n label = [r'$\\Delta \\bar{W}$', r'$\\Delta \\bar{S}$'][q]\n axt.arrow(x=.8+s/10, dy=ddtS_rcp*fac, **vap, color=plt.cm.tab20(18+s))\n #endregion\n\n #region: meridional flux (convergences) means/trends, incl. 
BS/Med [axl, axr, axb]\n inflow, inflow_trend = get_BS_Med(sim)\n if latS==60: # BS in Arctic\n fluxes, conv = get_fluxes(sim=sim, quant=quant, lat=latS, latS=None, latN=None)\n # conv['ov'], conv['az'], conv['ed'] = 0, 0, 0\n # conv['to'] = fluxes['to'] + inflow[f'{Q}_BS'].values/[1e6,1][q] # [m^3/s] -> [Sv]\n conv['tov'], conv['taz'], conv['ted'], conv['tto'] = 0, 0, 0, fluxes['tto']\n \n axr.set_xlim(hor_lim)\n axr.arrow(y=.425+s/10, dx=-inflow_trend[f'{Q}_BS'].values/[1e6,1][q], color=plt.cm.tab20(16+s), **hap)\n\n else:\n fluxes, conv = get_fluxes(sim=sim, quant=quant, lat=latS, latS=latS, latN=latN)\n if latS<35 and latN>35: # Med\n axb.arrow(x=1.05+s/10, dy=-inflow_trend[f'{Q}_Med'].values/[1e6,1][q], color=plt.cm.tab20(16+s), **vap)\n\n axl.arrow(y=.55+s/10, dx=fluxes['tov'], color=plt.cm.tab20(0+s), **hap)\n axl.arrow(y=.3 +s/10, dx=fluxes['taz'], color=plt.cm.tab20(2+s), **hap)\n axl.arrow(y=.05+s/10, dx=fluxes['ted'], color=plt.cm.tab20(4+s), **hap)\n axl.arrow(y=.8 +s/10, dx=fluxes['tto'], color=plt.cm.tab20(6+s), **hap)\n \n if i==0: # draw 60 North values in Atlantic box\n fluxes_, conv_ = get_fluxes(sim=sim, quant=quant, lat=latN, latS=latS, latN=latN)\n axr.arrow(y=.55+s/10, dx=fluxes_['tov'], color=plt.cm.tab20(0+s), **hap)\n axr.arrow(y=.3 +s/10, dx=fluxes_['taz'], color=plt.cm.tab20(2+s), **hap)\n axr.arrow(y=.05+s/10, dx=fluxes_['ted'], color=plt.cm.tab20(4+s), **hap)\n axr.arrow(y=.8 +s/10, dx=fluxes_['tto'], color=plt.cm.tab20(6+s), **hap)\n \n axb.arrow(x=.3 +s/10, dy=conv['tov'], color=plt.cm.tab20(0+s), **vap)\n axb.arrow(x=.55+s/10, dy=conv['taz'], color=plt.cm.tab20(2+s), **vap)\n axb.arrow(x=.8 +s/10, dy=conv['ted'], color=plt.cm.tab20(4+s), **vap)\n axb.arrow(x=.05+s/10, dy=conv['tto'], color=plt.cm.tab20(6+s), **vap)\n #endregion\n\n #region: surface fluxes [axt]\n sfwf_mean, sfwf_trend = get_SFWF(sim=sim, quant=quant, latS=latS, latN=latN)\n axt.arrow(x=.05+s/10, dy=-sfwf_trend['SFWF'], color=plt.cm.tab20(8+s) , **vap)\n axt.arrow(x=.3 +s/10, dy=-sfwf_trend['R'] , color=plt.cm.tab20b(14+s), **vap)\n axt.arrow(x=.55+s/10, dy=-sfwf_trend['PE'] , color=plt.cm.tab20(12+s) , **vap)\n #endregion\n\n #endregion\n\n #region: legend, numbering, scales\n f.text(.01,.93,'(c)')\n #endregion\n if q==0 and empty==False:\n plt.savefig(f'{path_results}/FW-paper/Fig9c.eps')\n return", "title": "" }, { "docid": "2dd552eb62f0a7b99152133339e48310", "score": "0.53593385", "text": "def plotTauNoise():\n fig = plt.figure(0,figsize=(5, 3.6))\n plt.rc('text', usetex=True)\n font = {'family' : 'sans-serif',\n 'serif' : 'Helvetica',\n 'weight' : 'normal',\n 'size' : 14 }\n plt.rc('lines', lw=2)\n plt.rc('font', **font)\n ax = fig.add_subplot(111)\n fig.subplots_adjust(left=0.15, right =0.9,\\\n bottom=0.15, top =0.9, wspace=0.25)\n\n # tau0 = (L/np.pi)**2/3\n T = np.linspace(1, 150, 100)\n ax.plot(T, tauTheory(1, T, 100), 'b-')\n ax.plot(T, tauTheory(0.1, T, 100), 'r-')\n # ax.plot(T, tauTheory(0.1, T, 100), '-')\n # ax.plot(T, tauTheory(0.01, T, 100), '-')\n\n # load the simulation data and plot as dots\n tauData = np.loadtxt('tauNoise_N100_F1.dat')\n ax.plot(tauData[:,0], tauData[:,1], 'bo')\n tauData = np.loadtxt('tauNoise_N100_F0.1.dat')\n ax.plot(tauData[:,0], tauData[:,1], 'ro')\n\n\n # set format of the figure\n ax.set_yscale('log')\n # ax.set_xlabel(r'$k_B T/\\Delta E$')\n # ax.set_ylabel(r'$\\tau_{\\parallel}/\\tau_{Rouse}$')\n ax.set_xlabel(r'$T$')\n ax.set_ylabel(r'$\\tau_{\\parallel}$')\n ax.legend([r'$F=1$', r'$F=0.1$'], loc='upper right', fontsize=15)\n 
ax.set_xlim([0,150])\n # ax.set_ylim([0,1.2])\n plt.show()", "title": "" }, { "docid": "e139afc352661e5e4581608f7060213c", "score": "0.53593016", "text": "def plot(self, width=5.51, height=4, n_xticks=6, fname=None):\n plt.figure(dpi=300, figsize=(width, height))\n alpha = min(1, max(0.01, 50/self.n_simulations_))\n for i in range(self.n_simulations_):\n obs = self.data_[i]\n last_state = obs.iloc[-1]\n last_state.t = self.t_max_\n obs = obs.append([last_state], ignore_index=True)\n plt.plot(obs[[\"t\"]], obs[[\"s\"]], c=\"C0\", alpha=alpha) # S\n plt.plot(obs[[\"t\"]], obs[[\"i\"]], c=\"C1\", alpha=alpha) # I\n plt.plot(obs[[\"t\"]], obs[[\"r\"]], c=\"C2\", alpha=alpha) # R\n title = \"Stochastic SIR model \"\n if self.n_simulations_ > 1:\n title += \"(\" + str(self.n_simulations_) + \" samples) \"\n title += \"with \" \\\n \"$\\\\beta=\" + str(self.beta) + \"$ and \" \\\n \"$\\\\gamma=\" + str(self.gamma) + \"$\"\n plt.title(title)\n plt.xlim([0, self.t_max_])\n plt.ylim([0, self.n])\n plt.xticks(np.linspace(0, self.t_max_, n_xticks))\n plt.xlabel(\"$t$\")\n plt.ylabel(\"$S_t,\\ I_t,\\ R_t$\")\n plt.grid()\n legend = plt.legend([\"$S_t$\", \"$I_t$\", \"$R_t$\"])\n for l in legend.get_lines():\n l.set_alpha(1)\n if fname is not None:\n plt.savefig(fname + \".pdf\")", "title": "" }, { "docid": "44f7cba35773a96c2187a59423d24515", "score": "0.53471714", "text": "def plot_sed(w_obs, f_obs, ferr_obs, # observed fluxes\n w_mod, f_mod, # flux measured for best fitting model\n wave_conv_fil, flux_conv_fil, # filter transmission curves\n labs, sedfilename='best_fit_sed.pdf'): # filter and plotfile name\n fig1, ax1 = plt.subplots(figsize=(6, 5))\n\n for f in range(len(w_mod)):\n ax1.scatter(w_mod[f], f_mod[f], alpha=0.7)\n ax1.plot(wave_conv_fil[f], flux_conv_fil[f], alpha=0.7, label=labs[f])\n\n ax1.errorbar(w_obs, f_obs, yerr=ferr_obs,\n ls='None', color='k', label='observations', marker='+')\n\n ax1.set_yscale('log')\n ax1.set_xlabel(r'Wavelength [$\\AA$]')\n ax1.set_ylabel(r'Flux [erg/cm$^2$/s/$\\AA$]')\n ax1.legend(loc='lower right')\n\n fig1.savefig(sedfilename, bbox_inches='tight')\n plt.show()", "title": "" }, { "docid": "86950e208e87ef1d9695c1274e2bb17a", "score": "0.5346543", "text": "def water_freq(df):\n \n assert isinstance(df, xr.Dataset),\"Input has to be a xarray.Dataset.\"\n \n try:\n df.water\n except Exception:\n print(\"'water' band cannot be found. Please use pred_index() to acquire the required band.\")\n try:\n df.time\n except Exception:\n print(\"'time' cannot be found. Please check the time dimension of the dataset.\")\n try:\n df.longitude\n df.latitude\n except Exception:\n print(\"'longitude' or/and 'latitude' cannot be found. 
Please check the dimension of the dataset.\")\n \n frequency = df.water.sum(dim='time',skipna=True)/len(df.time)\n show(hv.render(frequency.hvplot.image(x=\"longitude\",y=\"latitude\",aspect=1,cmap='bmy_r')))", "title": "" }, { "docid": "9ca67b04880c97c72c135c6551227754", "score": "0.53425175", "text": "def Task67():\r\n print(\"Lab 4 Tasks 6 and 7:\") \r\n\r\n\r\n # Low-pass filter design parameters\r\n Ntaps=16\r\n Fbands=[0, 0.15, 0.2, 0.25, 0.3, 0.5]\r\n Abands=[0, 1, 0]\r\n Weights= [100, 1, 100]\r\n #\r\n BPtaps = signal.remez(Ntaps, Fbands, Abands, Weights)\r\n #\r\n fig = plt.figure()\r\n plt.plot(BPtaps)\r\n plt.xlabel('Sample')\r\n plt.ylabel('Value')\r\n plt.title('Impulse Response of the FIR Filter')\r\n # Frequency response\r\n fig, pltArr = plt.subplots(2, sharex=True) \r\n fig.suptitle(\"Frequency Response\")\r\n [freq, response] = signal.freqz(BPtaps, [1], worN=2000)\r\n pltArr[0].plot(freq, np.log10(np.abs(response)))\r\n pltArr[0].set_title(\"Magnitude of Frequency Reponse\")\r\n angles = np.unwrap(np.angle(response))\r\n pltArr[1].plot(freq, angles)\r\n pltArr[1].set_title(\"Angle of Frequency Reponse\")\r\n # plt.xlabel('Normalized Frequency (pi is Nyquist Frequency)')\r\n # plt.ylabel(\"Magnitude of Frequency Response\")\r\n # plt.title(\"Magnitude of Frequency Response\")\r\n\r\n\r\n # Minimum phase version\r\n rt = np.roots(BPtaps)\r\n [b, r] = signal.deconvolve(BPtaps, [1,-rt[1]])\r\n MinPhTaps = signal.convolve(b,[1,-1/rt[1].conjugate()])\r\n\r\n #impulse response\r\n fig = plt.figure()\r\n plt.plot(MinPhTaps)\r\n plt.xlabel('Sample')\r\n plt.ylabel('Value')\r\n plt.title('Impulse Response of the Minimum Phase Filter')\r\n # Frequency response\r\n fig, pltArr = plt.subplots(2, sharex=True) \r\n fig.suptitle(\"Frequency Response\")\r\n [freq, response] = signal.freqz(MinPhTaps, [1], worN=2000)\r\n pltArr[0].plot(freq, np.log10(np.abs(response)))\r\n pltArr[0].set_title(\"Magnitude of Frequency Reponse\")\r\n angles = np.unwrap(np.angle(response))\r\n pltArr[1].plot(freq, angles)\r\n pltArr[1].set_title(\"Angle of Frequency Reponse\")\r\n # plt.xlabel('Normalized Frequency (pi is Nyquist Frequency)')\r\n # plt.ylabel(\"Magnitude of Frequency Response\")\r\n # plt.title(\"Magnitude of Frequency Response\")\r\n #\r\n plt.show()", "title": "" }, { "docid": "1cbb3d7d23c1d95b8bf18627104c12b5", "score": "0.53419256", "text": "def wave_plot():\n PLOT_RANGE = [1, 2, 3, 4, 5] # Choose n's to plot corresponding psi_n's\n\n N = 100 # Number of points for discretization\n NUM_EIGVALS = max(PLOT_RANGE) # Number of eigenvalues to get\n\n pb = ParticleBox(N, NUM_EIGVALS) \n\n apsi = np.empty((NUM_EIGVALS, pb.N), dtype=float) \n for n in range(1, NUM_EIGVALS+1):\n apsi[n-1] = np.sqrt(2) * np.sin(n*np.pi*pb.x) * np.sign(pb.psi[1, n-1])\n \n plt.figure()\n plt.xlabel(\"x'\")\n plt.ylabel(r\"$\\psi_n(x)$\")\n for n in PLOT_RANGE:\n i = n - 1\n color = cmap(float(i)/NUM_EIGVALS)\n plt.plot(pb.x, pb.psi[:,i], marker=\".\", c=color)\n plt.plot(pb.x, apsi[i], label=r\"$E_%s$\" % str(n), c=color)\n plt.legend(title=r\"$E_n$\")\n plt.savefig(\"wavefuncs.pdf\")", "title": "" }, { "docid": "b940c2c8fd8774760229735f7b3cf092", "score": "0.5341725", "text": "def plot_data(phi, data, mytitle, fnm):\n x = np.linspace(-4,10,100) # x values for plotting\n sample_sizes = [100, 1000, 10000]\n for ss in sample_sizes:\n sample = data[0:ss]\n p = pw.density_function(phi, sample)\n # Evaluate the density funtion for values of x,\n # using the zero-one window function.\n print(\"Plotting density for a 
sample size of\", ss)\n y = np.array([p(xi) for xi in x])\n plt.plot(x,y, label=str(ss))\n plt.legend(fancybox=True, title=\"Sample Size\", shadow=True)\n plt.title(mytitle, fontsize=18)\n plt.xlabel(\"x\", fontsize=16)\n plt.ylabel(\"p(x)\", fontsize=16)\n plt.show()", "title": "" }, { "docid": "7eb54f008c02fdb89bb8d693e1cbef14", "score": "0.53329366", "text": "def plot(self, writeFiles = False, spec = 'sourceSpec'):\n# \t\tioff()\n# \t\tdata = readOObin(file)\n fig = figure(17, **ssxdef.specsize)\n fig.clear()\n a = axes()\n a.plot(self.wavelengths, self.data[spec], lw='.5')\n a.text(0.93, 0.92, self.shotname, horizontalalignment='right',\n verticalalignment='top', transform = a.transAxes)\n ylabel(self.ylabel)\n xlabel(self.xlabel)\n xlim(200, 1100)\n if writeFiles:\n ssxutil\n fName = ssxutil.ssxPath(self.shotname+'-oo.pdf', 'output',\n self.runYear + '/' + self.runDate + '/' + self.shotname, mkdir\n = True)\n fig.savefig(fName)\n else:\n fig.show()\n # ion()", "title": "" }, { "docid": "eda9a426bc6353fe2a4d7471d12df01a", "score": "0.5332477", "text": "def exercise11():\n x = numpy.arange(11, step=0.01)\n sinx = numpy.sin(x)\n\n plt.figure()\n plt.plot(sinx)\n plt.xlabel('x value')\n plt.ylabel('sin(x)')\n plt.title('Sine Function for x from 0.0 to 10.0')\n plt.savefig('sin_plot.pdf', format='pdf')", "title": "" }, { "docid": "232630898f055a6a0325cf97e338b403", "score": "0.53291404", "text": "def plot_sine_evaluation(real_samples, fake_samples, idx, identifier):\n ### frequency\n seq_length = len(real_samples[0]) # assumes samples are all the same length\n frate = seq_length\n freqs_hz = np.fft.rfftfreq(seq_length)*frate # this is for labelling the plot\n # TODO, just taking axis 0 for now...\n w_real = np.mean(np.abs(np.fft.rfft(real_samples[:, :, 0])), axis=0)\n w_fake = np.mean(np.abs(np.fft.rfft(fake_samples[:, :, 0])), axis=0)\n ### amplitude\n A_real = np.max(np.abs(real_samples[:, :, 0]), axis=1)\n A_fake = np.max(np.abs(fake_samples[:, :, 0]), axis=1)\n ### now plot\n nrow = 2\n ncol = 2\n fig, axarr = plt.subplots(nrow, ncol, sharex='col', figsize=(6, 6))\n # freq\n axarr[0, 0].vlines(freqs_hz, ymin=np.minimum(np.zeros_like(w_real), w_real), ymax=np.maximum(np.zeros_like(w_real), w_real), color='#30ba50')\n axarr[0, 0].set_title(\"frequency\", fontsize=16)\n axarr[0, 0].set_ylabel(\"real\", fontsize=16)\n axarr[1, 0].vlines(freqs_hz, ymin=np.minimum(np.zeros_like(w_fake), w_fake), ymax=np.maximum(np.zeros_like(w_fake), w_fake), color='#ba4730')\n axarr[1, 0].set_ylabel(\"generated\", fontsize=16)\n # amplitude\n axarr[0, 1].hist(A_real, normed=True, color='#30ba50', bins=30)\n axarr[0, 1].set_title(\"amplitude\", fontsize=16)\n axarr[1, 1].hist(A_fake, normed=True, color='#ba4730', bins=30)\n\n fig.savefig('./experiments/plots/' + identifier + '_eval' + str(idx).zfill(4) +'.png')\n plt.clf()\n plt.close()\n return True", "title": "" }, { "docid": "ef580d4a3e7b53d3dde91cfce6f95fcf", "score": "0.5324583", "text": "def PlotData( self ):\n plt.title( 'q = ' + str( self.col ) )\n plt.plot( self.data[ : , 0 ] , self.data[ : , self.col ] , label = 'orig' )\n func = savgol_filter( self.data[ : , self.col ] , 51 , 3 )\n plt.plot( self.data[ : , 0 ] , func , label = 'smooth' )\n for i in range( self.Fits.shape[1] ):\n plt.plot( self.data[ : , 0 ] , self.Fits[ : , i ] , label = self.labels[ i ] )\n\n\n plt.scatter( self.FitParams[ self.indx , 0 ] , np.max( self.Fits ) )\n\n \n plt.legend()\n plt.show()", "title": "" }, { "docid": "46990c8e8cc2aa32f9e48b07db569a20", "score": "0.53186226", 
"text": "def _time_domain_plot(self, *args, **kwargs):\n from pesummary.gw.plots.detchar import time_domain_strain_data\n\n return time_domain_strain_data(*args, **kwargs)[self.IFO]", "title": "" }, { "docid": "2c0064397489775f8d93e5fe269e3106", "score": "0.5317835", "text": "def plot_n(self, **kwargs):\n if self.verbose > 1:\n print(\"RefractiveIndex.plot_n()\") \n\n self.plot_spectrum(y_unit = \"Refractive index n\", **kwargs)", "title": "" }, { "docid": "6748e27ec5d8284573ee71832baaad52", "score": "0.5315133", "text": "def plot_spectrum(self):\n assert self.traced\n self.log.debug(npArrayInfo(self.twl*1e6,\"Wavlength\"))\n self.log.debug(npArrayInfo(self.tfl,\"Flux\"))\n plt.clf()\n plt.semilogy(self.twl*1e6,self.tfl,\"b.\")\n xmin,xmax,ymin,ymax = plt.axis()\n if ymin < 1e-4:\n ymin = 1e-4\n plt.axis((xmin,xmax,ymin,ymax))\n plt.title(\"Generated, Fluxed Spectra (%d)\" % self.num)\n plt.xlabel(\"Wavelength ($\\mu m$)\")\n plt.ylabel(\"Flux (Electrons)\")\n plt.savefig(\"%(Partials)s/Instrument-%(num)04d-Flux%(ext)s\" % dict(num=self.num, ext=self.config[\"Plots\"][\"format\"],**self.config[\"Dirs\"]))\n plt.clf()", "title": "" }, { "docid": "5ba3e04bd866b846af1d337af2f9e534", "score": "0.53136957", "text": "def wavelength_plot(waves_list, waves,number_of_detectors):\n \n for k in range(0, number_of_detectors):\n newlist = []\n for i in range(k, len(waves_list), 8):\n newlist.append(waves_list[i])\n \n print(newlist) \n print(len(newlist))\n plt.title(\"Efficiency vs wavelength\") \n plt.plot(waves, newlist,label = \"Detector \"+str(k+1)) \n plt.legend()\n \n plt.show()\n plt.savefig(\"wavelength_eff.png\")\n plt.close()", "title": "" }, { "docid": "8f68020d8832f3c46b31b1d1ea865aca", "score": "0.5297882", "text": "def plot_phonon_dispersion_bands(self):\n import matplotlib.pyplot as plt\n\n def replace_list(text_string):\n substitutions = {'GAMMA': u'$\\Gamma$',\n }\n\n for item in substitutions.items():\n text_string = text_string.replace(item[0], item[1])\n return text_string\n\n force_constants = self.get_force_constants()\n bands_and_labels = self.get_path_using_seek_path()\n\n _bands = obtain_phonon_dispersion_bands(self._structure,\n bands_and_labels['ranges'],\n force_constants,\n self._supercell_matrix,\n primitive_matrix=self._primitive_matrix,\n band_resolution=30)\n\n for i, freq in enumerate(_bands[1]):\n plt.plot(_bands[1][i], _bands[2][i], color='r')\n\n # plt.axes().get_xaxis().set_visible(False)\n plt.axes().get_xaxis().set_ticks([])\n\n plt.ylabel('Frequency [THz]')\n plt.xlabel('Wave vector')\n plt.xlim([0, _bands[1][-1][-1]])\n plt.axhline(y=0, color='k', ls='dashed')\n plt.suptitle('Phonon dispersion')\n\n if 'labels' in bands_and_labels:\n plt.rcParams.update({'mathtext.default': 'regular'})\n\n labels = bands_and_labels['labels']\n\n labels_e = []\n x_labels = []\n for i, freq in enumerate(_bands[1]):\n if labels[i][0] == labels[i - 1][1]:\n labels_e.append(replace_list(labels[i][0]))\n else:\n labels_e.append(\n replace_list(labels[i - 1][1]) + '/' + replace_list(labels[i][0]))\n x_labels.append(_bands[1][i][0])\n x_labels.append(_bands[1][-1][-1])\n labels_e.append(replace_list(labels[-1][1]))\n labels_e[0] = replace_list(labels[0][0])\n\n plt.xticks(x_labels, labels_e, rotation='horizontal')\n\n plt.show()", "title": "" }, { "docid": "54b9b221d5f3188b39b57ce49859e4de", "score": "0.5297838", "text": "def BeamElementShearDiagram(f, L):\n \n x = [0, L]\n z = [f[0,0] , -f[2,0]]\n plt.plot(x,z)\n plt.ylabel('Shear Force (kN)')\n plt.show()", "title": "" }, { "docid": 
"24bd2370716487e55b3f891cb0493efa", "score": "0.5296585", "text": "def plot_data(self, npz_filename):\r\n\t\t# #taken directly from ibmq quantum experience online\r\n\t\t# fig = plt.figure(figsize=(16,10))\r\n\t\t# # counts = {'0000': 60, '0001': 64.5, '0010': 61.5, '0100': 63.5, '0101': 62.5, '0110': 62.5, '0111': 59, '1000': 66, '1001': 57, '1010': 62.5, '1011': 68, '1100': 55, '1110': 62.5, \"1111\":70}\r\n\t\t# # frequencies = [60,64.5,61.5,63.5,62.5,62.5,59,66,57,62.5,68,55,62.5,70]\r\n\t\t# answers = [key for key in counts.keys()]\r\n\t\t# answers.sort()\r\n\t\t# plt.bar(range(len(counts)), frequencies, align='center', tick_label=answers)\r\n\t\t# plt.title('Results for n = 4 (grovers algorithm)', fontsize=25)\r\n\t\t# plt.xlabel('Measurement',fontsize=20)\r\n\t\t# plt.ylabel('Frequency of occurence',fontsize=20)\r\n\t\t# plt.xticks(fontsize=15)\r\n\t\t# plt.yticks(fontsize=15)\r\n\t\t\r\n\t\t\r\n\t\t# fig.savefig('figures/g_counts_histogram_4.png', bbox_inches='tight')\r\n\t\t\r\n\t\t# data = np.load(npz_filename)\r\n\r\n\t\t# run_time_arr = data['run_time_arr']\r\n\t\t# n_list = data['n_list']\r\n\t\t# n_list = [1,2,3,4]\r\n\r\n\t\t# avg_time = np.sum(run_time_arr,1)/np.shape(run_time_arr)[1]\r\n\t\t# avg_time = [10.8,11.5,14.5,31.4]\r\n\r\n\t\t# plt.rcParams[\"font.family\"] = \"serif\"\r\n\t\t# fig = plt.figure(figsize=(16,10))\r\n\r\n\t\t# z = np.polyfit(n_list, avg_time, 10)\r\n\t\t# p = np.poly1d(z)\r\n\r\n\t\t# plt.plot(np.linspace(n_list[0], n_list[-1] + 0.1, 100), p(np.linspace(n_list[0], n_list[-1] + 0.1, 100)), ls = '-', color = 'r')\r\n\t\t# plt.plot(n_list, avg_time, ls = '', markersize = 15, marker = '.',label = 'M')\r\n\t\t# plt.title('Execution time scaling for Grovers algorithm', fontsize=25)\r\n\t\t# plt.xlabel('n (bit string length)',fontsize=20)\r\n\t\t# plt.ylabel('Average time of execution (s)',fontsize=20)\r\n\t\t# plt.xticks(fontsize=15)\r\n\t\t# plt.yticks(fontsize=15)\r\n\r\n\t\t# fig.savefig('figures/g_run_time.png', bbox_inches='tight')\r\n\r\n\t\t# num_times = data['num_times']\r\n\t\t# variance_arr = data['variance_arr']\r\n\t\t# n = data['n']\r\n\r\n\t\t# plt.rcParams[\"font.family\"] = \"serif\"\r\n\t\t# fig = plt.figure(figsize=(16,10))\r\n\r\n\t\t# assemble_time_arr = data['assemble_time_arr']\r\n\t\t# n_list = data['n_list']\r\n\t\t# # n_list = [1,2,3,4]\r\n\r\n\t\t# avg_time = np.sum(assemble_time_arr,1)/np.shape(assemble_time_arr)[1]\r\n\t\t# # avg_time = [4.2,4.5,7.2,14.9] #taken directly from imbq experience\r\n\r\n\t\t# plt.rcParams[\"font.family\"] = \"serif\"\r\n\t\t# fig = plt.figure(figsize=(16,10))\r\n\r\n\t\t# z = np.polyfit(n_list, avg_time, 10)\r\n\t\t# p = np.poly1d(z)\r\n\r\n\t\t# plt.plot(np.linspace(n_list[0], n_list[-1] + 0.1, 100), p(np.linspace(n_list[0], n_list[-1] + 0.1, 100)), ls = '-', color = 'r')\r\n\t\t# plt.plot(n_list, avg_time, ls = '', markersize = 15, marker = '.',label = 'M')\r\n\t\t# plt.title('Compilation time scaling for Grovers algorithm', fontsize=25)\r\n\t\t# plt.xlabel('n (bit string length)',fontsize=20)\r\n\t\t# plt.ylabel('Average time of execution (s)',fontsize=20)\r\n\t\t# plt.xticks(fontsize=15)\r\n\t\t# plt.yticks(fontsize=15)\r\n\r\n\t\t# fig.savefig('figures/g_assemble_time.png', bbox_inches='tight')\r\n\r\n\t\t# correct_arr = data['correct_arr']\r\n\t\t# n_list = data['n_list']\r\n\t\t# # n_list = [1,2,3,4]\r\n\r\n\t\t# avg_correct = np.sum(correct_arr,1)/np.shape(correct_arr)[1]\r\n\t\t# # avg_correct = [55,96,20,1.5]\r\n\r\n\t\t# plt.rcParams[\"font.family\"] = \"serif\"\r\n\t\t# fig = 
plt.figure(figsize=(16,10))\r\n\r\n\t\t# z = np.polyfit(n_list, avg_correct, 10)\r\n\t\t# p = np.poly1d(z)\r\n\r\n\t\t# plt.plot(np.linspace(n_list[0], n_list[-1] + 0.1, 100), p(np.linspace(n_list[0], n_list[-1] + 0.1, 100)), ls = '-', color = 'r')\r\n\t\t# plt.plot(n_list, avg_correct, ls = '', markersize = 15, marker = '.',label = 'M')\r\n\t\t# plt.title('Percentage correct for grovers algorithm', fontsize=25)\r\n\t\t# plt.xlabel('n (bit string length)',fontsize=20)\r\n\t\t# plt.ylabel('Percentage correct',fontsize=20)\r\n\t\t# plt.xticks(fontsize=15)\r\n\t\t# plt.yticks(fontsize=15)\r\n\r\n\t\t# fig.savefig('figures/g_correct.png', bbox_inches='tight')\r\n\r\n\t\t\r\n\t\t# num_times = data['num_times']\r\n\t\t# variance_arr = data['variance_arr']\r\n\t\t# n = data['n']\r\n\r\n\t\t# plt.rcParams[\"font.family\"] = \"serif\"\r\n\t\t# fig = plt.figure(figsize=(16,10))\r\n\r\n\t\t\r\n\r\n\t\t# plt.hist(variance_arr)\r\n\t\t# plt.title('Dependence of execution time on $U_f$ (grovers algorithm)', fontsize=25)\r\n\t\t# plt.xlabel('Execution time (s)',fontsize=20)\r\n\t\t# plt.ylabel('Frequency of occurence',fontsize=20)\r\n\t\t# plt.xticks(fontsize=15)\r\n\t\t# plt.yticks(fontsize=15)\r\n\r\n\r\n\t\t# fig.savefig('figures/g_hist.png', bbox_inches='tight')\r\n\t\t\r\n\t\t\r\n\t\t#Obtain transpiled depths of circuits\r\n\t\t# list_of_depths = [2,5,309,2984]\r\n\t\t# n_list = [1,2,3,4]\r\n\t\t# plt.rcParams[\"font.family\"] = \"serif\"\r\n\t\t# fig = plt.figure(figsize=(16,10))\r\n\r\n\t\t# z = np.polyfit(n_list, list_of_depths, 10)\r\n\t\t# p = np.poly1d(z)\r\n\r\n\t\t# plt.plot(np.linspace(n_list[0], n_list[-1] + 0.1, 100), p(np.linspace(n_list[0], n_list[-1] + 0.1, 100)), ls = '-', color = 'r')\r\n\t\t# plt.plot(n_list, list_of_depths, ls = '', markersize = 15, marker = '.',label = 'M')\r\n\t\t# plt.title('depth of circuit for grovers algorithm', fontsize=25)\r\n\t\t# plt.xlabel('n (bit string length)',fontsize=20)\r\n\t\t# plt.ylabel('Depth',fontsize=20)\r\n\t\t# plt.xticks(fontsize=15)\r\n\t\t# plt.yticks(fontsize=15)\r\n\r\n\t\t# fig.savefig('figures/g_depth.png', bbox_inches='tight')\r", "title": "" }, { "docid": "73911a61b4c40c2cc94b768075118074", "score": "0.5291009", "text": "def plotting():\n pass", "title": "" }, { "docid": "cfde243e9b097e1d2580c2fd8b464e9c", "score": "0.5280765", "text": "def plot_ongoing_FFT_distribution(ax, fontproperties,\r\n stim_types = ['extended_stim', 'flash'],\r\n windows = [9000.0, 2000.0, 1000.0], \r\n gaps = [0.0, 200.0, 200.0],\r\n padding = 0.0,\r\n freq_range_for_FFT = (1.0, 5.0),\r\n replace_spikes = True, remove_sine_waves = True,\r\n get_new_FFT_results_all_cells = False,\r\n get_new_processed_traces = False,\r\n master_folder_path = 'E:\\\\correlated_variability'):\r\n\r\n fonts = fontproperties\r\n size = fonts.get_size()\r\n mpl.rcParams['mathtext.default'] = 'regular'\r\n \r\n FFT_dict_all_cells = get_FFT(stim_types, windows, gaps, padding,\r\n freq_range_for_FFT, replace_spikes, remove_sine_waves,\r\n get_new_FFT_results_all_cells, get_new_processed_traces,\r\n master_folder_path)\r\n \r\n FFTs_all_cells = []\r\n for cell in FFT_dict_all_cells:\r\n FFTs_all_cells.append(numpy.mean(FFT_dict_all_cells[cell]['ongoing']))\r\n\r\n ### plot the histogram ###\r\n\r\n hist1 = ax.hist(FFTs_all_cells, bins = 15, color = 'k', alpha = 0.4)\r\n\r\n ### format the histogram ### \r\n ax.spines['top'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['left'].set_visible(False)\r\n ax.yaxis.set_visible(False)\r\n 
ax.get_xaxis().tick_bottom()\r\n\r\n ax.tick_params(axis = 'x', length = 3.0, width = 1.0, direction = 'outward')\r\n loc = plticker.MultipleLocator(base = 200.0)\r\n ax.xaxis.set_major_locator(loc)\r\n x_labels = ax.get_xticks()\r\n ax.set_xticklabels(x_labels, fontsize = size)\r\n ax.set_xlabel(r'$\\langle FFT_\\delta\\rangle$ (mV)', fontsize = size) \r\n\r\n ### report number of cells and preparations ###\r\n experiments = []\r\n for cell_name in FFT_dict_all_cells:\r\n expt_date = cell_name.split('_')[0]\r\n if expt_date not in experiments:\r\n experiments.append(expt_date)\r\n print ' %s cells;'%str(len(FFT_dict_all_cells.keys())), \\\r\n '%s turtles'%str(len(experiments))", "title": "" }, { "docid": "d146a9eff50a412c9d6eb3995bade567", "score": "0.5272843", "text": "def plot_rx_t(data_z):\n time = np.arange(len(data_z)) * (1/SAMPLE_RATE)\n plt.plot(time, data_z.real)\n plt.show()", "title": "" }, { "docid": "1aeb1a8facff6190d400b6f7bf508226", "score": "0.52714753", "text": "def plot_encoded(t, u, s, fig_title='', file_name=''):\n\n dt = t[1]-t[0]\n cs = np.cumsum(s)\n if cs[-1] >= max(t)-min(t):\n raise ValueError('some spike times occur outside of signal''s support')\n\n p.clf()\n p.gcf().canvas.set_window_title(fig_title)\n p.axes([0.125, 0.3, 0.775, 0.6])\n p.vlines(cs+min(t), np.zeros(len(cs)), u[np.asarray(cs/dt, int)], 'b')\n p.hlines(0, 0, max(t), 'r')\n p.plot(t, u, hold=True)\n p.xlabel('t (s)')\n p.ylabel('u(t)')\n p.title(fig_title)\n p.gca().set_xlim(min(t), max(t))\n a = p.axes([0.125, 0.1, 0.775, 0.1])\n p.plot(cs+min(t), np.zeros(len(s)), 'ro')\n a.set_yticklabels([])\n p.xlabel('%d spikes' % len(s))\n p.gca().set_xlim(min(t), max(t))\n p.draw_if_interactive()\n if file_name:\n p.savefig(file_name)", "title": "" }, { "docid": "a29923cc7dc87151a0c34aa43610a6d8", "score": "0.52597255", "text": "def f_sensitivity_per_band():\n\n fig = plt.figure()\n\n j_minimum = minimum.where((minimum.N_j > 50) & (minimum.Stetson < 0.5))\n h_minimum = minimum.where((minimum.N_h > 80) & (minimum.Stetson < 0.5))\n k_minimum = minimum.where((minimum.N_k > 80) & (minimum.Stetson < 0.5))\n\n s1 = plt.subplot(3,1,1)\n s2 = plt.subplot(3,1,2, sharex=s1)\n s3 = plt.subplot(3,1,3, sharex=s1)\n \n s1.plot(j_minimum.j_meanr, j_minimum.j_rmsr, 'b,')\n s2.plot(h_minimum.h_meanr, h_minimum.h_rmsr, 'g,')\n s3.plot(k_minimum.k_meanr, k_minimum.k_rmsr, 'r,')\n\n plt.xlim(10.5, 17.5)\n\n s3.set_xlabel(\"Magnitude\")\n s3.set_ylabel(\"Observed rms\")\n for s in [s1,s2,s3]:\n s.set_ylim(0,0.1)\n s.set_yticks([0, 0.05, 0.1])\n\n s1.text(0.2, 0.6, \"$J$\", fontsize=24, color='b', transform=s1.transAxes)\n s2.text(0.2, 0.6, \"$H$\", fontsize=24, color='g', transform=s2.transAxes)\n s3.text(0.2, 0.6, \"$K$\", fontsize=24, color='r', transform=s3.transAxes) \n\n plt.show()\n\n return fig", "title": "" }, { "docid": "deec8fb4da515f311aa5f56e0bf7d914", "score": "0.52595687", "text": "def show_friction_line(self, sns_context=\"talk\"):\r\n self.mean_friction_frame()\r\n\r\n p_dat = self.mean_fric_frame[self.mean_fric_frame['direction'] == 0]\r\n n_dat = self.mean_fric_frame[self.mean_fric_frame['direction'] == 1]\r\n\r\n xbins = self.mean_fric_frame['load_index'].max() + 1\r\n\r\n sns.set_context(sns_context)\r\n\r\n f, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)\r\n\r\n sns.regplot(x='N', y='F', data=p_dat, x_bins=xbins, ax=ax1)\r\n sns.regplot(x='N', y='F', data=n_dat, x_bins=xbins, ax=ax2)\r\n\r\n ax1.set_title(\"(+)\")\r\n ax2.set_title(\"(-)\")\r\n\r\n plt.tight_layout()\r\n\r\n plt.show()", "title": "" }, { 
"docid": "7c4470402c93c029d205eb287970fec8", "score": "0.52591443", "text": "def space_plot(data, ax=None, **kwargs):\n x, y = space_from_dict(data)\n x_sm, y_sm = smooth(x, y, 200)\n ax.plot(x_sm, y_sm, **kwargs)\n ax.set(ylabel=f\"{settings.CLI} ($mM$)\", xlabel='Distance from soma ($\\mu m$)')", "title": "" }, { "docid": "e04c6d60e6a67240c77f6180fc064a24", "score": "0.5258989", "text": "def scatter_plot(self):\r\n assert self.channel_count == 2\r\n plt.figure('Stochastic Signals Scatter Plot')\r\n plt.scatter(self.channel[0], self.channel[1])\r\n plt.show()", "title": "" }, { "docid": "78cc970aa7034e2e743e039f26e6a096", "score": "0.5256539", "text": "def plot_fourier(u, fs, fmin=0.0, fmax=None, style='line'):\n\n if fmin < 0.0 or fmin >= fs/2:\n raise ValueError('invalid minimum frequency')\n\n if fmax is None:\n fmax = fs/2\n if fmax <= fmin or fmax > fs/2:\n raise ValueError('invalid maximum frequency')\n\n n = len(u)/2\n uf = fft(u)[0:n]\n f = (fs/2.0)*np.arange(0, n)/n\n\n a = int(2.0*n*fmin/fs)\n b = int(2.0*n*fmax/fs)\n\n p.clf()\n p.subplot(211)\n if style == 'stem':\n p.stem(f[a:b], np.real(uf)[a:b])\n p.ylabel('real')\n elif style == 'semilogy':\n p.semilogy(f[a:b], np.abs(np.real(uf)[a:b]))\n p.ylabel('|real|')\n else:\n p.plot(f[a:b], np.real(uf)[a:b])\n p.ylabel('real')\n p.xlim((f[a], f[b-1]))\n p.subplot(212)\n if style == 'stem':\n p.stem(f[a:b], np.imag(uf)[a:b])\n p.ylabel('imag')\n elif style == 'semilogy':\n p.semilogy(f[a:b], np.abs(np.imag(uf)[a:b]))\n p.ylabel('|imag|')\n else:\n p.plot(f[a:b], np.imag(uf)[a:b])\n p.ylabel('imag')\n p.xlim((f[a], f[b-1]))\n p.xlabel('f (Hz)')", "title": "" }, { "docid": "0fbfb6678402efcb2ddb69854ae2cb56", "score": "0.52535695", "text": "def DFT(signal):\n ts = np.linspace(0, DURATION, (500*(DURATION))+1)\n plt.plot(ts, signal)\n freqs = np.linspace(0, 4*np.pi, 1000)\n dft = []\n for i in range(len(freqs)):\n dft.append(Fourier_Coeff(signal, freqs[i]))\n print(min(dft))\n print(freqs[dft.index(min(dft))])\n plt.figure()\n\n plt.plot(freqs, dft)\n plt.show()", "title": "" }, { "docid": "6c85d6b274551db91812abf48d590831", "score": "0.5251782", "text": "def plot_sot_sig(filename, idxs):\n filename = \"/tmp/dg_\" + filename + \".dat\"\n data, name = read_tracer_file(filename)\n plot_select_traj(data, idxs, name)\n return", "title": "" }, { "docid": "c208be70c9bc718b05411194d1943c0c", "score": "0.524833", "text": "def plot_n_sst_timeseries(satellites): \n ocean_area = 361900000.0\n labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']\n satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']\n\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\n lab = []\n ncolors = len(satellites)\n ax1.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])\n for i in range(0,len(satellites)):\n filename = satellites[i] + '_summary.nc'\n ds = xarray.open_dataset(filename)\n dates = ds['time']\n idx = np.argsort(dates, axis=0) \n t = np.array(dates)[idx]\n days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')\n years = days/365.0\n times_duplicates = pd.Series(t)\n times = times_duplicates.drop_duplicates()\n Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)\n Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)\n n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area 
\n n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area \n df = DataFrame({'Q4' : n_sst_q4, 'Q5' : n_sst_q5}) \n df['Sum'] = df['Q4'] + df['Q5']\n# df['Sum'] = df['Q4'].fillna(0) + df['Q5'].fillna(0)\n# df['Sum_mean'] = df['Sum'].resample(\"1d\").sum().fillna(0).rolling(window=31, min_periods=1).median()\n# df['Sum_mean'].plot(ax=ax1)\n\n lab.append(labels[i])\n ax1.plot(times, df['Sum'], '.', markersize=0.2)\n ax1.set_ylim([0,18])\n print(labels[i] + \",\" + str(df['Sum'].mean()) + \",\" + str(df['Sum'].shape[0]))\n\n plt.tick_params(labelsize=12)\n title_str = 'QL=4 & 5'\n ax1.set_title(title_str, fontsize=10)\n\n lab = []\n ncolors = len(satellites)\n ax2.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])\n for i in range(0,len(satellites)):\n filename = satellites[i] + '_summary.nc'\n ds = xarray.open_dataset(filename)\n dates = ds['time']\n idx = np.argsort(dates, axis=0) \n t = np.array(dates)[idx]\n days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')\n years = days/365.0\n times_duplicates = pd.Series(t)\n times = times_duplicates.drop_duplicates()\n Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)\n n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area\n df = DataFrame({'Q3' : n_sst_q3})\n# df['Q3_mean'] = df['Q3'].resample(\"1d\").sum().rolling(window=31, min_periods=1).median()\n# df['Q3_mean'].plot(ax=ax2)\n\n lab.append(labels[i])\n ax2.plot(times, df['Q3'], '.', markersize=0.2)\n ax2.set_ylim([0,18])\n print(labels[i] + \",\" + str(df['Q3'].mean()) + \",\" + str(df['Q3'].shape[0]))\n\n plt.tick_params(labelsize=12)\n title_str = 'QL=3'\n ax2.set_title(title_str, fontsize=10)\n\n fig.legend(lab, fontsize=8, loc=7, markerscale=20, scatterpoints=5)\n fig.subplots_adjust(right=0.8) \n fig.text(0.01, 0.5, 'Observation density / $\\mathrm{km^{-2} \\ yr^{-1}}$', va='center', rotation='vertical')\n plt.savefig('n_sst_timeseries.pdf')\n# plt.savefig('n_sst_timeseries.png', dpi=600)\n# plt.savefig('n_sst_timeseries.eps', format='eps', rasterized=True, dpi=1200)\n plt.close('all')", "title": "" }, { "docid": "9875abe0b1b5428c0aead1ff3f2ec260", "score": "0.5241629", "text": "def plot_freqs(files=FILES):\n def calc_freqs(f):\n t = tokenize(f)\n dist = Counter(t)\n return sorted(dist.values(), reverse=True)\n freqs = [calc_freqs(f) for f in files]\n\n def plot_one(f, n):\n corpora = len(files)\n width = 2 # linlin and loglog\n plt.subplot(corpora, width, n*2+1)\n plt.plot(f)\n plt.subplot(corpora, width, n*2+2)\n plt.loglog(f)\n\n for i, val in enumerate(freqs):\n plot_one(val, i)\n plt.show()", "title": "" }, { "docid": "7493718cfec2906c4ff155a96006fce1", "score": "0.52378213", "text": "def plot_spectrum(gpi):\n # read ts, fill gaps\n ts = prepare_ts(gpi, start_date='2007-01-01', end_date='2011-12-31')\n \n # detrend\n ts_detrend = ts - ts.mean()\n \n # construct sample frequencies\n n = len(ts_detrend) # length of the signal\n \n # define cycles per unit: days = 1, week=7, month=31, years = 365.25\n days = 365.25\n time_step = 1.0 / days\n frq = np.fft.fftfreq(n, d = time_step)\n frq = frq[range(n/2)] # one side frequency range\n \n # fft computing and normalization\n TS = np.fft.fft(ts_detrend)/n \n TS = TS[range(n/2)]\n \n # plot \n f, ax = plt.subplots(3,figsize=(10,10))\n \n f.suptitle(\"Single Sided Amplitude Spectrum of ASCAT-Ts at Gpi: %d\" % gpi,\n fontsize=16)\n ax[0].plot(ts)\n ax[0].set_xlabel('Time')\n ax[0].set_ylabel('SM [%]')\n 
ax[0].set_yticks(np.arange(0,120,20))\n \n ax[1].plot(frq,abs(TS),'r') # plotting the spectrum\n ax[1].set_xlabel('Frequency (cycles per %d days)' % days)\n ax[1].set_ylabel('|TS(freq)|')\n ax[1].set_xlim(0,182.5)\n ax[1].set_ylim(0,6)\n ax[1].set_xticks(np.arange(0,200,25))\n ax[1].set_yticks(np.arange(0,8,1))\n \n ax[2].plot(frq,abs(TS),'r') # plotting the spectrum\n ax[2].set_xlabel('Frequency (cycles per %d days)' % days)\n ax[2].set_ylabel('|TS(freq)|')\n ax[2].set_xlim(0,13)\n ax[2].set_xticks(range(13))\n ax[2].set_yticks(np.arange(0,13,2))\n \n f.tight_layout()\n f.subplots_adjust(top=0.92)\n plt.show()", "title": "" }, { "docid": "aa49ffe2b571bf04f4d138eb3f1bb17a", "score": "0.5232876", "text": "def plot_ts(data, ax=None, fig=None, fig_kwargs=None):\n # Grab station information to use in plot title\n station_id = data['Station'][0]\n station_dict = _get_station_dict()\n station_name = station_dict[station_id]\n\n # Create figure and/or axis if no existing figure/axis objects are passed\n fig, ax = _fig_setup(ax, fig, fig_kwargs)\n \n # Plot on axis\n ax.plot(data.index, data['Discharge'])\n ax.set_ylim([0, 1.2*data['Discharge'].max()])\n ax.set_xlabel('Date')\n ax.set_ylabel('Discharge (cfs)')\n ax.set_title(station_name)\n return fig, ax", "title": "" }, { "docid": "17ada2679035692327d9814373245722", "score": "0.5231238", "text": "def plot_default(self):\n plt.figure(figsize=(12,8)) \n plt.imshow(self.specx[::2], extent=self.leftlim + self.rightlim, origin='lower', aspect='auto') \n plt.xlabel('Frequency Bins \\n'+'\\nFile:'+ self.filename + '\\nRuntime:' + str(self.t_fs))\n plt.ylabel('Time (s)')\n plt.title('Waterfall')\n plt.colorbar()\n plt.savefig('waterfal_just_plot.png', dpi=400, transparent=False)\n plt.show()", "title": "" }, { "docid": "d9d8f3869d4c8a0dba30e5d30597d93b", "score": "0.52307063", "text": "def PlotPowerSpectrum(g, x0, N, args=()):\n sig = IterateList(g, x0, N+1000, args)[1000:]\n sig_freq = sp.fftpack.fftfreq(sig.size, d=1)\n sig_fft = sp.fftpack.fft(sig)\n pidxs = sp.where(sig_freq > 0)\n freqs, power = sig_freq[pidxs], sp.abs(sig_fft)[pidxs]\n pylab.plot(freqs, power)\n pylab.xlabel('Frequency [Hz]')\n pylab.ylabel('Power')\n pylab.show()\n print(\"Peak Frequency:\")\n print(freqs[power.argmax()])", "title": "" }, { "docid": "b3c8ed7def21d86af46ce4b0aa1689ae", "score": "0.52234256", "text": "def plot_classification_frequency(classification_data):\n classification_frequency = scipy.stats.itemfreq(classification_data)\n\n plt.figure(1)\n\n for row_n, row in enumerate(classification_frequency):\n if row[0] == 'B':\n label = 'Benign'\n color = 'b'\n elif row[0] == 'M':\n label = 'Malignant'\n color = 'r'\n else:\n raise Exception(\"Unkown classification:\", row[0])\n frequency = int(row[1])\n plt.bar(left=row_n, height=frequency, color=color, label=label)\n\n plt.gca().axes.xaxis.set_ticklabels([])\n plt.legend()\n plt.xlabel(\"Diagnosis\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Distribution of Classifications\")\n print(\n \"In order to have our classifier be adept at spotting all classes,\\n\"\n \"we must ensure our data has a reasonably equal distribution.\\n\"\n )\n plt.show()", "title": "" } ]
09c4e58955a8b4e19f869fca7078f2d8
subscribe the web client to this socket
[ { "docid": "8806147f6d946aa383d7fb9f5fb8964f", "score": "0.6003841", "text": "def sub(self, uri, prefix=''):\n\n ident = self.ident(uri, prefix)\n idents = self.recievers.iterkeys()\n self.log('subscribe:', 'requested', ident)\n\n # we don't need to actually create more filtered sockets if we\n # already have a general one on the books.\n # the javascript end will handle choosing the most specific\n # subscriber callback for any message we send over the wire\n if ident in self.recievers or self.any_has_prefix(ident, idents):\n # already subscribed\n self.write_message({'success': 'already subscribed'})\n self.log('subscribe:', 'already has', ident)\n return\n\n # create a new reciever and a callback for it\n self.log('subscribe:', 'subscribing to prefix', repr(prefix), 'at uri', uri)\n rcvr = AsyncReciever(sub(uri), prefix)\n self.recievers[ident] = rcvr\n rcvr.on_recieve(self.write_squidwork)\n\n self.write_message({'success': 'subscribed {}'.format(ident)})", "title": "" } ]
[ { "docid": "109c83d9994e8d99aa6478bff90b40e6", "score": "0.70212555", "text": "def subscribe(self):\n try:\n self.client.subscribe()\n except Exception as e:\n logger.error(\"Unknown error: {}\".format(e))\n raise Exception", "title": "" }, { "docid": "30af9950d2e712e3525c37be2730b9d6", "score": "0.67332", "text": "def on_connect(client, userdata, flags, rc):\n print(\"Connected with result code %s, subscribing to topic: %s\" % (str(rc), userdata[\"topic\"]))\n client.subscribe(userdata[\"topic\"])", "title": "" }, { "docid": "1cb1f02a5e9dd4e21452f5f7b5134ec8", "score": "0.6669246", "text": "def on_connect(client, userdata, flags, rc):\r\n print(\"Connected with result code \"+str(rc))\r\n \r\n # TOPICS I SUBCRIBED TO\r\n \r\n client.subscribe(\"Francis/cpupct\") # (MY PUBLISHER)\r\n\r\n \r\n client.subscribe(\"Damare/cpupct\") # (CLASSMATE'S PUBLISHERS)\r\n client.subscribe(\"Miller/cpupct\")\r\n client.subscribe(\"Markham/cpupct\")\r\n client.subscribe(\"Bingham/cpupct\")", "title": "" }, { "docid": "62a6bc5e285e4fcdc979ea01b8fd1651", "score": "0.6598101", "text": "async def __init_connection__(self): \n\t\tself.session = aiohttp.ClientSession()\n\t\tself.connection = self.session.ws_connect(\n\t\t\t\turl=self.WSS_FEED, heartbeat=self.WSS_TIMEOUT)\n\n\t\tself.websocket = await self.connection.__aenter__()\n\t\tawait self.websocket.send_json(self._subscribe)", "title": "" }, { "docid": "a6b0dc8b8fa7163b13277619e79b2edc", "score": "0.6485489", "text": "def on_run(self, _):\n logging.info('Websocket connected.')\n self.get_and_subscribe_devices()\n self.get_events()", "title": "" }, { "docid": "f6b5f89d88dbfe68a0490a5ab158841c", "score": "0.64582074", "text": "def on_connect(\n self,\n client: mqtt.Client,\n userdata: Any,\n rc: int,\n properties: mqtt.Properties = None,\n ) -> None:\n # subscribe to all topics\n client.subscribe(\"#\")", "title": "" }, { "docid": "3ea6571cbf873c7a6a8e829256bb62a7", "score": "0.64488345", "text": "def subscribe_server(self):\n\t\tsubscribe_data = json_subscribe(MATRICULES, self.__client.port, self.__name)\n\t\tresponse = json_decode(self.__client.send_to_server(subscribe_data))\n\n\t\ttry:\n\t\t\tif response[\"response\"].upper() == \"OK\":\n\t\t\t\tlogging.info(\"Registered to the server\")\n\t\t\telse:\n\t\t\t\tlogging.critical(f\"Error while subscribing : {response['error']}\")\n\t\texcept Exception as e:\n\t\t\tlogging.critical(f\"Error on subscribing to the server : {e}\")\n\t\t\texit()", "title": "" }, { "docid": "1773ee9752c0b4475e44f7118f584b18", "score": "0.63788486", "text": "def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC)", "title": "" }, { "docid": "1773ee9752c0b4475e44f7118f584b18", "score": "0.63788486", "text": "def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC)", "title": "" }, { "docid": "de83f252f24f417d7cd393a046a1cfef", "score": "0.632665", "text": "def on_connect(client, userdata, rc):\n print('Connected with result code ' + str(rc))\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(MQTT_TOPIC, 1)", "title": "" }, { "docid": "34ecf349b262dc88037422352aa62c1d", "score": "0.63254875", "text": "def on_connect(client, userdata, flags, rc):\n\tprint('Connected with result code ' + str(rc))\n\tclient.subscribe(MQTT_TOPIC)", "title": "" }, { "docid": "208a7ef91d240bd0867a02a71e4a044c", "score": 
"0.6249386", "text": "def on_connect(self, client, userdata, flags, rc):\n\n if rc == 0:\n self.connected = True\n logger.info(self.parse_return_code(0))\n self.client.subscribe(self.appconfig.topic)\n else:\n logger.error(f\"{self.parse_return_code(rc)}\")\n self.connected = False", "title": "" }, { "docid": "96ca8fd2138803d9ab75dc324644bc12", "score": "0.62461495", "text": "def on_subscribe(self):\n\n def subscribe_handler(handler: Callable):\n log_info.info(\"on_subscribe handler accepted\")\n self.client.on_subscribe = handler\n return handler\n\n return subscribe_handler", "title": "" }, { "docid": "9ca443108e3022d9d58f794f0ced2b31", "score": "0.6235847", "text": "def subscribe(self, callback):\n # Avoid duplicate subscription\n if self._subscribe_id:\n return\n\n self._subscribe_id = 'subscribe:%s:%d' % (\n self.name, self.rosbridge.id_counter)\n\n self.rosbridge.on(self.name, callback)\n self.rosbridge.send_on_ready(Message({\n 'op': 'subscribe',\n 'id': self._subscribe_id,\n 'type': self.message_type,\n 'topic': self.name,\n 'compression': self.compression,\n 'throttle_rate': self.throttle_rate,\n 'queue_length': self.queue_length\n }))", "title": "" }, { "docid": "30e9c2e4ccabe60518676a63aff16820", "score": "0.6223954", "text": "def subscribe(self, publisher):\n for line in publisher:\n self.data_socket.connect(\"tcp://\"+str(line[0])+\":\"+str(line[1]))\n self.subscribed_ports.append(line[1])\n self.data_socket.setsockopt(zmq.SUBSCRIBE, '')", "title": "" }, { "docid": "2dee9ed9400f9c6041395dd1afb0bbef", "score": "0.6219262", "text": "async def hass_subscribe_events(websocket):\n # Subscribe to events.\n LOGGER.info(\"Subscribing to events...\")\n await websocket.send(json.dumps(dict(\n id=1,\n type=\"subscribe_events\",\n event_type=\"state_changed\"\n )))\n response = await websocket.recv()\n LOGGER.info(f\"Subscription result: {response}\")", "title": "" }, { "docid": "4cb4db2cd2f949cb3525fe17bfeb4b15", "score": "0.61780894", "text": "async def listen(self, websocket, path):\n\n # send newly connected client initial timestamp\n await self.notify(websocket, 0)\n\n try:\n # incoming message event\n async for message in websocket:\n\n # decode incoming message into an associative array\n data = json.loads(message)\n\n # notify client with event for message with count \"c\"\n await self.notify(websocket, data[\"c\"])\n except asyncio.IncompleteReadError as e:\n pass", "title": "" }, { "docid": "3a9deba0e0758b1168148eea8318f7e6", "score": "0.61297876", "text": "def subscribe(self, topic, url):\n # note: do NOT encode the URL, this is not decoded on the erlang side!\n # (only strings are allowed anyway)\n # url = self._conn.encode_value(url)\n result = self._conn.call('subscribe', [topic, url])\n self._conn.process_result_subscribe(result)", "title": "" }, { "docid": "428dc3f6c80fa87868e02c64a0482b24", "score": "0.6125694", "text": "def subscribe(listener):\n _thread_local.communication_manager.subscribe(listener) # type: ignore[union-attr]", "title": "" }, { "docid": "74056d62b32d840d3379628d4cb168a8", "score": "0.6108331", "text": "def subscribe(self):\n self.bus.subscribe('start', self.start)\n self.bus.subscribe('stop', self.stop)\n if self.subscribe_event is not None:\n self.bus.subscribe(self.subscribe_event, self.receive_input)", "title": "" }, { "docid": "69c034eb6af951481f78d8a153ac4de5", "score": "0.6104722", "text": "def subscribe(self, destination, extra_headers=None):\n raise NotImplementedError(\"%s client does not implement SUBSCRIBE\" % (self.__class__,))", "title": 
"" }, { "docid": "69c034eb6af951481f78d8a153ac4de5", "score": "0.6104722", "text": "def subscribe(self, destination, extra_headers=None):\n raise NotImplementedError(\"%s client does not implement SUBSCRIBE\" % (self.__class__,))", "title": "" }, { "docid": "f2283ff5fa92e53216cc4f40a9a26c92", "score": "0.60880274", "text": "def subscribe(self, subscribe):\n\n self._subscribe = subscribe", "title": "" }, { "docid": "a165097abb33ee878cb29395443ba874", "score": "0.6080283", "text": "def run(self):\n self.socket = self.context.socket(zmq.SUB)\n self.socket.setsockopt(zmq.SUBSCRIBE,'')\n self.socket.setsockopt(zmq.IDENTITY, self.session.session)\n self.socket.connect('tcp://%s:%i' % self.address)\n self.iostate = POLLIN|POLLERR\n self.ioloop.add_handler(self.socket, self._handle_events, \n self.iostate)\n self.ioloop.start()", "title": "" }, { "docid": "044e96b0f6504480e85d0002d636d8c7", "score": "0.6080151", "text": "def send_subscription(self):\n\n print \"[i] Sending a Subscribe request\"\n\n # Generate the payload\n payload = SUBSCRIBE % ('http://' + self.WAN_IP, self.TOPIC)\n\n # Send the request\n self.send_request(payload)", "title": "" }, { "docid": "a1fbcbacf2bad343fc3e64f67308aa3d", "score": "0.60752404", "text": "def on_connect(client, userdata, flags, return_code):\n\n print('Connected with result code {}'.format(return_code))\n\n client.subscribe(TTN_SUBSCRIPTIONS)", "title": "" }, { "docid": "8730922ab3833b142c6b52138e508107", "score": "0.6073351", "text": "def process_connection():\n logging.debug(\"Processing new connection\")\n mqttc.subscribe(MQTT_TOPIC, 2)", "title": "" }, { "docid": "a09b9f4cfe40ed455a6d9791186b5b54", "score": "0.60667396", "text": "def on_subscribe(client, userdata, mid, granted_qos):\n logging.debug(\"Subscribe with mid \" + str(mid) + \" received.\")", "title": "" }, { "docid": "604fe2fcf17c64f470c26bca917eb12a", "score": "0.6056498", "text": "def listen(self):\n\n print('listening to kafka')\n\n self.source.subscribe(Emitter(self.ws))", "title": "" }, { "docid": "bfcf7ea3905dd36a1e61e6d079cd9c5c", "score": "0.6054609", "text": "async def accept(self, subprotocol=None):\n await super().send({\"type\": \"websocket.accept\", \"subprotocol\": subprotocol})", "title": "" }, { "docid": "c03ce8df3a0931cba47a033064b70a1b", "score": "0.60300183", "text": "def on_connect(self, client, userdata, flags, rc):\n if id(client) == id(self.roomba):\n self.debug(\"Connected to roomba mqtt broker with result code \"+str(rc))\n client.subscribe(\"#\")\n\n elif id(client) == id(self.upstream):\n self.debug(\"Connected to upstream mqtt broker with result code \"+str(rc))\n client.subscribe(self.settings[\"upstream\"][\"subscribe\"])", "title": "" }, { "docid": "e7798393e531003822fc25fea1801866", "score": "0.6025621", "text": "def subscribe(self, publisher):\n for line in publisher:\n self.create_info_log_entry(\"LogWriter subscribes to: \"+str(line[0])+\":\"+str(line[1]))\n self.socket.connect(\"tcp://\"+str(line[0])+\":\"+str(line[1]))\n self.subscribed_ports.append(line[1])\n self.socket.setsockopt(zmq.SUBSCRIBE, '') # remove the subscription filter", "title": "" }, { "docid": "5fd8ca6d8f7652e667e4ae3c65125329", "score": "0.60156804", "text": "def on_mqtt_connect(self, client: Client, userdata, flags, rc):\n logging.info(f\"Connected with result code {rc}\")\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n for t in self.args.topic:\n logging.info(f\"Subscribe to {t}\")\n 
self.mqtt_client.subscribe(t)", "title": "" }, { "docid": "647db7120a3b62c29e476dd5ecff92ea", "score": "0.59803313", "text": "def on_connect(client, userdata, flags, rc):\n logger.info(f\"Connected with result code {rc}.\")\n # Subscribing in on_connect() means that if we lose the connection and reconnect then subscriptions will be renewed.\n client.subscribe(\"owntracks/+/+\")", "title": "" }, { "docid": "b6404ce39582b128ff7db687c3ecbdbb", "score": "0.5975095", "text": "def on_client_connect(self, client):\n self.clients.append(client)\n log.msg('Client connected')", "title": "" }, { "docid": "81d4d7cf7a27d74bb259be07b0ce69ba", "score": "0.5970741", "text": "def on_subscribe(self, client, userdata, mid, granted_qos):\n\n self.subscribed = True", "title": "" }, { "docid": "e8eca22d65772bb0fae58f229e5d2851", "score": "0.5969631", "text": "def subscribe(self, message):\n try:\n channel_id = self.channel_id(message)\n self.server.connections.add_peer(channel_id, self, self.client_address)\n self.is_subscribed = True\n logger.debug('New peer {}:{} connected to channel: {}'.format(\n self.client_address[0], self.client_address[1], channel_id\n ))\n except Exception as e:\n logger.error('Failed to subscribe new peer {}:{} - {}'.format(\n self.client_address[0], self.client_address[1], str(e)\n ))", "title": "" }, { "docid": "363a8fa65e57fe4a084c237ce48483df", "score": "0.5913931", "text": "def _on_connect(client, userdata, flags, rc):\n logging.info(f'Connected to broker, result code {str(rc)}')\n\n for topic in userdata.keys():\n client.subscribe(topic)\n logging.info(f'Subscribing to topic: {topic}')", "title": "" }, { "docid": "4d804f333d2e59ba836f2b80a93cc046", "score": "0.59032863", "text": "def do_subscribe(self, arg: str) -> None:\n args = [arg]\n libwampli.ready_uri(args)\n\n task = Task(\"subscribe\", args)\n self._send_queue.put_nowait(task)", "title": "" }, { "docid": "321d23e95b7b77305725017b1ad0cc16", "score": "0.590251", "text": "def connect():\n print('connection established')\n # ClientApplication.sio.emit(event='on_client_message', \n # data=data_pkg)", "title": "" }, { "docid": "b77237ee54ab3ba67bd5e2ee6e4d29ca", "score": "0.5901918", "text": "def subscriber(self):\n pass", "title": "" }, { "docid": "6053ec4a1abeb4f6420ef05c189e8e19", "score": "0.5892358", "text": "def _connect(self) -> None:\n super()._connect()\n self.pubsub = self.client.pubsub()", "title": "" }, { "docid": "d81b36044ba981b02c1e9be0bc69af91", "score": "0.5859421", "text": "def on_connect(self, client, userdata, flags, rc):\n # srw2ho 20.04.2023: Threadnames umbiegen for debugging rausnehmen -> kann auch in Thread-Handler for MQTT Request zum Teil erledigt werden\n # threading.current_thread().name = (\n # \"client.py\" + \"_on_connect_\" + str(threading.get_ident())\n # )\n\n # print(\"MQTT-Connected with result code: \" + str_)\n\n # resubscribe to all previously subscribed topics\n # for topic, _ in self._subscriptions.items():\n # self._client.subscribe(topic)\n\n # qos default = 0\n try:\n qos = 0\n topictupels = [(key, qos)\n for key, value in self._subscriptions.items()]\n if len(topictupels) > 0:\n self._client.subscribe(topictupels)\n\n self._isconnected = True\n\n str_ = self.getConnectMessageByReturnValue(rc)\n logger.info(f\"MQTT-Connected with result code: {str_}\")\n\n if self._connectHandler != None:\n self._connectHandler(client, userdata, flags, rc)\n\n except Exception as e:\n logger.error(f\"MQTT-on_connect error result code: {e}\")", "title": "" }, { "docid": 
"e7f4167d3aea9b93e4071415aa1d5a11", "score": "0.58471465", "text": "def on_connect(connected_client, _, __, rc) -> None:\n logger.info(\"Connected client with result code \" + str(rc))\n # Subscribe in on_connect callback to automatically re-subscribe if the connection was lost\n # Subscribe to all arduino hexnumber actions\n # '+' means single level wildcard. '#' means multi level wildcard.\n # See http://www.hivemq.com/blog/mqtt-essentials-part-5-mqtt-topics-best-practices\n logger.debug(\"Subscribing to \" + str(constants.iRulezTopic) + \"/\" + constants.virtual_IO_board_name + \"/\" +\n constants.actionTopic)\n connected_client.subscribe(constants.iRulezTopic + \"/\" + constants.virtual_IO_board_name + \"/\" +\n constants.actionTopic)", "title": "" }, { "docid": "ffc22ad28f7193b853d28b4ad6d12799", "score": "0.58453065", "text": "def run(self):\n rospy.loginfo(\"Bridge accepted client: %s\", self.client_addr)\n\n try:\n while True:\n val, sig = self.dec.decode()\n if not sig:\n continue\n rosname = id2msg(sig.name)\n if val is not None: # Not as stupid as it looks.\n if rosname == 'subscribe':\n self._handle_subscribe(val)\n elif rosname == 'publish':\n self._handle_publish(val)\n elif sig.name in sample_in_hooks: # TODO: Check reg.?\n for conv in sample_in_hooks[sig.name]:\n conv.put_sample(sig.name, val)\n elif rosname in conf.IMPORTS: # TODO: out.\n self._handle_topic(val, sig)\n else:\n self._handle_service(val, sig)\n else:\n rospy.loginfo(\"Client registered: '%s'\", sig.name)\n except EOFError:\n # Clean up socket and subscriptions on EOF.\n self.client_socket.close()\n for topic,sub in self.subs.iteritems():\n sub.unregister()\n # TODO: Clean conv.\n for pt, conv in self.subscribed_conversions.iteritems():\n conv.unregister_sample_subscriber(pt, self)\n except socket.error as e:\n rospy.logerr(\"Socket error %d: %s\", e.errno, e.strerror)\n\n rospy.loginfo(\"Client disconnected\")", "title": "" }, { "docid": "933058659501a1cf766ddaa6ddecf13b", "score": "0.5836819", "text": "def subscribe(self, feed):\n self.client.subscribe(feed[0], self.youtube_cat_id)", "title": "" }, { "docid": "a4d555a9be9e7853198ab6ea23cfd4b2", "score": "0.58281356", "text": "def subscribe(self, board, channel):\n pass", "title": "" }, { "docid": "231613a6a5cca367ea23dd0a7069a7be", "score": "0.5827039", "text": "def subscribe_rpc(self):\n self.subscribe(self._topic_rpc, qos=1)", "title": "" }, { "docid": "49af65cd12db79cc94a800a9e6491cc9", "score": "0.58243436", "text": "def subscribe(self):\n return Subscribe(self)", "title": "" }, { "docid": "cdb38fb58095d83ff05e91a14acdffdb", "score": "0.58231837", "text": "def on_open(self, info):\n\n LOGGER.info('[ChatWebsocketHandler] Websocket connecition opened: %s ' % self)\n\n # Initialize new pika rabbitmq client object for this websocket.\n self.rabbit_client = RabbitMqClient()\n # Assign websocket object to a Pika client object attribute.\n websocketParticipants.add(self)\n self.rabbit_client.websocket = self\n # connect to rabbitmq\n self.rabbit_client.start()", "title": "" }, { "docid": "9414d002188eeda2419ac3d6b7666c50", "score": "0.5794444", "text": "def subscribe(self, observer: Callable):\n self.subscriber = observer", "title": "" }, { "docid": "06318552a49798a9cecbbe695db29006", "score": "0.578933", "text": "def ReceiveSocketMessage(self):\n while(True):\n conn,addr = self.socketObj.accept()\n self.clients[addr] = c", "title": "" }, { "docid": "ca92506f33ff1c0bf39fc75b3545b935", "score": "0.57771236", "text": "def subscribe(self):\n return 
self.reddit_session._subscribe(self.name)", "title": "" }, { "docid": "eb1a9bbe722d57f7d2e2ec2c64b59cd8", "score": "0.57768047", "text": "async def ws_rpc_subscribe(self, payload):\n await self.channels.register(\n payload[\"channel\"], payload.get(\"event\"), self.channel_callback\n )\n return dict(subscribed=self.channels.get_subscribed(self.channel_callback))", "title": "" }, { "docid": "a0979b72a46cf7220e9c3febd6f3e1de", "score": "0.57747185", "text": "def _on_subscribe_mqtt(\n self, client: MQTT, user_data: str, topic: int, qos: int\n ) -> None:\n if self.on_subscribe is not None:\n self.on_subscribe(self, user_data, topic, qos)", "title": "" }, { "docid": "29af8ef24fd7803d78143a7057d45605", "score": "0.5749867", "text": "def subscribe(self, type='any'):\n return self.send_command(\n Command(\n 'clientnotifyregister',\n schandlerid=0,\n event=type\n )\n )", "title": "" }, { "docid": "707137a02b24c0d11e8299d4c4c5cd20", "score": "0.57385665", "text": "def handshake(self):\n message = self.rp.raw.decode('utf-8', errors='strict').strip()\n upgrade = re.search('\\nupgrade[\\s]*:[\\s]*websocket', message.lower())\n\n if not upgrade:\n self.keep_alive = False\n self.handle_post()\n return\n\n key = re.search('\\nsec-websocket-key[\\s]*:[\\s]*(.*)\\r\\n', message, re.I)\n if key:\n key = key.group(1)\n else:\n logger.warning(\"Client tried to connect but was missing a key\")\n self.keep_alive = False\n return\n\n response = Adapter.handshake_response(key)\n self.handshake_done = self.request.send(response.encode('utf-8'))\n self.valid_client = True\n\n # Add new client\n self.subscribe(message)", "title": "" }, { "docid": "f55b02acd05f5348c3aaa612d2a59217", "score": "0.5734122", "text": "def on_subscribe(mqtt_client, userdata, mid, granted_qos):\n logging.debug(\"Begin on_subscribe\")\n\n #TODO Add implementation specific logic here \n\n\n logging.debug(\"End on_subscribe\")", "title": "" }, { "docid": "0b6c715a70da27099c956a35b02c4911", "score": "0.57303196", "text": "def on_connect():\n configuration = ioant.get_configuration()\n # There is now a connection\n topic = ioant.get_topic_structure()\n topic['top'] = \"live\"\n topic['global'] = configuration['ioant']['mqtt']['global']\n topic['local'] = configuration['ioant']['mqtt']['local']\n topic['client_id'] = configuration['ioant']['mqtt']['client_id']\n ioant.subscribe(topic)", "title": "" }, { "docid": "923552b333b506bf79c04f8fd0c089f9", "score": "0.5729532", "text": "def handleSubscribe( self, inEvent ):\n sd = inEvent.getPayload()\n# sd = parseSubscribeData( inEvent.getPayload() )\n\n _log.debug( 'handleSubscribe %s' % ( sd ) )\n\n \"\"\"\n When subscriptions are received they could be wild carded by providing blank eventType or eventSource\n so we need to check for these\n \"\"\"\n if sd and (sd[0] > 0):\n et = sd[1]\n es = sd[2]\n if (et == \"http://id.webbrick.co.uk/events/config/get\") or (et == \"\") or (et == None):\n self.doPublish(es)", "title": "" }, { "docid": "481d4fea3e5d9c492189a4a6171d715d", "score": "0.572783", "text": "async def run_client():\n async with websockets.connect(WEBSOCKET_SERVER) as websocket:\n await register_host_id(websocket)\n await response_consumer(websocket)", "title": "" }, { "docid": "eb5d2686edf72f76037f042bd326f31f", "score": "0.5723234", "text": "def subscribe(self, callback_on_message, from_hub=None):\n self.callback = callback_on_message\n self.exchange_name = utils.make_exchange_name(self._namespace, self.EXCHANGE_TYPE, extra=from_hub)\n connection = self._connect()\n return 
connection.ioloop", "title": "" }, { "docid": "2a56f0eb39aa0a0cd37f9fe9bb589748", "score": "0.57232267", "text": "def recieve_user_connection(self): # 20\n \n # 這是ptt的sample code,但要處理2個clients好像要別的方法,待修改\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as l_s:\n l_s.bind((self.host, self.port))\n l_s.listen()\n c_s, addr = l_s.accept()\n with c_s:\n print(\"Connected\", addr)\n while True:\n data = c_s.recv(1024)\n if not data:\n break\n c_s.sendall(data)\n # 待修改結束\n\n pass", "title": "" }, { "docid": "9f5beb188c4b26919cf9b43206793c2a", "score": "0.5701495", "text": "def subscribeReceived(self, presence):\n log.debug(\"Auto-subscribing to %s\", presence.sender.userhost())\n self.subscribed(presence.sender)\n self.subscribe(presence.sender.userhostJID())", "title": "" }, { "docid": "3b7f804cf63187e53da43b04928e5ddd", "score": "0.5682295", "text": "def on_subscribe(client, user_data, mid, granted_qos):\n print(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos))", "title": "" }, { "docid": "474b1f27522d4eb3d65ee983c742a460", "score": "0.5674448", "text": "def on_message(self, message):\n\n LOGGER.info('[ChatWebsocketHandler] message received on Websocket: %s ' % self)\n\n res = tornado.escape.json_decode(message)\n routing_key = res['routing_key']\n msg = res['msg']\n stage = msg['stage']\n\n if stage == 'start':\n LOGGER.info('[ChatWebsocketHandler] Message Stage : START')\n\n name = msg['name']\n # assign name to rabbit client\n self.rabbit_client._person = name\n # assign clientid to rabbit client\n self.rabbit_client._clientid = self.genid()\n # add no. of current participants/websocket connections\n self.rabbit_client._participants = len(websocketParticipants)\n msg['participants'] = len(websocketParticipants)\n\n msg['clientid'] = self.rabbit_client._clientid\n\n LOGGER.info('[ChatWebsocketHandler] Publishing the received message to RabbitMQ')\n\n self.rabbit_client.publish(msg, routing_key)", "title": "" }, { "docid": "208be905cad6b3f83a5669c1f26b50f2", "score": "0.56721705", "text": "def websocket_handler(request):\n ws = web.WebSocketResponse()\n yield from ws.prepare(request)\n\n token = TOKEN(request)\n identity = yield from register_ws(ws, token)\n\n print('cookies', request.cookies)\n\n while True:\n try:\n msg = yield from ws.receive()\n\n if msg.tp == aiohttp.MsgType.text:\n topics = json.loads(msg.data)['topics']\n subscribe_to_topics(identity, ws, topics)\n\n elif msg.tp == aiohttp.MsgType.close:\n print('websocket connection closed')\n unregister_ws(identity, ws)\n break\n elif msg.tp == aiohttp.MsgType.error:\n print('ws connection closed with exception %s',\n aio.ws.exception())\n unregister_ws(identity, ws)\n break\n\n except RuntimeError:\n # clients.remove(ws)\n print('client disconnected')\n break\n\n return ws", "title": "" }, { "docid": "124fbdb429162a7ccf624ba8f315bd6a", "score": "0.5670587", "text": "async def connect(self):\n self.websocket = await self.ws_connect(self.URL)", "title": "" }, { "docid": "c0af300979cba8e47820d9e57d6744ef", "score": "0.5667329", "text": "def connectionMade(self):\n self.session = {}\n self.session[\"host\"] = self.transport.getHost().host\n self.buffer = \"\"\n Broadcast.get().addClient(self)", "title": "" }, { "docid": "cec9fd0388a9b83a14c51f38982b31fe", "score": "0.5665399", "text": "def open(self):\n self.id = self.get_argument(\"Id\")\n self.stream.set_nodelay(True)\n clients[self.id] = {\"id\": self.id, \"object\": self}\n print(\"WebSocket opened\")", "title": "" }, { "docid": "cec9fd0388a9b83a14c51f38982b31fe", 
"score": "0.5665399", "text": "def open(self):\n self.id = self.get_argument(\"Id\")\n self.stream.set_nodelay(True)\n clients[self.id] = {\"id\": self.id, \"object\": self}\n print(\"WebSocket opened\")", "title": "" }, { "docid": "b199a24f89224737ce6c24154afe160f", "score": "0.566445", "text": "def run(self):\n # important to start the websocket server in run as opposed to init,\n # otherwise we'll start it in the main thread and block other requests\n # from coming in\n self._server = WebsocketServer(self.websocket_port, host='0.0.0.0')\n self._server.set_fn_new_client(self.new_client)\n self._server.run_forever()", "title": "" }, { "docid": "c8ff23322dfb8d704b0ec629ab47362a", "score": "0.56538796", "text": "def subscribe(topic, callback):\n _global_message_broker.subscribe(topic, callback)", "title": "" }, { "docid": "673e148136fccfcf0487abc64442d7c3", "score": "0.5653784", "text": "def run(self):\n data = self.s.recv(1024)\n headers = self.parse_headers(data)\n\n if not 'Sec-WebSocket-Key' in headers:\n raise ValueError('Missing header: Sec-WebSocket-Key')\n\n accept = base64.b64encode(hashlib.sha1(headers['Sec-WebSocket-Key'] + config.guid).digest())\n\n handshake = ('HTTP/1.1 101 Web Socket Protocol Handshake\\r\\n'\n 'Upgrade: WebSocket\\r\\n'\n 'Connection: Upgrade\\r\\n'\n 'WebSocket-Origin: http://%s\\r\\n'\n 'WebSocket-Location: ws://%s:%s/\\r\\n'\n 'WebSocket-Protocol: sample\\r\\n'\n 'Sec-WebSocket-Accept: %s\\r\\n\\r\\n'\n % (config.http_host, config.socket_host, config.socket_port, accept)\n )\n self.s.send(handshake.encode())\n\n while True:\n data = self.s.recv(1024)\n\n if not data: continue\n\n print('Data from', self.addr, ':', data)\n self.onreceive(data)\n\n self.close()", "title": "" }, { "docid": "e0800383a5ffb0de48f24c277e3e36c1", "score": "0.56505376", "text": "def subscribe(job_id):\n\tdef gen():\n\t\tq = Queue()\n\t\tSUBSCRIPTIONS.append(q)\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tresult = q.get()\n\t\t\t\tev = ServerSentEvent(str(result))\n\t\t\t\tyield ev.encode()\n\t\texcept GeneratorExit:\n\t\t\tSUBSCRIPTIONS.remove(q)\n\t\n\treturn Response(gen(), mimetype='text/event-stream')", "title": "" }, { "docid": "8cb984db70bfce5674829ca97345f922", "score": "0.5648998", "text": "def _listen_clients(self) -> None:\n\n # TODO: If receive a message call self.set(data.key, data.value, data.expiration_time, False, client_address)\n pass", "title": "" }, { "docid": "8076531e916709fde6f744f89767196d", "score": "0.56457824", "text": "def _make_socket(self):\n import dallinger.db\n from dallinger.experiment_server.sockets import chat_backend\n\n self.redis = dallinger.db.redis_conn\n chat_backend.subscribe(self, 'griduniverse')\n\n self.publish({\n 'type': 'connect',\n 'player_id': self.participant_id\n })", "title": "" }, { "docid": "cc3406805529266c606f02fad0f6d1b9", "score": "0.5639633", "text": "def onConnect(self, client):\n pass", "title": "" }, { "docid": "3b72bf8d5365aead55c4004de4e1e8be", "score": "0.56317765", "text": "def on_subscribe(client, userdata, mid, granted_qos, properties=None):\n self.onsub_lock.acquire()\n if self.debug_flg:\n print(f\"subscribe accepted with QOS: {granted_qos} with mid: {mid}\")\n self.onsub_lock.release()", "title": "" }, { "docid": "a1f651e6f780ec893cbe7bab0c3770b6", "score": "0.56171685", "text": "async def register(websocket,path):\n ws_clients.add(websocket)", "title": "" }, { "docid": "ae1368c7698822c6b24ddcbe6ff12129", "score": "0.5611535", "text": "def _connect_to_chat(self):\n if self.chat_details is None:\n raise 
NotAuthenticatedError(\"You must first log in to Mixer!\")\n\n self.websocket = Socket(self.chat_details[\"endpoints\"])\n self.websocket.on(\"opened\", self._send_auth_packet)\n self.websocket.on(\"message\", lambda msg: self.emit(\"message\", msg))", "title": "" }, { "docid": "b686b490ca5468aa198061b57bf5dd2f", "score": "0.55902344", "text": "def connect_to_websocket(self):\n # conn = yield websocket_connect(\"wss://ws-feed-public.sandbox.gdax.com\")\n conn = yield websocket_connect(\"wss://ws-feed.gdax.com\")\n req = {\n \"type\": \"subscribe\",\n \"product_ids\": [\n \"BTC-USD\"\n ],\n \"channels\": [\n \"level2\"\n ]\n }\n conn.write_message(json.dumps(req))\n while True:\n msg = yield conn.read_message()\n if msg:\n response = json.loads(msg)\n # print(response['type'])\n if response['type'] == 'l2update':\n # # Perform update in database\n # and emit update to client\n self.perform_update(response)\n\n elif response['type'] == 'snapshot':\n # Store snapshot in database\n for bid_data in response['bids']:\n # Intialize a new row of data\n item = self.add_new_gdax_item(\"bid\", bid_data[0], bid_data[1])\n self.session.add(item)\n\n for ask_data in response['asks']:\n item = self.add_new_gdax_item(\"ask\", ask_data[0], ask_data[1])\n self.session.add(item)\n\n self.session.commit()\n print(\"GDAX Snapshot Received\")\n # print(response)\n self.snapshot_received = True\n elif response['type'] == 'error':\n print(response)\n else:\n break", "title": "" }, { "docid": "d850528e0d772d5adc3eca6c2ddf37da", "score": "0.557667", "text": "def handle(self):\n data, sock = self.request\n print ('HOST: UDPEchoClientHandler: Rx: \\n%s\\n' % data)\n sock.sendto(data, self.client_address)", "title": "" }, { "docid": "a1130a4aed0cad229b79e289337b3857", "score": "0.55707645", "text": "def subscribe(self, topic, func=None, fargs=None):\r\n\r\n if (self.__mqtt is None) or (not self.__connected):\r\n raise MQTTClientError('connect to the server before calling subscribe()')\r\n\r\n if self.__listening:\r\n LOG.warning('already listened. 
Do nothing.')\r\n return\r\n\r\n self.__func = func\r\n self.__args = fargs if fargs else ()\r\n self.__mqtt.on_message = self.__on_message\r\n self.__sub_topic = self.__topic.topic(self.__uid, topic)\r\n self.__subscribe(self.__sub_topic)\r\n self.__listening = True", "title": "" }, { "docid": "6543728a404ad89ec24620c8b88a342e", "score": "0.55667305", "text": "async def server_on_message(self, websocket, path):\n # Register websocket\n self.connected_clients.add(websocket)\n try:\n await self.on_connect(websocket)\n\n while True:\n try:\n data = await websocket.recv()\n message = json.loads(data)\n\n await self.handle_message_exchange(message, websocket)\n except Exception as e:\n logger.error(e)\n if websocket in self.connected_clients:\n self.connected_clients.remove(websocket)\n if websocket in self.raft_nodes:\n self.raft_nodes.remove(websocket)\n if websocket in self.control_connection:\n self.control_connection.remove(websocket)\n if websocket in self.node_connections:\n self.node_connections.remove(websocket)\n break\n finally:\n if websocket in self.connected_clients:\n self.connected_clients.remove(websocket)", "title": "" }, { "docid": "8146967e1a7c78cf3a1c87216e797ab7", "score": "0.5559368", "text": "async def subscribe(self) -> None:\n try:\n self.ws = await websockets.connect(self.ws_endpoint)\n\n if self.request is not None:\n LOGGER.info('Requesting Book: {}'.format(self.request))\n await self.ws.send(self.request)\n LOGGER.info('BOOK %s: %s subscription request sent.' %\n (self.exchange.upper(), self.sym))\n\n if self.trades_request is not None:\n LOGGER.info('Requesting Trades: {}'.format(self.trades_request))\n await self.ws.send(self.trades_request)\n LOGGER.info('TRADES %s: %s subscription request sent.' %\n (self.exchange.upper(), self.sym))\n\n self.last_subscribe_time = dt.now(tz=TIMEZONE)\n\n # Add incoming messages to a queue, which is consumed and processed\n # in the run() method.\n while True:\n self.queue.put(json.loads(await self.ws.recv()))\n\n except websockets.ConnectionClosed as exception:\n LOGGER.warn('%s: subscription exception %s' % (self.exchange, exception))\n self.retry_counter += 1\n elapsed = (dt.now(tz=TIMEZONE) - self.last_subscribe_time).seconds\n\n if elapsed < 10:\n sleep_time = max(10 - elapsed, 1)\n time.sleep(sleep_time)\n LOGGER.info('%s - %s is sleeping %i seconds...' %\n (self.exchange, self.sym, sleep_time))\n\n if self.retry_counter < self.max_retries:\n LOGGER.info('%s: Retrying to connect... attempted #%i' %\n (self.exchange, self.retry_counter))\n await self.subscribe() # recursion\n else:\n LOGGER.warn('%s: %s Ran out of reconnection attempts. '\n 'Have already tried %i times.' 
%\n (self.exchange, self.sym, self.retry_counter))", "title": "" }, { "docid": "ffeb58fce1952b02d2c48183b669e68c", "score": "0.55575657", "text": "def on_connect(self):\n print(\"You're connected to the streaming server.\")", "title": "" }, { "docid": "41f00dbd1874994712749593498a41aa", "score": "0.55537254", "text": "def subscribeTo(self, aTopic, aFunctionBindTo):\n if(self.mAlreadySubscribed == False):\n self.mFunction = aFunctionBindTo\n self.mTopic = aTopic\n \n tMESSAGE = \"subscribe, \" + aTopic\n tMessageInBytes = bytes(tMESSAGE, 'UTF-8')\n self.mSock.sendto(tMessageInBytes, (self.mSubscriberServiceIPV4, self.mSubscriberServicePort))\n self.mAlreadySubscribed = True\n self.mSock.close()\n self.mListeningSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.mListeningSocket.bind(('', self.mListeningPort))\n #wont keep the thread up if main thread die\n self.mThread.daemon = True\n try:\n self.mThread.start()\n except: #catch all errors\n pass\n return True\n else:\n return False", "title": "" }, { "docid": "7f9cbcf36f5f663c8c7b2e9349708f55", "score": "0.55497867", "text": "def subscribe(self, topic, callback):\n if topic not in self.callbacks:\n self.callbacks[topic] = [callback]\n else:\n if callback not in self.callbacks[topic]:\n self.callbacks[topic].append(callback)\n\n def callback_handler(client, userdata, message):\n # log = f\"{message.payload.decode()} {message.topic}\"\n if message.topic in self.callbacks:\n for callbk in self.callbacks[message.topic]:\n callbk(message.payload)\n\n self.connection.on_message = callback_handler\n return self.connection.subscribe(topic)", "title": "" }, { "docid": "15cbb8a1f7bfcaeb39ff36c1ab694b7d", "score": "0.5531924", "text": "async def _start(self) -> None:\n await self.send({\"type\": \"websocket.connect\"})\n await self.receive()", "title": "" }, { "docid": "b770eb5036f3c849bc84cdd54f3aad5c", "score": "0.55316156", "text": "def subscribe(self, device: str, control: str):\n topic = generate_topic(device, control)\n\n result_code: int\n message_id: int | None\n result_code, message_id = self.__client.subscribe(topic)\n self.__raise_error_if_any(result_code)", "title": "" }, { "docid": "3101dc1fc0283932611f191e2582c8c7", "score": "0.55312043", "text": "def on_startup():\n mqtt.subscribe('client/speech/speak')", "title": "" }, { "docid": "c282998f8bc443b180a92881e5a01d3c", "score": "0.55235696", "text": "def on_connect(client, userdata, flags, rc):\n main_logger.info(\"attempting on_connect\")\n if rc==0:\n mqtt_client.connected = True\n for topic in MQTT_SUB_TOPIC:\n client.subscribe(topic)\n main_logger.info(\"Subscribed to: {0}\".format(topic))\n main_logger.info(\"Successful Connection: {0}\".format(str(rc)))\n else:\n mqtt_client.failed_connection = True # If rc != 0 then failed to connect. 
Set flag to stop mqtt loop\n main_logger.info(\"Unsuccessful Connection - Code {0}\".format(str(rc)))", "title": "" }, { "docid": "f56e3251fc326b3c247372eff566ceca", "score": "0.5515813", "text": "def listen(self, name, handler):\n self.client_endpoint.connect(...)\n self.handler = handler", "title": "" }, { "docid": "2f11716bf717a126caea8b86537d9376", "score": "0.5501132", "text": "def subscribe(self, subscriber):\n self._subscribers[subscriber.uid] = subscriber", "title": "" }, { "docid": "db596cfaddc21f03e25725b68cbf68cc", "score": "0.54992354", "text": "def listen(self):\n logging.info(\n \"ThreadedServer.listen(): listening for new client connection...\")\n self.sock.listen(5)\n\n while True:\n client, address = self.sock.accept()\n self.client = client\n new_player = Player()\n # client.settimeout(10)\n\n # if connected, make threads\n if self.verify_client_conn(client):\n logging.info(\n \"================================================================\\nThreadedServer.listen(): Connected to: {}:{}\\n==========================================================================\\n\".format(address[0], str(address[1])))\n t1 = threading.Thread(target=self.speak_to_client, name=\"send_\" + self.thread_name, args=(\n client, address, new_player))\n t2 = threading.Thread(target=self.listen_to_client, name=\"recv_\" + self.thread_name, args=(\n client, address, new_player))\n\n t1.start()\n t2.start()\n\n logging.info(\n f\"ThreadedServer.listen() Show active threads \\n{threading.enumerate()}\\n\")", "title": "" }, { "docid": "4e473263cf14a22328bb34c3e230a0b1", "score": "0.54958236", "text": "def listening(self):\r\n while self.is_connected:\r\n msg_receiv = self.client_socket.recv(1024)\r\n if len(msg_receiv) > 0:\r\n msg_receiv = pickle.loads(msg_receiv)\r\n print \"j'ai bien recu \"+msg_receiv", "title": "" }, { "docid": "69e6a05ad8e8814ca1b44ef4b808941b", "score": "0.54850984", "text": "def websocket(self):\n return self.websocket_client.wss", "title": "" }, { "docid": "109c4648ec05697a54b2e612a53cdb50", "score": "0.5478585", "text": "def subscribe(self):\r\n for channel in self.bus.listeners:\r\n # Subscribe self.start, self.exit, etc. if present.\r\n method = getattr(self, channel, None)\r\n if method is not None:\r\n self.bus.subscribe(channel, method)", "title": "" }, { "docid": "2ce1b57fc85b1882151f4f140c324245", "score": "0.54769194", "text": "async def _subscribe_channels(self, ws: WSAssistant):\n try:\n payload = {\n \"id\": str(get_tracking_nonce()),\n \"type\": \"subscribe\",\n \"topic\": f\"/market/candles:{self._ex_trading_pair}_{CONSTANTS.INTERVALS[self.interval]}\",\n \"privateChannel\": False,\n \"response\": False,\n }\n subscribe_candles_request: WSJSONRequest = WSJSONRequest(payload=payload)\n\n await ws.send(subscribe_candles_request)\n self.logger().info(\"Subscribed to public klines...\")\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\n \"Unexpected error occurred subscribing to public klines...\",\n exc_info=True\n )\n raise", "title": "" } ]
9afc8cba396106e0c0e2d2cf5abd0df5
Search a word's phonetic in 'WordReference'.
[ { "docid": "a39f189fce2e8d571f78c75ac7966b03", "score": "0.66305864", "text": "def getPhonetic(word, language, variant=\"UK\") :\n\n\tlangFrom = \"\"\n\tlangTo = \"\"\n\n\tif language == \"English\":\n\t\tlangFrom = \"en\"\n\t\tlangTo = \"fr\"\n\n\ttry:\n\t\tpage = requests.get(f\"https://www.wordreference.com/{langFrom}{langTo}/{word}\")\n\texcept:\n\t\treturn NetworkError\n\n\tsoup = BeautifulSoup(page.content, 'html.parser')\n\tpronuncation_widget = soup.find(\"div\", {\"id\":\"pronunciation_widget\"})\n\n\tif pronuncation_widget != None:\n\t\tpronuncation = pronuncation_widget.text\n\telse :\n\t\treturn WordNotFoundError # Le mot cherché n'est pas disponible sur WordReference\n\n\ttry:\n\t\tif variant == \"US\" and \"US\" in pronuncation.split('/')[2]:\n\t\t\treturn pronuncation.split('/')[3]\n\t\telse :\n\t\t\treturn pronuncation.split('/')[1]\n\texcept:\n\t\treturn WordNotFoundError", "title": "" } ]
[ { "docid": "f621e037c12702cc9d483850528970ea", "score": "0.6215948", "text": "def check_word_as_pounct(self,word):\t\t\n\t\tdetailed_result=[]\n\t\t# ToDo : fix it to isdigit, by moatz saad\n\t\tif word.isnumeric():\n\t\t\tdetailed_result.append(wordCase.wordCase({\n\t\t\t'word':word,\n\t\t\t'affix': ('', '', '', ''),\t\t\t\n\t\t\t'stem':'',\n\t\t\t'original':word,\n\t\t\t'vocalized':word,\n\t\t\t'tags':self.get_number_tags(word),\n\t\t\t'type':'NUMBER',\n\t\t\t'freq':0,\n\t\t\t'syntax':'',\t\t\t\n\t\t\t}));\t\n\t\tif word in stem_pounct_const.POUNCTUATION:\n\t\t\tdetailed_result.append(wordCase.wordCase({\n\t\t\t'word':word,\n\t\t\t'affix': ('', '', '', ''),\n\t\t\t'stem':'',\n\t\t\t'original':word,\n\t\t\t'vocalized':word,\n\t\t\t'tags':stem_pounct_const.POUNCTUATION[word]['tags'],\n\t\t\t'type':'POUNCT',\n\t\t\t'freq':0,\n\t\t\t'syntax':'',\t\t\t\n\t\t\t}));\n\n\t\treturn detailed_result;", "title": "" }, { "docid": "f15c2c8e548933d5db43f035e0e157ee", "score": "0.59108704", "text": "def _searchWord(self, forward):\r\n document = core.workspace().currentDocument()\r\n\r\n cursor = document.qutepart.textCursor()\r\n if not cursor.hasSelection():\r\n cursor.select(cursor.WordUnderCursor)\r\n word = cursor.selectedText()\r\n wordStartAbsPos = cursor.anchor()\r\n wordEndAbsPos = cursor.position()\r\n\r\n if not word:\r\n return\r\n\r\n regExp = re.compile('\\\\b%s\\\\b' % re.escape(word))\r\n text = document.qutepart.text\r\n\r\n # avoid matching word under cursor\r\n if forward:\r\n startPoint = wordEndAbsPos\r\n else:\r\n startPoint = wordStartAbsPos\r\n\r\n self._updateFoundItemsHighlighting(regExp)\r\n\r\n match, matches = self._searchInText(regExp, document.qutepart.text, startPoint, forward)\r\n if match is not None:\r\n document.qutepart.absSelectedPosition = (match.start(), match.start() + len(match.group(0)))\r\n core.mainWindow().statusBar().showMessage('Match %d of %d' % \\\r\n (matches.index(match) + 1, len(matches)), 3000)\r\n else:\r\n core.workspace().currentDocument().qutepart.resetSelection()", "title": "" }, { "docid": "119af19f789667feed1933f8c0940bbb", "score": "0.5865671", "text": "def lookup(self, word: str) -> Word:\n pass", "title": "" }, { "docid": "49892e79152973faf6541ccd239a512f", "score": "0.57349604", "text": "def similar_words(word):\n\tpass", "title": "" }, { "docid": "52a1aa05273ce8f7537946394e8f926e", "score": "0.5687349", "text": "def isMatchWord(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "ef1eee697a52f3b9d1a9cc95a527b609", "score": "0.5653841", "text": "def findText(self, wordList, forward=True):\n fullList = globalref.docRef.root.descendantList(True)\n currentPos = fullList.index(self.currentItem)\n fullList = fullList[currentPos+1:] + fullList[:currentPos]\n if not forward:\n fullList.reverse()\n for item in fullList:\n if item.matchWords(wordList):\n self.changeSearchOpen([item])\n return item\n return None", "title": "" }, { "docid": "41302f76f4b995dbdfaf54a9acf1da4f", "score": "0.56114215", "text": "def search(self, word):\n p = self.helper(word)\n if p and p.isWord:\n return True\n return False", "title": "" }, { "docid": "e706dab44c63ad4b71eb78a872fd5b52", "score": "0.55804014", "text": "def pronounSearch(filter):\n pronouns = fileToStringList(\"pronoun dictionary\")\n filteredText = []\n for line in poemList:\n line = line.split(\" \")\n words = \"\"\n for word in line:\n words += \" \"\n if word in pronouns:\n words += word.upper()\n else:\n words += word \n filteredText += [words]\n printPoem(filteredText)", "title": "" }, { "docid": 
"634b4ba065aa9dc816a8038e5a6b7202", "score": "0.5579486", "text": "def search(self, word: str) -> bool:\n tree = self.look_up\n for w in word:\n if w not in tree:\n return False\n tree = tree[w]\n if '#' in tree:\n return True\n return False", "title": "" }, { "docid": "d090b4b2009235bf089acd6ef97e9b48", "score": "0.554672", "text": "def setMatchWord(self, b): #$NON-NLS-1$\r", "title": "" }, { "docid": "cc8a36b1931c5d7138df46c5692e3cc7", "score": "0.55318964", "text": "def search(self, word):\n node = self.trie\n for c in word:\n if c in node:\n node = node[c]\n else:\n return False\n return node.get('term') is not None", "title": "" }, { "docid": "3be2be54c47feae253a6511406d75f0f", "score": "0.55304444", "text": "def search(self, word: str) -> bool:\n # 1. Initialize: cur = root\n # 2. for each char c in target string S:\n # 3. if cur does not have a child c:\n # 4. search fails\n # 5. cur = cur.children[c]\n # 6. search successes\n \n cur = self.root\n for c in word:\n if c not in cur.child:\n return False\n cur = cur.child[c]\n return cur.word", "title": "" }, { "docid": "556576af3c8d07ab77912cf1477e6d69", "score": "0.5497367", "text": "def search(self, word):\n return word in self.s", "title": "" }, { "docid": "bb0d92329c4e3314d64003defe172e5b", "score": "0.5487036", "text": "def search(self, word: str) -> bool:\r\n word_tuple = self.make_tuple(word)\r\n\r\n for w in self.words:\r\n diff_from = w - word_tuple\r\n diff_to = word_tuple - w\r\n if len(diff_from) == 1 and len(diff_to) == 1:\r\n return True\r\n\r\n return False", "title": "" }, { "docid": "c2a3a9d7142fdc191a6f4135418508b5", "score": "0.5475612", "text": "def test_find_word():\n question = ['salut', 'grandpy', 'comment', 'vas', 'tu',\n 'peux', 'tu', 'me', 'dire', 'où', 'se', 'trouve',\n 'l', 'arc', 'de', 'triomphe']\n req = pars.find_word(question)\n result = \"où trouve arc triomphe \"\n assert str(req) == str(result)", "title": "" }, { "docid": "2a6472a8616a373a711dfd938804744b", "score": "0.547298", "text": "def search(self, word: str) -> bool:\n node = self.links\n for c in word:\n if c not in node:\n return False\n node = node[c]\n # If at end after for loop, this is a word\n if '#' in node:\n return True\n # Else we are still in the middle of a word even though the letters matched up so far\n return False", "title": "" }, { "docid": "58d0e8a7e52a09d7a00f86268fb2167d", "score": "0.5467885", "text": "def search(self, word):\n return self.find(self.root, word)", "title": "" }, { "docid": "4eb54815db618751275ce31f4d211450", "score": "0.5466823", "text": "def search(self, word: str) -> bool:\n tree = self.lookup\n for a in word:\n if a not in tree:\n return False\n tree = tree[a]\n if \"#\" in tree:\n return True\n return False", "title": "" }, { "docid": "06dff3a05a2a8aecf6193aab308874e2", "score": "0.54580563", "text": "def find_similar_words(self) -> None:\n for word, token in zip(self.words, self.tokens):\n if word not in self.synonyms:\n self.synonyms[word] = self._synonyms_generator.neighbors(word, token.tag_, self.perturb_opts['top_n'])", "title": "" }, { "docid": "573197d52075cdf856037db98d220891", "score": "0.5451955", "text": "def search(self, word):\n if self.root == None:\n return False\n else:\n return self.root.getWord(word, isRoot=True)", "title": "" }, { "docid": "a2dc36522701e660c82beb68b0a3f85e", "score": "0.54393554", "text": "def search(self, word: str) -> bool:\n t = self.trie\n for i in word:\n if i not in t:\n return False\n t = t[i]\n if '#' not in t:\n return False\n return True", "title": "" }, { 
"docid": "e700d57593b0321889c80e89dcc3dde1", "score": "0.5432258", "text": "def search(self, word):\n return self.match(word, 0, self.root)", "title": "" }, { "docid": "11f5bd470f95c5fb80259dd7afc11c8c", "score": "0.54315823", "text": "def search(self, word):\n n = len(word)\n w = word + '#'\n #if g(self.td, w, 0, n+1):\n # return False\n for i in xrange(n):\n loc = ord(word[i]) - ord('a') + 1\n w = word[:i] + '.' + word[i+1:] + '#'\n z = [0]\n if f(self.td, w, 0, n+1, z, loc):\n return True\n return False", "title": "" }, { "docid": "674b018ce35745405a7173701c65d216", "score": "0.54207706", "text": "def search(self, word):\n cur = self.rootNode\n for w in word:\n if cur.children[ord(w) - ord('a')] is None:\n return False\n else:\n cur = cur.children[ord(w)-ord('a')]\n \n return cur.isWord", "title": "" }, { "docid": "d62e27d835bbc446f0a221b8dd8bf299", "score": "0.5379058", "text": "def search(self, word, text):\n return bool(re.search(r'\\b{}\\b'.format(re.escape(word)), text))", "title": "" }, { "docid": "77f817b649730a6d1e4ab4b685a3e9b6", "score": "0.53673387", "text": "def look_up_pronunciation(word: str, PronouncingTable) -> List[str]:\n for i in range(len(PronouncingTable[0])):\n if word == PronouncingTable[0][i]:\n return PronouncingTable[1][i]\n return []", "title": "" }, { "docid": "9c3edf8cd651c8c2c542c95162c46c5e", "score": "0.53462887", "text": "def search(self, word):\n\n if len(word) not in self.dic: return False\n\n for candidate in self.dic[len(word)]:\n diff = 0\n\n for a, b in zip(candidate, word):\n if a != b: diff += 1\n\n if diff == 1: return True\n\n return False", "title": "" }, { "docid": "dcd0b14bbc81546e11b7885e8e1aa545", "score": "0.5334784", "text": "def search(self, word):\n flag = False\n \n def recursive(word,trie):\n nonlocal flag\n \n if word == \"\" and \"#\" in trie:\n flag = True\n return \n ## 单词过长\n if word != \"\" and trie == {\"#\":\"#\"}:\n return\n\n \n if word[0] == \".\":\n for key in trie:\n recursive(word[1:],trie[key])\n \n elif word[0] in trie:\n recursive(word[1:],trie[word[0]])\n ## 不存在该前缀\n else:\n return\n\n recursive(word,self.trie)\n return flag", "title": "" }, { "docid": "731e8b4e58170b5801a59201a75aae50", "score": "0.5333343", "text": "def search(self, word):\n return self.searchFrom(word, self.root)", "title": "" }, { "docid": "d9a1b1c2f85f47dbec17764ee2d002db", "score": "0.531149", "text": "def search(self, word):\n for i in range(len(word)):\n for ch_idx in range(26):\n ch = chr(ord('a') + ch_idx)\n if ch != word[i]:\n replace_word = word[:i] + ch + word[i+1:]\n if replace_word in self.dictionary:\n return True\n return False", "title": "" }, { "docid": "db484243b2bdbb1db1075f47ba4ea170", "score": "0.53038156", "text": "def search(self, word: 'str') -> 'bool':\n temp = self.root\n for ch in word:\n if ch not in temp.links:\n return False\n temp = temp.links[ch]\n return temp.isEnd", "title": "" }, { "docid": "79a45cd0fca596fd841446ea08d4a3b4", "score": "0.529919", "text": "def search(self, word):\r\n current = self.root\r\n for w in word:\r\n current = current.children.get(w)#False\r\n if current == None:\r\n return False\r\n return current.isword# isword 将会在遍历完apple, 字母e之后,自动改成True,所以search('app') 返回\r", "title": "" }, { "docid": "54c5d8a16717620fa7a995cd1f6c40ee", "score": "0.5298913", "text": "def __contains__(self, word):\n return word in self.word2id", "title": "" }, { "docid": "286913f0ac3cef1aa8ee1c6fa17d430c", "score": "0.52985764", "text": "def word_search_and_compare(self):\r\n d = self.var.get()\r\n pattern = d\r\n 
return [w for w in word_list if re.match(pattern,w)]", "title": "" }, { "docid": "cd78b2039c3cedf3c9e0a412f3df94a6", "score": "0.52977455", "text": "def findW(words, casesensitive=False, context=False, ordered=0, bk=None):\n t0 = tc()\n \n # unpack string for individual words\n word_array = ['']\n for element in words:\n # separate words by where spaces are\n if element == ' ':\n word_array.append('')\n else:\n word_array[-1] += element\n\n # change apostrophes?\n for i in range(len(word_array)):\n word = word_array[i]\n if \"'\" in word:\n word_split = word.split(\"'\")\n word = u\"\\u2019\".join(word_split)\n word_array[i] = word\n \n # analyze words if upper/lowercase does not matter\n if casesensitive == False:\n # redefine 'words'\n words = []\n Words = []\n for word_index in range(len(word_array)):\n UppercaseLetter = word_array[word_index][0].upper()\n LowercaseLetter = word_array[word_index][0].lower()\n RestOfWord = word_array[word_index][1:]\n # create uppercase word\n Word = UppercaseLetter + RestOfWord\n Words.append(Word)\n # create lowercase word\n word = LowercaseLetter + RestOfWord\n words.append(word)\n # print(words)\n # print(Words)\n else:\n words = word_array\n # print(words)\n \n\n verses_containing_words = 0\n verses_containing_extra_words = 0\n occurrences = 0\n \n # search entire Bible, or one book, or multiple books\n if bk is None:\n # If no book is specified, search entire Bible\n books_to_go_through = Book_names\n elif type(bk) == list:\n # If a list of books is specified, search those books\n books_to_go_through = bk\n for bkk in books_to_go_through:\n if bkk not in Book_names:\n print(bkk + ' not found in the books of the Bible.')\n return\n elif type(bk) == str:\n # If a single string is entered, check if it is a book, or the entire new/old testament\n books_to_go_through = [bk]\n if bk not in Book_names:\n if bk.upper() == 'OT':\n books_to_go_through = Book_names[:39]\n elif bk.upper() == 'NT':\n books_to_go_through = Book_names[39:]\n else:\n print(bk + ' not found in the books of the Bible.')\n return\n\n # go through books of the Bible\n for book_name in books_to_go_through:\n Book = Bible_searchable[book_name]\n \n # go through each chapter\n for chapter_index in range(len(Book)):\n chapter_number = chapter_index + 1\n Chapter = Book[chapter_index]\n \n # go through each verse\n for verse_name,EntireVerse in Chapter.items():\n\n # In case words should be matched in order, store a copy of the verse\n Verse = EntireVerse\n\n # check to see if each word is in the verse\n word_index = 0\n contains = True\n contains_added = True\n while contains and word_index < len(words):\n \n # If upper/lowercase is unimportant...\n if casesensitive == False:\n word = words[word_index]\n Word = Words[word_index]\n # Is word in verse? (also check non-original tongues)\n if (word not in Verse) and (Word not in Verse):\n contains = False\n elif ordered != 0:\n # If word is in verse, and order is important, let subsequent calls only search the rest of the verse\n split_index = min(try_index(Verse, [word, Word]))\n if (ordered == 2) and (word_index != 0) and (split_index != 0):\n contains = False\n Verse = Verse[split_index+1:]\n \n # If upper/lowercase is important...\n elif casesensitive == True:\n word = words[word_index]\n # Is word in verse? 
(also check non-original tongues)\n if word not in Verse:\n contains = False\n elif ordered != 0:\n # If word is in verse, and order is important, let subsequent calls only search the rest of the verse\n split_index = Verse.index(word)\n if (ordered == 2) and (word_index != 0) and (split_index != 0):\n contains = False\n Verse = Verse[split_index+1:]\n \n word_index += 1\n\n if contains == True:\n verses_containing_words += 1\n ActualVerse = Bible[book_name][chapter_index][verse_name]\n total_verse = verse_writeout(ActualVerse)\n print(book_name,chapter_number,':',verse_name)\n print(' ',total_verse)\n tf = tc()\n print(tf-t0) \n print('number of verses containing specified words: ',verses_containing_words)", "title": "" }, { "docid": "e9e41d7e82f2c86ef3b9f6615e7e45c7", "score": "0.52892405", "text": "def __zh_word_tea_dictionary_search(self, chinese_word):\n general_dictionary = {}\n\n # Conduct a search.\n web_search = f\"http://babelcarp.org/babelcarp/babelcarp.cgi?phrase={chinese_word}&define=1\"\n eth_page = requests.get(web_search, headers=self.zw_useragent, timeout=15)\n try:\n tree = html.fromstring(eth_page.content) # now contains the whole HTML page\n word_content = tree.xpath('//fieldset[contains(@id,\"translation\")]//text()')\n except BaseException:\n return None\n\n # Get the headword of the entry.\n try:\n head_word = word_content[2].strip()\n except IndexError:\n return None\n\n if chinese_word not in head_word:\n # If the characters don't match: Exit. This includes null searches.\n return None\n else: # It exists.\n try:\n pinyin = re.search(r\"\\((.*?)\\)\", word_content[2]).group(1).lower()\n except AttributeError: # Never mind, it does not exist.\n return None\n\n meaning = word_content[3:]\n meaning = [item.strip() for item in meaning]\n\n # Format the entry to return\n formatted_line = f'\\n\\n**Tea Meanings**: \"{\" \".join(meaning)}.\"'\n formatted_line = formatted_line.replace(\" )\", \" \")\n formatted_line = formatted_line.replace(\" \", \" \")\n formatted_line += f\" ([Babelcarp]({web_search}))\" # Append source\n\n general_dictionary[\"meaning\"] = formatted_line\n general_dictionary[\"pinyin\"] = pinyin\n\n return general_dictionary", "title": "" }, { "docid": "2f1fcb9dc073f9bd440e9f975ff4d84a", "score": "0.52829", "text": "def __zh_word_cccanto_search(self, cantonese_word: str) -> None | Dict[str, str]:\n general_dictionary = {}\n\n # We open the file.\n with open(FILE_ADDRESS_ZH_CCCANTO, encoding=\"utf-8\") as f:\n existing_data = f.read().split(\"\\n\")\n\n relevant_line = None\n\n # Look for the relevant word (should not take long.)\n for entry in existing_data:\n traditional_headword = entry.split(\" \", 1)[0]\n if cantonese_word == traditional_headword:\n relevant_line = entry\n break\n\n if relevant_line is not None:\n # Parse the entry (based on code from Marcanuy at https://github.com/marcanuy/cedict_utils, MIT license)\n hanzis = relevant_line.partition(\"[\")[0].split(\" \", 1)\n keywords = {\n \"meanings\": relevant_line.partition(\"/\")[2]\n .replace('\"', \"'\")\n .rstrip(\"/\")\n .strip()\n .split(\"/\"),\n \"traditional\": hanzis[0].strip(\" \"),\n \"simplified\": hanzis[1].strip(\" \"),\n # Take the content in between the two brackets\n \"pinyin\": relevant_line.partition(\"[\")[2].partition(\"]\")[0],\n \"jyutping\": relevant_line.partition(\"{\")[2].partition(\"}\")[0],\n \"raw_line\": relevant_line,\n }\n\n formatted_line = '\\n\\n**Cantonese Meanings**: \"{}.\"'.format(\n \"; \".join(keywords[\"meanings\"])\n )\n formatted_line += (\n f\" 
([CC-Canto](https://cantonese.org/search.php?q={cantonese_word}))\"\n )\n for i in range(0, 9):\n keywords[\"jyutping\"] = keywords[\"jyutping\"].replace(\n str(i), f\"^{str(i)} \"\n ) # Adds syntax for tones\n keywords[\"jyutping\"] = (\n keywords[\"jyutping\"].replace(\" \", \" \").strip()\n ) # Replace double spaces\n\n general_dictionary[\"meaning\"] = formatted_line\n general_dictionary[\"pinyin\"] = keywords[\"pinyin\"]\n general_dictionary[\"jyutping\"] = keywords[\"jyutping\"]\n\n return general_dictionary", "title": "" }, { "docid": "13a0ed020c7f75d885b6395a7a4eab35", "score": "0.5277528", "text": "def search(self, word):\n cur = self.root\n for c in word:\n if c in cur.d:\n cur = cur.d[c]\n else:\n return False\n return True if 0 in cur.d else False", "title": "" }, { "docid": "49111f63cb2263f7c680c79069fbd7f9", "score": "0.52603996", "text": "def lookup_exact(self, normalized_spelling):\n if Word._get_cache().contains(normalized_spelling):\n return self.filter(words__normalized_spelling=normalized_spelling)\n else:\n return self.none()", "title": "" }, { "docid": "803904b6ab3d9386c944a427b4068ff7", "score": "0.5258035", "text": "def search(self, word):\n current = self.root\n n = len(word)\n\n i = 0\n while i < n:\n node_found = False\n\n for trie_node in current.children:\n if trie_node.value == word[i]:\n current = trie_node\n i += 1\n node_found = True\n break\n\n if not node_found:\n return (False, False)\n\n # if end of word reached, then increment weight to indicate popularity/frequency of access\n if current.end_of_word:\n current.weight += 1\n\n return (True, True if current.end_of_word else False)", "title": "" }, { "docid": "42edee9ef6aeba56c4525a2ed7d1dcee", "score": "0.5256944", "text": "def search(self, word):\n return self.searchre(word, self.root);", "title": "" }, { "docid": "78d40b8ca14697e036b719ed5f03f2fa", "score": "0.52565396", "text": "def get_word (self, word):\n for w in self.words:\n if w.word == word:\n return w\n return None", "title": "" }, { "docid": "2a145cf91aeed21da722c383ec995ae4", "score": "0.52554923", "text": "def check_word(self,word, guessedTag=\"\"):\t\n\t\tword=araby.stripTatweel(word);\n\t\tword_vocalised=word;\n\t\tword_nm=araby.stripTashkeel(word);\n\t\tresulted_text=u\"\";\n\t\tresulted_data=[];\n\t\t# if word is a pounctuation\n\t\tresulted_data+=self.check_word_as_pounct(word_nm);\n\t\t# Done: if the word is a stop word we have some problems,\n\t\t# the stop word can also be another normal word (verb or noun),\n\t\t# we must consider it in future works\n\t\t# if word is stopword allow stop words analysis\n\t\tresulted_data+=self.check_word_as_stopword(word_nm);\n\n\t\t#if word is verb\n\t\t# مشكلة بعض الكلمات المستبعدة تعتبر أفعلا أو اسماء\n\t\tif self.tagger.hasVerbTag(guessedTag) or self.tagger.isStopWordTag(guessedTag):\n\t\t\tresulted_data+=self.check_word_as_verb(word_nm);\n\t\t\t#print \"is verb\", rabti,len(resulted_data);\n\t\t#if word is noun\n\t\tif self.tagger.hasNounTag(guessedTag) or self.tagger.isStopWordTag(guessedTag):\t\t\t\n\t\t\tresulted_data+=self.check_word_as_noun(word_nm);\n\t\tif len(resulted_data)==0:\n\t\t\t#check the word as unkonwn\n\t\t\tresulted_data+=self.check_word_as_unknown(word_nm);\n\t\t\t#check if the word is nomralized and solution are equivalent\n\t\tresulted_data = self.check_normalized(word_vocalised, resulted_data)\n\t\t#check if the word is shadda like\n\t\tresulted_data = self.check_shadda(word_vocalised, resulted_data)\n\n\t\t#check if the word is vocalized like results\t\t\t\n\t\tif 
self.partial_vocalization_support:\n\t\t\tresulted_data=self.check_partial_vocalized(word_vocalised, resulted_data);\n\t\t# add word frequency information in tags\n\t\tresulted_data = self.addWordFrequency(resulted_data);\n\n\t\tif len(resulted_data)==0:\n\t\t\tresulted_data.append(wordCase.wordCase({\n\t\t\t'word':word, \n\t\t\t'affix': ('' , '', '', ''), \n\t\t\t'stem':'',\n\t\t\t'original':word,\n\t\t\t'vocalized':word,\n\t\t\t'tags':u'',\n\t\t\t'type':'unknown',\n\t\t\t'root':'',\n\t\t\t'template':'',\n\t\t\t'freq':self.wordfreq.getFreq(word, 'unknown'),\n\t\t\t'syntax':'',\n\t\t\t})\n\t\t\t);\n\t\treturn resulted_data;", "title": "" }, { "docid": "2ad525da66c886ac0b4a95585aa3a0dd", "score": "0.5247887", "text": "def search(self, word):\n node = self.root\n for chars in word:\n node = node.data.get(chars)\n if not node:\n return False\n return node.is_word #", "title": "" }, { "docid": "83d31c22d1e8665ce3abde1012f1138f", "score": "0.52335036", "text": "def find(self, word):\n if len(word) < 3:\n return \"at least 3 chars\"\n\n word = word.lower().strip()\n char_list = list(word)\n char_list.sort()\n sign = \"\".join(char_list)\n\n matched_words = self.trie.find(sign)\n self.trie.reset()\n if len(matched_words) == 0:\n return \"not find anagrams\"\n else:\n sign, score = self.high_score(matched_words)\n return dict.data[sign][0]", "title": "" }, { "docid": "75bec44b7b4ae79e680262c4f945746f", "score": "0.5222018", "text": "def search_word(word):\r\n\r\n word = word.strip()\r\n if word.startswith(' '):\r\n return None\r\n\r\n for char in word:\r\n if char not in kanas and char not in letters and char not in splitch:\r\n break\r\n else:\r\n return None\r\n\r\n search_url = BASIC_URL + word\r\n try:\r\n content_str = requests.get(search_url).content.decode('utf-8')\r\n except requests.exceptions.ConnectionError:\r\n print('Error! 
Connection failed!\\nOn searching %s' % word)\r\n return None\r\n\r\n str_1_re = str_1_start + str_re_bracket + str_1_end\r\n re_1 = re.compile(str_1_re)\r\n result_1 = re_1.search(content_str)\r\n\r\n if result_1:\r\n return result_1.group(1)\r\n\r\n str_2_re = str_2_start + str_re_bracket + str_2_end\r\n re_2 = re.compile(str_2_re)\r\n result_2 = re_2.search(content_str)\r\n\r\n if result_2:\r\n return result_2.group(1)\r\n\r\n return None", "title": "" }, { "docid": "95fbebcd5615fa5e42f692cecf9c6ccf", "score": "0.5215799", "text": "def check_all_dependents_of_word_in_ref_are_in_hyp(c, wordIndex):\n depIndex = wordIndex - 1\n# print('target =', wordIndex)\n # look for a dependent of word to its left in reference\n while (depIndex >=0) :\n# print('depIndex = ', depIndex)\n govRefIndex = int(c.getBuffer().getWord(depIndex).getFeat('GOVREF')) + depIndex\n# print(\"govRefIndex = \", govRefIndex)\n if govRefIndex == wordIndex : # dep is a dependent of word in ref \n #check that dep has the same governor in hyp \n govHypIndex = int(c.getBuffer().getWord(depIndex).getFeat('GOV')) + depIndex\n# print(depIndex, 'is dependent ')\n if govHypIndex != govRefIndex :\n# print('wrong gov (', govHypIndex, ')');\n return False\n depIndex -= 1\n\n sentenceChange = False\n depIndex = wordIndex + 1\n while depIndex < c.getBuffer().getLength() :\n# print('depIndex = ', depIndex)\n govRefIndex = int(c.getBuffer().getWord(depIndex).getFeat('GOVREF')) + depIndex\n# print(\"govRefIndex = \", govRefIndex)\n if(govRefIndex == wordIndex): # dep is a dependent of word in ref\n govHypIndex = int(c.getBuffer().getWord(depIndex).getFeat('GOV')) + depIndex\n# print(depIndex, 'is dependent ')\n if govHypIndex != govRefIndex :\n# print('wrong gov (', govHypIndex, ')');\n return False\n depIndex += 1\n\n return True", "title": "" }, { "docid": "0f79bbbcaf448bb2753aa735ab0e90b7", "score": "0.5213169", "text": "def search(self, word):\n return self.search_word(self.root, word, 0, len(word))", "title": "" }, { "docid": "089065648193f8050512ae756019ff02", "score": "0.5206609", "text": "def search(self, word):\n current = self.root\n for letter in word:\n current = current.children.get(letter)\n if current is None:\n return False\n return current.is_word", "title": "" }, { "docid": "cc150e7652a471dad4efa9ae94880637", "score": "0.5205561", "text": "def paraphrase_sentence( sentence, model ):\n # 3 implementations:\n # if the first letter is equal to the first letter of w, move to the next one.\n # don't use the letter e at all\n # spelling corrections\n\n blob = textblob.TextBlob( sentence )\n print(\"The sentence's words are\")\n LoW = blob.words\n print(LoW)\n\n NewLoW = \"\"\n for w in LoW:\n \n if w not in model:\n NewLoW += w + \" \"\n else:\n \n w_alternatives = model.most_similar(positive=[w], topn=100)\n\n counter = 0\n\n for i in range(len(w_alternatives)):\n \n first_alternative, first_alternative_score = w_alternatives[i] # initial one!\n\n if (first_alternative[0] != w[0]):\n if 'e' not in first_alternative:\n break\n else:\n counter += 1\n \n if counter == len(w_alternatives):\n first_alternative, first_alternative_score = w_alternatives[0]\n else: \n NewLoW += first_alternative + \" \"\n \n # you should change this so that it returns a new string (a new sentence),\n # NOT just print a list of words (that's what's provided in this starter code)\n \n NewLoW = NewLoW[:-1]\n \n NewLoW = textblob.TextBlob(NewLoW)\n NewLoW = NewLoW.correct()\n NewLoW = NewLoW.words\n \n NewSentence = \"\"\n \n for x in NewLoW:\n 
NewSentence += x + \" \"\n \n NewSentence = NewSentence[:-1] \n \n return NewSentence", "title": "" }, { "docid": "a9ed18f5bd662bfde151aaf8fe66b769", "score": "0.51939875", "text": "def fuzzy_lookup(tokens, rule_set):\n # we'll have to de-metaphone these to decide what part of\n # the phrase needs to be passed on...\n phonetic = metaphone(''.join(tokens))\n for start in range(len(phonetic)):\n table = rule_set\n for offset, char in enumerate(phonetic[start:]):\n if char in table:\n table = table[char]\n elif table is not rule_set:\n if PHRASE_MARKER in table:\n return table[PHRASE_MARKER], phonetic[start:]\n if WORD_MARKER in table:\n return table[WORD_MARKER], phonetic[start:]\n elif None in table:\n return table[None], phonetic[start : start + offset]\n return [], ''", "title": "" }, { "docid": "dd82e57f0e8755748a931f546571fbeb", "score": "0.51849705", "text": "def search(self, answer, par_title):\n main_answer = self.ans.get_main_words(answer)\n #print '<br>main lemmata of answer: ',main_answer,'<br>' #for debugging\n\n main_par = self.ans.get_main_words(par_title)\n #print '<br>main lemmata of par_title: ',main_par,'<br><br>' #for debugging\n\n success = '1'\n\n for lemma in main_answer:\n if lemma not in main_par:\n success = '0'\n break\n return success", "title": "" }, { "docid": "6b60a1acdfeb7595fa38611f7e45d8a9", "score": "0.51840717", "text": "def find_pronoun(sent):\n pronoun = None\n\n for word, part_of_speech in sent.pos_tags:\n # Disambiguate pronouns\n if part_of_speech == 'PRP' and word.lower() == 'you':\n pronoun = 'I'\n elif part_of_speech == 'PRP' and word == 'I':\n # If the user mentioned themselves, then they will definitely be the pronoun\n pronoun = 'You'\n return pronoun", "title": "" }, { "docid": "518c9aa1c3370375c9ae9ef71f96d3d1", "score": "0.5174752", "text": "def search(self, word):\r\n return any(\r\n self.magicdict.get(c, 0) > 1\r\n or self.magicdict.get(c, 0) == 1 and word not in self.magicset\r\n for c in self._candidate(word))", "title": "" }, { "docid": "ce7365e8581c90bfb23003d3a2ed6ead", "score": "0.51745605", "text": "def busqueda(self, xPers):", "title": "" }, { "docid": "fd7ac766c0de25cc56089e8042174747", "score": "0.5173696", "text": "def searchTextString(source, topic, people):\r\n listWordsTopic = []\r\n if topic != -1:\r\n listWordsTopic = vocTopics[topic][language_in]\r\n for i in range(len(listWordsTopic)):\r\n listWordsTopic[i] = listWordsTopic[i].encode('utf-8')\r\n listWordsPeople = []\r\n if people != -1:\r\n listWordsPeople = vocPeople[people][language_in]\r\n for i in range(len(listWordsPeople)):\r\n listWordsPeople[i] = listWordsPeople[i].encode('utf-8')\r\n print(\"listWordsTopic\", listWordsTopic)\r\n print(\"listWordsPeople\", listWordsPeople)\r\n foundSource = None\r\n print(\"Searching....\")\r\n foundResults = {} #key, topic, people\r\n if (source == 'bible' or source == -1):\r\n foundSource = 'bible'\r\n for book in Bible:\r\n #print(\"book\", book)\r\n for bookNum in Bible[book]:\r\n #print(\"bookNum\", bookNum)\r\n for verseNum in Bible[book][bookNum]:\r\n #print(\"verseNum\", verseNum)\r\n #print(Bible[book][bookNum][verseNum])\r\n #print(Bible[book][bookNum][verseNum][language_out])\r\n if (language_out in Bible[book][bookNum][verseNum]): #check that language exists\r\n foundKey = BibleDataToID(book, bookNum, verseNum)\r\n wordInSentence(foundSource, listWordsTopic, 'topic', Bible[book][bookNum][verseNum][language_out], foundKey, foundResults)\r\n wordInSentence(foundSource, listWordsPeople, 'people', 
Bible[book][bookNum][verseNum][language_out], foundKey, foundResults)\r\n\r\n if (source == 'pray' or source == -1):\r\n foundSource = 'pray'\r\n for prayer in prayers:\r\n for part in prayers[prayer]['parts']:\r\n #print(prayers[prayer]['parts'][part])\r\n if (language_out in prayers[prayer]['parts'][part]): #check that language exists\r\n foundKey = prayer\r\n wordInSentence(foundSource, listWordsTopic, 'topic', prayers[prayer]['parts'][part][language_out], foundKey, foundResults)\r\n wordInSentence(foundSource, listWordsPeople, 'people', prayers[prayer]['parts'][part][language_out], foundKey, foundResults)\r\n\r\n\r\n if (source == 'pope' or source == -1): #exactly the same as prayers\r\n foundSource = 'pope'\r\n for popeword in pope:\r\n for part in pope[popeword]['parts']:\r\n if (language_out in pope[popeword]['parts'][part]): #check that language exists\r\n foundKey = popeword\r\n wordInSentence(foundSource, listWordsTopic, 'topic', pope[popeword]['parts'][part][language_out], foundKey, foundResults)\r\n wordInSentence(foundSource, listWordsPeople, 'people', pope[popeword]['parts'][part][language_out], foundKey, foundResults)\r\n\r\n\r\n if (source == 'quotes' or source == -1): #exactly the same as prayers\r\n foundSource = 'quotes'\r\n for quote in quotes:\r\n for part in quotes[quote]['parts']:\r\n if (language_out in quotes[quote]['parts'][part]): #check that language exists\r\n foundKey = quote\r\n wordInSentence(foundSource, listWordsTopic, 'topic', quotes[quote]['parts'][part][language_out], foundKey, foundResults)\r\n wordInSentence(foundSource, listWordsPeople, 'people', quotes[quote]['parts'][part][language_out], foundKey, foundResults)\r\n\r\n #print(\"foundResults before saints:\", foundResults)\r\n if (people != -1):\r\n if people[0].isdigit(): #if it's a date, the query is a saint\r\n foundSource = 'saints'\r\n foundKey = people[0:len(people)-1] + 's'\r\n if foundKey not in foundResults:\r\n foundResults[foundKey] = {}\r\n foundResults[foundKey]['source'] = foundSource\r\n foundResults[foundKey]['topic'] = 0\r\n foundResults[foundKey]['people'] = 1\r\n\r\n\r\n #print(\"foundResults:\", foundResults)\r\n #filter case of AND (as so far it collected OR)\r\n if (topic != -1 and people != -1):\r\n for result in list(foundResults): #converted to a list to force a copy of the keys to avoid runtime error\r\n if foundResults[result]['topic']==0 or foundResults[result]['people'] ==0:\r\n del foundResults[result]\r\n #print(\"deleted\", result)\r\n\r\n #print(\"entriesSkip\", entriesSkip)\r\n for result in list(foundResults):\r\n #print([-1, foundResults[result]['source'], topic, people, result])\r\n if ([-1, foundResults[result]['source'], topic, people, result] in entriesSkip):\r\n del foundResults[result]\r\n #print(\"skipping\", result)\r\n\r\n print(\"filtered foundResults:\", foundResults)\r\n\r\n \"\"\"\r\n foundResults contains for each key:\r\n the source where it has been found, and the flags topic and people, which are 0/1\r\n the variable foundSource will be overwritten and is not usable\r\n topic and people are the variables containing the original query parameters\r\n \"\"\"\r\n \r\n if len(foundResults) > 0: \r\n chosenResult = random.choice(list(foundResults)) #chosenResult is a key of dictionary\r\n if ([-1, source, topic, people, chosenResult, '0'] not in entries):\r\n print(\"adding to the entries\")\r\n entries.append([-1, source, topic, people, chosenResult, '0'])\r\n print(\"chosenResult:\",chosenResult)\r\n #print(\"entries\", entries)\r\n\r\n return 
[foundResults[chosenResult]['source'], chosenResult]\r\n else:\r\n return [None, None]", "title": "" }, { "docid": "fce05c06e3b70acbfd536b5a5375ee95", "score": "0.51694447", "text": "def search(self, word: str) -> bool:\n trie = self.lookup\n\n def _helper(word, trie):\n if '#' in trie:\n return True\n\n if word[0]=='.':\n for k in trie.keys():\n if _helper(word[1:], trie[k]):\n return True\n return False\n elif word[0] in trie:\n return _helper(word[1:], trie[word[0]])\n else:\n return False\n \n return _helper(word, trie)", "title": "" }, { "docid": "34b671a639e7f2f78a29b0e96a68b0ca", "score": "0.51692384", "text": "def search(self, sentence):\n docs = self.solr.query(nltk.word_tokenize(sentence))\n for doc in docs:\n print \" \".join(doc[\"tokens\"])", "title": "" }, { "docid": "73f60bf9569920fa87bdb9f44707ccf4", "score": "0.5167708", "text": "def __contains__(self, word: str) -> bool:\n word = word.lower()\n for w in self.words:\n if word == w:\n return True\n return False", "title": "" }, { "docid": "f8e1ce0bfed13370e770dedcdaafb566", "score": "0.51632833", "text": "def search(term):\n try:\n for ds in thesaurus.keys():\n if term in thesaurus[ds].speicher:\n #-->Ueberprufen ob das Term in self.speicher\n print thesaurus[ds]\n else:\n return False\n return True\n except:\n print \"Exception:\\nsearch() funktion.\"", "title": "" }, { "docid": "2e8d81790ed622e692adc7fbc12c6243", "score": "0.5159494", "text": "def search(self, word):\n curr = self.root\n\n for char in word:\n if char in curr.children:\n curr = curr.children[char]\n else:\n return False\n\n return curr.word", "title": "" }, { "docid": "0eddfb92df58fbf9cd8f12378037ebba", "score": "0.51592755", "text": "def phonetics(self):\n status = False\n for phile in self.files.all():\n for tag in phile.tags.all():\n if tag.name == 'Phonetics':\n status = phile.phile.name\n break\n return status", "title": "" }, { "docid": "a9abed17f7360fba0ae9fdb6a7d3160b", "score": "0.51582825", "text": "def find(self, word):\n letters = list(word)\n node = self._root\n \n for letter in letters:\n if letter not in node._children:\n return False\n node = node._children[letter]\n if node.isWord:\n if node._value:\n return node._value\n else:\n return True\n else:\n return False", "title": "" }, { "docid": "62d3d7fcae0609fe97563d2dd63891f3", "score": "0.51580274", "text": "def search(self, word):\n i = 0\n it = self.root\n while i < len(word):\n if len(it.children) > 0:\n k = bisect.bisect_left(it.children, TreeNode(word[i]))\n if k < len(it.children) and it.children[k].val == word[i]:\n it = it.children[k]\n i += 1\n else:\n break\n else:\n break\n if i != len(word) or not it.is_word:\n return False\n else:\n return True", "title": "" }, { "docid": "e3cec28d40485563f092fa41b4c7ca69", "score": "0.5153317", "text": "def search(self, word):\n current = self.root\n for i in word:\n if current.dict.get(i,None)==None:\n return False\n current = current.dict[i]\n\n if current.isEOW==True:\n return True\n else:\n return False", "title": "" }, { "docid": "7e50da15c7e6884f2df967d049fb96df", "score": "0.51523215", "text": "def search(self, word: str) -> bool:\n current = self.root\n for i in word:\n if i in current.node.keys():\n current = current.node[i]\n else:\n return False\n return current.isword", "title": "" }, { "docid": "b30b7b67c28f90d3021da2d79927ad56", "score": "0.5147014", "text": "def easy_search_terms(self, sentence, esource, ths, rules, off_list=None):\n\t\treduced_term_list = []\n\t\tsentence_terms = []\n\t\tsentence_terms_text = 
[]\n\t\tentity_tokens = []\n\n\n\t\t#Create a gazette with words that start with any word of the sentence\n\t\t#Why was this after next section?\n\t\tfor token in sentence.tokens:\n\t\t\tptoken = str(token.text.encode(\"utf-8\")).lower()\n\t\t\tif ptoken in self.term_dic:\n\t\t\t\tfor term in self.term_dic[ptoken]:\n\t\t\t\t\t#print term\n\t\t\t\t\treduced_term_list.append(term)\n\n\t\tif off_list != None:\n\t\t\tif \"twice_validated\" in rules:\n\t\t\t\tfor term in off_list + reduced_term_list:\n\t\t\t\t\ttsplit = term.split(\" \")\n\t\t\t\t\tif len(tsplit) == 2:\n\t\t\t\t\t\tt = tsplit[1] + \" in the \" + tsplit[0]\n\t\t\t\t\t\tt2 = tsplit[1] + \" of the \" + tsplit[0]\n\t\t\t\t\t\t#print sentence.sid, t\n\t\t\t\t\t\tif t in sentence.text.lower() or t2 in sentence.text.lower():\n\t\t\t\t\t\t\t#print t\n\t\t\t\t\t\t\treduced_term_list.append(tsplit[1] + \" in the \" + tsplit[0])\n\t\t\t\t\t\t\treduced_term_list.append(tsplit[1] + \" of the \" + tsplit[0])\n\t\t\t\t\tif \"type\" in term:\n\t\t\t\t\t\ti = term.find(\"type\")\n\t\t\t\t\t\tx = term[:i].strip()\n\t\t\t\t\t\ty = term[i:].strip()\n\t\t\t\t\t\treduced_term_list.append(x + \" \" + y)\n\t\t\t\t\t\treduced_term_list.append(x + \" \" + y.replace(\"type\", \"\"))\n\t\t\t\t\t\treduced_term_list.append(y + \" \" + x)\n\t\t\t\t\t\treduced_term_list.append(y + \" \" + x.replace(\"type\", \"\"))\n\n\n\n\t\t#Iterate gazette and check if the sentence has any exact match\n\t\tfor term in reduced_term_list:\n\t\t\tterm_tokens = nltk.word_tokenize(term.lower())\n\t\t\ttoken_flag = False\n\t\t\tfor token in sentence.tokens:\n\t\t\t\tif term_tokens[0].lower() == token.text.lower():\n\t\t\t\t\ttoken_flag = True\n\t\t\t\t\tstart_index = sentence.tokens.index(token)\n\t\t\tif token_flag:\t\t\t\t\t\n\t\t\t\tend_index = start_index + len(term_tokens)\n\t\t\t\tentity_tokens = []\n\t\t\t\tfor token in sentence.tokens[start_index:end_index]:\n\t\t\t\t\tentity_tokens.append(token.text)\n\t\t\t\t#Check if gazette term is the same as sentence tokens for that term.\t\t\t\t\t\t\t\t\n\t\t\t\tif term_tokens == [x.lower() for x in entity_tokens]:\n\t\t\t\t\tstart = sentence.tokens[start_index:end_index][0].dstart\n\t\t\t\t\tend = sentence.tokens[start_index:end_index][-1].dend\n\t\t\t\t\tterm = str(sentence.text[sentence.tokens[start_index:end_index][0].start:sentence.tokens[start_index:end_index][-1].end])\t\t\t\t\t\n\t\t\t\t\tsentence_terms.append((start, end, term))\n\t\t\t\t\tsentence_terms_text.append(term)\n\t\t\t\t\t#print term\n\t\t\t\t\t\n\n\t\t### RULES FOR VALIDATION (maybe should be in functions)\n\t\t#Undidented next line. 
See if it changes reslts.\n\t\tfor token in sentence.tokens: \t# sentence and adds the combination and next word\n\t\t\tif \"posgowords\" in rules: #Tries to find a combination of go_word and pos_go_word in\n\t\t\t\tpos_flag = False\t\t\t# if next word is not a noun, looks for next word.\n\t\t\t\ttok = str(token.text.encode(\"utf-8\").strip().lower())\n\t\t\t\tif tok in go_words:\n\t\t\t\t\ttlist = []\n\t\t\t\t\tfor word in pos_go_words:\n\t\t\t\t\t\tterm = tok + \" \" + word\n\t\t\t\t\t\tif term in str(sentence.text.encode(\"utf-8\")):\n\t\t\t\t\t\t\ttlist.append(term)\n\t\t\t\t\tif len(tlist) > 0:\n\t\t\t\t\t\tterm = max(tlist, key=len)\n\t\t\t\t\t\tl = len(term.split(\" \"))\n\t\t\t\t\t\tindex_start = sentence.tokens.index(token)\n\t\t\t\t\t\tindex_end = index_start + l + 1 #+1 for next word\n\t\t\t\t\t\tterm = sentence.text[sentence.tokens[index_start:index_end][0].start:sentence.tokens[index_start:index_end][-1].end]\n\t\t\t\t\t\tif sentence.tokens[index_end-1].pos != \"NN\" and sentence.tokens[index_end-1].pos != \"NNS\": #\n\t\t\t\t\t\t\tindex_end += 1\n\t\t\t\t\t\t\tterm = sentence.text[sentence.tokens[index_start:index_end][0].start:sentence.tokens[index_start:index_end][-1].end]\n\t\t\t\t\t\tif index_end < len(sentence.tokens):\n\t\t\t\t\t\t\tif sentence.tokens[index_end].pos == \"NN\" or sentence.tokens[index_end].pos == \"NNS\":\n\t\t\t\t\t\t\t\tindex_end += 1\n\t\t\t\t\t\t\t\tterm = sentence.text[sentence.tokens[index_start:index_end][0].start:sentence.tokens[index_start:index_end][-1].end]\n\t\t\t\t\t\tsentence_terms.append((sentence.tokens[index_start:index_end][0].dstart, sentence.tokens[index_start:index_end][-1].dend, str(term.strip())))\n\t\t\t\t\n\t\t\tif \"gowords\" in rules:\n\t\t\t\tgo_flag = False\n\t\t\t\tif str(token.text.encode(\"utf-8\")).strip().lower() in go_words:\n\t\t\t\t\tindex = sentence.tokens.index(token)\n\t\t\t\t\tfor word in go_words:\n\t\t\t\t\t\tterm = str(sentence.tokens[index-1].text) + \" \" + word\n\t\t\t\t\t\tif term in reduced_term_list:\n\t\t\t\t\t\t\t#print term, \"---\", token.text, \"---\", sentence.text\n\t\t\t\t\t\t\tgo_flag = True\n\t\t\t\tif go_flag and index-1 > 0 and index+1 < len(sentence.tokens):\n\t\t\t\t\tprint \"********\"\n\t\t\t\t\tstart = sentence.tokens[index-1:index+1][0].dstart\n\t\t\t\t\tend = sentence.tokens[index-1:index+1][-1].dend\n\t\t\t\t\tterm = sentence.text[sentence.tokens[index-1:index+1][0].start:sentence.tokens[index-1:index+1][-1].end]\n\t\t\t\t\tsentence_terms.append((start, end, str(term).strip()))\n\n\n\t\tif \"longterms\" in rules: #Add terms that are longer than the ones that exist.\n\t\t\tsentence_entities = [] \n\t\t\tfor s in sentence.entities.elist:\n\t\t\t\tif s.startswith(esource):\n\t\t\t\t\tsentence_entities = [str(x.text.encode(\"utf-8\")) for x in sentence.entities.elist[s]]\n\n\t\t\tfor term in [x[2] for x in sentence_terms] + sentence_entities:\n\t\t\t\tterm_tokens = nltk.word_tokenize(term.strip().lower())\n\t\t\t\tfor token in sentence.tokens:\n\t\t\t\t\tif term_tokens[0].lower() == token.text.lower():\n\t\t\t\t\t\tstart_index = sentence.tokens.index(token)\n\t\t\t\t\t\tend_index = start_index + len(term_tokens)\n\t\t\t\t\t\tif term_tokens == [str(x.text) for x in sentence.tokens[start_index:end_index]]:\n\t\t\t\t\t\t\t#Look for bigger term to the left\n\t\t\t\t\t\t\tif start_index > 0:\n\t\t\t\t\t\t\t\tif sentence.tokens[start_index-1].text == \"and\" and sentence.tokens[end_index-1].text in go_words:\n\t\t\t\t\t\t\t\t\ti = 2\n\t\t\t\t\t\t\t\t\twhile \",\" in 
sentence.tokens[start_index-i:start_index+1-i][0].text:\n\t\t\t\t\t\t\t\t\t\tterm = \" \".join([x.text for x in sentence.tokens[start_index-1-i:end_index]]).replace(\" ,\", \",\")\n\t\t\t\t\t\t\t\t\t\tstart = sentence.tokens[start_index-1-i:end_index][0].dstart\n\t\t\t\t\t\t\t\t\t\tend = sentence.tokens[start_index-1-i:end_index][-1].dend\n\t\t\t\t\t\t\t\t\t\tsentence_terms.append((start, end, str(term.strip())))\n\t\t\t\t\t\t\t\t\t\ti += 2\n\n\t\t\t\t\t\t\t#look for bigger term to the right (bigger than 2)\n\t\t\t\t\t\t\tif end_index < len(sentence.tokens):\n\t\t\t\t\t\t\t\tif sentence.tokens[end_index].text == \",\" and sentence.tokens[start_index].text in go_words:\n\t\t\t\t\t\t\t\t\ti = 2\n\t\t\t\t\t\t\t\t\twhile \",\" in sentence.tokens[end_index+i:end_index+1+i][0].text:\n\t\t\t\t\t\t\t\t\t\tterm = \" \".join([x.text for x in sentence.tokens[start_index:end_index+1+i]]).replace(\" ,\", \",\")\n\t\t\t\t\t\t\t\t\t\tstart = sentence.tokens[start_index:end_index+1+i][0].dstart\n\t\t\t\t\t\t\t\t\t\tend = sentence.tokens[start_index:end_index+1+i][-1].dend\n\t\t\t\t\t\t\t\t\t\tif sentence.tokens[start_index:end_index+1+i][-1].text == \",\":\n\t\t\t\t\t\t\t\t\t\t\tend -=1\n\t\t\t\t\t\t\t\t\t\t\tterm = term[:-1]\n\t\t\t\t\t\t\t\t\t\tsentence_terms.append((start, end, str(term.strip())))\n\t\t\t\t\t\t\t\t\t\ti += 2\n\t\t\t\t\t\t\t\t\tterm = \" \".join([x.text for x in sentence.tokens[start_index:end_index+1+i]]).replace(\" ,\", \",\")\n\t\t\t\t\t\t\t\t\tstart = sentence.tokens[start_index:end_index+1+i][0].dstart\n\t\t\t\t\t\t\t\t\tend = sentence.tokens[start_index:end_index+1+i][-1].dend\n\t\t\t\t\t\t\t\t\tif sentence.tokens[start_index:end_index+1+i][-1].text == \",\":\n\t\t\t\t\t\t\t\t\t\tend -=1\n\t\t\t\t\t\t\t\t\t\tterm = term[:-1]\n\t\t\t\t\t\t\t\t\tsentence_terms.append((start, end, str(term.strip())))\n\t\t\t\t\t\t\t#Check if its a double term ex: \"x and y anomalies\"\n\t\t\t\t\t\t\tif start_index > 0:\n\t\t\t\t\t\t\t\tif sentence.tokens[start_index-1].text == \"and\":\n\t\t\t\t\t\t\t\t\tterm_flag = False\n\t\t\t\t\t\t\t\t\ttok1 = sentence.tokens[start_index-2:start_index-1][0].text\n\t\t\t\t\t\t\t\t\ttok2 = sentence.tokens[end_index-1].text\n\t\t\t\t\t\t\t\t\tif str(tok2) in go_words:\n\t\t\t\t\t\t\t\t\t\tfor word in go_words:\n\t\t\t\t\t\t\t\t\t\t\tjoin_tok = str(tok1) + \" \" + word\n\t\t\t\t\t\t\t\t\t\t\tif join_tok in reduced_term_list:\n\t\t\t\t\t\t\t\t\t\t\t\tterm_flag = True\n\t\t\t\t\t\t\t\t\tif term_flag:\n\t\t\t\t\t\t\t\t\t\tstart = sentence.tokens[start_index-2:end_index][0].dstart\n\t\t\t\t\t\t\t\t\t\tend = sentence.tokens[start_index-2:end_index][-1].dend\n\t\t\t\t\t\t\t\t\t\tterm = sentence.text[sentence.tokens[start_index-2:end_index][0].start:sentence.tokens[start_index-2:end_index][-1].end]\n\t\t\t\t\t\t\t\t\t\tsentence_terms.append((start, end, str(term).strip()))\n\n\t\tif \"small_ent\" in rules: #Remove smaller entities\n\t\t\tsmaller_entities = set([])\n\t\t\tfor a in sentence_terms:\n\t\t\t\tfor b in sentence_terms_text + entity_tokens:\n\t\t\t\t\tif a[2].lower() in b and a[2].lower() != b:\n\t\t\t\t\t\t#logging.info(\"{}: small entity: {} / big entity: {}\".format(sentence.sid, a, b))\n\t\t\t\t\t\tsmaller_entities.add(a)\n\t\t\tfor x in smaller_entities:\n\t\t\t\tsentence_terms.remove(x)\n\t\t\t\t#logging.info(\"{}, removed smaller entity: {}\".format(sentence.sid, x))\n\n\t\tfor term in reduced_term_list: #Check if sentence is composed by a single entity\n\t\t\t#print term, str(sentence.text)\n\t\t\tif term.lower() == 
str(sentence.text).lower():\n\t\t\t\t#print term\n\t\t\t\tsentence_terms = []\n\t\t\t\tstart = 0\n\t\t\t\tend = len(term)\n\t\t\t\tterm = str(sentence.text)\t\t\t\t\t\n\t\t\t\tsentence_terms.append((start, end, term))\n\n\t\tsentence_terms = list(set(sentence_terms))\n\t\t#if len(sentence_terms) > 0:\n\t\t#\tprint sentence.sid, sentence_terms\n\t\treturn sentence_terms", "title": "" }, { "docid": "603d36f4ea8dd5954778da1408869859", "score": "0.5146544", "text": "def search(self, word: str) -> bool:\n point = self.root\n for ch in word:\n if point.childers[ch] is None:\n return False\n point = point.childers[ch]\n return point.is_end", "title": "" }, { "docid": "21b814248255a3308b1030b3e913265c", "score": "0.5143131", "text": "def search_word(self, word):\n current = self.root\n for letter in word:\n current = current.children.get(letter)\n if not current:\n return False\n return current.is_word", "title": "" }, { "docid": "5129a7afd385ec5a88186b9bc4b1788b", "score": "0.5137912", "text": "def search(self, word):\n def _dfs(trie, idx):\n \n if idx == len(word) and trie.get(\"final\") and trie[\"final\"] == word:\n return True\n char = word[idx]\n if char != '.' and char in trie:\n return _dfs(trie[char], idx + 1)\n else:\n valid = False\n for char in trie.keys():\n valid = valid or _dfs(trie[char], idx + 1)\n if valid:\n return True\n return False\n \n return _dfs(self._trie, 0)", "title": "" }, { "docid": "d4e18b7b98b229ae8b26a167c9a88a51", "score": "0.51302934", "text": "def match_nouns(self, nword):\n\n def filter_is_true(nid, test):\n \"\"\"Run a test on a particular noun, passed by ID.\"\"\"\n try:\n tmethod = self.filters[test.lstrip('!')]\n except KeyError:\n return True\n \n return tmethod(nid) ^ (test[0] == '!') # negate if test starts with !\n \n def get_matching_nouns(iword):\n return (self.get_nouns_by_name(self.sub_input_words(nword)) if nword[0] == '%'\n else filter(bool, (self.nouns.get(nid) for nid in nword.split(','))))\n \n if ':' in nword:\n # filter matching nouns using tests\n nword, tests = nword.split(':', 1)\n return set(noun for noun in get_matching_nouns(nword)\n if all(filter_is_true(noun.id, test) for test in tests.split(':')))\n else:\n return get_matching_nouns(nword)", "title": "" }, { "docid": "2474539b8ee1273f7aeece86997bb162", "score": "0.512346", "text": "def zh_word(self, word: str) -> str:\n\n alternate_meanings = []\n alternate_pinyin = ()\n alternate_jyutping = None\n\n eth_page = requests.get(\n \"https://www.mdbg.net/chinese/dictionary?page=worddict&wdrst=0&wdqb=c:\"\n + word,\n timeout=15,\n headers=self.zw_useragent,\n )\n tree = html.fromstring(eth_page.content) # now contains the whole HTML page\n word_exists = str(\n tree.xpath('//p[contains(@class,\"nonprintable\")]/strong/text()')\n )\n cmn_pronunciation = tree.xpath('//div[contains(@class,\"pinyin\")]/a/span/text()')\n # We only want the pronunciations to be as long as the input\n cmn_pronunciation = cmn_pronunciation[0 : len(word)]\n # We don't need a dividing character per pinyin standards\n cmn_pronunciation = \"\".join(cmn_pronunciation)\n\n # Check to not return anything if the entry is invalid\n if \"No results found\" in word_exists:\n # First we try to check our specialty dictionaries. Buddhist dictionary first. 
Then the tea dictionary.\n search_results_buddhist = self.__zh_word_buddhist_dictionary_search(\n tradify(word)\n )\n search_results_tea = self.__zh_word_tea_dictionary_search(simplify(word))\n search_results_cccanto = self.__zh_word_cccanto_search(tradify(word))\n\n # If both have nothing, we kick it down to the character search.\n if (\n search_results_buddhist is None\n and search_results_tea is None\n and search_results_cccanto is None\n ):\n logger.info(\n \"ZH-Word: No results found. Getting individual characters instead.\"\n )\n # This will split the word into character chunks.\n if len(word) < 2:\n to_post = self.zh_character(word)\n else: # The word is longer than one character.\n to_post = \"\"\n search_characters = list(word)\n for character in search_characters:\n to_post += \"\\n\\n\" + self.zh_character(character)\n return to_post\n\n # Otherwise, let's try to format the data nicely.\n if search_results_buddhist is not None:\n alternate_meanings.append(search_results_buddhist[\"meaning\"])\n alternate_pinyin = search_results_buddhist[\"pinyin\"]\n if search_results_tea is not None:\n alternate_meanings.append(search_results_tea[\"meaning\"])\n alternate_pinyin = search_results_tea[\"pinyin\"]\n if search_results_cccanto is not None:\n alternate_meanings.append(search_results_cccanto[\"meaning\"])\n alternate_pinyin = search_results_cccanto[\"pinyin\"]\n alternate_jyutping = search_results_cccanto[\"jyutping\"]\n logger.info(\n f\"ZH-Word: No results for word {word}, but results are in specialty dictionaries.\"\n )\n\n if len(alternate_meanings) == 0:\n # The standard search function for regular words.\n # Get alternate pinyin from a separate function. We get Wade Giles and Yale. Like 'Guan1 yin1 Pu2 sa4'\n try:\n py_split_pronunciation = tree.xpath(\n '//div[contains(@class,\"pinyin\")]/a/@onclick'\n )\n py_split_pronunciation = re.search(\n r\"\\|(...+)\\'\\)\", py_split_pronunciation[0]\n ).group(0)\n # Format it nicely.\n py_split_pronunciation = py_split_pronunciation.split(\"'\", 1)[0][\n 1:\n ].strip()\n alt_romanize = self.__zh_word_alt_romanization(py_split_pronunciation)\n except IndexError:\n # This likely means that the page does not contain that information.\n alt_romanize = (\"---\", \"---\")\n\n meaning = [\n div.text_content()\n for div in tree.xpath('//div[contains(@class,\"defs\")]')\n ]\n # This removes any empty spaces or commas that are in the list.\n meaning = [x for x in meaning if x not in [\" \", \", \"]]\n meaning = \"/ \".join(meaning)\n meaning = meaning.strip()\n\n # Obtain the Cantonese information.\n yue_page = requests.get(\n \"https://cantonese.org/search.php?q=\" + word,\n timeout=15,\n headers=self.zw_useragent,\n )\n yue_tree = html.fromstring(\n yue_page.content\n ) # now contains the whole HTML page\n yue_pronunciation = yue_tree.xpath(\n '//h3[contains(@class,\"resulthead\")]/small/strong//text()'\n )\n # This Len needs to be double because of the numbers\n yue_pronunciation = yue_pronunciation[0 : (len(word) * 2)]\n yue_pronunciation = iter(yue_pronunciation)\n yue_pronunciation = [\n c + next(yue_pronunciation, \"\") for c in yue_pronunciation\n ]\n\n # Combines the tones and the syllables together\n yue_pronunciation = \" \".join(yue_pronunciation)\n for i in range(0, 9):\n # Adds Markdown syntax\n yue_pronunciation = yue_pronunciation.replace(str(i), f\"^({str(i)}) \")\n yue_pronunciation = yue_pronunciation.strip()\n\n else: # This is for the alternate search with the specialty dictionaries.\n cmn_pronunciation = 
self.__zh_word_decode_pinyin(alternate_pinyin)\n alt_romanize = self.__zh_word_alt_romanization(alternate_pinyin)\n if alternate_jyutping is not None:\n yue_pronunciation = alternate_jyutping\n else:\n yue_pronunciation = \"---\" # The non-Canto specialty dictionaries do not include Jyutping pronunciation.\n meaning = \"\\n\".join(alternate_meanings)\n\n # Format the header appropriately.\n if tradify(word) == simplify(word):\n lookup_line_1 = str(\n \"# [{0}](https://en.wiktionary.org/wiki/{0}#Chinese)\".format(word)\n )\n else:\n lookup_line_1 = (\n \"# [{0} ({1})](https://en.wiktionary.org/wiki/{0}#Chinese)\".format(\n tradify(word), simplify(word)\n )\n )\n\n # Format the rest.\n lookup_line_1 += \"\\n\\nLanguage | Pronunciation\\n---------|--------------\"\n lookup_line_1 += (\n f\"\\n**Mandarin** (Pinyin) | *{cmn_pronunciation}*\\n**Mandarin** (Wade-Giles) | *{alt_romanize[1]}*\"\n f\"\\n**Mandarin** (Yale) | *{alt_romanize[0]}*\\n**Cantonese** | *{yue_pronunciation}*\"\n )\n\n # Add Hokkien and Hakka data.\n lookup_line_1 += self.__zh_character_min_hak(tradify(word))\n\n # Format the meaning line.\n if len(alternate_meanings) == 0:\n # Format the regular results we have.\n lookup_line_2 = f'\\n\\n**Meanings**: \"{meaning}.\"'\n\n # Append chengyu data if the string is four characters.\n if len(word) == 4:\n chengyu_data = self.__zh_word_chengyu(word)\n if chengyu_data is not None:\n logger.info(\"ZH-Word: >> Added additional chengyu data.\")\n lookup_line_2 += chengyu_data\n\n # We append Buddhist results if we have them.\n mainline_search_results_buddhist = (\n self.__zh_word_buddhist_dictionary_search(tradify(word))\n )\n if mainline_search_results_buddhist is not None:\n lookup_line_2 += mainline_search_results_buddhist[\"meaning\"]\n\n else: # This is for the alternate dictionaries only.\n lookup_line_2 = \"\\n\" + meaning\n\n # Format the footer with the dictionary links.\n lookup_line_3 = (\n \"\\n\\n\\n^Information ^from \"\n \"[^CantoDict](https://www.cantonese.sheik.co.uk/dictionary/search/?searchtype=1&text={0}) ^| \"\n \"[^MDBG](https://www.mdbg.net/chinese/dictionary?page=worddict&wdrst=0&wdqb=c:{0}) ^| \"\n \"[^Yellowbridge](https://yellowbridge.com/chinese/dictionary.php?word={0}) ^| \"\n \"[^Youdao](https://dict.youdao.com/w/eng/{0}/#keyfrom=dict2.index)\"\n )\n lookup_line_3 = lookup_line_3.format(word)\n\n # Combine everything together.\n to_post = lookup_line_1 + lookup_line_2 + \"\\n\\n\" + lookup_line_3\n logger.info(\n f\"ZH-Word: Received a lookup command for {word} in Chinese. 
Returned search results.\"\n )\n return to_post", "title": "" }, { "docid": "f11e2aedbf9cc55ef6eaa3a0d9f87405", "score": "0.5122512", "text": "def search(self, word: str) -> bool:\n stack = [(self.trie, 0)] \n while len(stack):\n ptr, i = stack.pop()\n \n if i == len(word):\n if ptr.is_word:\n return True\n elif word[i] == '.':\n for child in ptr.children:\n if child:\n stack.append((child, i+1))\n else:\n c = ord(word[i]) - ord('a')\n if ptr.children[c]:\n stack.append((ptr.children[c], i+1))\n \n return False", "title": "" }, { "docid": "47c9e87dd0adb9194f38e3ce3743f15f", "score": "0.51146907", "text": "def test_recognize(self):\n expected = set([\"banana\"])\n assert self.recognizer.recognize(\"ana\") == expected # \"ana\" should match to \"banana\" within a levenshtein threshold of 3", "title": "" }, { "docid": "b42b6b8b2de85ce68c9c2d7a629ebc82", "score": "0.5110162", "text": "def filter_pos(word, obj, linkage):\n # Filtering trigrams\n if obj.third_word:\n if word == obj.first_word:\n if not filter_check_conj(obj.second_word, obj.first_word,\n obj.first_word_pos, obj.third_word_pos, obj, linkage):\n if not filter_check_conj(obj.third_word, obj.first_word,\n obj.first_word_pos, obj.second_word_pos, obj, linkage):\n if not filter_check_trigrams(word, obj.first_word_pos, obj.second_word_pos,\n obj.third_word_pos, obj, linkage):\n filter_check_trigrams(word, obj.first_word_pos, obj.third_word_pos,\n obj.second_word_pos, obj, linkage)\n elif word == obj.second_word:\n if not filter_check_conj(obj.first_word, obj.second_word,\n obj.second_word_pos, obj.third_word_pos, obj, linkage):\n if not filter_check_conj(obj.third_word, obj.second_word,\n obj.first_word_pos, obj.second_word_pos, obj, linkage):\n if not filter_check_trigrams(word, obj.second_word_pos, obj.first_word_pos,\n obj.third_word_pos, obj, linkage):\n filter_check_trigrams(word, obj.second_word_pos, obj.third_word_pos,\n obj.first_word_pos, obj, linkage)\n else:\n if not filter_check_conj(obj.second_word, obj.third_word,\n obj.first_word_pos, obj.third_word_pos, obj, linkage):\n if not filter_check_conj(obj.first_word, obj.third_word,\n obj.third_word_pos, obj.second_word_pos, obj, linkage):\n if not filter_check_trigrams(word, obj.third_word_pos, obj.second_word_pos,\n obj.first_word_pos, obj, linkage):\n filter_check_trigrams(word, obj.third_word_pos, obj.first_word_pos,\n obj.second_word_pos, obj, linkage)\n # Filtering bigrams\n else:\n if word == obj.first_word:\n if obj.first_word_pos in self.possible_bigrams \\\n and obj.second_word_pos in self.possible_bigrams[obj.first_word_pos]:\n create_candidates_dict(self.filtered_candidates, word, linkage, [obj])\n else:\n if obj.second_word_pos in self.possible_bigrams \\\n and obj.first_word_pos in self.possible_bigrams[obj.second_word_pos]:\n create_candidates_dict(self.filtered_candidates, word, linkage, [obj])", "title": "" }, { "docid": "bc62ea8916b0b471b876b1cad20095d6", "score": "0.51050824", "text": "def __contains__(self, word: str) -> bool:\n return self.search(word) > 0", "title": "" }, { "docid": "3a481df1012cf275a9b824619bc2e25f", "score": "0.510192", "text": "def search(self, word: str) -> bool:\n cur = self\n i = 0\n while i < len(word) and word[i] in cur.children:\n cur = cur.children[word[i]]\n i += 1\n \n return i == len(word) and cur.count >= 1", "title": "" }, { "docid": "61a932810d8b77e3f4b7a92447938e25", "score": "0.51014376", "text": "def noun(self, w):\n if w in self.jap_noun_dict.keys():\n return self.jap_noun_dict[w]\n elif w in 
self.eng_noun_dict.keys():\n return self.eng_noun_dict[w]\n else:\n print('Noun', w, 'not found')\n assert False", "title": "" }, { "docid": "3fa4a689f9afd87da8e99c9a1e6fdb5d", "score": "0.50992703", "text": "def search(self, word):\n return self.root.search(word)", "title": "" }, { "docid": "2dd50c62527f5cd09869f2a88189be14", "score": "0.5097264", "text": "def solve_coref(self):\n nlp = LangModel.get_instance()\n text = self.to_txt()\n doc = nlp(text)\n tok_list = list(token.text_with_ws for token in doc)\n for cluster in doc._.coref_clusters:\n cluster_main_words = set(cluster.main.text.split(' '))\n for coref in cluster:\n if coref != cluster.main.text and bool(set(coref.text.split(' ')).intersection(cluster_main_words)) == False:\n tok_list[coref.start] = cluster.main.text + doc[coref.end - 1].whitespace_\n for i in range(coref.start + 1, coref.end):\n tok_list[i] = \"\"\n self.txt = \"\".join(tok_list)\n return self.txt", "title": "" }, { "docid": "8f7f0d35fa0e1cd283b287dc0393311f", "score": "0.50957876", "text": "def search(self, word):\n return self._search(self.root, word, 0)", "title": "" }, { "docid": "77b4946d7e621ac42c917b80ad3abc14", "score": "0.5095266", "text": "def lookup_word(self, target: str):\n return en_dict.__lookup(self.d, target, 0)", "title": "" }, { "docid": "a47dc1577d1ec3408a4ab60270bf47c2", "score": "0.50942844", "text": "def get_tags(self, word: str) -> Optional[str]:\n for tag, words in self._pos2words.items():\n if word.lower() in words:\n return tag", "title": "" }, { "docid": "f9899bdb2db89808e040b11e590047d7", "score": "0.5094146", "text": "def search(self, word):\n curr = self.root\n for s in word:\n if s in curr.sub_node:\n curr = curr.sub_node[s]\n else:\n return False\n return curr.flag", "title": "" }, { "docid": "d4630da3df1393ca25258f5a269e837b", "score": "0.50895876", "text": "def search(self, word: str) -> bool:\n cur = self.root\n for c in word:\n if c not in cur:\n return False\n cur = cur[c]\n return '#' in cur", "title": "" }, { "docid": "672a7ec3f9f53eb1e02fa0e6b1406a09", "score": "0.5087387", "text": "def check_word_as_noun(self,noun):\n\t\treturn self.nounstemmer.stemming_noun(noun);", "title": "" }, { "docid": "23a7d8dc2948f841c179d0b27f1ef186", "score": "0.50758886", "text": "def test_word_bound_regex_complex_search(parser: ShoulderBirdParser) -> None:\n msg = \"appreciated\"\n matches = parser.get_matches(\"101\", \"Delta\", msg)\n assert len(matches) == 0\n msg = \"preoct\"\n matches = parser.get_matches(\"101\", \"Delta\", msg)\n assert len(matches) == 1", "title": "" }, { "docid": "8f8e834a6398fb8144a96755bd62b200", "score": "0.5075861", "text": "def search_by_lemma(cli, args):\n wn = get_wn_profile(cli, args)\n get_synsets_by_term(wn, args.lemma, args.pos, compact=not args.detail, lang=args.lang)\n pass", "title": "" }, { "docid": "1e8e81a2e4d87b29ad435db03f4e5a7e", "score": "0.5073183", "text": "def analyze(self, word):\n res = self._get_constituents(word)\n res[\"related_words\"] = []\n if res[\"roots\"]:\n for root in res[\"roots\"]:\n res[\"related_words\"] += self._get_related_words(root)\n res[\"related_words\"] = list(set(res[\"related_words\"]))\n try:\n res[\"related_words\"].remove(res[\"original_word\"])\n except ValueError:\n pass\n return res", "title": "" }, { "docid": "34cb44f25d6b51e685777d047ca3a563", "score": "0.50690717", "text": "def isin_cmu(word):\n if type(word) == list or len(word.split(\" \")) > 1:\n if type(word)==str:\n word = word.split(\" \")\n for w in word:\n if w.lower() not in word_dict:\n return 
False\n return True\n return word.lower() in word_dict", "title": "" }, { "docid": "d4db5327b507b0ff07771e70cd404096", "score": "0.50672233", "text": "def search(self, word):\n\n def find(word, trie):\n\n if not word:\n return True\n\n first, rest = word[0], word[1:]\n\n if first == '.':\n\n\n for letter, child in trie.children.items():\n\n if find(rest, child):\n return True\n\n elif first not in trie.children:\n return False\n else:\n\n return find(rest, trie.children[first])\n\n return find(word, self.root) or False", "title": "" }, { "docid": "0255114554a107656ecf5d808312eb1a", "score": "0.5061024", "text": "def search_dict(meaning_dict, word):\n found = False\n filtered_dict_list = []\n if word.casefold() in meaning_dict:\n found = True\n filtered_dict_list = meaning_dict[word.casefold()]\n return filtered_dict_list, found", "title": "" }, { "docid": "c37748d7662274affe9d82e971554e25", "score": "0.5060229", "text": "def encontrando_letra(letra,word):\n a,b='áéíóú','aeiou'\n sintilde=str.maketrans(a,b)\n word=word.translate(sintilde)\n if letra in word:\n search= word.index(letra)\n posiciones=[i for i in range(0,len(word)) if letra==word[i]]\n else:\n posiciones=[]\n return posiciones", "title": "" }, { "docid": "07cebe19add845cac509a6314e71125c", "score": "0.50533223", "text": "def backward_nounsearch(nounphraselist,nounhead):\n\t#print \"Backward-Nounsearch\"\n\t#print nounphraselist, nounhead\n\tnounphraselist.reverse()\n\tfor np in nounphraselist:\n\t#\tprint np[0].reveal()\n\t\tgetsense=find_in_Extension(nounhead,np[0].extension)\n\t\tif getsense>-1:\t\t\t\t\t#remove unccessary senses\n\t\t\tnp[0].extension=[np[0].extension[getsense]]\n\t\t\treturn np\n\treturn 0", "title": "" }, { "docid": "0a0ad41eea397e29b7c3210eca591455", "score": "0.50527984", "text": "def test_common_words(self): \n filename = \"gutenberg.txt\"\n commonwords = CommonWords(filename)\n word = \"the \"\n count = commonwords.word_count(word)\n print(\"\\n\")\n print(f\"{word} exists {count} times\")", "title": "" }, { "docid": "e0a4cdc7de5e9013914fe8a4932f5629", "score": "0.50512457", "text": "def _lookup_words(\n self, words: List[str], word_dict: Dict[str, List[str]], n: int = 5\n ) -> Dict[str, Dict[str, Any]]:\n\n pronunciations: Dict[str, Dict[str, Any]] = {}\n\n # Dictionary uses upper-case letters\n dictionary_upper = self.profile.get(\"speech_to_text.dictionary_upper\", False)\n\n # Check words against dictionary\n unknown_words = set()\n for word in words:\n if dictionary_upper:\n lookup_word = word.upper()\n else:\n lookup_word = word.lower()\n\n pronounces = list(word_dict.get(lookup_word, []))\n in_dictionary = len(pronounces) > 0\n\n pronunciations[word] = {\n \"in_dictionary\": in_dictionary,\n \"pronunciations\": pronounces,\n }\n if not in_dictionary:\n unknown_words.add(word)\n\n # Guess pronunciations for unknown word\n if unknown_words:\n # Path to phonetisaurus FST\n g2p_path = self.profile.read_path(\n self.profile.get(\n f\"speech_to_text.{self.speech_system}.g2p_model\", \"g2p.fst\"\n )\n )\n\n g2p_casing = self.profile.get(\"speech_to_text.g2p_casing\", \"\").lower()\n\n # Case-sensitive mapping from upper/lower cased word back to original casing\n word_map: Dict[str, str] = {}\n\n with tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".txt\") as wordlist_file:\n # Write words to a temporary word list file\n for word in unknown_words:\n original_word = word\n if g2p_casing == \"upper\":\n # FST was trained with upper-case letters\n word = word.upper()\n elif g2p_casing == \"lower\":\n # 
FST was trained with loser-case letters\n word = word.lower()\n\n print(word, file=wordlist_file)\n word_map[word] = original_word\n\n wordlist_file.seek(0)\n\n # Output phonetisaurus results to temporary file\n with tempfile.NamedTemporaryFile(\n mode=\"w+\", suffix=\".txt\"\n ) as pronounce_file:\n # Use phonetisaurus to guess pronunciations\n g2p_command = [\n \"phonetisaurus-apply\",\n \"--model\",\n g2p_path,\n \"--word_list\",\n wordlist_file.name,\n \"--nbest\",\n str(n),\n ]\n\n self._logger.debug(repr(g2p_command))\n subprocess.check_call(g2p_command, stdout=pronounce_file)\n\n pronounce_file.seek(0)\n\n # Read results\n ws_pattern = re.compile(r\"\\s+\")\n\n for line in pronounce_file:\n parts = ws_pattern.split(line)\n word = word_map[parts[0].strip()]\n phonemes = \" \".join(parts[1:]).strip()\n pronunciations[word][\"pronunciations\"].append(phonemes)\n\n return pronunciations", "title": "" }, { "docid": "3680d23254344da739deaad8b23a4c0d", "score": "0.50512004", "text": "def lookup_query(query):\n if (dictionary_word.get(query, False)):\n return ('The reasult of search: ' + json.dumps(dictionary_word[query], indent=4))\n\n return ('Sorry, there is no such word :(')", "title": "" }, { "docid": "48bfbd167cb6b821ef0d717900781793", "score": "0.5049092", "text": "def search(self, word):\n return self.searchnode(self,word)", "title": "" }, { "docid": "c4899904cea8ebb8da156245d69d8793", "score": "0.50404733", "text": "def spellCheck(self, sentence: Sentence) -> Sentence:\n previousRoot = None\n result = Sentence()\n for i in range(sentence.wordCount()):\n word = sentence.getWord(i)\n fsmParses = self.fsm.morphologicalAnalysis(word.getName())\n if fsmParses.size() == 0:\n candidates = self.candidateList(word)\n bestCandidate = word.getName()\n bestRoot = word\n bestProbability = 0.0\n for candidate in candidates:\n fsmParses = self.fsm.morphologicalAnalysis(candidate)\n root = fsmParses.getParseWithLongestRootWord().getWord()\n if previousRoot is not None:\n probability = self.__nGram.getProbability(previousRoot.getName(), root.getName())\n else:\n probability = self.__nGram.getProbability(root.getName())\n if probability > bestProbability:\n bestCandidate = candidate\n bestRoot = root\n bestProbability = probability\n previousRoot = bestRoot\n result.addWord(Word(bestCandidate))\n else:\n result.addWord(word)\n previousRoot = fsmParses.getParseWithLongestRootWord().getWord()\n return result", "title": "" }, { "docid": "4ba20aa1eebd8e49b5094c92452c15a5", "score": "0.5039387", "text": "def search(self, word):\n node = self.root\n for item in word:\n if item in node.childrenValue:\n node = node.children[node.childrenValue[item]]\n else:\n return False\n if \"\" in node.childrenValue:\n return True\n else:\n return False", "title": "" } ]
a62790cae6789b1d12d43b936f35c3a6
Decoder block for albedo
[ { "docid": "e2767a3a5b8c9cf3aa792d1df864d5aa", "score": "0.5774206", "text": "def _albedo_decoder(albedo_mlp_output, encoder_op_each_layer):\n\t\tfilters = [ 256, 256, 256, 128, 64]\n\t\tdeconvolved = albedo_mlp_output\n\n\t\tfor i in range(5):\n\t\t\tdeconvolved_input = tf.concat(\n\t\t\t\t[deconvolved, encoder_op_each_layer[5-(i+1)]],\n\t\t\t\t3)\n\t\t\tdeconvolved = tf.layers.conv2d_transpose(\n\t\t\t\tdeconvolved_input,\n\t\t\t\tfilters = filters[i],\n\t\t\t\tkernel_size = 4,\n\t\t\t\tstrides = 2,\n\t\t\t\tpadding = \"SAME\",\n\t\t\t\tname = \"albedo_decoder_deconv2d_%d\" % i)\n\t\t\tdeconvlved = tf.layers.batch_normalization(\n\t\t\t\tdeconvolved,\n\t\t\t\ttraining=(mode == tf.estimator.ModeKeys.TRAIN),\n\t\t\t\tname = \"albedo_decoder_batch_norm_%d\" % i)\n\t\t\tdeconvolved = tf.nn.relu(\n\t\t\t\tdeconvolved,\n\t\t\t\tname=None)\n\t\talbedo_decoder_output = tf.layers.conv2d(\n\t\t\t\tdeconvolved,\n\t\t\t\tfilters = 3,\n\t\t\t\tkernel_size = 1,\n\t\t\t\tname = \"albedo_decoder_conv2d\")\n\t\treturn albedo_decoder_output", "title": "" } ]
[ { "docid": "626baf0b964a809a74e5496021d993f6", "score": "0.6972518", "text": "def _decode(self):\n pass", "title": "" }, { "docid": "38895bd4f86a4a78899953d8615e9326", "score": "0.68361384", "text": "def decoder(self, decoder: decoders.Decoder):\n pass", "title": "" }, { "docid": "cfdd22f5baf132fe9bbd0a8d7d221931", "score": "0.66675913", "text": "def _decode(self, decode_input):\n pass", "title": "" }, { "docid": "fc8a4d920cc0a168f628c6a30a917442", "score": "0.6611095", "text": "def doDecode(self, data):\r\n\t\traise NotImplementedError", "title": "" }, { "docid": "8172a3f12a2a7c8cc9be82e0e436f1d5", "score": "0.6397546", "text": "def getincrementaldecoder(encoding):\n\tpass", "title": "" }, { "docid": "05394b827274cdc9bd803e34820c19f2", "score": "0.6352971", "text": "def decode(self, data: bytes) -> Any:", "title": "" }, { "docid": "d25bfeabe6d7478697db77be9f0efeae", "score": "0.63112146", "text": "def PyCodec_Decoder(space, encoding):\n raise NotImplementedError", "title": "" }, { "docid": "656e43c228720bab15917f95c975a86a", "score": "0.62642276", "text": "def decode(self, object,final):\n\t\tpass", "title": "" }, { "docid": "02c7229d4908dae552e9e362d525bf82", "score": "0.6207092", "text": "def create_decoder(self):\n raise NotImplementedError", "title": "" }, { "docid": "6838eda268384ea3705705d0a9e2155d", "score": "0.6159395", "text": "def _decode(self, input):\n return input", "title": "" }, { "docid": "3b5279604d06a6f5f3644877ca5660ca", "score": "0.6148942", "text": "def decompress_stream(self,key): \n self.key = key \n #print \"++++++++++++ decompressing stream +++++++++++++++++++++++++\" ,key\n try: \n data = self.objects[self.key]\n start = data.find(\"stream\")\n end = data.find(\"endstream\")\n self.buff = data[start+6:].strip()\n if len(self.buff) < 2:return\n \n self.methods = self.get_compMethod(key,data)\n self.data = self.buff.strip() \n for self.method in self.methods:\n #print self.method\n if 'fl' == self.method.lower():\n self.data = decompress(self.data)\n if 'ascii85decode' == self.method.lower():\n self.data = ascii85decode(self.data)\n if 'asciihexdecode' == self.method.lower(): \n self.data = asciihexdecode(self.data)\n if 'lzwdecode' == self.method.lower():\n self.data = lzwdecode(self.data) \n \n if len(self.methods) == 0: \n self.handle_evasion(key,data[:start]) \n \n except Exception,err:\n pass \n #print sys.stderr.write('ERROR: %s\\n' % str(err)),key,self.methods\n y = \"\"\n #print filter(lambda x: chr(ord(x)),self.data)\n return self.data", "title": "" }, { "docid": "22961228a5d8e0767be431da4d6b4ff7", "score": "0.614481", "text": "def getdecoder(encoding):\n\tpass", "title": "" }, { "docid": "3a80e52aec8dd02a28150454c6640933", "score": "0.61191016", "text": "def decode(cls, b):\n raise NotImplementedError", "title": "" }, { "docid": "815597b99c08003f75af554e6c32b357", "score": "0.6056201", "text": "def decoder(self) -> Optional[Decoder]:\n pass", "title": "" }, { "docid": "ec94cd0717d46587ae681b9923388c24", "score": "0.6045181", "text": "def decode(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "c9d8a3a06e710b06131968de49690b3b", "score": "0.599665", "text": "def _decode(*args):\n\t\treturn args", "title": "" }, { "docid": "c9d8a3a06e710b06131968de49690b3b", "score": "0.599665", "text": "def _decode(*args):\n\t\treturn args", "title": "" }, { "docid": "ec151e7c78054d10ee1a53e21416e15c", "score": "0.5995815", "text": "def create_decoder(self):\n batch_size = tf.shape(self.encode)[0]\n l = tf.multiply(self.encode, 
self.act_embed, name='merge')\n l = FC(l, 1024, 'dec')\n l = FC(l, 64 * 10 * 10, 'ip4')\n l = ReLu(l, 'relu1')\n l = tf.reshape(l, [-1, 10, 10, 64], name='dec-reshape')\n l = Deconv2D(l, [6, 6], [batch_size, 20, 20, 64], 64, 2, 'SAME', 'deconv3')\n l = ReLu(l, 'relu2')\n l = Deconv2D(l, [6, 6], [batch_size, 40, 40, 64], 64, 2, 'SAME', 'deconv2')\n l = ReLu(l, 'relu3')\n l = Deconv2D(l, [6, 6], [batch_size, 84, 84, 3], 3, 2, 'VALID', 'x_hat-05')\n return l", "title": "" }, { "docid": "004738b5189ab156c3b08484230a4e63", "score": "0.58919895", "text": "def _streamEMB():\r\n pass", "title": "" }, { "docid": "6cd76706583c27d28efc67cf8db75c6d", "score": "0.5870717", "text": "def decode(self):\n self.decoded = True\n if self.dtype == 'BTH0':\n self.subpack = BTH0(self.datablock, self.subheader['Beams'])\n elif self.dtype == 'SNI0':\n self.subpack = SNI0(self.datablock)\n elif self.dtype == 'WCD0':\n self.subpack = WCD0(self.datablock)\n else:\n self.decoded = False\n print \"Data record \" + str(self.header[0]) + \" decoding is not yet supported.\"", "title": "" }, { "docid": "80d31190a67ca9809d6e8d81c532c0ca", "score": "0.5848532", "text": "def __init__(self, src_embed, encoder, tgt_embed, decoder, enc2dec, generator):\n super(EncoderDecoder, self).__init__()\n self.src_embed = src_embed\n self.encoder = encoder\n self.enc2dec = enc2dec\n self.tgt_embed = tgt_embed\n self.decoder = decoder\n self.generator = generator", "title": "" }, { "docid": "cf288f0eb8fa271862df0176fbf97a8a", "score": "0.58331096", "text": "def decode(self, z):\n return self.generator(z)", "title": "" }, { "docid": "9abd21a9925f2a7133048d0c6bace4dd", "score": "0.58246773", "text": "def PyCodec_Decode(space, object, encoding, errors):\n raise NotImplementedError", "title": "" }, { "docid": "f3af54a011a660b2cdea7bd33bf90890", "score": "0.58233213", "text": "def on_decoded(self, packet):\n self.emit('packet', packet)", "title": "" }, { "docid": "f15ca233b40a14500cf04cb2ee430e23", "score": "0.5805976", "text": "def _decode1(self, body, data):\n if \" \" in body:\n evtype,body = body.split(\" \",1)\n else:\n evtype,body = body,\"\"\n evtype = evtype.upper()\n if evtype == \"CIRC\":\n m = re.match(r\"(\\d+)\\s+(\\S+)(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?\", body)\n if not m:\n raise ProtocolError(\"CIRC event misformatted.\")\n ident,status,path,purpose,reason,remote = m.groups()\n ident = int(ident)\n if path:\n if \"PURPOSE=\" in path:\n remote = reason\n reason = purpose\n purpose=path\n path=[]\n elif \"REASON=\" in path:\n remote = reason\n reason = path\n purpose = \"\"\n path=[]\n else:\n path_verb = path.strip().split(\",\")\n path = []\n for p in path_verb:\n path.append(p.replace(\"~\", \"=\").split(\"=\")[0])\n else:\n path = []\n\n if purpose and \"REASON=\" in purpose:\n remote=reason\n reason=purpose\n purpose=\"\"\n\n if purpose: purpose = purpose[9:]\n if reason: reason = reason[8:]\n if remote: remote = remote[15:]\n event = CircuitEvent(evtype, ident, status, path, purpose, reason,\n remote, body)\n elif evtype == \"STREAM\":\n #plog(\"DEBUG\", \"STREAM: \"+body)\n m = re.match(r\"(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)?:(\\d+)(\\sREASON=\\S+)?(\\sREMOTE_REASON=\\S+)?(\\sSOURCE=\\S+)?(\\sSOURCE_ADDR=\\S+)?(\\s+PURPOSE=\\S+)?\", body)\n if not m:\n raise ProtocolError(\"STREAM event misformatted.\")\n ident,status,circ,target_host,target_port,reason,remote,source,source_addr,purpose = m.groups()\n ident,circ = map(int, (ident,circ))\n if not target_host: # This can happen on SOCKS_PROTOCOL failures\n target_host = 
\"(none)\"\n if reason: reason = reason[8:]\n if remote: remote = remote[15:]\n if source: source = source[8:]\n if source_addr: source_addr = source_addr[13:]\n if purpose:\n purpose = purpose.lstrip()\n purpose = purpose[8:]\n event = StreamEvent(evtype, ident, status, circ, target_host,\n int(target_port), reason, remote, source, source_addr,\n purpose, body)\n elif evtype == \"ORCONN\":\n m = re.match(r\"(\\S+)\\s+(\\S+)(\\sAGE=\\S+)?(\\sREAD=\\S+)?(\\sWRITTEN=\\S+)?(\\sREASON=\\S+)?(\\sNCIRCS=\\S+)?\", body)\n if not m:\n raise ProtocolError(\"ORCONN event misformatted.\")\n target, status, age, read, wrote, reason, ncircs = m.groups()\n\n #plog(\"DEBUG\", \"ORCONN: \"+body)\n if ncircs: ncircs = int(ncircs[8:])\n else: ncircs = 0\n if reason: reason = reason[8:]\n if age: age = int(age[5:])\n else: age = 0\n if read: read = int(read[6:])\n else: read = 0\n if wrote: wrote = int(wrote[9:])\n else: wrote = 0\n event = ORConnEvent(evtype, status, target, age, read, wrote,\n reason, ncircs, body)\n elif evtype == \"STREAM_BW\":\n m = re.match(r\"(\\d+)\\s+(\\d+)\\s+(\\d+)\", body)\n if not m:\n raise ProtocolError(\"STREAM_BW event misformatted.\")\n event = StreamBwEvent(evtype, body, *m.groups())\n elif evtype == \"BW\":\n m = re.match(r\"(\\d+)\\s+(\\d+)\", body)\n if not m:\n raise ProtocolError(\"BANDWIDTH event misformatted.\")\n read, written = map(long, m.groups())\n event = BWEvent(evtype, read, written, body)\n elif evtype in (\"DEBUG\", \"INFO\", \"NOTICE\", \"WARN\", \"ERR\"):\n event = LogEvent(evtype, body)\n elif evtype == \"NEWDESC\":\n ids_verb = body.split(\" \")\n ids = []\n for i in ids_verb:\n ids.append(i.replace(\"~\", \"=\").split(\"=\")[0].replace(\"$\",\"\"))\n event = NewDescEvent(evtype, ids, body)\n elif evtype == \"ADDRMAP\":\n # TODO: Also parse errors and GMTExpiry\n m = re.match(r'(\\S+)\\s+(\\S+)\\s+(\\\"[^\"]+\\\"|\\w+)', body)\n if not m:\n raise ProtocolError(\"ADDRMAP event misformatted.\")\n fromaddr, toaddr, when = m.groups()\n if when.upper() == \"NEVER\": \n when = None\n else:\n when = time.strptime(when[1:-1], \"%Y-%m-%d %H:%M:%S\")\n event = AddrMapEvent(evtype, fromaddr, toaddr, when, body)\n elif evtype == \"NS\":\n event = NetworkStatusEvent(evtype, parse_ns_body(data), data)\n elif evtype == \"NEWCONSENSUS\":\n event = NewConsensusEvent(evtype, parse_ns_body(data), data)\n elif evtype == \"BUILDTIMEOUT_SET\":\n m = re.match(\n r\"(\\S+)\\sTOTAL_TIMES=(\\d+)\\sTIMEOUT_MS=(\\d+)\\sXM=(\\d+)\\sALPHA=(\\S+)\\sCUTOFF_QUANTILE=(\\S+)\",\n body)\n set_type, total_times, timeout_ms, xm, alpha, quantile = m.groups()\n event = BuildTimeoutSetEvent(evtype, set_type, int(total_times),\n int(timeout_ms), int(xm), float(alpha),\n float(quantile), body)\n elif evtype == \"GUARD\":\n m = re.match(r\"(\\S+)\\s(\\S+)\\s(\\S+)\", body)\n entry, guard, status = m.groups()\n event = GuardEvent(evtype, entry, guard, status, body)\n elif evtype == \"TORCTL_TIMER\":\n event = TimerEvent(evtype, data)\n else:\n event = UnknownEvent(evtype, body)\n\n return event", "title": "" }, { "docid": "b67d36b773ac1c98416946d255a8afb0", "score": "0.5796752", "text": "def get_decoder(aes_adapter):\n def decode(cipthertext):\n return aes_adapter.decrypt(cipthertext)\n \n return decode", "title": "" }, { "docid": "dbf29e9bb0c4dbdc3a711c24cc9ceadc", "score": "0.57395804", "text": "def h264_frame_cb(self, h264_frame):", "title": "" }, { "docid": "cb3db0e41e5ebfd55a462b37b6761e09", "score": "0.57241505", "text": "def forward(self, decoder_inner_states):\n return None", "title": "" 
}, { "docid": "4d22563669cf14411d5c12414526ae2d", "score": "0.56893575", "text": "def dataDecoder(data):\n\treturn json.loads(base64.b64decode(data))", "title": "" }, { "docid": "f68c28b8971dac56bacd45365aa29806", "score": "0.561687", "text": "def decode(data: dict):\n try:\n if data[\"sourceEndpoint\"] == \"0x00\":\n # decode ZDO\n if \"zdo\" not in CLUSTERS:\n zdo = CLUSTERS[\"zdo\"] = ZDO(None)\n else:\n zdo = CLUSTERS[\"zdo\"]\n\n cluster_id = int(data[\"clusterId\"], 0)\n raw = bytes.fromhex(data[\"APSPlayload\"][2:])\n hdr, args = zdo.deserialize(cluster_id, raw)\n cmd = ZDOCmd(hdr.command_id).name\n if hdr.command_id == ZDOCmd.Active_EP_rsp:\n return {\"command\": cmd, \"status\": str(args[0]), \"endpoints\": args[2]}\n elif hdr.command_id == ZDOCmd.Simple_Desc_rsp:\n desc: SizePrefixedSimpleDescriptor = args[2]\n return {\n \"command\": cmd,\n \"status\": str(args[0]),\n \"device_type\": desc.device_type,\n \"device_version\": desc.device_version,\n \"endpoint\": desc.endpoint,\n \"input_clusters\": desc.input_clusters,\n \"output_clusters\": desc.output_clusters,\n \"profile\": desc.profile,\n }\n elif hdr.command_id == ZDOCmd.Node_Desc_rsp:\n desc: NodeDescriptor = args[2]\n return {\n \"command\": cmd,\n \"status\": str(args[0]),\n \"is_mains_powered\": desc.is_mains_powered,\n \"logical_type\": str(desc.logical_type),\n \"manufacturer_code\": desc.manufacturer_code,\n }\n elif hdr.command_id in (ZDOCmd.Bind_rsp, ZDOCmd.Mgmt_Leave_rsp):\n return {\n \"command\": cmd,\n \"status\": str(args[0]),\n }\n elif hdr.command_id in (ZDOCmd.Node_Desc_req, ZDOCmd.Active_EP_req):\n return {\"command\": cmd}\n elif hdr.command_id == ZDOCmd.Simple_Desc_req:\n return {\n \"command\": cmd,\n \"endpoint\": args[0],\n }\n elif hdr.command_id == ZDOCmd.Bind_req:\n return {\n \"command\": cmd,\n \"src_addr\": args[0],\n \"src_endpoint\": args[1],\n \"cluster\": args[2],\n \"dst_addr\": args[3],\n }\n elif hdr.command_id == ZDOCmd.IEEE_addr_rsp:\n return {\n \"command\": cmd,\n \"status\": args[0],\n \"ieee\": args[1],\n \"nwk\": args[2],\n }\n elif hdr.command_id == ZDOCmd.Mgmt_Leave_req:\n return {\n \"command\": cmd,\n \"ieee\": args[0],\n }\n elif hdr.command_id == ZDOCmd.Mgmt_NWK_Update_rsp:\n return {\n \"command\": cmd,\n \"status\": args[0],\n \"channels\": args[1],\n \"total\": args[2],\n \"failures\": args[3],\n \"energy\": args[4],\n }\n elif hdr.command_id == ZDOCmd.NWK_addr_rsp:\n return {\n \"command\": cmd,\n \"status\": args[0],\n \"ieee\": args[1],\n \"nwk\": args[2],\n }\n else:\n raise NotImplemented\n\n # decode ZCL\n cluster_id = int(data[\"clusterId\"], 0)\n if cluster_id not in CLUSTERS:\n cluster = CLUSTERS[cluster_id] = Cluster.from_id(None, cluster_id)\n cluster._log = lambda *_, **__: None\n else:\n cluster = CLUSTERS[cluster_id]\n\n raw = bytes.fromhex(data[\"APSPlayload\"][2:])\n try:\n hdr, args = cluster.deserialize(raw)\n hdr: ZCLHeader\n except ValueError as e:\n return {\"cluster_id\": cluster_id, \"error\": str(e)}\n except KeyError as e:\n return {\"cluster_id\": cluster_id, \"error\": f\"Key error: {e}\"}\n\n payload = {\n \"endpoint\": int(data[\"sourceEndpoint\"], 0),\n \"seq\": hdr.tsn,\n }\n\n if cluster.ep_attribute:\n payload[\"cluster\"] = cluster.ep_attribute\n else:\n payload[\"cluster_id\"] = cluster_id\n\n if hdr.frame_control.is_general:\n payload[\"command\"] = Command(hdr.command_id).name\n\n if (\n hdr.command_id == Command.Report_Attributes\n or hdr.command_id == Command.Write_Attributes\n ):\n (attrs,) = args\n for attr in attrs:\n assert 
isinstance(attr, Attribute)\n if attr.attrid in cluster.attributes:\n name = cluster.attributes[attr.attrid].name\n else:\n name = attr.attrid\n\n value = attr.value.value\n if isinstance(value, bytes) and value:\n payload[name] = \"0x\" + value.hex()\n elif isinstance(value, list) and not isinstance(value, EUI64):\n payload[name] = [v.value for v in value]\n elif isinstance(value, int):\n payload[name] = int(value)\n else:\n payload[name] = value\n\n elif hdr.command_id == Command.Read_Attributes_rsp:\n (attrs,) = args\n for attr in attrs:\n assert isinstance(attr, ReadAttributeRecord)\n if attr.attrid in cluster.attributes:\n name = cluster.attributes[attr.attrid].name\n else:\n name = attr.attrid\n\n if attr.value is not None:\n value = attr.value.value\n if isinstance(value, bytes) and value:\n payload[name] = \"0x\" + value.hex()\n elif isinstance(value, list):\n payload[name] = [v.value for v in value]\n elif isinstance(value, int):\n payload[name] = int(value)\n else:\n payload[name] = value\n else:\n payload[name] = str(attr.status)\n\n elif hdr.command_id == Command.Read_Attributes:\n (attrs,) = args\n payload[\"value\"] = attrs\n\n elif hdr.command_id == Command.Configure_Reporting:\n (attrs,) = args\n # fix __repr__ bug\n for attr in attrs:\n if not hasattr(attr, \"reportable_change\"):\n attr.reportable_change = None\n payload[\"value\"] = attrs\n\n elif (\n hdr.command_id == Command.Write_Attributes_rsp\n or hdr.command_id == Command.Configure_Reporting_rsp\n ):\n (resp,) = args\n payload[\"status\"] = [str(attr.status) for attr in resp]\n\n elif hdr.command_id == Command.Discover_Commands_Received_rsp:\n payload[\"status\"] = bool(args[0])\n payload[\"value\"] = args[1]\n\n elif hdr.command_id == Command.Default_Response:\n payload[\"value\"] = args[0]\n payload[\"status\"] = str(args[1])\n\n else:\n if isinstance(args, bytes) and args:\n args = \"0x\" + args.hex()\n payload[\"command_id\"] = int(hdr.command_id)\n payload[\"value\"] = args\n\n elif hdr.frame_control.is_cluster:\n # if isinstance(args, bytes) and args:\n # args = \"0x\" + args.hex()\n\n payload[\"command_id\"] = hdr.command_id\n if hdr.command_id < len(cluster.commands):\n payload[\"command\"] = cluster.commands[hdr.command_id]\n if args:\n payload[\"value\"] = args\n\n else:\n if isinstance(args, bytes) and args:\n args = \"0x\" + args.hex()\n\n payload.update({\"command_id\": hdr.command_id, \"value\": args})\n\n return payload\n\n except Exception as e:\n _LOGGER.debug(\"Error while parsing zigbee\", exc_info=e)\n return None", "title": "" }, { "docid": "4efe99706bc450b6087bc45900faadf5", "score": "0.5615661", "text": "def decryptor(self):", "title": "" }, { "docid": "ac5f7ffd6048c15111f770cbc783b3e4", "score": "0.5596908", "text": "def ice_postUnmarshal(self):\n pass # Currently unused", "title": "" }, { "docid": "ac5f7ffd6048c15111f770cbc783b3e4", "score": "0.5596908", "text": "def ice_postUnmarshal(self):\n pass # Currently unused", "title": "" }, { "docid": "8427df3257328528c26a61b4d7ad32c4", "score": "0.5574899", "text": "def pre_unpack(cls, packet: 'dict[str, Any]') -> 'None':", "title": "" }, { "docid": "22982f63a768d2129c9b93dc2ab39052", "score": "0.5568638", "text": "def handlePacket(hdr, data):\n print(decoder.decode(data))", "title": "" }, { "docid": "c846256b4df06d00b751d7b53bf36892", "score": "0.55586743", "text": "def __enter__(self) -> MP3Decoder:\n ...", "title": "" }, { "docid": "58029eecd5aabab55fbede1e828a1f3a", "score": "0.5558033", "text": "def __init__(self, raw):\n self._hal = 
cetacean._parse_hal(raw)", "title": "" }, { "docid": "43c13a1b1a08edd9fe819b08990185bb", "score": "0.5526881", "text": "def DecodeCardData(datalist):\r\n #if RunBBBHW():\r\n # decode the data stread read from the card reader\r\n pass", "title": "" }, { "docid": "f5633445cd2196f481ae05ee201dba9b", "score": "0.55074894", "text": "def incremental_decode(decoder, *args, **kwargs):\n kwargs[\"return_tuple\"] = False\n return decode(*args, **kwargs)", "title": "" }, { "docid": "5f35f6a760294a29a935abd567435cf6", "score": "0.55020064", "text": "def sample_decode(self, z, greedy=False):\n raise NotImplementedError()", "title": "" }, { "docid": "01f3e6fca1b5ba32a14fc1e90d4b5c6c", "score": "0.55010587", "text": "def Decode(self, encoded_data):\n return b'', b''", "title": "" }, { "docid": "8d0db0b47a416a38b6a2ebb772b77989", "score": "0.5474095", "text": "def _decode_text(self):\n \n print(f\"AtBash decode; received message is {self.message}\")\n return self._applyCipher()", "title": "" }, { "docid": "d6eb14817741e876cbd69c2bcdb65af4", "score": "0.5462845", "text": "def decode_msg(self, message):\n\t\tpass", "title": "" }, { "docid": "806ef27c4098f00577cdb7fe063f8974", "score": "0.5461527", "text": "def start_decode_by_msg(interface_obj,\n msg):\n msg = msg.split(\" \", 2)\n print(msg)\n status_code = msg[0]\n if status_code == \"*1\" and len(msg) > 2:\n address = msg[1].capitalize()\n value = msg[2]\n decodes = interface_obj.get_dict_decodes()\n if address in decodes:\n decodes[address](value)", "title": "" }, { "docid": "147edb0ee1a2ac1af51651bfabda424b", "score": "0.5460968", "text": "def fix_stream():\n content_iter = res.iter_content(8192)\n data = next(content_iter)\n yield b'FLV' + data[3:]\n for chunk in content_iter:\n yield chunk", "title": "" }, { "docid": "cd61f8673a70275330464ecd88305f50", "score": "0.5457824", "text": "def decode(self, decoder_input, step=0):\n\n cell_input = torch.cat((decoder_input, self.attention_context), -1)\n self.attention_hidden, self.attention_cell = self.attention_rnn(cell_input, (self.attention_hidden, self.attention_cell))\n\n\n self.attention_hidden = F.dropout(self.attention_hidden, self.p_attention_dropout, self.training)\n\n attention_weights_cat = torch.cat((self.attention_weights.unsqueeze(1),self.attention_weights_cum.unsqueeze(1)), dim=1)\n\n\n self.attention_context, self.attention_weights = self.attention_layer(self.attention_hidden, self.memory, self.processed_memory,attention_weights_cat, self.mask)\n\n self.attention_weights_cum += self.attention_weights\n\n decoder_input = torch.cat((self.attention_hidden, self.attention_context), -1)\n\n self.decoder_hidden, self.decoder_cell = self.decoder_rnn(decoder_input, (self.decoder_hidden, self.decoder_cell))\n self.decoder_hidden = F.dropout(self.decoder_hidden, self.p_decoder_dropout, self.training)\n\n decoder_hidden_attention_context = torch.cat((self.decoder_hidden, self.attention_context), dim=1)\n\n decoder_output = self.linear_projection(decoder_hidden_attention_context)\n\n gate_prediction = self.gate_layer(decoder_hidden_attention_context)\n return decoder_output, gate_prediction, self.attention_weights", "title": "" }, { "docid": "8c93a45c991e36ea8f5341956eb71b4c", "score": "0.5449894", "text": "def __init__(self, resolver_context, path_spec):\n super(EncodedStream, self).__init__(resolver_context, path_spec)\n self._current_offset = 0\n self._decoded_data = b''\n self._decoded_data_offset = 0\n self._decoded_data_size = 0\n self._decoded_stream_size = None\n self._decoder = None\n 
self._encoded_data = b''\n self._encoding_method = None\n self._file_object = None\n self._realign_offset = True", "title": "" }, { "docid": "805bfc1d0d84d611045e63a34befabe5", "score": "0.54455596", "text": "def DecoderBlock(d_model, d_ff, n_heads,\n dropout, mode, ff_activation):\n \n # Add list of two Residual blocks: the attention with normalization and dropout and feed-forward blocks\n return [\n tl.Residual(\n # Normalize layer input\n tl.LayerNorm(), \n # Add causal attention \n tl.CausalAttention(d_feature, n_heads=n_heads, dropout=dropout, mode=mode) \n ),\n tl.Residual(\n # Add feed-forward block\n # We don't need to normalize the layer inputs here. The feed-forward block takes care of that for us.\n FeedForward(d_model, d_ff, dropout, mode, ff_activation)\n ),\n ]", "title": "" }, { "docid": "b00d79e19317529639a7bad50df3b3b6", "score": "0.54431593", "text": "def decode(self):\n super(Decoder, self).calculate_i_symbols()", "title": "" }, { "docid": "74339f9854916975a3e85433c580a91a", "score": "0.5421054", "text": "def __init__(self, encoder):\n self.encoder = encoder", "title": "" }, { "docid": "8aaf705c4b5b6a4fdf86e4a7f64bf3a9", "score": "0.54164857", "text": "def decode_character(self, pbs):\r\n raise NotImplementedError()", "title": "" }, { "docid": "bad4a2184e3204c3f04f16c542c233d8", "score": "0.54144484", "text": "def decode(self, data: bytes) -> Any:\n return self.decoder.decode(data.decode(\"utf8\"))", "title": "" }, { "docid": "db8378125524870f8c622862220a7662", "score": "0.5387947", "text": "def decode(self, z):\n z = self.fc3(z)\n z = self.decoder(z)\n return z", "title": "" }, { "docid": "dfad4d9b0e6981d4c35c0b4d9f28dc39", "score": "0.5386913", "text": "def __init__(self, data):\n self.data = EncodingBytes(data)\n self.encoding = None", "title": "" }, { "docid": "268445abb2b0eb9942a26009dc57c96d", "score": "0.5371185", "text": "def load_decoder(autoencoder):\n dim = len(autoencoder.get_config()['input_layers'])\n mag_phase_flag = False\n decoders = []\n if dim == 2: \n mag_phase_flag = True\n decoders.append(autoencoder.get_layer('mag_decoder'))\n decoders.append(autoencoder.get_layer('phase_decoder'))\n else:\n decoders.append(autoencoder.get_layer('decoder'))\n return decoders,mag_phase_flag", "title": "" }, { "docid": "f4c126d16254ae48575123acbade7757", "score": "0.5360108", "text": "def decrypt(self, data):", "title": "" }, { "docid": "e0ae1fc618748ee86c844dfd04c99d69", "score": "0.53584963", "text": "def decode(self, *args, **kwargs):\n return self.tokenizer.decode(*args, **kwargs)", "title": "" }, { "docid": "95cb7be13ca1975eeced4ece06ee909d", "score": "0.5352908", "text": "def decoder(self):\n return{'dec_units':self.units, 'embedding_dim':self.embedding_dim,\n 'num_chars':self.num_chars, 'K':self.K}", "title": "" }, { "docid": "2c2dc0d7babd52208dfff995f598f790", "score": "0.5345057", "text": "def decompress(bytestream: bytes, padding: int, tree: Node) -> str:\n # TODO: Implement this function\n ...", "title": "" }, { "docid": "cdae5e70056f45b66cdaa165b78bc33f", "score": "0.53391147", "text": "def Decode(self, input_batch):\n return self._BeamSearchDecode(input_batch)", "title": "" }, { "docid": "7706f52d1c0875bf6c8922ed0669d81f", "score": "0.53332925", "text": "def decode(bdata):\n\n return parse_blist(bdata)[0]", "title": "" }, { "docid": "15a452c3d9b2dd806c2c6756f9a455f5", "score": "0.5331376", "text": "def decode(self, file, filename, streaming):\n raise NotImplementedError()", "title": "" }, { "docid": "966e871e5aa7ea2e730454460aa0da25", "score": 
"0.5329762", "text": "def decode_57(p_pyhouse_obj, p_controller_obj):\n l_message = p_controller_obj._Message\n l_obj = utilDecode.get_obj_from_message(p_pyhouse_obj, l_message[4:7])\n l_link_obj = LinkData()\n l_link_obj.Flag = l_flags = l_message[2]\n l_link_obj.Group = l_group = l_message[3]\n l_link_obj.InsteonAddess = l_obj.InsteonAddress\n l_link_obj.Data = l_data = [l_message[7], l_message[8], l_message[9]]\n l_flag_control = l_flags & 0x40\n l_type = 'Responder'\n if l_flag_control != 0:\n l_type = 'Controller'\n LOG.info(\"All-Link response-57 - Group={:#02X}, Name={}, Flags={:#x}, Data={}, {}\".format(l_group, l_obj.Name, l_flags, l_data, l_type))\n l_ret = True\n return l_ret", "title": "" }, { "docid": "2fb36d89daf5003a61d47b57052bcf10", "score": "0.5325287", "text": "def __init__(self, decoder_args):\n super(SimpleDFSDecoder, self).__init__(decoder_args)\n #self._min_length_ratio = 0.25 # TODO: Make configurable\n self._min_length_ratio = -0.1\n self._min_length = -100", "title": "" }, { "docid": "521c7da474a2e658edf0f15e39fa23cb", "score": "0.53212535", "text": "def decode(self, decoder_input, time_step):\n t = time_step#0.5 - 0.5*np.cos(min(1, time_step / 500)*np.pi)\n cell_input = torch.cat((\n decoder_input, self.attention_context, self.get_latents(t)), -1)\n self.attention_hidden = self.attention_rnn(\n cell_input, self.attention_hidden)\n self.attention_hidden = F.dropout(\n self.attention_hidden, self.p_attention_dropout, self.training)\n\n attention_weights_cat = torch.cat(\n (self.attention_weights.unsqueeze(1),\n self.attention_weights_cum.unsqueeze(1)), dim=1)\n self.attention_context, self.attention_weights = self.attention_layer(\n self.attention_hidden, self.memory, self.processed_memory,\n attention_weights_cat, self.mask)\n\n self.attention_weights_cum += self.attention_weights\n decoder_input = torch.cat(\n (self.attention_hidden, self.attention_context), -1)\n self.decoder_hidden = self.decoder_rnn(\n decoder_input, self.decoder_hidden)\n self.decoder_hidden = F.dropout(\n self.decoder_hidden, self.p_decoder_dropout, self.training)\n\n decoder_hidden_attention_context = torch.cat(\n (self.decoder_hidden, self.attention_context), dim=1)\n decoder_output = self.linear_projection(\n decoder_hidden_attention_context)\n\n decoder_output = self.mel_params(decoder_output.squeeze(1))\n\n gate_prediction = self.gate_layer(decoder_hidden_attention_context).squeeze(1)\n return decoder_output, gate_prediction, self.attention_weights", "title": "" }, { "docid": "53ea7a47eeb122d96e3e5bc68044bdd9", "score": "0.53130627", "text": "def decode(self, data):\n decoded_data = self._decoder(data)\n return decoded_data", "title": "" }, { "docid": "c607b1dfe86d7cef0547e69e343b1978", "score": "0.5308986", "text": "def decode(self, data):\n if data[0] == '#':\n # list of names\n self.names = [self.sub_re.sub('_', name) for name in data[1:].split('!')]\n print \"JobySimDecoder got names:\", self.names\n return None\n\n vals = map(eval, data.split('!'))\n d = dict(zip(self.names, vals))\n #print d\n return d", "title": "" }, { "docid": "2f756994e847b09b50842af820384a48", "score": "0.53000313", "text": "def get_encoder(self):", "title": "" }, { "docid": "406e826cf4e87db8a9dd2d576e52d22c", "score": "0.52965343", "text": "def decode(self, z):\n h2 = self.de1(z)\n return self.decoder(h2)", "title": "" }, { "docid": "2d4b6412fe217a1c185cc32f5760b363", "score": "0.52954984", "text": "def __init__(self) -> None:\n self.path = None\n self.replay = None\n self.bitstream = None\n self.netstream = 
None\n self.header_raw = None\n self.body_raw = None\n\n # Decoder Settings\n self.ignore_network = None\n self.verbose = None", "title": "" }, { "docid": "9c9e17c253279a74f20744d5b78c7385", "score": "0.52928096", "text": "def decode(self, data_batch):\n warnings.warn('`decode` method not found, falling back to `forward`')\n return self.forward(data_batch)", "title": "" }, { "docid": "91d7633f9a48468f46543b4aaabcfdb1", "score": "0.5284167", "text": "def batch_decode(self, sequences, **kwargs):\n return super().batch_decode(sequences, **kwargs)", "title": "" }, { "docid": "2c61d8d687f6258ee1ff152daf2d01ab", "score": "0.5271713", "text": "def __build_decoder(self, z, y_dim):\n\n self.logger.debug(\"create decoder\")\n\n # define network\n net = slim.fully_connected(z, 50, activation_fn=tf.nn.softplus)\n net = slim.fully_connected(net, 50, activation_fn=tf.nn.softplus)\n\n # get moments\n y_hat = slim.fully_connected(net, y_dim, activation_fn=None)\n y_ls2 = slim.fully_connected(net, y_dim, activation_fn=None)\n\n return y_hat, y_ls2", "title": "" }, { "docid": "579aec73593b9435d511da13b2ec45fc", "score": "0.5259557", "text": "def get_decoding(self):\n return self._dual.get_values('decoding')", "title": "" }, { "docid": "b88ccb62c6cc5eca19fb0e7987dbffb5", "score": "0.525838", "text": "def __init__(self, data=None):\n self._converter = None\n if data:\n self.decode(data)", "title": "" }, { "docid": "ff74bb77d713042f9dd9af85b3e65aad", "score": "0.5253929", "text": "def _build_decoder(self, arch, **kwargs):\n # usual non-bottleneck transformer decoder\n if (not arch) or (arch == \"seq2seq\"):\n decoder = self.decoder\n else:\n raise ValueError(f\"Unknown arch = {self.arch}, supported arch = {self.supported_arch}\")\n\n return decoder", "title": "" }, { "docid": "ac943a10e87260c5a8487f6b1913a2b3", "score": "0.52502054", "text": "def build_decoder(opt, embeddings, pos = None):\n # 根据transformer论文,整个decoder层由dec_layers=6层堆叠而成,d_model = dec_rnn_size = 512,\n # multi-head attention中 h = heads = 8, feed-forward层大小为transformer_ff=2048 \n return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size,\n opt.heads, opt.transformer_ff,\n opt.dropout, embeddings, pos)", "title": "" }, { "docid": "308dc5ae38319d61e558b24711bbdc69", "score": "0.52474207", "text": "def _decode_nalu(self, nalu_bytes):\n if \"0x\" + nalu_bytes[0 : 4 * 8].hex == self.START_CODE_PREFIX:\n start_code = nalu_bytes.read(\"bytes:4\")\n else:\n start_code = nalu_bytes.read(\"bytes:3\")\n forbidden_zero_bit = nalu_bytes.read(1)\n nal_ref_idc = nalu_bytes.read(\"uint:2\")\n nal_unit_type = nalu_bytes.read(\"uint:5\")\n nal_unit_payload = nalu_bytes[nalu_bytes.pos :]\n\n rbsp_payload = BitStream()\n for i in range(int(len(nal_unit_payload) / 8)):\n if (\n len(nal_unit_payload) - nal_unit_payload.pos >= 24\n and nal_unit_payload.peek(\"bits:24\") == \"0x000003\"\n ):\n rbsp_payload.append(nal_unit_payload.read(\"bits:8\"))\n rbsp_payload.append(nal_unit_payload.read(\"bits:8\"))\n nal_unit_payload.read(\"bits:8\")\n else:\n if nal_unit_payload.pos == len(nal_unit_payload):\n continue\n rbsp_payload.append(nal_unit_payload.read(\"bits:8\"))\n\n return nal_unit_type, rbsp_payload", "title": "" }, { "docid": "ad8b777e9d9c17b87a40241244905996", "score": "0.5245915", "text": "def handlePacket(self, data):\n r = self.mres()\n r.deserialize(data)\n self.response = r", "title": "" }, { "docid": "e6370326a0e4aebb4a8c87ab5c66bdea", "score": "0.5229287", "text": "def deserialize(self, data):", "title": "" }, { "docid": 
"0100d9ec3e1372f9c672f26173225f86", "score": "0.5208996", "text": "def feed(self, binary):\r\n\t\temit = False\r\n\t\tif self.decoder is None:\r\n\t\t\tself.decoder = codecs.getincrementaldecoder(self.encoding)()\r\n\t\tbuf = self.buf + self.decoder.decode(binary)\r\n\t\tif self.jsondec is None:\r\n\t\t\tself.jsondec = json.JSONDecoder()\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tobj,pos = self.jsondec.raw_decode(buf)\r\n\t\t\texcept ValueError,e:\r\n\t\t\t\tself.buf = buf\r\n\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\temit = True\r\n\t\t\tself.callback(obj)\r\n\t\t\t\r\n\t\t\twhile pos<len(buf) and buf[pos] in \"\\r\\n\":\r\n\t\t\t\tpos += 1\r\n\t\t\r\n\t\t\tbuf = buf[pos:]\r\n\t\treturn emit", "title": "" }, { "docid": "a43708002b86a7213edbba69a6dad5b1", "score": "0.52077204", "text": "def decoder(self, init_state):\n # pd.Print(init_state)\n # define counter variable in the decoding\n array_len = pd.fill_constant(shape=[1], dtype='int64', value=self.max_length)\n counter = pd.zeros(shape=[1], dtype='int64', force_cpu=True)\n static_count = pd.zeros(shape=[1], dtype='int64', force_cpu=True)\n\n # define tensor array to save content at each time step, and write initial id, score and state\n state_h_array = pd.create_array('float32')\n pd.array_write(self.h, array=state_h_array, i=counter)\n state_c_array = pd.create_array('float32')\n pd.array_write(self.c, array=state_c_array, i=counter)\n\n src_indexes = fluid.layers.data(\n name='source_index', shape=[1], dtype='int64', lod_level=1)\n src_index_array = pd.create_array('int64')\n pd.array_write(src_indexes, array=src_index_array, i=counter)\n\n ids_array = pd.create_array('int64')\n scores_array = pd.create_array('float32')\n\n init_ids = fluid.layers.data(\n name=\"init_ids\", shape=[1], dtype=\"int64\", lod_level=2)\n init_scores = fluid.layers.data(\n name=\"init_scores\", shape=[1], dtype=\"float32\", lod_level=2)\n\n pd.array_write(init_ids, array=ids_array, i=counter)\n pd.array_write(init_scores, array=scores_array, i=counter)\n\n encoder_vec_array = pd.create_array('float32')\n pd.array_write(self.encoder_vec, array=encoder_vec_array, i=static_count)\n encoder_vec_full_array = pd.create_array('float32')\n pd.array_write(self.encoder_vec_full, array=encoder_vec_full_array, i=static_count)\n encoder_proj_array = pd.create_array('float32')\n pd.array_write(self.encoder_proj, array=encoder_proj_array, i=static_count)\n\n event_embedding_array = pd.create_array('float32')\n pd.array_write(self.event_embedding, array=event_embedding_array, i=static_count)\n\n # define conditional variable to stop loop\n cond = pd.less_than(x=counter, y=array_len)\n # define while_op\n while_op = pd.While(cond=cond)\n with while_op.block(): # define the computing of each step\n # pd.Print(counter)\n\n # obtain input at present step of decoder, including id chosen at previous step, corresponding score and state at previous step.\n pre_ids = pd.array_read(array=ids_array, i=counter)\n pre_h_state = pd.array_read(array=state_h_array, i=counter)\n pre_c_state = pd.array_read(array=state_c_array, i=counter)\n\n # pre_score = pd.array_read(array=scores_array, i=counter)\n pre_score = pd.array_read(array=scores_array, i=static_count)\n\n _encoder_input_ids = pd.array_read(\n array=src_index_array, i=static_count)\n\n event_embedding = pd.array_read(\n array=event_embedding_array, i=static_count)\n\n # print(\"pre_h_state\", pre_h_state)\n encoder_vec = pd.array_read(\n array=encoder_vec_array, i=static_count)\n encoder_vec_full = pd.array_read(\n 
array=encoder_vec_full_array, i=static_count)\n encoder_proj = pd.array_read(\n array=encoder_proj_array, i=static_count)\n\n # # update input state as state correspondent with id chosen at previous step\n # pre_h_state_expanded = pd.sequence_expand(pre_h_state, pre_score)\n # pre_c_state_expanded = pd.sequence_expand(pre_c_state, pre_score)\n # computing logic of decoder under the same train mode, including input vector and computing unit of decoder\n # compute predicting probability of normalized word\n pre_ids_emb = pd.embedding(\n input=pre_ids,\n size=[self.target_dict_dim, self.embedding_dim],\n dtype='float32',\n param_attr=fluid.ParamAttr(name=\"trg_embedding\"))\n\n # pd.Print(pre_ids_emb)\n att_context = self.simple_attention(\n encoder_vec, encoder_proj, pre_h_state)\n # print(\"att_context\", att_context)\n # print(\"pre_ids_emb\", pre_ids_emb)\n # pd.Print(att_context)\n\n prob_c = fluid.layers.sequence_expand_as(pre_score,\n encoder_vec)\n # pd.Print(prob_c)\n\n current_score, current_h, current_c, this_prob_c = self.copy_decoder(\n pre_ids_emb, \n encoder_vec, encoder_vec_full, encoder_proj, \n _encoder_input_ids, pre_ids,\n prob_c, att_context,\n pre_h_state, pre_c_state,\n event_embedding)\n\n # decoder_inputs = fluid.layers.concat(\n # input=[att_context, pre_ids_emb], axis=1)\n # current_h, current_c = self.lstm_step(\n # decoder_inputs, pre_h_state, pre_c_state, self.decoder_size)\n # # compute predicting probability of nomarlized word\n # current_score = fluid.layers.fc(input=current_h,\n # size=self.target_dict_dim,\n # act='softmax',\n # param_attr=fluid.ParamAttr(name=\"out_softmax_w\"),\n # bias_attr=fluid.ParamAttr(name=\"out_softmax_b\"))\n\n # # current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb],\n # # size=decoder_size,\n # # act='tanh')\n # current_state_with_lod = pd.lod_reset(x=current_h, y=pre_score)\n # current_score = pd.fc(input=current_state_with_lod,\n # size=self.target_dict_dim,\n # act='softmax',\n # param_attr=fluid.ParamAttr(name=\"out_softmax_w\"),\n # bias_attr=fluid.ParamAttr(name=\"out_softmax_b\"))\n # print(current_score)\n topk_scores, topk_indices = pd.topk(current_score, k=self.beam_size)\n # pd.Print(topk_indices)\n # pd.Print(topk_scores)\n selected_ids, selected_scores = topk_indices, topk_scores\n\n # # compute accumulated score and perform beam search\n # accu_scores = pd.elementwise_add(\n # x=pd.log(topk_scores), y=pd.reshape(pre_score, shape=[-1]), axis=0)\n # selected_ids, selected_scores = pd.beam_search(\n # pre_ids,\n # pre_score,\n # topk_indices,\n # accu_scores,\n # self.beam_size,\n # # end_id=self.end_id,\n # end_id=999999,\n # level=0)\n\n # pd.Print(selected_ids)\n # pd.Print(selected_scores)\n\n pd.increment(x=counter, value=1, in_place=True)\n # write search result and corresponding hidden layer into tensor array\n pd.array_write(current_h, array=state_h_array, i=counter)\n pd.array_write(current_c, array=state_c_array, i=counter)\n pd.array_write(selected_ids, array=ids_array, i=counter)\n pd.array_write(selected_scores, array=scores_array, i=counter)\n # pd.Print(selected_ids)\n # pd.Print(selected_scores)\n\n # update condition to stop loop\n length_cond = pd.less_than(x=counter, y=array_len)\n finish_cond = pd.logical_not(pd.is_empty(x=selected_ids))\n pd.logical_and(x=length_cond, y=finish_cond, out=cond)\n\n # pd.Print(array_len)\n # translation_ids, translation_scores = pd.beam_search_decode(\n # ids=ids_array, scores=scores_array, beam_size=self.beam_size, end_id=self.end_id)\n # 
pd.Print(translation_ids)\n translation_ids, translation_ids_index = pd.tensor_array_to_tensor(ids_array, axis=1)\n translation_scores, translation_scores_index = pd.tensor_array_to_tensor(scores_array, axis=1)\n\n return translation_ids, translation_scores", "title": "" }, { "docid": "57eb5221e001f9fabf8c29663de5e28e", "score": "0.51929384", "text": "def _decoder(self, z):\n # Number of outputs for decoder\n num_outputs = self._ndims\n\n # Dense Layer\n dense = fully_connected(inputs=z, num_outputs=50,\n activation_fn=tf.nn.softplus)\n\n # Hidden Layer\n hidden = fully_connected(\n inputs=dense, num_outputs=100, activation_fn=tf.nn.softplus)\n\n # Output Layer\n output = fully_connected(\n inputs=hidden, num_outputs=num_outputs, activation_fn=tf.nn.sigmoid)\n return output", "title": "" }, { "docid": "1ea99a0495a5692a97a2e97355893bac", "score": "0.5192917", "text": "def decoded(self):\n self.offset = self.offset * -1\n return self.cipher()", "title": "" }, { "docid": "bc4ff94c92ae04bd0640007f450ae366", "score": "0.51878834", "text": "def decode_ufocapture(fname):\n raise NotImplementedError", "title": "" }, { "docid": "bba883b82b163a975b522ef68405d732", "score": "0.5181577", "text": "def decoder(self, y_tilde):\n nb_examples = y_tilde.shape[0]\n hidden_decoder = tls.leaky_relu(numpy.dot(y_tilde,\n self.__parameters_eae['weights_decoder']['l1']) +\n numpy.tile(self.__parameters_eae['biases_decoder']['l1'],\n (nb_examples, 1)))\n reconstruction = numpy.dot(hidden_decoder,\n self.__parameters_eae['weights_decoder']['mean']) + \\\n numpy.tile(self.__parameters_eae['biases_decoder']['mean'],\n (nb_examples, 1))\n return (hidden_decoder, reconstruction)", "title": "" }, { "docid": "581f22bae8b648b865a8a10e93ef2867", "score": "0.5176327", "text": "def _extract_frame(self, data):\n if (data == self.A0FLAG or data == self.C0FLAG) and (self.stepindex == 0):\n # print(\"Head: \", hex(data))\n self.stepindex += 1\n self._buffer.append(hex(data))\n elif self.stepindex < 4 and self.stepindex >= 1:\n self.stepindex += 1\n self._buffer.append(hex(data))\n elif self.stepindex == 4:\n self.stepindex += 1\n self._framelen = int(data)\n # print(\"length: \", self._framelen)\n self._buffer.append(hex(data))\n elif self.stepindex == 5:\n if self._framecurlen < self._framelen:\n self._buffer.append(hex(data))\n self._framecurlen += 1\n elif self._framecurlen == self._framelen:\n self._buffer.append(hex(data))\n resu_crc = checkcrc.xorcrc_str(self._buffer)\n if data == resu_crc:\n self.frame_received(self._buffer)\n self.reset()", "title": "" }, { "docid": "d478ca8698ce386c64c931afc59305d9", "score": "0.5167792", "text": "def msg_decoder(msg):\n\n if msg.encoding == 'bgr8':\n image = np.frombuffer(msg.data, np.uint8).reshape((msg.height, msg.width, 3))[:, :, ::-1]\n elif msg.encoding == 'bgra8':\n image = np.frombuffer(msg.data, np.uint8).reshape((msg.height, msg.width, 4))[:, :, ::-1][:,:,1:]\n elif msg.encoding == 'rgb8':\n image = np.frombuffer(msg.data, np.uint8).reshape((msg.height, msg.width, 3))\n elif msg.encoding == 'mono8' or msg.encoding == '8UC1':\n image = np.frombuffer(msg.data, np.uint8).reshape((msg.height, msg.width))\n image = np.stack((image,) * 3, axis = -1) # greyscale to RGB\n elif msg.encoding == 'mono16' or msg.encoding == '16UC1':\n image = np.frombuffer(msg.data, np.uint16).reshape((msg.height, msg.width)).astype(np.float)\n image_max = np.percentile(image, 95)\n image_min = np.percentile(image, 5)\n image = 255*((image - image_min)/(image_max - image_min))\n image = np.clip(image, 0, 
255)\n image = np.stack((image,) * 3, axis = -1) # greyscale to RGB\n else:\n print(\"Image encoding \" + msg.encoding + \" not supported yet.\")\n return None\n\n return image", "title": "" }, { "docid": "b82ab7e92d92f37fb0b92377893805b6", "score": "0.5167117", "text": "def decompress(textrep):\n pass", "title": "" }, { "docid": "e067f956f99d3c5a32b3c5a2e8a1be4d", "score": "0.5162957", "text": "def deserialize_bar_code_model(\n serialized_item_handle: HHandle\n) -> HHandle:\n with HalconOperator(1986) as proc:\n proc.set_input_tuple(0, serialized_item_handle)\n proc.init_oct(0)\n proc.execute()\n bar_code_handle = proc.get_output_tuple_s(0)\n return bar_code_handle # type: ignore", "title": "" }, { "docid": "04d54ed3527234da59bfdb4a080b6d14", "score": "0.51594746", "text": "def _build_decoder(self, encoder_outputs, encoder_state, hps):\n\n tgt_sos_id = tf.cast(self.vocab_table.lookup(tf.constant(hps.sos)),\n tf.int32)\n tgt_eos_id = tf.cast(self.vocab_table.lookup(tf.constant(hps.eos)),\n tf.int32)\n\n iterator = self.iterator\n\n with tf.variable_scope('decoder') as decoder_scope:\n sample_id, final_context_state,coverage_loss= tf.no_op(), tf.no_op(),tf.no_op()\n\n dec_in_state = self._reduce_states(encoder_state[0], encoder_state[1])\n\n cell, decoder_initial_state = self._build_decoder_cell(hps, None, dec_in_state, None)\n # target_output = iterator.target_output\n # target_input = iterator.target_input\n source_sequence_length = iterator.source_sequence_length\n # target_sequence_length = iterator.target_sequence_length\n\n source = iterator.source\n\n\n if self.time_major:\n print(\"it's time major\")\n\n source = tf.transpose(source)\n source_sequence_length = tf.transpose(source_sequence_length)\n\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n target_input = iterator.target_input\n target_output = iterator.target_output\n target_sequence_length = iterator.target_sequence_length\n\n if self.time_major:\n utils.print_out(\"time major\")\n target_input = tf.transpose(target_input)\n target_output = tf.transpose(target_output)\n target_sequence_length = tf.transpose(target_sequence_length)\n\n target_max_time = self.get_max_time(target_output)\n dec_target_mask = tf.sequence_mask(target_sequence_length, target_max_time, dtype=tf.float32)\n self._target_batch = target_output\n\n\n emb_dec_inputs = [tf.nn.embedding_lookup(self.embedding_decoder, x) for x in tf.unstack(target_input,\n axis=1, num =150)] # list length max_dec_steps containing shape (batch_size, emb_size)\n\n\n\n source_max_time = self.get_max_time(source)\n\n en_src_mask = tf.sequence_mask(source_sequence_length, source_max_time, dtype=tf.float32)\n if self.time_major:\n en_src_mask = tf.transpose(en_src_mask)\n\n decoder_outputs, dec_out_state, attn_dists, p_gens, coverage = self.attention_decoder(hps, emb_dec_inputs, dec_in_state, encoder_outputs, en_src_mask, cell,\n initial_state_attention=False,\n copy_source=hps.copy_source,\n use_coverage=True,\n prev_coverage=None, scope =decoder_scope)\n\n\n logits = self.output_layer(tf.stack(decoder_outputs))\n\n coverage_loss = _coverage_loss(attn_dists, dec_target_mask)\n\n\n\n else:\n # maximum_iteration: The maximum decoding steps.\n maximum_iterations = self._get_infer_maximum_iterations(\n hps, source_sequence_length)\n beam_width = hps.beam_width\n length_penalty_weight = hps.length_penalty_weight\n start_tokens = tf.fill([self.batch_size], tgt_sos_id)\n end_token = tgt_eos_id\n\n if beam_width > 0:\n utils.print_out(\"beam search activated\")\n my_decoder = 
tf.contrib.seq2seq.BeamSearchDecoder(\n cell=cell,\n embedding=self.embedding_decoder,\n start_tokens=start_tokens,\n end_token=end_token,\n initial_state=decoder_initial_state,\n beam_width=beam_width,\n output_layer=self.output_layer,\n length_penalty_weight=length_penalty_weight)\n else:\n # Helper\n utils.print_out(\"greedy decoding activated\")\n\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n self.embedding_decoder, start_tokens, end_token)\n\n # Decoder\n my_decoder = tf.contrib.seq2seq.BasicDecoder(\n cell,\n helper,\n dec_in_state,\n output_layer=self.output_layer # applied per timestep\n )\n\n # Dynamic decoding\n outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n my_decoder,\n maximum_iterations=maximum_iterations,\n output_time_major=self.time_major,\n swap_memory=True,\n scope=decoder_scope)\n\n if beam_width > 0:\n logits = tf.no_op()\n sample_id = outputs.predicted_ids\n # if self.time_major:\n # sample_id = tf.transpose(sample_id, perm=[1, 2, 0])\n # utils.print_out(\"transpose activated for sampleid \")\n\n\n\n else:\n logits = outputs.rnn_output\n sample_id = outputs.sample_id\n\n return coverage_loss,logits, sample_id, final_context_state", "title": "" }, { "docid": "50fb28d8eceeebd3b6d5fbe52fee43a2", "score": "0.515891", "text": "def frameReceived(opcode, data, fin):", "title": "" }, { "docid": "c3a1d27deddb7c84802e783c0bb8789f", "score": "0.5157705", "text": "def parser_hook(self, raw: bytes) -> List[Dict]:\n raise NotImplementedError", "title": "" }, { "docid": "17759e404ca12de5f656c9267afbe4b9", "score": "0.51533115", "text": "def _decoder(self, inputs, all_states):\n with tf.variable_scope(\"decoder\", reuse=tf.AUTO_REUSE):\n enc_pad_mask, enc_doc_mask, dec_in = inputs[: 3]\n\n # Concatenating the pad masks of all sentences.\n enc_pad_mask = tf.reshape(enc_pad_mask,\n [-1, FLAGS.max_enc_sent*FLAGS.max_enc_steps_per_sent]) # Shape: B x T_enc.\n\n # Converting ID's to word vectors.\n dec_in_vecs = tf.nn.embedding_lookup(params=self._word_embedding, ids=dec_in) # Shape: B x T_dec x D.\n dec_in_vecs = tf.cast(dec_in_vecs, dtype=tf.float32) # Shape: B x T_dec x D.\n\n # Memory masks.\n mem_masks = [enc_pad_mask, enc_doc_mask] # Shape: [B x T_enc, B x S_in].\n\n # NSE internal states.\n sent_mems, doc_mem = all_states[0] # Shape: B x T_enc x D, B x S_in x D.\n state = [[sent_mems, doc_mem]] + all_states[1:]\n\n writes = []\n p_attns = []\n p_gens = []\n for i in range(FLAGS.dec_steps):\n\n x_t = dec_in_vecs[:, i, :] # Shape: B x D.\n output, state = self._nse.step(\n x_t=x_t, mem_masks=mem_masks, prev_state=state, use_pgen=FLAGS.use_pgen\n )\n\n # Appending the outputs.\n writes.append(output[0])\n p_attns.append(output[1])\n\n if FLAGS.use_pgen:\n p_gens.append(output[2])\n\n p_vocabs = self._get_vocab_dist(writes) # Shape: T_dec * [B x vsize].\n\n p_cums = p_vocabs\n if FLAGS.use_pgen:\n enc_in_ext_vocab = inputs[3]\n p_cums = self._get_cumulative_dist(\n p_vocabs, p_gens, p_attns, enc_in_ext_vocab\n ) # Shape: T_dec * [B x ext_vsize].\n\n if FLAGS.mode.lower() == \"test\":\n p_final = p_cums[0] # Shape: Bm x V.\n\n topk_probs, topk_ids = tf.nn.top_k(p_final, k=2*FLAGS.beam_size, name=\"topk_preds\")\n topk_log_probs = tf.log(tf.clip_by_value(topk_probs, 1e-10, 1.0))\n\n outputs = [topk_ids, topk_log_probs, state, p_attns]\n if FLAGS.use_pgen:\n outputs.append(p_gens)\n\n return outputs\n else:\n return p_cums, p_attns", "title": "" }, { "docid": "191eb1a3af3210354b12e9bae77ef99b", "score": "0.51462364", "text": "def __init__(self):\n 
self._last_decoding = None\n self._cs = None\n self._firstaddr = None\n self._arch = None\n self.valid_archs = None\n self.modes = None\n Backend.__init__(self)", "title": "" }, { "docid": "7310867d5f707f28d8c7bd81dd1363e9", "score": "0.51425415", "text": "def decode(self, decoder_input, tgt_lang=None):\n\n cell_input = torch.cat((decoder_input, self.attention_context), -1)\n # B, prenet_dim + model_dim\n self.attention_hidden, self.attention_cell = self.attention_rnn(\n cell_input, (self.attention_hidden, self.attention_cell), tgt_lang)\n # B, model_dim\n self.attention_hidden = F.dropout(\n self.attention_hidden, self.p_attention_dropout, self.training)\n\n attention_weights_cat = torch.cat(\n (self.attention_weights.unsqueeze(1),\n self.attention_weights_cum.unsqueeze(1)), dim=1)\n # B, 2 , Max_time\n # attention_hidden should be query\n self.attention_context, self.attention_weights = self.attention_layer(\n self.attention_hidden, self.memory, self.processed_memory,\n attention_weights_cat, self.mask, tgt_lang)\n\n self.attention_weights_cum += self.attention_weights\n decoder_input = torch.cat(\n (self.attention_hidden, self.attention_context), -1)\n # B, 2* model_dim\n self.decoder_hidden, self.decoder_cell = self.decoder_rnn(\n decoder_input, (self.decoder_hidden, self.decoder_cell), tgt_lang)\n self.decoder_hidden = F.dropout(\n self.decoder_hidden, self.p_decoder_dropout, self.training)\n\n decoder_hidden_attention_context = torch.cat(\n (self.decoder_hidden, self.attention_context), dim=1)\n decoder_output = self.linear_projection(\n decoder_hidden_attention_context, tgt_lang)\n # B, nmel * nframeperstep\n gate_prediction = self.gate_layer(decoder_hidden_attention_context, tgt_lang)\n\n return decoder_output, gate_prediction, self.attention_weights, self.attention_context", "title": "" }, { "docid": "3110cb915aad4720aa2bf939b094889f", "score": "0.5138669", "text": "def post_decrypt_data(self, data):\n# print(\"---RawRNCryptor---\")#debug\n# print(data) #debug\n# print(bord(data[-1]))#debug\n data = data[:-bord(data[-1])]\n \n return data", "title": "" }, { "docid": "33d5885b022fcf07af88b51eb092b2d0", "score": "0.5122174", "text": "def __init__(self, fileblock):\n hdr_sz = Datagram.hdr_dtype.itemsize\n H0_sz = Datagram.H0_dtype.itemsize\n self.header = np.frombuffer(fileblock, dtype = Datagram.hdr_dtype, count = 1)[0]\n self.decoded = False\n self.dtype = self.header[0]\n if np.frombuffer(fileblock[hdr_sz:hdr_sz+2], dtype = '>S2') == 'H0':\n self.subheader = np.frombuffer(fileblock, dtype = Datagram.H0_dtype, offset = hdr_sz, count = 1)[0]\n self.datablock = fileblock[hdr_sz+H0_sz:]\n # the snippet and truepix datagrams have a larger header...\n if self.dtype == 'SNI0' or self.dtype == 'TPX0':\n self.datablock = self.datablock[6*4:]\n else:\n self.datablock = fileblock[hdr_sz:]", "title": "" }, { "docid": "cad359e1ea1c317bc873c6e1fcd5d0f1", "score": "0.51208204", "text": "def get_decoder( model ):\n decod = [ l for l in model.layers if 'decoder' in l.name ]\n\n for dec in decod:\n if 'decoder' not in dec.name: \n print( \"Structure '{}' not valid as decoder architecture\".format( dec.name ) )\n sys.exit( 1 )\n\n if dec.layers[ 0 ].input_shape[ -1 ] != cnfg[ 'latent_size' ]: \n if 'latent_subsize' not in cnfg: \n print( \"Input size '{:d}' is not the expected '{}'\".format(\n dec.layers[ 0 ].input_shape[ -1 ], cnfg[ 'latent_size' ] ) )\n sys.exit( 1 )\n\n elif dec.layers[ 0 ].input_shape[ -1 ] != cnfg[ 'latent_subsize' ]: \n print( \"Input size '{:d}' is not the expected 
'{}'\".format(\n dec.layers[ 0 ].input_shape[ -1 ], cnfg[ 'latent_subsize' ] ) )\n sys.exit( 1 )\n\n # just to try a random latent space\n # ts.save_image( dec.predict( rnd.uniform(-3, 3) * np.random.rand(1,128) ), True, \"i.jpg\" )\n\n if len( decod ) == 1:\n return decod[ 0 ]\n\n return decod", "title": "" }, { "docid": "8a9c0da991ec2080f575adf808bd88b3", "score": "0.51069516", "text": "def _decode(self, data):\n return base64.b64decode(data)", "title": "" } ]
814a86ccb163c5aa3b30a811286a1675
Get the currently selected job
[ { "docid": "9ccb19c631ea05b86427f4c455904752", "score": "0.83990645", "text": "def get_job(self) -> Job:\n return self.jobs_list[self.sel_idx]", "title": "" } ]
[ { "docid": "af8b12dff6f2530fbbc6ac201ccc6a00", "score": "0.76543224", "text": "def job(self):\n return self.batch[self.job_id]", "title": "" }, { "docid": "c77c5c99ee3ce12ea54d8fa4e31cad56", "score": "0.74811244", "text": "def current_job(self):\n assert(ExecutorThread.executor_object is not None)\n return self.__job", "title": "" }, { "docid": "9ecc71e9f9dcc8380aa3973f37a4d3c5", "score": "0.74108416", "text": "def getJob(self, name=None):\n if name == None: \n name = self.jobstable.get_selectedRecordNames()[0]\n if name == None:\n return None, name\n jobid = self.DB.meta.peatsa_jobs[name]\n try:\n job = PEATSA.WebApp.Data.Job(jobid, self.connection)\n except:\n #print 'job not in database'\n return None,name\n return job, name", "title": "" }, { "docid": "0f4be7825549e1aa7c028eaea001baaf", "score": "0.73360646", "text": "def job(self):\n\n if self.current_bead is None:\n return None\n\n if self.jobs is None:\n RosProxy().notify(\"Can not find jobs.\", STATE.ERROR)\n return None\n\n _job = None\n for job in self.jobs.configurations:\n if job.job_number == self.current_bead.wps_job_number:\n return job\n\n return None", "title": "" }, { "docid": "c7f730bd9a2326fbb1000e8495107d10", "score": "0.7218985", "text": "def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()", "title": "" }, { "docid": "3d208b9f6ce656558644faa6d6530e96", "score": "0.70712274", "text": "def latest_job(self):\n return self.jobmanagers[self.current_network].latest_job", "title": "" }, { "docid": "50814c6ef52744dc8990d89a072cf79a", "score": "0.6990164", "text": "def get_job(self, identifier: str):\n self._log_operation('Getting job {i}'.format(i=identifier))\n return self._job_queue.get_job_details(identifier)", "title": "" }, { "docid": "aad51da0917c85589f9f232b8c594f8c", "score": "0.69777465", "text": "def job(self) -> str:\n return self._job", "title": "" }, { "docid": "aad51da0917c85589f9f232b8c594f8c", "score": "0.69777465", "text": "def job(self) -> str:\n return self._job", "title": "" }, { "docid": "5f1bdfb45c40c9b081591080fa4afdd6", "score": "0.693246", "text": "def get_job(self) -> CustomJob:\n return self._client.get_custom_job(name=self._job_name)", "title": "" }, { "docid": "5fabe3444209b9b8c69e97f9238714d6", "score": "0.6889623", "text": "def get_job(jid=None):\n if not jid:\n raise CommandExecutionError(\"ID option must not be none.\")\n\n query = {\"type\": \"op\", \"cmd\": \"<show><jobs><id>{}</id></jobs></show>\".format(jid)}\n\n return __proxy__[\"panos.call\"](query)", "title": "" }, { "docid": "d0f6443556c64dd8a6a0c80da380d552", "score": "0.6888683", "text": "def get_object(self) -> Job:\n project = ProjectPermissionsMixin.get_object(self)\n return project.jobs.get(id=self.kwargs[\"job\"])", "title": "" }, { "docid": "5067befa595f885794f913ec2cb322b5", "score": "0.6761618", "text": "def name(self):\n return self._job", "title": "" }, { "docid": "f5c480b954de55cb76b39c7da4a4aec9", "score": "0.67527044", "text": "def get_current_job(connection: Optional['Redis'] = None, job_class: Optional['Job'] = None) -> Optional['Job']:\n if connection:\n warnings.warn(\"connection argument for get_current_job is deprecated.\", DeprecationWarning)\n if job_class:\n warnings.warn(\"job_class argument for get_current_job is deprecated.\", DeprecationWarning)\n return _job_stack.top", "title": "" }, { "docid": "4b7cd238aad2a9761a46438d11c2daa1", "score": "0.66537917", "text": "def jobid(self):\n return self.get_db('jobid')", "title": "" 
}, { "docid": "78e3f2e10d4cf60bc8ea6f8525ff88ea", "score": "0.664405", "text": "def get_job(self, job_name):\n try:\n return self.json_dict['job definitions'][job_name]\n except KeyError:\n print('No job \"%s\" in %s' % (job_name, self.filepath))\n return None", "title": "" }, { "docid": "b6933f5b50956edf9b0c0726a8fcca91", "score": "0.66311663", "text": "def get_job(self, job_reference):\n url = 'jobs/{0}'.format(job_reference)\n result = self.get(url)\n return result.get('job', result)", "title": "" }, { "docid": "2d38772ab8b7b8ad1802c89a71c7a45f", "score": "0.6616254", "text": "def _get_job(self, uid):\n try:\n return self._jobs[uid]\n except KeyError:\n raise JobNotFoundError('job \\'%s\\' is not found' % (uid,))", "title": "" }, { "docid": "93b6ad4303cba70a1a992da909f5ba2c", "score": "0.65424514", "text": "def getNode(self):\r\n try:\r\n output,error = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()\r\n if self.jobId in output:\r\n return output.split(\"\\t\")[7]\r\n if len(error) > 0:\r\n logging.error(error)\r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "title": "" }, { "docid": "0b5833b47c71c6580a7c4caffdaae616", "score": "0.6541083", "text": "def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")", "title": "" }, { "docid": "8a5c43603b02703bf2d1d585b8a374cd", "score": "0.64970195", "text": "def current_task(self):\n try:\n return self.active_item(remove=False)\n except queue.Empty:\n return None", "title": "" }, { "docid": "7547812ad097f1785dbbad4e18e7eba3", "score": "0.6488299", "text": "def fetchJob(self):\n \n mpDlg = MultipleValDialog(title='Get Job',\n initialvalues=('','my job1'),\n labels=('ID','Your label',),\n types=('string','string'),\n parent=self.mainwin)\n if mpDlg.result == True:\n jobid = mpDlg.results[0]\n name = mpDlg.results[1]\n else:\n return\n job = PEATSA.WebApp.Data.Job(jobid, self.connection) \n if job != None: \n print 'adding job id %s to list' %job.identification\n self.storeJob(name, job)\n self.updateJobs()\n return", "title": "" }, { "docid": "3c3ef46cb2d48b5039f1f46284060519", "score": "0.64742666", "text": "def job_id(self):\n return self._job_id", "title": "" }, { "docid": "18e6652ba9e4bcab35480341824c784c", "score": "0.6451212", "text": "def get_job(arn=None):\n pass", "title": "" }, { "docid": "9029ee9214b222b84e27cdbce6fb4666", "score": "0.64294446", "text": "def get_current_url(self,\n job_name: str) -> str:\n\n self.cur.callproc('job_get_current_url_SP', (job_name, ))\n job_state = self.cur.fetchone()\n\n if job_state is None:\n raise ValueError('Job is unknown!')\n if job_state[0] is not None:\n # Field 0 contains the status: finished (not None) or not (None)\n raise RuntimeError(f\"Job {job_name} already finished.\")\n # Field 1 contains either the current or the start URL\n return job_state[1]", "title": "" }, { "docid": "06483d882088e84f35e61319a12d4a6e", "score": "0.6415561", "text": "def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details", "title": "" }, { "docid": "84b55ffbeedd0ff6fba729ba8cffee39", "score": "0.64076096", "text": "def get_job_shared(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_shared',\n [job], self._service_ver, context)", "title": "" }, { "docid": "d8b638f76c9ceacb45d3117d7aca9c33", "score": "0.64054716", "text": "def getJobID(self):\n return self.__nupicJobID", "title": "" }, { "docid": 
"b44597406b159ab9d484fab9d4d69923", "score": "0.63929665", "text": "def get_current_task(self):\n return self.get_current_step().get_last_task()", "title": "" }, { "docid": "e7f504259ec7155499cb42dc084a3206", "score": "0.6363812", "text": "def get_job_name(self) -> Text:\n return self._job_name", "title": "" }, { "docid": "c5f784aecdda3e0e6f0e0006c09128a5", "score": "0.6363635", "text": "def GetSelection(self):\r\n\r\n return self._current", "title": "" }, { "docid": "3c336258e50fc9c183446912b0b6c3b6", "score": "0.63534665", "text": "def getSelected(self):\n selected = self.defaultChoice\n if self.tableSelected is not None:\n selected = self.tableSelected.getString(self.defaultChoice)\n return self.map.get(selected)", "title": "" }, { "docid": "6990697f8730aa95d7b6826bddfe5f8f", "score": "0.63476473", "text": "def getSelectedItem(self):\n return self.selected", "title": "" }, { "docid": "6b61c917a7fc487b76212e3453451189", "score": "0.6322638", "text": "def get_job_for_worker(self, worker_name):\n job = self._in_queue.pop(0)\n self._workers[worker_name].active_jobs.append(job)\n return job.arguments", "title": "" }, { "docid": "dda09c5deca9e8e16820e57870902346", "score": "0.629666", "text": "def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure", "title": "" }, { "docid": "48ea24f4da972ded10e31515a51f3a0d", "score": "0.62954223", "text": "def getSelectedItem(self):\n currentIndex = self.table.selectionModel().currentIndex()\n return self.model.itemFromIndex(currentIndex)", "title": "" }, { "docid": "7aa6d233a7345f50ef72c40ab0641c37", "score": "0.6293708", "text": "def get_selected(self):\n return self.selected", "title": "" }, { "docid": "c2a5b93c970dbaf6db845cd3b0618f2a", "score": "0.62937015", "text": "def selected(self):\n return self._choices[self._selected][0]", "title": "" }, { "docid": "e87e801ef46c550a8a8a41324d368793", "score": "0.62896395", "text": "def getJob(uniq):\n return Job(Cuebot.getStub('job').GetJob(\n job_pb2.JobGetJobRequest(id=uniq), timeout=Cuebot.Timeout).job)", "title": "" }, { "docid": "8799724c5d97b8bb12c3e70cc82f850b", "score": "0.62876314", "text": "def current_choice(self):\n\t\treturn self.choice_data_list[self.select_index]", "title": "" }, { "docid": "00aa089818698a12166c474195780082", "score": "0.628369", "text": "def _get_current_task():\r\n return current_task", "title": "" }, { "docid": "00aa089818698a12166c474195780082", "score": "0.628369", "text": "def _get_current_task():\r\n return current_task", "title": "" }, { "docid": "61950c11040d5cb19c6d357215e525e6", "score": "0.62808716", "text": "def job_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_name\")", "title": "" }, { "docid": "d057c3b5b1e5bd91b71dcb5fc03b12f5", "score": "0.6272014", "text": "def current_val(self):\n try:\n return self.listbox.get(self.listbox.curselection()[0])\n except IndexError:\n raise KeyError(\"Nothing selected\")", "title": "" }, { "docid": "34c3dadd642a1b239b1a06a27ac28726", "score": "0.62645125", "text": "def current_job(user):\n logs = user.log_set.filter(finish__isnull=True)[:1]\n if logs:\n log = logs[0]\n result = LabelResponse(log.job.name,\n log.get_duration_display())\n else:\n log = user.log_set.latest()\n result = LabelResponse('Not Working',\n str(timezone.localtime(log.start).date()))\n\n return result", "title": "" }, { "docid": "a304b5fe9b384b5ce8d6417267e66456", "score": "0.6261799", "text": "def current_state(self) -> Optional[pulumi.Input['JobCurrentState']]:\n 
return pulumi.get(self, \"current_state\")", "title": "" }, { "docid": "a696973960bc6764197621cc4a83ad67", "score": "0.6252568", "text": "def get_job(self) -> Union[Dict[Text, Text], CustomJob]:\n pass", "title": "" }, { "docid": "b93d4e1971fc66390e4efc9f8c09438f", "score": "0.6224905", "text": "def retrieve_job(self, job_id):\n job = {}\n with self._lock:\n if job_id not in self._jobs:\n return None\n job = self._jobs[job_id]\n return job", "title": "" }, { "docid": "ba73ba8de2269a2cad9d8eb0750e9399", "score": "0.6220498", "text": "def last_job(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_job')", "title": "" }, { "docid": "94a3a533b396c02b1190034b7e21e977", "score": "0.6218048", "text": "def retrieve_job(self, job_id) -> AzureQuantumJob:\n return self._provider.get_job(job_id)", "title": "" }, { "docid": "9b2d9b92e8816572122138224818709d", "score": "0.61945635", "text": "def get_job_by_id(self, job_id):\n return self.get_resource(category=SYSTEM, resource_level=JOB,\n resource_level_id=job_id)", "title": "" }, { "docid": "8b46be80f8dd216a7bee66c7f522528e", "score": "0.61901975", "text": "def __jobSelectedHandle(self, job):\n if job:\n self.__jobSelectedLineEdit.setText(job.name())\n else:\n self.__jobSelectedLineEdit.setText(\"\")", "title": "" }, { "docid": "0c4b8260e2688fee5cba617ca1b02eb7", "score": "0.61812234", "text": "def get_selected(self):\n\n # if there are items in the list\n if self.currentIndex() != -1:\n modelitem = self._dictmodel.itemFromIndex(\n self._modelindexes[ self.currentIndex() ]\n )\n return modelitem", "title": "" }, { "docid": "e04b55768b1e2385bb292f248653450c", "score": "0.61712205", "text": "def job_name(self) -> str:\n return pulumi.get(self, \"job_name\")", "title": "" }, { "docid": "c6184e26543769efb4fc3d0c88ae5ab8", "score": "0.6162487", "text": "def get_currently_selected_configuration(self):\n return self.get_container().currently_selected_configuration", "title": "" }, { "docid": "d0a9d2cdb4d593d07b230b49e7c7c5ac", "score": "0.6156431", "text": "def job_id(self):\n return self._properties.get(\"jobReference\", {}).get(\"jobId\")", "title": "" }, { "docid": "573b6befb6f32323630c4b1e95ebc7a9", "score": "0.6136343", "text": "def get_jobs_id(self, ti) -> None:\n return self.get_hook().get_jobs_id(ti)", "title": "" }, { "docid": "17fe9d59c386516b87879a2e119685b2", "score": "0.6074745", "text": "def GetSelection(self):\n return self.__selected_item", "title": "" }, { "docid": "10d2cae850564ddc6c57257a4b75b7d3", "score": "0.60608876", "text": "def GetSelection(self):\n \n return self.selected", "title": "" }, { "docid": "e27824548380df250ad465f6b8d9594a", "score": "0.60586756", "text": "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "title": "" }, { "docid": "3f0e2fde101431fcae6ef47742544cba", "score": "0.6044516", "text": "def job_id(self) -> JobId:\r\n return self._job_id", "title": "" }, { "docid": "36ae7856d81d2a3194dac50821ca271f", "score": "0.6035905", "text": "def get_current(self):\n return self.current", "title": "" }, { "docid": "520d86fc8aae26bbe4bb10391ad5b948", "score": "0.60348165", "text": "def get_label(self):\n return self.job[self.label_key]", "title": "" }, { "docid": "cb3828c0f8848e71ae6696f2990e718b", "score": "0.6017808", "text": "def jobId(self):\n returnself._ShREEKConfig.jobId()", "title": "" }, { "docid": "5235decd2a9a2db28bec2e88630097db", "score": "0.6017586", "text": "def current_workflow():\n try:\n return 
current_worker_pool.workflow\n except AttributeError:\n return None", "title": "" }, { "docid": "28c59d575fa715d62c15403eccd5b89e", "score": "0.60086626", "text": "def job_name(self):\n return self._stub.List(self._message).job_name", "title": "" }, { "docid": "9f73e5bdf81008391aafc240cec8a3c2", "score": "0.6006928", "text": "def get_state(self):\n\t\treturn Job(SDK.PrlVm_GetState(self.handle)[0])", "title": "" }, { "docid": "6e3374bbc7061f1cb82a9df8d0622399", "score": "0.6006506", "text": "def retrieve(received_job_id: str) -> Union[Job, None]:\n # todo: add error handling\n found_job = db.Jobs().get_by_id(received_job_id)\n if not found_job:\n return\n return found_job", "title": "" }, { "docid": "7c67b16d573ad63bd0b8d0a768ef6227", "score": "0.6005077", "text": "def get_job(self, _id):\n data = {\n 'class': 'Job',\n 'id': _id,\n 'attrs': {},\n }\n job = self.db_client.send_request('list', json.dumps(data))\n\n return Job(\n _id=job['id'],\n _type=job['type'],\n task=job['task'],\n command=job['command'],\n input_parameters=job['inputParameters'],\n status=job['status'],\n runner_id=job['runner'],\n )", "title": "" }, { "docid": "38c3db93e876eb6ee1d8016539e54fd4", "score": "0.59741336", "text": "def getCurrentRow(self):\n item = self.getSelectedItem()\n if item:\n return item.row()\n return None", "title": "" }, { "docid": "9b9933fb10f52a3bd51ab80868a1aa3d", "score": "0.5969369", "text": "def get_jobs_connection(self):\n return self.m_connection.jobs", "title": "" }, { "docid": "bbaebc5bd6fe50426934c552c1f7ffc2", "score": "0.5965098", "text": "def current_worker():\n try:\n return worker_thread_data.worker\n except AttributeError:\n return None", "title": "" }, { "docid": "878489e7c2a1603e25b729f1341fc9a7", "score": "0.5961013", "text": "def get_current_option(self) -> str:\n return self.options[self.current_option_index]", "title": "" }, { "docid": "1c9c4404705a410a6ea6a61cbc01a613", "score": "0.59596807", "text": "def getJobName():\n return os.environ['LCATR_JOB']", "title": "" }, { "docid": "045565465f0aa8d4f7385cec41a11693", "score": "0.59542495", "text": "def job_type(self):\n return self._job_type", "title": "" }, { "docid": "cfcfa494646e3e8a703b0af529d15b25", "score": "0.5949728", "text": "def GetCurrentItem(self):\r\n\r\n return self._current", "title": "" }, { "docid": "973ee774435ab56cd5ab0a1d36d9926b", "score": "0.5926058", "text": "def _get_job_id(self) -> str:\n return self.split_name[2][3:]", "title": "" }, { "docid": "512270a56166f65baecbffc96e2d95eb", "score": "0.5916962", "text": "async def get_job_state(jobId: int) -> State: \n return mngr.getStateJob(str(jobId))", "title": "" }, { "docid": "eff3e39af6070d08e905aa20fa9ce090", "score": "0.59167755", "text": "def find(self, job):\n for i, candidate_job in pendulate(self.jobs):\n if candidate_job == job:\n return i\n raise ValueError(\"Job not found in batch\")", "title": "" }, { "docid": "9403f019c08356c1fc189609fd60bb60", "score": "0.5916563", "text": "def job_name(self) -> str:\n return self._step_execution_context.job_name", "title": "" }, { "docid": "8cc211e741a2fe21697242139e70911d", "score": "0.59097177", "text": "def getSelection(self):\n return self.selection", "title": "" }, { "docid": "e783a91342650ad40ee9a456bf210691", "score": "0.59074605", "text": "def get_job_id(self):\n return {'job_id': self._job_id}", "title": "" }, { "docid": "3bd56a3cbdea81175a199406a62cc71a", "score": "0.59040135", "text": "def get_task(self, profile):\n task = None\n if self._value.has_option(profile, 'task'):\n task = 
self._value.get(profile, 'task')\n\n self.logger.info(\"%s is selected as task\" % task)\n return task", "title": "" }, { "docid": "e83f2595d6df75f56c2be24e96bfcaea", "score": "0.58960843", "text": "def getCurrent(self):\n return self.__current", "title": "" }, { "docid": "3ccc3b35a0173536aed4513549531203", "score": "0.5889516", "text": "def get_current(self, event=None):\n childes = self.nb.winfo_children() # return the list objects of child widgets of notebook[tab widget]\n return childes[self.nb.index('current')].winfo_children()[0]", "title": "" }, { "docid": "84cc4e6ed7bced348b0b1c7b01ed0d52", "score": "0.5887606", "text": "def get_job(self, id, jobstore=None):\n\n return self._scheduler.get_job(id, jobstore)", "title": "" }, { "docid": "e27689e9b4349ce17f0bd0022e7bda56", "score": "0.5877996", "text": "def get_task(self):\n return self.queue.get()", "title": "" }, { "docid": "8525cc2fb6d0683d168b8e34cfa10804", "score": "0.58687586", "text": "def job_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_type\")", "title": "" }, { "docid": "4a277c3be41e9a6591244a8554a5611b", "score": "0.5860497", "text": "def GetSelection(self):\r\n\r\n return self.selection", "title": "" }, { "docid": "8957f0306906c63c116652f12e99f457", "score": "0.58585596", "text": "def get_thread(self):\n return self.threads[self.thread_id]", "title": "" }, { "docid": "19d37f5ca57dcec4c1760f90d8a64851", "score": "0.58583784", "text": "def latest_job():\n try:\n return models.SyncJob.objects.latest('start')\n except models.SyncJob.DoesNotExist:\n return False", "title": "" }, { "docid": "46f4e1f0b8c9d2bf7c3b3107a67e09fe", "score": "0.58579504", "text": "def current_item(self) -> MenuItem | None:\n if not self.all_items:\n return None\n else:\n return self.all_items[self.current_option]", "title": "" }, { "docid": "900ccee2e5ea45bd7bffdc7dda61f13d", "score": "0.584532", "text": "def reference(self) -> pulumi.Output['outputs.JobReference']:\n return pulumi.get(self, \"reference\")", "title": "" }, { "docid": "f6a5bcf3e0c88bf1affe903e77cc038e", "score": "0.5831991", "text": "def _retrieve_job_id(job_name, res_id):\n active_jobs = celery_inspector.active()\n job_id = _retrieve_task_id(job_name, res_id, active_jobs)\n if not job_id:\n reserved_jobs = celery_inspector.reserved()\n job_id = _retrieve_task_id(job_name, res_id, reserved_jobs)\n if not job_id:\n scheduled_jobs = celery_inspector.scheduled()\n job_id = _retrieve_task_id(job_name, res_id, scheduled_jobs)\n return job_id", "title": "" }, { "docid": "3f8b9eba1601c0ad75cf1873a5f36f52", "score": "0.5828973", "text": "def job(job_name):\n ClientID = Job.get_client_id(job_name)\n return tasks_for_client_job(ClientID, job_name)", "title": "" }, { "docid": "088cec76e613d82c4abf916190234e8b", "score": "0.58223146", "text": "def get_object(self, queryset=None):\n # 404 if job doesn't exist\n try:\n job = Job.objects.select_related().get(pk=self.kwargs['pk'])\n except Job.DoesNotExist:\n raise Http404(\"No Job with PK#{} found.\".format(self.kwargs['pk']))\n\n # Staff can see all jobs\n if self.request.user.is_staff:\n return job\n\n # Creator can see their own jobs no matter the status\n if job.creator == self.request.user:\n return job\n\n # For everyone else the job needs to be visible\n if job.visible:\n return job\n\n # Return None to signal 401 unauthorized\n return None", "title": "" }, { "docid": "99be3a98c227ea75ed580c60a5620fa8", "score": "0.5821088", "text": "def getJob(appName, jobId):\n jobs = db.getJobs(jobId=jobId)\n job = None if len(jobs) == 0 
else jobs[0]\n k3job = dispatcher.getJob(int(jobId))\n\n if job == None:\n return returnError(\"Job ID, %s, does not exist\" % jobId, 404)\n\n thisjob = dict(job, url=dispatcher.getSandboxURL(jobId))\n if k3job != None:\n thisjob['master'] = k3job.master\n local = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId)).encode(encoding='utf8', errors='ignore')\n path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId),'role.yaml').encode(encoding='utf8', errors='ignore')\n if os.path.exists(local) and os.path.exists(path):\n with open(path, 'r') as role:\n thisjob['roles'] = role.read()\n else:\n return returnError(\"Job Data no longer exists\", 400)\n\n thisjob['sandbox'] = sorted (os.listdir(local))\n\n if 'application/json' in request.headers['Accept']:\n return jsonify(thisjob)\n else:\n return render_template(\"last.html\", appName=appName, lastjob=thisjob)", "title": "" }, { "docid": "fd85fbcf857c13ba9729a8ea751e478e", "score": "0.5818568", "text": "def getSelected(self):\n\n return self._streams[self._selectedStream]", "title": "" }, { "docid": "c0db0d692b30ab8db20386f7587f5100", "score": "0.581538", "text": "def next_and_lock(self):\n\n if self._cur_job:\n log.error(\"There is already an active job! Please call done() or failed() on the job first.\")\n return None\n\n # Loop until we found a job\n while True:\n\n jobs = self._ls_waiting_jobs()\n\n # No more jobs\n if not jobs:\n return None\n\n if self._rnd_job:\n job = random.choice(jobs)\n else:\n job = sorted(jobs)[0]\n\n log.debug(\"Selected job: %s\" % job)\n\n wjob = \"%s.%s\" % (self._wid, job)\n\n src = pjoin(self._jobsdir, \"00_waiting\", job)\n dst = pjoin(self._jobsdir, \"01_running\", wjob)\n\n try:\n self._move(src, dst)\n except FileNotFoundError:\n log.warning(\"Job %s does not exist anymore. Moving on\" % src)\n continue\n\n if not self._worker_sync:\n return Job(self, \"01_running\", job)\n else:\n\n job = self._job_worker_selection(job)\n\n if job is not None:\n return job", "title": "" }, { "docid": "4f9fd1262ddea417bb84a554bbce314d", "score": "0.58028716", "text": "def selected_item(self) -> MenuItem | None:\n if self.selected_option == -1:\n return None\n else:\n return self.all_items[self.selected_option]", "title": "" } ]
02855788f1ab0f7e4223d629dd005483
Recursive function that checks for nonindexable columns.
[ { "docid": "8a7947e76ac9931b926d1f3a3c9fbd55", "score": "0.5383323", "text": "def check_indexable(token_list):\n for tok in token_list.tokens:\n if tok.ttype == tokens.Name:\n col = self.schema.get_column(self.table.value, tok.value)\n if col and not col.indexable:\n raise NotIndexableError()\n elif isinstance(tok, (sql.Comparison, sql.Identifier)):\n check_indexable(tok)", "title": "" } ]
[ { "docid": "e3fa63a2b75dd4515481dc104bb244c8", "score": "0.6160125", "text": "def _check(self, df):\n cols_to_check = []\n try:\n cols_to_check.append(getattr(self, 'field'))\n except AttributeError:\n pass\n try:\n cols_to_check += getattr(self, 'fields')\n except AttributeError:\n pass\n _check_columns(df, cols_to_check)", "title": "" }, { "docid": "41f4981bab0519aa652e7f6bc27d2192", "score": "0.61310595", "text": "def check_cols_not_present(df, disallowed_cols):\n\n if len(disallowed_cols & set(df.columns)) > 0:\n raise ValueError(\n \"Input metadata file already includes at least one of the \"\n \"following columns: {}\".format(disallowed_cols)\n )", "title": "" }, { "docid": "9c00f30bb00881dda27be3615d504edd", "score": "0.60815316", "text": "def _check_columns(self, df: pd.DataFrame, col_list: list, additional_str=\"\"):\n\n df_cols = df.columns\n for col in col_list:\n if col not in df_cols:\n print(\"Available colunms:\", df_cols)\n raise KeyError(f\"Column '{col}' doesn't exist in data. {additional_str}\")", "title": "" }, { "docid": "18ea98cbf60a3533d605aecf61976dfb", "score": "0.60659856", "text": "def _test_no_missing_columns(self, model):\n obj = model()\n for field in obj._meta.fields:\n if field.name == 'id':\n continue\n if _has_default(field):\n continue\n if field.name not in self and field.name not in self.index.names:\n raise PandasLovesPoniesException('missing column: %s' % field.name)\n return True", "title": "" }, { "docid": "bbd5c403b3e3800609f7151c43170526", "score": "0.5997924", "text": "def find_multi_val_cols(df, ignore_index_col=True, exception_cols=[]):\r\n multi_val_cols = []\r\n first_col = ignore_index_col\r\n for col in df:\r\n if first_col:\r\n first_col = False\r\n continue\r\n else:\r\n if col in exception_cols:\r\n continue\r\n else:\r\n if df[col].nunique() > 1:\r\n multi_val_cols.append(col)\r\n else:\r\n pass\r\n return multi_val_cols", "title": "" }, { "docid": "524ab3b6258fe3dcaa5510f45933c5ab", "score": "0.58972174", "text": "def _emptyColumn(self, index):\n list = [x[index] for x in self._aliens]\n count = 0\n for spot in list:\n if spot == None:\n count -= 1\n else:\n count += 1\n if count >= 0:\n return False\n else:\n return True", "title": "" }, { "docid": "69eb5f1ab0f68ed85866ced30689a978", "score": "0.5878816", "text": "def test_is_indexable(self):\n # verify -----------------------\n try:\n self.columns[0]\n except TypeError:\n msg = \"'_ColumnCollection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "title": "" }, { "docid": "69eb5f1ab0f68ed85866ced30689a978", "score": "0.5878816", "text": "def test_is_indexable(self):\n # verify -----------------------\n try:\n self.columns[0]\n except TypeError:\n msg = \"'_ColumnCollection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "title": "" }, { "docid": "bcc3efaa1ed1a08a54850274da46f2eb", "score": "0.5871422", "text": "def _all_nat_check(df):\n return np.all(pd.isnull(df))", "title": "" }, { "docid": "8eed673dbfb0c0b54fba0094851b0601", "score": "0.5846668", "text": "def test_col_idx_bad(self, api_objects, data):\n with pytest.raises(pytan3.api_objects.exceptions.ModuleError):\n data.columns[\"Nope\"]", "title": "" }, { "docid": "8eed673dbfb0c0b54fba0094851b0601", "score": "0.5846668", "text": "def test_col_idx_bad(self, api_objects, data):\n with pytest.raises(pytan3.api_objects.exceptions.ModuleError):\n data.columns[\"Nope\"]", "title": "" }, { "docid": "2485e4da00783debc26c3c850372f1d8", "score": "0.5808011", "text": 
"def __nonzero__(self):\n return bool(self._columns)", "title": "" }, { "docid": "a2c577628194b9f3df5994966c09e99f", "score": "0.5775897", "text": "def validate(self):\n first = self.statement.tokens[0].value\n if first != 'SELECT':\n raise UnexpectedError(first)\n elif self.statement.token_next(1).match(tokens.Wildcard, '*'):\n raise WildcardError()\n elif not self.where:\n raise UnexpectedEndError()\n elif not self.table:\n raise UnexpectedError('WHERE')\n\n def check_indexable(token_list):\n \"\"\"Recursive function that checks for non-indexable columns.\"\"\"\n for tok in token_list.tokens:\n if tok.ttype == tokens.Name:\n col = self.schema.get_column(self.table.value, tok.value)\n if col and not col.indexable:\n raise NotIndexableError()\n elif isinstance(tok, (sql.Comparison, sql.Identifier)):\n check_indexable(tok)\n\n check_indexable(self.where)", "title": "" }, { "docid": "1c9b91431a2b18c1e99fa980220819a2", "score": "0.5774767", "text": "def check_for_missing_columns(df, col_names):\n missing_columns = list(OrderedSet(col_names) - OrderedSet(df.schema.names))\n\n if len(missing_columns) > 0:\n RaiseIt.value_error(missing_columns, df.columns)\n\n return False", "title": "" }, { "docid": "1accd1105a1deb7bbd500dfdc83f7ea0", "score": "0.57646555", "text": "def missing_key_columns(args, unreconciled):\n error = False\n for column in [args.group_by, args.key_column]:\n if column not in unreconciled.columns:\n error = True\n print('ERROR: \"{}\" is not a column header'.format(column))\n return error", "title": "" }, { "docid": "806c0a2d3888cbd6b2fc360a48b12649", "score": "0.57543796", "text": "def validate_columns(self, df):\n expected_df_columns = pd.DataFrame(columns=self.expected_columns)\n\n columns_too_many = df.columns.difference(expected_df_columns.columns)\n if not len(columns_too_many) == 0:\n print('The provided dataframe has too many columns:', *columns_too_many, sep='\\n')\n\n columns_too_few = expected_df_columns.columns.difference(df.columns)\n if not len(columns_too_few) == 0:\n print('The provided dataframe is missing the following columns:', *columns_too_few, sep='\\n')\n\n return len(columns_too_many) + len(columns_too_few) == 0", "title": "" }, { "docid": "f31b795808c49920c0eb8bf8b06c5f01", "score": "0.5747699", "text": "def missing_columns(self):\n return self._row.isnull()", "title": "" }, { "docid": "5df080767c2941dda03c486ba9b47ce4", "score": "0.5742666", "text": "def verification(df: pd.DataFrame, column_name: str) -> None:\r\n types = []\r\n for index in df.index:\r\n if type(df.loc[index, column_name]) not in types:\r\n types.append(type(df.loc[index, column_name]))\r\n print(column_name, ':\\t', types)", "title": "" }, { "docid": "d5ffe361bcea013654e11771c715bdaa", "score": "0.5742278", "text": "def check_columns(self, df: pd.DataFrame) -> None:\n\n try:\n df = df.loc[:, [\"group\", self.normalizer, self.target]]\n except KeyError:\n LOGGER.info(\n f\" KeyError. 
Columns: group, target, an/or normalizer not in table \"\n f\"columns:{df.columns}\"\n )\n # import pdb; pdb.set_trace()\n raise", "title": "" }, { "docid": "4d4cd993dae4ce2d0ace105e836ae8e8", "score": "0.57144254", "text": "def _is_unreferenced_column_node(parent, node):\n return parent.label() == 'COLUMN_REFERENCE' \\\n and len(parent) > 1 \\\n and parent[1].label() == 'COLUMN_NAMES' \\\n and node.label() == 'COLUMN_NAMES'", "title": "" }, { "docid": "27d1a5422342d4eb826dc324df8c9be4", "score": "0.57092535", "text": "def _check_nan(df, *columns):\n\n for col in columns:\n if col not in df.columns:\n raise ValueError(\"Column not found: {}\".format(col))\n\n if df[col].isnull().any():\n raise ValueError(\"Column must not contain NaN values: {}\".format(col))", "title": "" }, { "docid": "bb5a9af27fef7ede67606a6e79e876eb", "score": "0.5707324", "text": "def get_notnull_columns(df: pd.DataFrame, candidates: Iterable):\n return [col for col in candidates\n if col in df and not np.all(df[col].isnull())]", "title": "" }, { "docid": "f893ded9eaaff04d2800bcee88bee7d8", "score": "0.57054573", "text": "def ignored_column_indicator(self):\n return self.match_results == -2", "title": "" }, { "docid": "8ff13a201a4797a8ec4fbbda969e2b70", "score": "0.5684672", "text": "def colinspection(df):\n columns = df.columns.tolist()\n for col in columns:\n print(df[col].head())\n# print(5*'//')\n# print('Percentage of Missing data:', df[col].isnull().mean())\n print(10*'----')", "title": "" }, { "docid": "c8b9bb021e9788f04969b6aa07f20c60", "score": "0.56765926", "text": "def get_non_numerical_cols(df):\n\tnon_numerical_cols = []\n\n\tfor col_name in df.columns:\n\t\tif df[col_name].dtypes not in ['uint8', 'uint32', 'uint64', 'int8', 'int32', 'int64', 'float16', 'float32',\n\t\t\t\t\t\t\t\t\t 'float64']:\n\t\t\tnon_numerical_cols.append(col_name)\n\n\treturn non_numerical_cols", "title": "" }, { "docid": "051b6cbc64f588d9fc3691ea4b71dea0", "score": "0.56352067", "text": "def handle_missing_threshold(df, prop_required_column = .3, prop_required_row = .9):\n threshold = int(round(prop_required_column*len(df.index),0))\n df.dropna(axis=1, thresh=threshold, inplace=True)\n threshold = int(round(prop_required_row*len(df.columns),0))\n df.dropna(axis=0, thresh=threshold, inplace=True)\n return df", "title": "" }, { "docid": "a51a136d3bfcbe78acd877e1f9e78906", "score": "0.5632437", "text": "def validate_columns(args, column_types, unreconciled):\n plugins = util.get_plugins('column_types')\n plugin_types = list(plugins.keys())\n\n error = missing_headers(unreconciled, column_types, plugin_types)\n error |= missing_key_columns(args, unreconciled)\n\n if error:\n error_exit(unreconciled, plugin_types)", "title": "" }, { "docid": "4c5440de8118a8e6781f3be6051164c2", "score": "0.5624304", "text": "def check_missing(df, col): \n return df[pd.isnull(df[col])]", "title": "" }, { "docid": "f95c9658c42145b083486bbb4c5d482c", "score": "0.5571803", "text": "def _test_invalid_nulls(self, model):\n from django.db.models import fields\n obj = model()\n nonnull_fields = [x for x in obj._meta.fields\n if not x.null and x.name != 'id']\n for field in nonnull_fields:\n if _has_default(field):\n continue\n nonnull_cols_series = _column_getter(self, field.name)\n if nonnull_cols_series.isnull().any():\n error_msg = '%s column contains nulls' % field.name\n raise PandasLovesPoniesException(error_msg)\n return True", "title": "" }, { "docid": "3fc3b4b9e82dc3b85244e676231b9d00", "score": "0.55682826", "text": "def _any_nat_check(df):\n return 
np.any(pd.isnull(df))", "title": "" }, { "docid": "842aaf30eeae134892185c43443c5b63", "score": "0.5563729", "text": "def check_data(self, df, *args):\n column_headers = df.columns\n print(\"Checking input data, all data headers: {}\".format(column_headers))\n for column in args:\n if column not in column_headers:\n print(\"Missing or mis-configured column: {}\".format(column))\n return False\n return True", "title": "" }, { "docid": "674d3c3b8445b44d99fbe88e3b23f7d8", "score": "0.5540896", "text": "def _continuous_col_finder(df, drop_na = False):\n df = df.copy()\n bin_cols = _binary_col_finder(df, drop_na = drop_na)\n cont_cols = [col for col in df.columns.tolist() if col not in bin_cols]\n \n return cont_cols", "title": "" }, { "docid": "42b52d755ff577ab939d7a4641fe72b5", "score": "0.5515423", "text": "def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):\n if not len(non_index_axes):\n return []\n\n axis, axis_labels = non_index_axes[0]\n info = self.info.get(axis, {})\n if info.get(\"type\") == \"MultiIndex\" and data_columns:\n raise ValueError(\n f\"cannot use a multi-index on axis [{axis}] with \"\n f\"data_columns {data_columns}\"\n )\n\n # evaluate the passed data_columns, True == use all columns\n # take only valid axis labels\n if data_columns is True:\n data_columns = list(axis_labels)\n elif data_columns is None:\n data_columns = []\n\n # if min_itemsize is a dict, add the keys (exclude 'values')\n if isinstance(min_itemsize, dict):\n existing_data_columns = set(data_columns)\n data_columns = list(data_columns) # ensure we do not modify\n data_columns.extend(\n [\n k\n for k in min_itemsize.keys()\n if k != \"values\" and k not in existing_data_columns\n ]\n )\n\n # return valid columns in the order of our axis\n return [c for c in data_columns if c in axis_labels]", "title": "" }, { "docid": "7ad738750770e34e125efad157a9890b", "score": "0.54954255", "text": "def search_missing_values(self):\n log.info(\"Searching for missing values\")\n columns_with_missing = self.df.columns[self.df.isnull().any()].tolist()\n if columns_with_missing:\n for col in columns_with_missing:\n self.column_meta[col][\"missing\"] = True\n log.warning(f\"Missing values found in column: {col}\")\n else:\n log.info(\"No missing values found\")", "title": "" }, { "docid": "4e3c30cde06bfc2ab05acae79daba783", "score": "0.54807353", "text": "def row_has_all_fields(self, row):\n for f in self.mapping:\n if f[0] != '_' and f[1] and not row[f[0]]:\n return False\n return True", "title": "" }, { "docid": "7351ce515279d4551abb01fabab6f203", "score": "0.54802614", "text": "def test_column_exclude_rules(self):\n col_rules = deepcopy(self.basespecs['iris']['column_rules'])\n col_rules['Species']['exclude'] = ['virginica', 'versicolor']\n dframe_val = DataFrameValidator(data=self.iris_dframe.copy(),\n column_rules=col_rules,\n rules={'drop_duplicates': False})\n cleaned_species = dframe_val.clean()['Species']\n self.assertItemsEqual(cleaned_species.unique().tolist(), ['setosa'])\n self.assertEqual(cleaned_species.shape[0], 50)", "title": "" }, { "docid": "a9226f0091b25bccd15e094eefa8b3de", "score": "0.5437251", "text": "def check_and_drop_duplicate_columns(dataframe: pd.DataFrame):\n extra_columns = dataframe.columns[dataframe.columns.duplicated()]\n if len(extra_columns) == 0:\n return dataframe\n for column in extra_columns:\n test_equality = dataframe.loc[:, column] == dataframe.loc[:, column]\n if not test_equality.all(axis=None):\n raise ValueError\n return dataframe.loc[:, 
~dataframe.columns.duplicated()]", "title": "" }, { "docid": "e29d468a67a26db8825dfc00511fe131", "score": "0.5436097", "text": "def collect_invalid_members_of(subset_df, missing=subs_missing_sibs):\n subset_df.apply(lambda sub: get_invalid_subset_member(sub, missing),\n axis=\"columns\")", "title": "" }, { "docid": "3edc66b5898cd55b7a8fb4535d9d87de", "score": "0.54328555", "text": "def validate_columns_names(df, col_names, index=0):\n\n columns = val_to_list(col_names)\n\n if is_list_of_tuples(columns):\n columns = [c[index] for c in columns]\n\n # Remove duplicates in the list\n if is_list_of_strings(columns):\n columns = OrderedSet(columns)\n\n check_for_missing_columns(df, columns)\n\n return True", "title": "" }, { "docid": "64de9c8f0a0a2b1a306871348930e610", "score": "0.543095", "text": "def test_is_indexable(self):\n # verify -----------------------\n try:\n self.rows[0]\n except TypeError:\n msg = \"'_RowCollection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "title": "" }, { "docid": "64de9c8f0a0a2b1a306871348930e610", "score": "0.543095", "text": "def test_is_indexable(self):\n # verify -----------------------\n try:\n self.rows[0]\n except TypeError:\n msg = \"'_RowCollection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "title": "" }, { "docid": "eb07a2d0f1822e472f0d0a7c058e9323", "score": "0.54210275", "text": "def _check_colname(*columns):\n\n for col in columns:\n if str(col).endswith(\"_\"):\n raise ValueError(\n \"Dict keys are not allowed to end with '_': {}\".format(col)\n )\n\n if \"__\" in str(col):\n raise ValueError(\n \"Dict keys are not allowed to contain '__': {}\".format(col)\n )", "title": "" }, { "docid": "c604dc2a2158947d13ff6de8c64907d6", "score": "0.5411608", "text": "def check_column_names(columns, *args):\n for arg in args:\n if isinstance(arg, (tuple, list)):\n missing = set(arg) - set(columns)\n if missing:\n raise ValueError(\"Following columns were requested but are \"\n \"not available: %s.\\n\"\n \"All requested columns: %s\\n\"\n \"Available columns: %s\"\n \"\" % (missing, arg, columns))", "title": "" }, { "docid": "e49c6ea7f798d0074137b8114b8fdb89", "score": "0.5401765", "text": "def test_filter_multiple_invalid_columns(self):\n self.dicom.filter_by_keywords({ \"invalid\" : \"bla\", \"another_invalid_col\" : \"bla\" })\n self.assertEqual(0, self.dicom.metadata.count())", "title": "" }, { "docid": "75924c096bfde057ca6e8a6acb26337c", "score": "0.5392577", "text": "def _validate_data(df: pd.DataFrame, schema: input_schema.Schema):\n\n for key, value in schema.input_schema_map.items():\n _ = value # TODO(mikebernico) Implement type checking.\n if key not in df.columns:\n schema_keys = list(schema.input_schema_map.keys())\n raise AttributeError(\n f'DataFrame does not contain expected column: {key}. 
'\n f'Ensure header matches schema keys: {schema_keys}.')", "title": "" }, { "docid": "ff9e9ae9dcd4defcb83d47f9c52d7e05", "score": "0.53670734", "text": "def fail_struct_and_ancestors(struct, disparate_column, fails=0):\n\n disparate_column[struct.idx] = fails\n if struct.level > 0:\n fail_struct_and_ancestors(struct.parent, disparate_column, fails=fails)", "title": "" }, { "docid": "b382c1957cabdff476ec67b392ae97ae", "score": "0.53512156", "text": "def check_data_frame(df: pd.DataFrame) -> None:\n\n required_cols = {\n \"Time\",\n \"Exchange\",\n \"Asset Class\",\n \"Product Name\",\n \"Product Code\",\n \"Start Period\",\n \"End Period\",\n \"Maintenance\",\n \"Currency\",\n \"Maint. Vol. Scan\",\n }\n assert required_cols.issubset(df.columns)", "title": "" }, { "docid": "81ff25db8120d5cc58f473ee78e3a6a9", "score": "0.5350528", "text": "def valid_type_in_col(arch):\r\n for attrib in arch.xpath('//*/@col'):\r\n try:\r\n int(attrib)\r\n except:\r\n return False\r\n return True", "title": "" }, { "docid": "8342f02d0f9f18a9899440e19fe69542", "score": "0.53426254", "text": "def index_like(index):\n return not (isinstance(index, pd.RangeIndex) and\n index._start == 0 and\n index._stop == len(index) and\n index._step == 1 and index.name is None)", "title": "" }, { "docid": "8342f02d0f9f18a9899440e19fe69542", "score": "0.53426254", "text": "def index_like(index):\n return not (isinstance(index, pd.RangeIndex) and\n index._start == 0 and\n index._stop == len(index) and\n index._step == 1 and index.name is None)", "title": "" }, { "docid": "93b7986ef6dd3b112286c38fc7d292a4", "score": "0.531596", "text": "def test_is_indexable(self):\n # verify -----------------------\n try:\n self.cells[0]\n except TypeError:\n msg = \"'_CellCollection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "title": "" }, { "docid": "93b7986ef6dd3b112286c38fc7d292a4", "score": "0.531596", "text": "def test_is_indexable(self):\n # verify -----------------------\n try:\n self.cells[0]\n except TypeError:\n msg = \"'_CellCollection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "title": "" }, { "docid": "1f699f42b9373549e2e92c9efd322583", "score": "0.5309775", "text": "def findSingleValueColumns(originalData):\n uselessColumns = [];\n\n for i in range(0, len(originalData[0])):\n isUsefulColumn = False;\n firstEntry = originalData[0][i];\n for j in range(1, len(originalData)):\n if (firstEntry != originalData[j][i]):\n isUsefulColumn = True;\n break;\n\n if (not isUsefulColumn):\n uselessColumns.append(i);\n \n return uselessColumns;", "title": "" }, { "docid": "3989fdac9dd1eadd0ef26c8e169c58ef", "score": "0.52963823", "text": "def check_for_empty_index(comparable: Compare):\n index_column = comparable.data_frame[idx.get_index_name(comparable)]\n comparable.empty_index = [index for index, value in enumerate(index_column)\n if value == Field.empty_string.value\n or value != value or value is None]", "title": "" }, { "docid": "601ceb455c2239064745f4e8c861c1d9", "score": "0.5295186", "text": "def check_valid_column(observation):\n\n valid_columns = set(columns)\n keys = set(observation.keys())\n\n if len(valid_columns - keys) > 0:\n missing = valid_columns - keys\n error = \"Missing columns: {}\".format(missing)\n return False, error\n\n if len(keys - valid_columns) > 0:\n extra = keys - valid_columns\n error = \"Unrecognized columns provided: {}\".format(extra)\n return False, error\n\n if len(keys - valid_columns) == 0:\n return True, \"\"", "title": "" }, 
{ "docid": "40a868f60e69c3431bd9b2d3969fc8d9", "score": "0.5292685", "text": "def _index_check(self):\r\n for temp in self.data_dict.values():\r\n if (self.data_dict['open'].index.difference(temp.index).shape[0]\r\n != 0) or (self.data_dict['open'].shape != temp.shape):\r\n \r\n raise ValueError('Dataframe indices are not aligned')", "title": "" }, { "docid": "220b6ae2239ae2b79c1866ae8cd2e57a", "score": "0.528538", "text": "def test_df_iterate_over_columns2(self):\n from sdc.hiframes.api import get_nan_mask\n\n @self.jit\n def jitted_func():\n cols = ('A', 'B', 'C', 'D')\n df = pd.DataFrame({\n 'A': ['a', 'b', None, 'a', '', None, 'b'],\n 'B': ['a', 'b', 'd', 'a', '', 'c', 'b'],\n 'C': [np.nan, 1, 2, 1, np.nan, 2, 1],\n 'D': [1, 2, 9, 5, 2, 1, 0]\n })\n res_nan_mask = np.zeros(len(df), dtype=np.bool_)\n for col in literal_unroll(cols):\n res_nan_mask += get_nan_mask(df[col].values)\n return res_nan_mask\n\n # expected is a boolean mask of df rows that have None values\n expected = np.asarray([True, False, True, False, True, True, False])\n result = jitted_func()\n np.testing.assert_array_equal(result, expected)", "title": "" }, { "docid": "b0e148ecaf122b56e0f98404fd395b26", "score": "0.5284561", "text": "def is_valid(self,row):\n # Do we know how to deal with all the columns?\n coldiff = set(row.columns) - set(self.COLUMN_NAMES)\n if coldiff:\n raise Exception(\"Row contains unknown columns: \" + str(coldiff) )", "title": "" }, { "docid": "ff3debd100e78e5ef90274f68d69a6b1", "score": "0.5276152", "text": "def ColFilter(col):\n return not hiddencols or col not in hiddencols", "title": "" }, { "docid": "94c5574835260a85edd84885fef2c151", "score": "0.527597", "text": "def __validIndex__(self, i):\n try:\n int(i) # Not great here as it would pass floats and prolly others\n except ValueError:\n return False\n return True", "title": "" }, { "docid": "c11d1af3340601b79646c0035db15ca1", "score": "0.5256806", "text": "def validate_data_columns(self, data_columns, min_itemsize):\r\n\r\n if not len(self.non_index_axes):\r\n return []\r\n\r\n axis, axis_labels = self.non_index_axes[0]\r\n info = self.info.get(axis, dict())\r\n if info.get('type') == 'MultiIndex' and data_columns:\r\n raise ValueError(\"cannot use a multi-index on axis [{0}] with \"\r\n \"data_columns {1}\".format(axis, data_columns))\r\n\r\n # evaluate the passed data_columns, True == use all columns\r\n # take only valide axis labels\r\n if data_columns is True:\r\n data_columns = axis_labels\r\n elif data_columns is None:\r\n data_columns = []\r\n\r\n # if min_itemsize is a dict, add the keys (exclude 'values')\r\n if isinstance(min_itemsize, dict):\r\n\r\n existing_data_columns = set(data_columns)\r\n data_columns.extend([\r\n k for k in min_itemsize.keys()\r\n if k != 'values' and k not in existing_data_columns\r\n ])\r\n\r\n # return valid columns in the order of our axis\r\n return [c for c in data_columns if c in axis_labels]", "title": "" }, { "docid": "751177ea15f5dca1040a232b69cc189f", "score": "0.52445453", "text": "def check_valid_column(observation):\n \n valid_columns = {\n \"SubjectRaceCode\",\n \"SubjectSexCode\",\n \"SubjectEthnicityCode\",\n \"StatuteReason\", \n \"InterventionReasonCode\", \n \"ResidentIndicator\", \n \"SearchAuthorizationCode\",\n \"SubjectAge\",\n \"hour\",\n \"day_of_week\",\n }\n \n keys = set(observation.keys())\n \n if len(valid_columns - keys) > 0: \n missing = valid_columns - keys\n error = \"Missing columns: {}\".format(missing)\n return False, error\n \n if len(keys - valid_columns) > 0: \n 
extra = keys - valid_columns\n error = \"Unrecognized columns provided: {}\".format(extra)\n return False, error \n\n return True, \"\"", "title": "" }, { "docid": "1dcc38d104d90f34739b5e7bfef05e15", "score": "0.52435005", "text": "def fn_check_missing_data(df):\r\n return df.isnull().sum().sort_values(ascending=False)", "title": "" }, { "docid": "f133da545a2c8f390d9d2ed08bdec144", "score": "0.5239565", "text": "def boolean_col_expressions(self):\r\n return skip_if([\r\n no_support('firebird', 'not supported by database'),\r\n no_support('oracle', 'not supported by database'),\r\n no_support('mssql', 'not supported by database'),\r\n no_support('sybase', 'not supported by database'),\r\n no_support('maxdb', 'FIXME: verify not supported by database'),\r\n no_support('informix', 'not supported by database'),\r\n ])", "title": "" }, { "docid": "73629530698a773e87a8d298f85c31a4", "score": "0.5235513", "text": "def format_disjunctive_column(comparable_a: Compare, comparable_b: Compare):\n for index, header in enumerate(comparable_a.header):\n\n is_not_index = not header_validator.is_index(comparable_a, header)\n is_not_in_other = not header_validator.is_column(comparable_b, header)\n is_empty_type = header[Field.column_type.value] == Field.empty_string.value\n is_not_mapped = not header_validator.is_mapped(comparable_a, header)\n\n is_disjunctive = all([is_not_index, is_not_in_other, is_empty_type, is_not_mapped])\n\n if is_disjunctive:\n comparable_a.header[index][Field.column_type.value] = Field.disjunctive.value\n comparable_a.header[index][Field.column_name.value] = change_value(comparable_a, index)", "title": "" }, { "docid": "8d61c2dd845c44d1c6299ca9024c1227", "score": "0.5213469", "text": "def test_multiple_cols_incl_no_op():\n assert json_to_table(data, [\"name\", \"adict_col\"]).equals(\n pd.DataFrame(\n [\n {\n \"name\": \"blah\",\n \"adict_col.a\": 1,\n \"adict_col.b.c\": 1,\n \"list_col\": [{\"a\": 1}, {\"a\": 2}],\n \"adict_col\": {\"a\": 1, \"b\": {\"c\": 1}},\n \"bdict_list_col\": {\"a\": [{\"b\": 1}, {\"b\": 2}]},\n }\n ]\n )\n )", "title": "" }, { "docid": "57d2fd28e6347de38594e9da2c99c28f", "score": "0.52077776", "text": "def test_filter_invalid_column(self):\n self.dicom.filter_by_keywords({ \"invalid keyword\" : \"value\" })\n self.assertEqual(0, self.dicom.metadata.count())", "title": "" }, { "docid": "59830e994f7cf540c4eee242768e673f", "score": "0.520426", "text": "def notnull(self) -> DataFrame:\n return ~self.isna()", "title": "" }, { "docid": "3583f74ae4057d4ba2f5d84abbc105ed", "score": "0.52028775", "text": "def check_column(sudoku, n, pos,inicial):\n columna = sudoku[:,pos[1]]\n contador = 0\n for i in columna:\n if i == n:\n contador += 1\n \n if inicial:\n if contador == 1:\n return True\n else:\n return False \n else:\n if contador >= 1:\n return False\n else:\n return True", "title": "" }, { "docid": "0bcaa9767ea24b71f9589d3f5e4619b3", "score": "0.518393", "text": "def check_finite(dataframe, file, errorfile, exclude_cols=[]):\n nans = False\n df = dataframe.drop(list(set(exclude_cols).intersection(set(dataframe.columns))), axis=1)\n column_sums = np.sum(~np.isfinite(df))\n if column_sums.sum() > 0:\n nans = True\n errorfile.write('{}:\\n'.format(file))\n errorfile.write('Number of non-finite by column:\\n')\n errorfile.write(column_sums.to_string())\n errorfile.write('\\nIndices of non-finite values:\\n')\n isnans = np.isnan(df)\n inds = dict([(c, df.ix[isnans[c].values, c].index.tolist()) for c in df.columns])\n for k, v in inds.iteritems():\n if len(v) > 0:\n 
errorfile.write('{}:'.format(k))\n for i in v:\n errorfile.write(' {}'.format(i))\n errorfile.write('\\n')\n errorfile.write('\\n')\n return nans", "title": "" }, { "docid": "3fc0391e82aea940f6eebf46b41f2097", "score": "0.5182957", "text": "def test_null_rows(fast_out, df_name, thresh):\n # These are columns that only exist in earlier years\n pv.no_null_rows(\n df=fast_out.__getattribute__(df_name)(),\n df_name=df_name,\n thresh=thresh,\n )", "title": "" }, { "docid": "55b74f2ff1dbe073bdeeed39b8f41be5", "score": "0.51813966", "text": "def get_cols_with_negative_values(df: DataFrame) -> list:\n\tnumCols = get_numerical_cols(df)\n\treturn df[numCols].columns[(df[numCols] < 0).any()].tolist()", "title": "" }, { "docid": "6554b9d79c4848925ffc41d91c58da5c", "score": "0.518076", "text": "def test_raises_on_idx_out_of_range(self):\n with self.assertRaises(IndexError):\n self.columns[9]", "title": "" }, { "docid": "6554b9d79c4848925ffc41d91c58da5c", "score": "0.518076", "text": "def test_raises_on_idx_out_of_range(self):\n with self.assertRaises(IndexError):\n self.columns[9]", "title": "" }, { "docid": "ffaf8bc12e32eebc46c123561a14eaf6", "score": "0.51789826", "text": "def print_invalid_rows(gdf):\n for i, row in gdf.iterrows():\n if not row['geometry'].is_valid:\n print('not valid index', i, '\\n', row)", "title": "" }, { "docid": "462234af756e770b2cbb7443dcb3a5fa", "score": "0.51781344", "text": "def test_nan(self):\n for i in list(DATA.columns):\n self.assertTrue(not pd.isnull(DATA[i]).any())", "title": "" }, { "docid": "d41bacbdc0607a7ff1b5383e8cc8467d", "score": "0.51697177", "text": "def test_cols_negative(self):\n self.assertRaises(BaseException, basic.alphanum, -1)", "title": "" }, { "docid": "62542c96fc8123f5522923129c598331", "score": "0.51590306", "text": "def sanity_checker(self, verbose = True):\n for col in self.bcols:\n if col not in self.cat.colnames:\n print(f'Warning: {col} is missing in catalogue Table')\n raise Exception('Missing Column')\n if verbose:\n print('Checking catalogue columns (ra, dec, parallax, pmra, phot_g_mean_mag, etc) - OK')\n self.add_extra_cols(verbose = verbose)", "title": "" }, { "docid": "47e9a1c660ddf4b7604819f46667f362", "score": "0.5158693", "text": "def missing_headers(unreconciled, column_types, plugin_types):\n error = False\n for column, column_type in column_types.items():\n if column not in unreconciled.columns:\n error = True\n print('ERROR: \"{}\" is not a column header'.format(column))\n if column_type['type'] not in plugin_types:\n error = True\n print('ERROR: \"{}\" is not a column type'.format(\n column_type['type']))\n return error", "title": "" }, { "docid": "8d1f84015c7f809bd23875eacb2cf7a4", "score": "0.51507735", "text": "def check_cols_present(df, required_cols):\n\n if len(required_cols & set(df.columns)) < len(required_cols):\n raise ValueError(\n \"Input metadata file must include the following \"\n \"columns: {}\".format(required_cols)\n )", "title": "" }, { "docid": "930d36fd1945365e7a127181adb85b38", "score": "0.5149544", "text": "def check_column(\n df: pd.DataFrame, old_column_names: List, present: bool = True\n):\n for column_name in old_column_names:\n if present:\n if column_name not in df.columns:\n raise ValueError(\n f\"{column_name} not present in dataframe columns!\"\n )\n else: # Tests for exclusion\n if column_name in df.columns:\n raise ValueError(\n f\"{column_name} already present in dataframe columns!\"\n )", "title": "" }, { "docid": "7d6695bbf5767a28dbc46539990c2cd6", "score": "0.5132296", "text": "def 
check_valid_column(observation):\n \n valid_columns = {\n \"observation_id\",\n \"Type\",\n \"Date\",\n \"Part of a policing operation\",\n \"Latitude\",\n \"Longitude\",\n \"Gender\",\n \"Age range\",\n \"Officer-defined ethnicity\",\n \"Legislation\",\n \"Object of search\",\n \"station\"\n}\n\n keys = set(observation.keys())\n \n if len(valid_columns - keys) > 0: \n missing = valid_columns - keys\n error = \"Missing columns: {}\".format(missing)\n return False, error\n \n if len(keys - valid_columns) > 0: \n extra = keys - valid_columns\n error = \"Unrecognized columns provided: {}\".format(extra)\n return False, error \n\n return True, \"\"", "title": "" }, { "docid": "2e7497fdf4666e0af31ba55039d1a7e0", "score": "0.5127166", "text": "def _check_data_length(\n self,\n columns: Sequence[Hashable],\n data: Sequence[ArrayLike],\n ) -> None:\n if not self.index_col and len(columns) != len(data) and columns:\n empty_str = is_object_dtype(data[-1]) and data[-1] == \"\"\n # error: No overload variant of \"__ror__\" of \"ndarray\" matches\n # argument type \"ExtensionArray\"\n empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator]\n if len(columns) == len(data) - 1 and np.all(empty_str_or_na):\n return\n warnings.warn(\n \"Length of header or names does not match length of data. This leads \"\n \"to a loss of data with index_col=False.\",\n ParserWarning,\n stacklevel=find_stack_level(),\n )", "title": "" }, { "docid": "c824c10d4001f501f292582b4e7896dd", "score": "0.5123544", "text": "def test_column_values(self):\n for column in self.table.columns:\n self.assertEqual(len(column.values), 0)", "title": "" }, { "docid": "a16d7cdee4929b20a0e5e4c9ef3b5cad", "score": "0.5120725", "text": "def unmatched_column_indicator(self):\n return self.match_results == -1", "title": "" }, { "docid": "e94a86eba0a9c73c2665950a7bbf9049", "score": "0.51171285", "text": "def cell_is_not_empty(row, column_id):\n for cell in row.cells:\n if cell.column_id == column_id:\n if cell.value:\n return True\n else:\n return False", "title": "" }, { "docid": "30e08e12955e3bdb38f55464b8919957", "score": "0.51115584", "text": "def is_admissible(index, index_set):\n back_neighbors = backward_neighbors(index)\n for ind_b in back_neighbors:\n if ind_b not in index_set:\n return False\n return True", "title": "" }, { "docid": "be1fcb412111a2ae440729ca099cbe20", "score": "0.5111317", "text": "def is_indexed(self):\r\n try:\r\n return getattr(self.table.cols, self.cname).is_indexed\r\n except:\r\n False", "title": "" }, { "docid": "eaf4c0d9fdc1bfb2837a3d3f4df8881f", "score": "0.51064456", "text": "def count_missing(df, is_print=True, return_df=False):\n \n nans_l = {}\n fir_valid = dict().fromkeys(df.columns)\n last_valid = dict().fromkeys(df.columns)\n \n for col in df.columns:\n ts = df[[col]].copy()\n fir_ix = ts.first_valid_index()\n if fir_ix is not None:\n fir_ix = ts.index.get_loc(fir_ix)\n last_ix = ts.index.get_loc(ts.last_valid_index())\n ts = ts.iloc[fir_ix:last_ix+1, :]\n #tuple for cnt nan and no-nan\n nans_l[col] = (ts.isna().sum().values[0], ts.notnull().sum().values[0])\n #to print first and last valid indexes\n fir_valid[col] = ts.first_valid_index()\n last_valid[col] = ts.last_valid_index()\n if is_print: \n columns_names = 'Element | Count NaN | Count no-NaN | First valid date| Last valid date'\n print(' '*(len(columns_names)//3) + 'Some information about data')\n print('-'*len(columns_names))\n print(columns_names)\n print('-'*len(columns_names))\n for col, val in nans_l.items():\n fir_val_ix = 
str(fir_valid[col]).split(' ')[0]\n last_val_ix = str(last_valid[col]).split(' ')[0]\n cnt_nan = val[0]\n cnt_nonan = val[1]\n print(f'{col:<7}{cnt_nan:>8}{cnt_nonan:>15}{fir_val_ix:>18}{last_val_ix:>18}')\n\n print(\"\\nP.S. valid date means that before or after this date there're only missing values.\\n\\\nOf course nonvalid dates are omitted\")\n if return_df:\n i = 0\n df = pd.DataFrame(columns=['Element', 'Count NaN', 'Count no-NaN', 'First valid date', 'Last valid date'])\n for col, val in nans_l.items():\n fir_val_ix = str(fir_valid[col]).split(' ')[0]\n last_val_ix = str(last_valid[col]).split(' ')[0]\n cnt_nan = val[0]\n cnt_nonan = val[1]\n \n df.loc[i] = [col, cnt_nan, cnt_nonan, fir_val_ix, last_val_ix]\n i += 1\n \n return df", "title": "" }, { "docid": "8bad98b811b467f84c095f3f084ee903", "score": "0.5100679", "text": "def check_input_df(input_df, expected_cols):\n\n if all(col in input_df.columns for col in expected_cols):\n pass\n else:\n raise ValueError(\n \"Input must be of type DataFrame and contain the following columns: {}\".format(\n \", \".join(expected_cols)\n )\n )", "title": "" }, { "docid": "6f4cf733fb3c1935f8166f9f8d58507e", "score": "0.50980073", "text": "def sanitize_df(df: pd.DataFrame) -> pd.DataFrame:\n # Delete empty rows\n for i in df.index.tolist()[-1::-1]:\n if df.iloc[i].isna().all():\n df.drop(i, inplace=True)\n else:\n break\n # Delete empty columns\n cols = []\n for column in df:\n if not df[column].isnull().all():\n cols.append(column)\n df = df[cols]\n return df", "title": "" }, { "docid": "e6ee1a04f6a6e56a8301fa2d7140cdfb", "score": "0.5087057", "text": "def _dict_validation(self, data: DataFrame, verbose: bool = False) -> DataFrame:\n\n dict_column_valid_values = self._read_dict_column_valid_values()\n\n if not dict_column_valid_values:\n if verbose:\n print(f'\\t| No columns to clean.')\n\n print(f'\\t| Row count: {data.count()} before cleaning')\n for column, values in dict_column_valid_values.items():\n if column in data.columns:\n data = data.filter(data[column].isin(values))\n if verbose:\n print(f'\\t| Row count: {data.count()}, after cleaning column: {column}')\n\n return data", "title": "" }, { "docid": "369605ce9128255a6fc840b2252e3193", "score": "0.50847715", "text": "def test_column_presence(profile: pd.DataFrame):\n KMCPProfile.validate(profile)", "title": "" }, { "docid": "3e682d51600e6609a33d18d8d2bbe52b", "score": "0.50722545", "text": "def apply(self, data_source):\r\n cols_with_nan = []\r\n for column in self.columns:\r\n if data_source[column].isnull().values.any():\r\n cols_with_nan.append(column)\r\n if cols_with_nan:\r\n raise ValidDataException(\r\n \"Columns containing NaN values\", cols_with_nan)\r\n\r\n # ============== Possible additional dataset checks =====================\r", "title": "" }, { "docid": "c72b1077f6c223737f1279516169c86e", "score": "0.50645167", "text": "def drop_mostly_missing_cols(hpdf):\n copy = hpdf.data.copy()\n # drop columns with more than 20% values missing\n notna_col_mask = ~ (copy.isna().sum()/len(copy) > 0.20)\n notna_col_mask.loc['SalePrice'] = True\n copy = copy.loc[:, notna_col_mask]\n # drop columns associated with those\n copy.drop(columns=['MiscVal'])\n return copy", "title": "" }, { "docid": "236290a51636775aa37a8e2a005e3d55", "score": "0.50597477", "text": "def test_invalid_column_as_index_raises_keyerror(self):\n self.assertRaises(KeyError, rad.preprocess, self.frame, [\"invalid\"])", "title": "" }, { "docid": "6912649baf46bbbe7ccc5166b1f4e012", "score": "0.50557643", "text": "def 
test_df_iterate_over_columns1(self):\n from sdc.hiframes.pd_dataframe_ext import get_dataframe_data\n from sdc.hiframes.api import get_nan_mask\n\n @self.jit\n def jitted_func():\n df = pd.DataFrame({\n 'A': ['a', 'b', None, 'a', '', None, 'b'],\n 'B': ['a', 'b', 'd', 'a', '', 'c', 'b'],\n 'C': [np.nan, 1, 2, 1, np.nan, 2, 1],\n 'D': [1, 2, 9, 5, 2, 1, 0]\n })\n\n # tuple of literals has to be created in a jitted function, otherwise\n # col_id won't be literal and unboxing in get_dataframe_data won't compile\n column_ids = (0, 1, 2, 3)\n res_nan_mask = np.zeros(len(df), dtype=np.bool_)\n for col_id in literal_unroll(column_ids):\n res_nan_mask += get_nan_mask(get_dataframe_data(df, col_id))\n return res_nan_mask\n\n # expected is a boolean mask of df rows that have None values\n expected = np.asarray([True, False, True, False, True, True, False])\n result = jitted_func()\n np.testing.assert_array_equal(result, expected)", "title": "" }, { "docid": "0b9aa472e431a4ec17013f374b59b55e", "score": "0.5053048", "text": "def check_index(self, index):\n\n if index.bottom != 1:\n pass\n #Only access arrays with whole indices!\n elif index.top >= len(self):\n pass\n #Array out of bounds error!\n elif index.sign == -1:\n pass\n #Indexes can't be negative!", "title": "" }, { "docid": "1af4f5770eb242513967f65b95444e88", "score": "0.50512403", "text": "def _is_valid_erp(d , / ):\r\n if not hasattr(d, \"columns\"): \r\n raise TypeError (\r\n \"ERP 'resistivity' and station measurement data expect\"\r\n f\" to be arranged in a dataframe. Got {type (d).__name__!r}\"\r\n )\r\n return not len(d) ==0 and ('resistivity' and 'station') in d.columns", "title": "" }, { "docid": "6766aa344d9214a4f55119bbdcaecec1", "score": "0.5050591", "text": "def all_better(self):\n df = self.compare_with_soa()\n idxs = [all(df[col] > 0) for col in df.columns]\n\n return df.iloc[:, idxs]", "title": "" } ]
7d4440ed8fc28e2206639809e8615ddc
Tests if this user can change the hierarchy. A return of true does not guarantee successful authorization. A return of false indicates that it is known that performing any update will result in a ``PermissionDenied``. This is intended as a hint to an application that may opt not to offer these operations to an unauthorized user.
[ { "docid": "96a1e125c7c2c93baf4e3b63c5b3e402", "score": "0.66537386", "text": "def can_modify_family_hierarchy(self):\n return # boolean", "title": "" } ]
[ { "docid": "f086a73bf3494ee3a3cea9a84eaad19a", "score": "0.7123733", "text": "def can_modify_objective_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "4ae42bba99802e77eb731d9315fbcaf2", "score": "0.71223724", "text": "def change(self, user, energy, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n if user.is_advisor and user.group == energy.group:\n return True\n\n return self.admin_permission(user, energy, *args)", "title": "" }, { "docid": "326dab2fcfc11c6a3385a81bc894a803", "score": "0.70367", "text": "def can_modify_log_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy_template\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_modify_catalog_hierarchy()\n return True", "title": "" }, { "docid": "c63b93a3cf33cf0081f182be0ad3e7e3", "score": "0.70354694", "text": "def can_modify_location_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "eba8c36ed3d0fc460ce735581e8b7fca", "score": "0.6934173", "text": "def can_modify_objective_bank_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "eba8c36ed3d0fc460ce735581e8b7fca", "score": "0.6934173", "text": "def can_modify_objective_bank_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "eba8c36ed3d0fc460ce735581e8b7fca", "score": "0.6934173", "text": "def can_modify_objective_bank_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "e3e191f8d0d0b022f488249f14cb5ad4", "score": "0.66498375", "text": "def has_change_permission(self, request, obj=None):\n return False", "title": "" }, { "docid": "e3e191f8d0d0b022f488249f14cb5ad4", "score": "0.66498375", "text": "def has_change_permission(self, request, obj=None):\n return False", "title": "" }, { "docid": "e3e191f8d0d0b022f488249f14cb5ad4", "score": "0.66498375", "text": "def has_change_permission(self, request, obj=None):\n return False", "title": "" }, { "docid": "16ab91f4e764f1c93cc98de15a17b6da", "score": "0.65783924", "text": "def check_change_permission(self, user, perm, obj):\n return None", "title": "" }, { "docid": "54e0f8ee1ff50cf83eb0d9d9792eeb84", "score": "0.6500337", "text": "def can_modify_map_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "1ec207bfdcc96bf32208253eaddb4661", "score": "0.64400214", "text": "def check_change_object_permission(self, user, perm, obj):\n return user.has_perm(self.get_permission_name('change'))", "title": "" }, { "docid": "dc0e9522125ac95a83e422aaafda8609", "score": "0.64384574", "text": "def has_change_permission(self, request, obj=None):\n perm = self.object_change_permission\n if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:\n r = request.user.has_perm(perm, obj)\n else:\n r = request.user.has_perm(perm)\n\n return r and super(TreeEditor, self).has_change_permission(\n request, obj)", "title": "" }, { "docid": "9101230496d06bdc9f8d50b9a5c3e01d", "score": "0.6308406", "text": "def change(self, user, yearlyenergyprice, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n if user.is_advisor and user.group == yearlyenergyprice.group:\n return True\n\n return self.admin_permission(user, yearlyenergyprice, *args)", "title": "" }, { "docid": 
"3ee419014ef54c5d0221eb7251c19df0", "score": "0.63054144", "text": "def change(self, user, professional, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_professional:\n return True\n\n if user.is_administrator:\n return True\n\n if user.is_advisor:\n return True\n\n return self.admin_permission(user, professional, *args)", "title": "" }, { "docid": "21116e49d78228aca38949b5a8fd33bb", "score": "0.62654173", "text": "def check_permission(self, context, instance):\n permission = '{0}.change_{1}'.format(instance._meta.app_label,\n instance._meta.module_name)\n return context['user'].has_perm(permission)", "title": "" }, { "docid": "9c24be45e6f53c353c5fac1fe1057a16", "score": "0.6231308", "text": "def can_access_objective_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "6f01ca44f3d7b1375ec2255247908e53", "score": "0.6134754", "text": "def check_edit_permission(self):\n if not any('change' in item for item in self.get_permissions()):\n raise PermissionDenied()", "title": "" }, { "docid": "a52df81f115fb14cef89e5c89377dccb", "score": "0.6119095", "text": "def check_read_permission(self, user, perm, obj):\n return user.has_perm(self.get_permission_name('change'))", "title": "" }, { "docid": "695d6dbe0711f095b7443ecf67bcc3e3", "score": "0.6055337", "text": "def can_access_location_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "e173a769d9ffd166694679d08f8e276c", "score": "0.60433006", "text": "def has_change_permission(self, request, obj=None):\r\n return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)", "title": "" }, { "docid": "1afbb28dfdd377291f475208186804b5", "score": "0.6035344", "text": "def has_change_permission(self, request, obj=None, strict=False):\n change_perm = super().has_change_permission(request, obj=obj)\n approve_perm = self.has_signoff_approve_permission(request, obj=obj)\n either_perm = change_perm or (approve_perm and not strict)\n return either_perm and (not obj or self.is_pending_signoff(obj))", "title": "" }, { "docid": "57cde41a3ad6fe26e840c64d84b425aa", "score": "0.60105664", "text": "def can_access_objective_bank_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "57cde41a3ad6fe26e840c64d84b425aa", "score": "0.60105664", "text": "def can_access_objective_bank_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "57cde41a3ad6fe26e840c64d84b425aa", "score": "0.60105664", "text": "def can_access_objective_bank_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "30ff08b8b26ea6ad926c40328b89cb1f", "score": "0.59874535", "text": "def has_change_permission(self, request, obj=None):\n info = self.parent_model._meta.app_label, self.parent_model._meta.model_name\n url_name = u'{}_{}_change'.format(*info)\n return url_name == request.resolver_match.url_name", "title": "" }, { "docid": "7b480cdd29b962b00eac1ef13451b738", "score": "0.5984149", "text": "def perform_update(self, serializer):\n\n user = self.request.user\n obj = self.get_object()\n data = serializer.validated_data\n\n is_active_change = ('is_active' in data and\n obj.is_active != data['is_active'])\n is_staff_change = ('is_staff' in data and\n obj.is_staff != data['is_staff'])\n\n if ((is_staff_change or is_active_change) and not user.is_staff):\n raise PermissionDenied(\"Non-staff user cannot change \"\n \"'is_staff' or 'is_active'\")\n serializer.save()", "title": "" }, { "docid": "8c71c665b6da6712db8637bb041f9011", "score": "0.59692115", "text": "def check_permissions(cls, info: ResolverInfo) -> 
bool:\n user = info.context.user\n\n if not cls._meta.public and not check_authenticated(user):\n return False\n\n if not cls._meta.permissions:\n return True\n\n return check_perms(user, cls._meta.permissions, any_perm=cls._meta.permissions_any)", "title": "" }, { "docid": "becf3ca384cc189cf53ace9dcc61a7a8", "score": "0.5965556", "text": "def has_change_permission(self, request, obj=None):\r\n opts = self.opts\r\n codename = get_permission_codename('change', opts)\r\n return request.user.has_perm(\"%s.%s\" % (opts.app_label, codename))", "title": "" }, { "docid": "119a7787b5edf9a3886512232aa46c73", "score": "0.5964482", "text": "def can(self):\n identity = get_identity(self.user)\n return (\n is_owner(self.user, self.record) or\n (\n Permission(menrva_edit_published_record).allows(identity) and\n has_published(self.record)\n ) or\n Permission(menrva_edit).allows(identity)\n # NOTE: by default any Permission has a super-user Need\n )", "title": "" }, { "docid": "5cfe3a8beb83ef352de7dbb63bb04837", "score": "0.5953403", "text": "def can_write(self, user):\n return user.is_superuser or self.is_owner(user)", "title": "" }, { "docid": "9ecdc7c251ba998ee68c2a48b26dcf0a", "score": "0.5952299", "text": "def has_edit_permission(self, user: settings.AUTH_USER_MODEL) -> bool:\n return user.is_superuser or user == self.owner", "title": "" }, { "docid": "97782ee43b7acc12486517a1201fdd7f", "score": "0.5916611", "text": "def check_list_permission(self, user, perm, obj):\n return user.has_perm(self.get_permission_name('change'))", "title": "" }, { "docid": "f44abee2275fb7376af120e654474ede", "score": "0.5910422", "text": "def is_admin(self):\n return self._is_allowed(self.ADMIN_PERMISSION)", "title": "" }, { "docid": "a23e70d7ae4615c4fe6bb24422abd6fa", "score": "0.59097165", "text": "def has_change_permission(self, request, obj=None):\n opts = self.opts\n codename = get_permission_codename(\"change\", opts)\n return request.user.has_perm(\"%s.%s\" % (opts.app_label, codename))", "title": "" }, { "docid": "dc7baf806a8252b35fe7b3cb854516c5", "score": "0.5898846", "text": "def can_write(self, user):\n return user.is_superuser or self.is_in_users(user) or self.is_owner(user)", "title": "" }, { "docid": "29869365906d3f277f35fb029cbd5490", "score": "0.58698875", "text": "def canEdit(self):\n return self.getDetails().getPermissions().canEdit()", "title": "" }, { "docid": "ba4461ef7f171df7fff18e9c1f424cb7", "score": "0.58698493", "text": "def check_permissions(self, cleaned_data):\n user = self.request.user\n if not co.CheckPermissions(user, self.instance, co.CAN_EDIT):\n raise ValidationError('Operation can not be performed.')", "title": "" }, { "docid": "82f7d30c4f6577e2348331a6448dbcf7", "score": "0.5846985", "text": "def can_access_family_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "82f7d30c4f6577e2348331a6448dbcf7", "score": "0.5846985", "text": "def can_access_family_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "82f7d30c4f6577e2348331a6448dbcf7", "score": "0.5846985", "text": "def can_access_family_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "d471640b2e580f0a4c409696fb8558bf", "score": "0.58413994", "text": "def has_permission(self):\n return True", "title": "" }, { "docid": "9908e48ad7c9445b462ec6e8d90106b0", "score": "0.58315104", "text": "def is_accessible(self):\n return admin_permission.can()", "title": "" }, { "docid": "9908e48ad7c9445b462ec6e8d90106b0", "score": "0.58315104", "text": "def is_accessible(self):\n return admin_permission.can()", 
"title": "" }, { "docid": "9908e48ad7c9445b462ec6e8d90106b0", "score": "0.58315104", "text": "def is_accessible(self):\n return admin_permission.can()", "title": "" }, { "docid": "9908e48ad7c9445b462ec6e8d90106b0", "score": "0.58315104", "text": "def is_accessible(self):\n return admin_permission.can()", "title": "" }, { "docid": "9908e48ad7c9445b462ec6e8d90106b0", "score": "0.58315104", "text": "def is_accessible(self):\n return admin_permission.can()", "title": "" }, { "docid": "9908e48ad7c9445b462ec6e8d90106b0", "score": "0.58315104", "text": "def is_accessible(self):\n return admin_permission.can()", "title": "" }, { "docid": "32e5a7a6fc1bda99c95f5a2d6af78e6e", "score": "0.5829296", "text": "def change_allow_level(self):\n return self._change_allow_level", "title": "" }, { "docid": "13db659673f91961e8200c5d30fa74f5", "score": "0.58086085", "text": "def can_edit(user, simulation):\n if simulation.user == user or user.is_superuser:\n return True\n else:\n return False", "title": "" }, { "docid": "0696d73ff32c02341df9f1e42a3a4a04", "score": "0.5786341", "text": "def has_permission(self, user):\n if user.is_anonymous:\n return False\n if user.groups.filter(\n pk=get_admin_group().pk).count() == 0:\n # We give full permissions to anyone in the admin group\n\n if \"func\" in self.kwargs:\n if not self.kwargs[\"func\"](user):\n return False\n\n for p in self.permissions:\n if \".\" not in p:\n # Probably on the happeningsite object\n p = \"happening.%s\" % p\n if not user.has_perm(p):\n return False\n return True", "title": "" }, { "docid": "4c81b54ae600ff37a5b1b11bf575bd14", "score": "0.5747423", "text": "def is_superuser(self):\r\n return self.access_level >= self.ACCESS_ROOT", "title": "" }, { "docid": "4f3c1c38ef1f8d323ea254f07624d585", "score": "0.5737781", "text": "def can_access_map_hierarchy(self):\n return # boolean", "title": "" }, { "docid": "9f8c9494aebab33a70e14563d399de35", "score": "0.5727718", "text": "def has_permission(self, request, view):\r\n return request.method in permissions.SAFE_METHODS or request.user.is_superuser", "title": "" }, { "docid": "5fcb9eab7c2d34499bdbb166481582d9", "score": "0.5710465", "text": "def test_authorized_change_self_true_to_false(self):\n target_user_id = AUTHORIZED_UPDATER_ID\n self.assertTrue(app.find_authorization_in_db(\n target_user_id, app.app.config['COLLECTION']))\n self.client.post('/v1/authorization/update', data={\n 'target_user_id': target_user_id,\n 'is_organizer': False,\n **DUMMY_GAUTH_REQUEST_DATA})\n self.assertFalse(app.find_authorization_in_db(\n target_user_id, app.app.config['COLLECTION']))", "title": "" }, { "docid": "0728738f67e3a352d19495f003f26205", "score": "0.5674512", "text": "async def hierarchy(self, ctx):\n server = ctx.message.server\n toggled = self.settings[server.id].get(\"respect_hierarchy\",\n default_settings[\"respect_hierarchy\"])\n if not toggled:\n self.settings[server.id][\"respect_hierarchy\"] = True\n await self.bot.say(\"Role hierarchy will be checked when \"\n \"moderation commands are issued.\")\n else:\n self.settings[server.id][\"respect_hierarchy\"] = False\n await self.bot.say(\"Role hierarchy will be ignored when \"\n \"moderation commands are issued.\")\n dataIO.save_json(\"data/mod/settings.json\", self.settings)", "title": "" }, { "docid": "b41b08a0fc69b6a4ecacdfb453bd9d63", "score": "0.56687194", "text": "def canWrite (self, obj):\n \n return self.isAdmin() or (self.getUserId() == obj.getDetails().getOwner().getId() and\n obj.getDetails().getPermissions().isUserWrite())", "title": "" }, { 
"docid": "5c599fd13ba67711e23071a898251fbc", "score": "0.5655981", "text": "async def is_admin(self):\n for user in self._users:\n if user[\"userName\"] == self._username:\n if user[\"level\"] == \"admin\":\n _LOGGER.debug(\n \"User %s has authorisation level %s\",\n self._username,\n user[\"level\"],\n )\n else:\n _LOGGER.warning(\n \"\"\"User %s has authorisation level %s. Only admin users can change\n camera settings! Switches will not work.\"\"\",\n self._username,\n user[\"level\"],\n )", "title": "" }, { "docid": "d4d8be9fd1811be004b57f3487466c23", "score": "0.5655306", "text": "def can(self):\n return self.identity.can(self.permission)", "title": "" }, { "docid": "f323651b4f73f44c03dc32facf452a8f", "score": "0.56519675", "text": "def is_accessible(self):\n return auth.check()", "title": "" }, { "docid": "53f0f6b387f2c47df178560c8312752c", "score": "0.5645688", "text": "def has_permission(self, request, view):\r\n return request.user.is_superuser", "title": "" }, { "docid": "4f15d14f085c1e67fb047f0ed158f8f9", "score": "0.56189257", "text": "def checkAttributePermission( self, attr, perm ):\n #It should work as follows (numbers denotes the order of roles search):\n #\n #for each attribute in each state may be set:\n # list of roles who can view attr and\n # list of roles who can change attr in current state\n #1. If roles are set (changed) for attribute in the state:\n # all users having one of listed roles can access to the attribute value.\n #\n #2. If roles are not yet set (changed) for attribute, try to acquire roles from state:\n # for viewing - permission 'View',\n # for editing - permission 'Modify portal content'\n #\n #3. If there is category inheritance and in the parent category there is\n # same state and same attribute, try to do as 1,2 but in parent category\n #\n #4. 
And so forth.\n\n category = self.getCategory()\n\n if not category:\n return None\n\n if type(attr) is StringType:\n attr = category.getAttributeDefinition(attr)\n\n if attr is None:\n return None\n\n attribute_id = attr.getId()\n\n membership = getToolByName(self, 'portal_membership', None)\n workflow = getToolByName( self, 'portal_workflow', None )\n if None in ( membership, workflow ):\n return 0\n\n user = membership.getAuthenticatedMember()\n user_roles = user.getRolesInContext(self)\n\n state = workflow.getStateFor( self )\n if state is None:\n #Most likely - no default states in category settings.\n return 0\n\n bases = [ category ]\n bases.extend( category.listBases(expand=1) )\n\n for category in bases:\n wf = category.__of__(self).getWorkflow()\n result = wf.getAttributePermissionInfo( state, attribute_id, perm )\n\n if result['acquired']: # and not result['roles']:\n #use parent's props\n sd = getattr( wf.states, state, None )\n if sd is None:\n continue\n\n if wf.states.isPrivateItem( sd ):\n #simply check permission\n #...hm, is it correct?\n return _checkPermission( perm, self )\n else:\n for u_role in user_roles:\n if u_role in result['roles']:\n return 1\n return 0\n return 0", "title": "" }, { "docid": "febd8422a0020ba8a554a22d85030cff", "score": "0.56091917", "text": "def HasPermission(data):\n if not Parent.HasPermission(data.User, MySet.Permission, MySet.PermissionInfo):\n message = MySet.PermissionResponse.replace(\"$username\", data.UserName).replace(\"$permission\", MySet.Permission).replace(\"$permissioninfo\", MySet.PermissionInfo)\n SendResp(data, message)\n return False\n return True", "title": "" }, { "docid": "abf85c8dde4dda067f9d92609ed45dc8", "score": "0.56049603", "text": "def can_update_relationships(self):\n return # boolean", "title": "" }, { "docid": "e016799eb4a56aafd8410b154c0d5958", "score": "0.5603641", "text": "def can_modify(self, student):\n return student.user.has_perm('change_course', self) \\\n and student.user.has_perm('delete_course', self)", "title": "" }, { "docid": "71f4e104b38d12d853279fb5c8b15d4d", "score": "0.5594958", "text": "def is_accessible(self):\n\n return current_user.is_authenticated and admin_permission.can()", "title": "" }, { "docid": "71f4e104b38d12d853279fb5c8b15d4d", "score": "0.5594958", "text": "def is_accessible(self):\n\n return current_user.is_authenticated and admin_permission.can()", "title": "" }, { "docid": "71f4e104b38d12d853279fb5c8b15d4d", "score": "0.5594958", "text": "def is_accessible(self):\n\n return current_user.is_authenticated and admin_permission.can()", "title": "" }, { "docid": "31b84d1dbc59acaf2ef717189e1c1bca", "score": "0.5586617", "text": "def can_access_log_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchySession.can_access_bin_hierarchy\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_access_catalog_hierarchy()\n return True", "title": "" }, { "docid": "230bcf0cf55a11ca067b326c83435d32", "score": "0.5585937", "text": "def canBeAdmin (self):\n return 0 in self.getEventContext().memberOfGroups", "title": "" }, { "docid": "10c7d77fbc3469d08c1d8c1a79602dd9", "score": "0.5579623", "text": "def is_accessible(self):\n return False", "title": "" }, { "docid": "f9a0336b54ed6bb28176b9b6912d6af4", "score": "0.55739427", "text": "def is_admin(self, bot, update):\n user = self.get_user_from_update(update)\n if not 
user.is_admin:\n bot.send_message(chat_id=update.effective_message.chat_id, text=Msg.missing_permission)\n return False\n return True", "title": "" }, { "docid": "1f4cb7b41ebf2888d2bdd69e9817a040", "score": "0.55646855", "text": "def check_viewlog_permission(self, user, perm, obj):\n return user.has_perm(self.get_permission_name('change'))", "title": "" }, { "docid": "4de747e5b8a114ad5aa2079fe4fa8241", "score": "0.5556951", "text": "def is_accessible(self):\n return True", "title": "" }, { "docid": "bb0e8c1c76dd213c7946adb1a2c17c75", "score": "0.5556138", "text": "def _set_permissions (self):\n \n if self.data_object and self.parameter:\n for checker in self.checkers:\n checker.Enable ( )\n if self.check_method (self.data_object, self.parameter, checker.element):\n checker.SetValue (True)\n else:\n checker.SetValue (False)\n else:\n for checker in self.checkers:\n checker.SetValue (False)\n checker.Disable ( )", "title": "" }, { "docid": "125855a6042fbc63a72040024cba67ec", "score": "0.55527806", "text": "def test_check_can_edit_super(self):\n self.assertTrue(check_can_edit(self.dat, self.user4))", "title": "" }, { "docid": "5ff23ca27bfff044acc742f33b54cc4c", "score": "0.5550484", "text": "def canOwnerWrite (self, obj):\n return obj.getDetails().getPermissions().isUserWrite()", "title": "" }, { "docid": "a0adba3dc7d59887e3c43fc8bd90550a", "score": "0.55479497", "text": "def __set_permissions(self):\n if not self.data_object:\n self.workplaces.Disable ( )\n for role in self.roles:\n role.SetValue (False)\n role.Disable ( )\n else:\n try:\n self.workplaces.Enable ( )\n workplace = self.workplaces.get_value ( )\n roles = self.data_object.roles[workplace]\n for role_checker in self.roles:\n if role_checker.element in workplace.roles:\n role_checker.Enable ( )\n role_checker.SetValue (role_checker.element in roles)\n else:\n role_checker.SetValue (False)\n role_checker.Disable ( )\n except:\n self.workplaces.Disable ( )\n for role in self.roles:\n role.SetValue (False)\n role.Disable ( )", "title": "" }, { "docid": "ab34ad9a4e185416ad931497b8f4cc6c", "score": "0.5531983", "text": "def _set_permissions (self):\n \n if self.data_object:\n for checker in self.checkers:\n checker.Enable ( )\n if self.check_method (self.data_object, checker.element):\n checker.SetValue (True)\n else:\n checker.SetValue (False)\n else:\n for checker in self.checkers:\n checker.SetValue (False)\n checker.Disable ( )", "title": "" }, { "docid": "2e033c8136ffe413b36875c508129aae", "score": "0.5530145", "text": "def writable(self) -> bool:\n\n user = self._target.session.current_user()\n group = self._target.session.find_group(gid=user.gid)\n mode = self.stat().st_mode\n uid = self.stat().st_uid\n gid = self.stat().st_gid\n\n if uid == user.id and (mode & stat.S_IWUSR):\n return True\n elif group.id == gid and (mode & stat.S_IWGRP):\n return True\n else:\n if group.id == gid and (mode & stat.S_IWGRP):\n return True\n else:\n if mode & stat.S_IWOTH:\n return True\n\n return False", "title": "" }, { "docid": "92b7c76ccf80c13776a9610cb27d50d8", "score": "0.55254436", "text": "def has_access(self, group):\n is_in_group = self.__usermanager.user_is_in_group(group,\n user_id=self.__id)\n is_admin = self.__usermanager.user_is_in_group(\"admin\",\n user_id=self.__id)\n\n if is_in_group or is_admin:\n self.save()\n return True\n\n is_admin = self.__usermanager.verify_user(self.__id, self.__name,\n \"admin\")\n is_in_group = self.__usermanager.verify_user(self.__id, self.__name,\n group)\n\n return is_admin or is_in_group", 
"title": "" }, { "docid": "0b700a16adfce4b2df9ba8fad1f3010d", "score": "0.5516191", "text": "def user_has_perm(self, user):\n if user.is_superuser:\n return True\n permission = self.permission.format(self.model.__name__.lower())\n permission = \"{}.{}\".format(self.model._meta.app_label, permission)\n return user.has_perm(permission)", "title": "" }, { "docid": "3ae456d9023f1cf5225c8035230de53a", "score": "0.55139494", "text": "def check_view_permissions(self):\n if self.action in ('list', 'retrieve', 'create'):\n return self.get_access_permissions().check_permissions(self.request.user)\n return False", "title": "" }, { "docid": "c5e26acd78e706dba47aa2f321e48f2d", "score": "0.550224", "text": "def can_change_userlevel(request_user, profile_user):\n request_user = validate_user(request_user)\n # admin can change user to all levels\n if request_user.has_admin_auth():\n return True\n # judge can change user to sub-judge, user\n user_level = profile_user.user_level\n if request_user.has_judge_auth() and \\\n (user_level == User.SUB_JUDGE or user_level == User.USER):\n return True\n\n return False", "title": "" }, { "docid": "0e5564ca7a946f17d9aceda27ae008c3", "score": "0.5476059", "text": "def is_cannot_set_permissions(self):\n return self._tag == 'cannot_set_permissions'", "title": "" }, { "docid": "0e5564ca7a946f17d9aceda27ae008c3", "score": "0.5476059", "text": "def is_cannot_set_permissions(self):\n return self._tag == 'cannot_set_permissions'", "title": "" }, { "docid": "a43cf0ab3f263dd02883b69414cd7d3f", "score": "0.5454196", "text": "def has_perm(self, perm, obj=None):\n return self.is_admin", "title": "" }, { "docid": "1a90e3743ed04c4d4cebe4d0517859a3", "score": "0.54512495", "text": "def has_access(self):\n return True", "title": "" }, { "docid": "c14f5d8958324c40796a407cf3fb78e6", "score": "0.5440727", "text": "def is_owner_or_raise(self):\n\t\t# Enforce access rights\n\t\tif not self.has_full_access:\n\t\t\traise self._denier\n\t\treturn True", "title": "" }, { "docid": "8f784457e99e3eb20000d30116ede014", "score": "0.5440518", "text": "def _check_perms(self):\n if self.skip_perms_check:\n return\n required_perm = COMMAND_TO_PERM[self.command]\n # first, check foruser if set. 
This will leave self.repo_perm set\n # to RepoPerm for foruser, so any error can be reported\n if self.foruser:\n LOG.debug(\"_check_perms for {}\".format(self.foruser))\n if not self.check_permissions(required_perm, user=self.foruser):\n self._raise_perms_error()\n # if that worked, check for authenticated user, resetting self.repo_perm\n if not self.check_permissions(required_perm):\n self._raise_perms_error()", "title": "" }, { "docid": "2764c32e6b9a4d4e8566d1b5b821e1dc", "score": "0.5431048", "text": "def can_set(self) -> Result[bool]:\n\n return self._execute_command('canSet')", "title": "" }, { "docid": "261d9f1f56fae736ab6f716ed7b4a51f", "score": "0.54293495", "text": "def can(self, user):\n if self.can_user(user):\n return True\n elif self.can_group(user):\n return True\n elif self.can_percentage(user):\n return True\n else:\n return False", "title": "" }, { "docid": "fd5a6d3bd8cba3ca094405dfb4412d11", "score": "0.5425347", "text": "def check_object_permissions(self, request, obj):\n return True", "title": "" }, { "docid": "831098857948384024310ee6e3b198f2", "score": "0.54191947", "text": "def check_view_permissions(self):\n if self.action in (\n \"create\",\n \"partial_update\",\n \"update\",\n \"destroy\",\n \"sort_related_users\",\n ):\n result = has_perm(self.request.user, \"assignments.can_see\") and has_perm(\n self.request.user, \"assignments.can_manage\"\n )\n elif self.action == \"candidature_self\":\n result = has_perm(self.request.user, \"assignments.can_see\") and has_perm(\n self.request.user, \"assignments.can_nominate_self\"\n )\n elif self.action == \"candidature_other\":\n result = has_perm(self.request.user, \"assignments.can_see\") and has_perm(\n self.request.user, \"assignments.can_nominate_other\"\n )\n else:\n result = False\n return result", "title": "" }, { "docid": "4e2bdc5487d373a569f75734632877eb", "score": "0.5412812", "text": "def can_change(self, request):\n return True", "title": "" }, { "docid": "7f954cbb8d304b31499651a4966b3779", "score": "0.54066485", "text": "def is_editable_by_current_user(self):\n\n return api.user.has_permission(\n 'Modify portal content', obj=self.context)", "title": "" }, { "docid": "2c8c30f2efe0be511ef894ab98777ef9", "score": "0.5394684", "text": "def can_user_set(self, user, channel):\n perms = channel.permissions_for(user)\n return user.manage_channels or user.administrator", "title": "" } ]
1659d94502ed19e7b08785e4a0c75390
Add a service variable of type ``ty`` to this model
[ { "docid": "88bde6386277d2c2911bb53b42cd1230", "score": "0.6668062", "text": "def service_define(self, service, ty):\n\n if service in self._data:\n raise NameError('Service variable <{}> already defined in _data'.format(service))\n if service in self._algebs:\n raise NameError('Service variable <{}> already defined in _algebs'.format(service))\n if service in self._states:\n raise NameError('Service variable <{}> already defined in _states'.format(service))\n\n self._service.append(service)\n self._service_ty.append(ty)", "title": "" } ]
[ { "docid": "9defd39fc9f8510cf3e2c3a479654431", "score": "0.6076422", "text": "def add(stype: ServiceType, service, name: str = None, *args, **kwargs):\n name = service.__name__ if name is None else name\n dynamic_provider = stype.value(service, *args, **kwargs)\n containers.DynamicContainer.__setattr__(\n Services.dynamic, name, dynamic_provider)", "title": "" }, { "docid": "3592212c5dee85235a01bbbcff19fa73", "score": "0.5992147", "text": "def add_variable(\n self,\n name: str,\n value: typing.Any,\n type_: str = None,\n value_info: typing.Mapping = None\n ):\n self.variables[name] = {'value': value, 'type': type_, 'valueInfo': value_info}\n\n return self", "title": "" }, { "docid": "1164a49bd4570e37925ffefd43c67229", "score": "0.5949808", "text": "def add_variable(\n self,\n name: str,\n value: typing.Any,\n type_: str = None,\n value_info: typing.Mapping = None\n ) -> None:\n self.variables[name] = {'value': value, 'type': type_, 'valueInfo': value_info}", "title": "" }, { "docid": "d3fa2a549b3fbcb831f3296c938707b2", "score": "0.5577186", "text": "def add_additional_variables(self, var_name, var_type):\n self.additional_variables.append(Variable(var_name, '{}, intent(in):: {}'.format(var_type, var_name)))", "title": "" }, { "docid": "5ef8f1293cb0d78bed7dd9506d03fa4f", "score": "0.5563456", "text": "def add_variable(self, name, attributes):\n self.variables[name] = self.VarStub(attributes)", "title": "" }, { "docid": "e9645ccb11b2eae161856c0bb1f830a5", "score": "0.55307704", "text": "def add_service_def_to_schema(self, service_type):\n self.schema_dict_tmpl[SERVICE_KEY][service_type] = \\\n copy.deepcopy(SERVICE_DICT_TMPL)", "title": "" }, { "docid": "d91270f4f3e5aadde2f796bfc6cbe8ee", "score": "0.55147415", "text": "def ServiceAdd(self, name, type, addr, required, optional):\n\t\tprint \"ServiceAdd\", self, name, type, addr, required, optional", "title": "" }, { "docid": "2f3d8348ebc7bc29cf3c5c9e357c746a", "score": "0.5456986", "text": "def var_define(self, variable, ty, fname, descr='', uname=''):\n if ty not in ('x', 'y'):\n raise ValueError('Type <{}> of the variable <{}> is invalid'.format(ty, variable))\n\n if not uname:\n uname = variable\n\n if ty == 'x':\n self._states.append(variable)\n self._fnamex.append(fname)\n self._unamex.append(uname)\n if descr:\n self._states_descr.update({variable: descr})\n elif ty == 'y':\n self._algebs.append(variable)\n self._fnamey.append(fname)\n self._unamey.append(uname)\n if descr:\n self._algebs_descr.update({variable: descr})", "title": "" }, { "docid": "69e6d6c7f0416249e3d1b62b72b62b78", "score": "0.53908265", "text": "async def async_set_variable_service_handler(service: ServiceCall) -> None:\n address = service.data.get(CONF_ADDRESS)\n vtype = service.data.get(CONF_TYPE)\n name = service.data.get(CONF_NAME)\n value = service.data.get(CONF_VALUE)\n init = service.data.get(CONF_INIT, False)\n isy_name = service.data.get(CONF_ISY)\n\n for config_entry_id in hass.data[DOMAIN]:\n isy_data = hass.data[DOMAIN][config_entry_id]\n isy = isy_data.root\n if isy_name and isy_name != isy.conf[\"name\"]:\n continue\n variable = None\n if name:\n variable = isy.variables.get_by_name(name)\n if address and vtype:\n variable = isy.variables.vobjs[vtype].get(address)\n if variable is not None:\n await variable.set_value(value, init)\n entity_registry = er.async_get(hass)\n async_log_deprecated_service_call(\n hass,\n call=service,\n alternate_service=\"number.set_value\",\n alternate_target=entity_registry.async_get_entity_id(\n Platform.NUMBER,\n 
DOMAIN,\n f\"{isy.uuid}_{address}{'_init' if init else ''}\",\n ),\n breaks_in_ha_version=\"2023.5.0\",\n )\n return\n _LOGGER.error(\"Could not set variable value; not found or enabled on the ISY\")", "title": "" }, { "docid": "0408b07492ffe40ff87b32d5a79318f5", "score": "0.5369281", "text": "def add_variable(self, name, value):\n self[name] = value", "title": "" }, { "docid": "b0541000e7bb74fb26ade412e014fb97", "score": "0.5238458", "text": "def add_variable(self, var: 'Constant'):\n self.__variables[var.name] = var", "title": "" }, { "docid": "962a46b03c91a0e89c14d384e7f631a0", "score": "0.5218415", "text": "def register_service(self, service):\n self.services.append(service)", "title": "" }, { "docid": "298f859d4d66ab8dc5ba7c58739bd3a9", "score": "0.5217007", "text": "def add(self, var, expression):\n expression = Polynomial.cast(expression)\n var.type = VariableType.TYPE # make var a type variable\n self._equations[var] = expression\n\n # register variables in the system.\n self._register_variable(var)\n self._register_expressions(expression)", "title": "" }, { "docid": "6ced6762373522c7466b293e13335e11", "score": "0.517509", "text": "def register_variable(variable, is_independent=False, is_equation=False):\n PuddleRepository.variables.add(variable)\n if is_independent:\n PuddleRepository.independent_variables.add(variable)\n if is_equation:\n PuddleRepository.equations.add(variable)", "title": "" }, { "docid": "72d2314cb8d744553cc0aa449ef03596", "score": "0.5170673", "text": "def add_var(self, var_name, required, dtype=None, dim=None, units=None):\n self.variables[var_name] = Variable(dtype, dim, units)\n if required:\n self.required_vars.append(var_name)\n else:\n self.optional_vars.append(var_name)", "title": "" }, { "docid": "778596abb7f52bbd18b1fc1f3d2ca999", "score": "0.5154649", "text": "def Potential(name, var, model=None):\r\n\r\n var.name = name\r\n modelcontext(model).potentials.append(var)\r\n return var", "title": "" }, { "docid": "9197efc30dfe97dcddc7a0e45e6e2f33", "score": "0.51336783", "text": "def add(self, var, value, where='dataset', reload=True):\n self._run(['--add', var, value], where=where, reload=reload, log_stderr=True)", "title": "" }, { "docid": "84559dc0b26564911b7b60b9d01aeb1f", "score": "0.51019806", "text": "def add_variable(self, var, domain):\n if var in self.variables:\n raise Exception(\"Variable name already exists: %s\" % str(var))\n\n self.numVars += 1\n self.variables.append(var)\n self.values[var] = domain\n self.unaryFactors[var] = None\n self.binaryFactors[var] = dict()", "title": "" }, { "docid": "4fc8b3c9b4bdb49630113516ed29ff6b", "score": "0.5091044", "text": "def add_variable(self,name, opt_value=0):\n\t\tself.variables[name] = opt_value", "title": "" }, { "docid": "73a95e41a740930f2898481d8b6f9784", "score": "0.5074586", "text": "def var(self, name, type=\"float32\", shape=None):\n if isinstance(name, Variable): return name\n v = self.vars.get(name, None)\n if v == None:\n v = Variable(name)\n self.vars[name] = v\n v.type = type\n if shape is not None: v.shape = shape\n return v", "title": "" }, { "docid": "be9535e3cf2217e4ec40a85589ca8352", "score": "0.50581855", "text": "def Variable(initial_value, name, trainable=None):\n raise NotImplementedError", "title": "" }, { "docid": "845a7fb0185db44efeeefd6c4d0ce4fb", "score": "0.5056541", "text": "def addService(self, id_service, desc_service, host, port):\n # TODO\n self.services[id_service] = [desc_service, host, port]", "title": "" }, { "docid": "1784a3c81f67489f3af2f67762e854ec", "score": 
"0.5042215", "text": "def var():\n\n return variable.Variable(\"generic_var\",template_units.kg_s,\"A generic var\")", "title": "" }, { "docid": "b28ccf3f3e5504994d442fd086f76baa", "score": "0.5020835", "text": "def __setattr__(self, key, var):\n if not self._after_init:\n object.__setattr__(self, key, var)\n else:\n if type(var) is VariableInfo or var is None:\n object.__setattr__(self, key, var)\n # add it to the list of variables\n self._variables.append(key)\n else:\n raise TypeError(\"Can only set attributes of type VariableInfo.\")", "title": "" }, { "docid": "b18b911166b4e57113a96dad0363061b", "score": "0.5010512", "text": "def add_workflowservice(wes_id):\n set_yaml(section='workflowservices', var2add=wes_id)", "title": "" }, { "docid": "dbcbd25faabfc615d9d61ae83d298409", "score": "0.49794123", "text": "def add_service_ref_to_solution(self, service_type):\n self.schema_dict_tmpl[SOLUTION_KEY][self.app_name][SERVICE_KEY]\\\n .append({\"$ref\": topic_ref_transform(\"#/services/{0}\", service_type)})", "title": "" }, { "docid": "024678819d4cc34a71ae2e45740064fc", "score": "0.49627677", "text": "def add_variable(self, variable):\n if not isinstance(variable, Variable):\n raise TypeError(f\"Expected {Variable}; got {type(variable)}.\")\n self.variables[variable.name] = variable", "title": "" }, { "docid": "024678819d4cc34a71ae2e45740064fc", "score": "0.49627677", "text": "def add_variable(self, variable):\n if not isinstance(variable, Variable):\n raise TypeError(f\"Expected {Variable}; got {type(variable)}.\")\n self.variables[variable.name] = variable", "title": "" }, { "docid": "70b9fb29bb32820944e7ba15bb2ca145", "score": "0.4937653", "text": "def step_impl(context, name, type_env):\n type_value = eval(type_env)\n context.data['type_env'][name] = type_value", "title": "" }, { "docid": "5684265b28e6fec35c1f0f0f8f53a1b8", "score": "0.49179962", "text": "def register_service(self, service):\n self._services.append(service)", "title": "" }, { "docid": "054391b1e377bed457c433e23cd3aed3", "score": "0.49073106", "text": "def add_var(self, name: str, size: int) -> None:\n self.named_variables.append(name)\n self.var_offset[name] = self.var_total_size\n self.var_size[name] = size\n self.var_total_size += size", "title": "" }, { "docid": "2fb56f52cd21385c775b4130a8a4f625", "score": "0.49039307", "text": "def __inject(model, model_context):\n __kwargs[variable_injector.VARIABLE_FILE_APPEND_ARG] = variable_injector.VARIABLE_FILE_UPDATE\n inserted, variable_model, variable_file_name = VariableInjector(_program_name, model, model_context,\n WebLogicHelper(\n __logger).get_actual_weblogic_version()). \\\n inject_variables_keyword_file(**__kwargs)\n if inserted:\n model = Model(variable_model)\n return inserted, model", "title": "" }, { "docid": "a8539f3cad57b511a87fe52c09debd7f", "score": "0.4900647", "text": "def add_symbol(self, key, ty, val): # pylint: disable=invalid-name\n if key in self.symbols.keys():\n old = str(self.symbols[key])\n new = str((ty, val))\n _internal_assert(False, f\"Name conflict in symbol table! 
[{key}] {old} -> {new}\")\n\n self.symbols[key] = ty, val\n\n if ty == Symbol.ThreadBind:\n if val.var.name not in self.binds.keys():\n self.binds[val.var.name] = val\n return\n val_ = self.binds[val.var.name]\n _internal_assert(\n tvm.tir.analysis.expr_deep_equal(val_.dom.extent, val.dom.extent),\n \"Thread extents should be uniform!\",\n )\n self.symbols[key] = ty, val_", "title": "" }, { "docid": "ecfff0f9c2e52140e4db0843aacbf130", "score": "0.4894691", "text": "def addVarReg():\n\n cl.cycleOverList('addVarReg')", "title": "" }, { "docid": "e3b72b81307247c0f690038494214360", "score": "0.48906937", "text": "def add_variable(self, name, label, style='.-', color=None, use_std=True):\n\n plot = {\n 'name': name,\n 'label': label,\n 'style': style,\n 'use_std': use_std,\n 'color': color,\n 'data': [],\n 'data_inf': [],\n 'data_sup': [],\n\n 'line': None,\n 'line_std': None\n }\n\n self._variables.append(plot)", "title": "" }, { "docid": "f9afe34c14c0a26fdf21f2109b334199", "score": "0.48824722", "text": "def add_service(\n self,\n service_type: str,\n handler: ScanHandler,\n device_info_extractor: DevInfoExtractor,\n ) -> None:\n self._services[service_type] = (handler, device_info_extractor)", "title": "" }, { "docid": "78d4a1aaad48ce397bf59ebe8bfa94f5", "score": "0.4875754", "text": "def define(self, name, var_type, kind):\n # creates new var with the next index\n new_var = Variable(name, var_type, kind, self.__var_segment_count[kind])\n # adds the var to the correct dictionary\n if kind in CLASS_VAR_DEC_KEYWORDS:\n self.__class_variables[name] = new_var\n else:\n self.__subroutine_variables[name] = new_var\n\n self.__var_segment_count[kind] += 1 # increment the next index of that kind", "title": "" }, { "docid": "ea6c241bbd5399df19bbb65a56f574d9", "score": "0.48703286", "text": "def set_service(self, name, obj):\r\n self.__service_map[name] = obj", "title": "" }, { "docid": "5a48106cf07f26247012102f727f824a", "score": "0.48689204", "text": "def set_variable(self, id: str, value: Any) -> None:\n self.__variables[id] = value", "title": "" }, { "docid": "cb13ea8e2033127b154cd4d49a762287", "score": "0.4868534", "text": "def add_service(session):\n service = SpeechLocalization(session)\n id = session.registerService(SpeechLocalization.service_name, service)\n return [service, id]", "title": "" }, { "docid": "4c0beb438d0c5b3661bf7a00284d14c1", "score": "0.48595047", "text": "def create_variable(self): #, name, is_input_feed=False, is_python=False, local=False, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "2eda9ea2d8b4802327cc277d959b7527", "score": "0.48450002", "text": "def add(self, tier: str):\n self.tiers.append(tier)", "title": "" }, { "docid": "254a2541b304e2672a5c235e4e0f585d", "score": "0.48305318", "text": "def variable(type_name, name):\n var_type = ee_types.nameToClass(type_name) or computedobject.ComputedObject\n result = var_type.__new__(var_type)\n result.func = None\n result.args = None\n result.varName = name\n return result", "title": "" }, { "docid": "1d94069190d94f57b6b28eca7ab223f9", "score": "0.48005414", "text": "def _register_variable(self, v):\n\n if v.idx is not None:\n return # nothing to do.\n\n v.idx = self._next_idx()\n self._all_variables[v.idx] = v\n\n if v.tuning_param is not None:\n self._tuned_variables.add(v)\n\n # case over the variable's type\n if isinstance(v, Seq):\n self._seq_variables.add(v)\n self._register_expressions(v.inner_expressions)\n\n elif isinstance(v, MSet):\n self._mset_variables.add(v)\n 
self._register_expressions(v.inner_expressions)\n\n elif isinstance(v, UCyc):\n self._ucyc_variables.add(v)\n self._register_expressions(v.inner_expressions)\n\n elif isinstance(v, Set):\n self._set_variables.add(v)\n self._register_expressions(v.inner_expressions)\n\n elif isinstance(v, Cyc):\n self._cyc_variables.add(v)\n self._register_expressions(v.inner_expressions)", "title": "" }, { "docid": "4f27d76eed78930dd0a6d872d994d0c8", "score": "0.47718805", "text": "def add_input_variable(self, name, vartype, **kw):\n input_var = Variable(name, precision = vartype, **kw)\n self.arg_list.append(input_var)\n # WARNING: self.function_type.arg_list_precision is not updated\n return input_var", "title": "" }, { "docid": "06e929d45e3aae61aacaa7abf3bf071d", "score": "0.47694758", "text": "def add_variable(self, vartype, var):\n var.body = self.id\n\n super(Body, self).add_variable(vartype, var)", "title": "" }, { "docid": "9cf5cb78b6c06dcdcac406a34cb9724a", "score": "0.47619912", "text": "def add_var_hook(self, name, cb = None):\n\n if type(name).__name__ == 'dict':\n self._hooks.update(name)\n \n elif type(name).__name__ == 'str' and callable(cb):\n self._hooks[name] = cb", "title": "" }, { "docid": "8c762d1905afc842ad483a8e1474ec48", "score": "0.47408482", "text": "def __setattr__(self, name, value):\n if name == 'recipe':\n # set the prodtype list\n try:\n self.prodtypes = [self.prodtype_map[step] for step in value]\n except AttributeError:\n self.prodtypes = list()\n super().__setattr__(name, value)", "title": "" }, { "docid": "25aa544b97659805f627a20f7bdd61cc", "score": "0.4736419", "text": "def add_stkvar(*args):\r\n return _idaapi.add_stkvar(*args)", "title": "" }, { "docid": "42f196f2e87744d06f70d24d61967f5b", "score": "0.4714275", "text": "def opt_var(self, var_name, dtype=None, dim=None, units=None):\n self.variables[var_name] = Variable(dtype, dim, units)\n self.optional_vars.append(var_name)", "title": "" }, { "docid": "e80022b5ed6edbf664ca32526d5c11be", "score": "0.46884003", "text": "def _type_variable(self):\n v = Variable()\n v.type = VariableType.TYPE\n self._register_variable(v)\n return v", "title": "" }, { "docid": "83c93cd48db82f68b8389bc667e637f1", "score": "0.4688399", "text": "def add_regvar(*args):\r\n return _idaapi.add_regvar(*args)", "title": "" }, { "docid": "09de7da085bd94d00d2db85aab74c540", "score": "0.46875712", "text": "def add_var(self, name, definition):\n if name in self.variables:\n raise UpdateMakefilesException('variable already exists: %s' % name)\n self.variables[name] = Makefile.Variable(name, definition)", "title": "" }, { "docid": "e3ca0e04715da7487d455188deb04a64", "score": "0.46831843", "text": "def _set_tvar(self, key, val):\n keyt, edit = translate(str(key), str(val))\n # if keyt == \"SCHOST\" and val == \"ccb\": raise DefError\n if self.load is None:\n self.load = ecflow.Variable(keyt, edit)\n else:\n next = self.next\n self.next = Edit(keyt, edit, next)", "title": "" }, { "docid": "c4b68749a428281109e1143307749eb8", "score": "0.46808255", "text": "def register_parameter(self, variable, as_weight=False, as_bias=False, name=None, **collection_key_value_pairs):\n # Get variable name from variable if name is not given\n name = name if name is not None else variable.name\n # Attach collection key-value pairs\n utils.add_to_antipasti_collection(variable, antipasti_name=name, **collection_key_value_pairs)\n # Write to dict\n self._parameters['[LayerID:{}][{}]'.format(self.name, name)] = variable\n # Add to GraphKeys.TRAINABLE_VARIABLES if trainable\n if 
utils.is_antipasti_trainable(variable):\n A.add_to_collection(A.Collections.TRAINABLE_VARIABLES, variable)\n # Add to Collections.REGULARIZABLE_VARIABLES if regularizable\n if utils.is_antipasti_regularizable(variable):\n A.add_to_collection(A.Collections.REGULARIZABLE_VARIABLES, variable)\n # Add to GraphKeys.WEIGHTS if parameter is being registered as weight\n if as_weight:\n A.add_to_collection(A.Collections.WEIGHTS, variable)\n # Add to GraphKeys.BIASES if parameter is being registered as a bias\n if as_bias:\n A.add_to_collection(A.Collections.BIASES, variable)\n # Done.\n return variable", "title": "" }, { "docid": "a48f9c6b5dbc432a584cc5096d9a1c44", "score": "0.4678225", "text": "def addVariable(\n self, component, group, parm, value=None, lower=None, upper=None, scale=1.0, scaledStep=True, dh=1e-6\n ):\n\n container_id = self.vspModel.FindContainer(component, 0)\n if container_id == \"\":\n raise Error(\"Bad component for DV: %s\" % component)\n\n parm_id = self.vspModel.FindParm(container_id, parm, group)\n if parm_id == \"\":\n raise Error(f\"Bad group or parm: {component} {group} {parm}\")\n\n # Now we know the parmID is ok. So we just get the value\n val = self.vspModel.GetParmVal(parm_id)\n\n dvName = f\"{component}:{group}:{parm}\"\n\n if value is None:\n value = val\n\n if scaledStep:\n dh = dh * value\n\n if value == 0:\n raise Error(\n \"Initial value is exactly 0. scaledStep option cannot be used\"\n \"Specify an explicit dh with scaledStep=False\"\n )\n\n self.DVs[dvName] = vspDV(parm_id, dvName, component, group, parm, value, lower, upper, scale, dh)", "title": "" }, { "docid": "0429bf31ef91086fdf672452fa5981d1", "score": "0.4675014", "text": "def add_service(self, svc_id: str, service: ServiceBase):\n self._services[svc_id] = service", "title": "" }, { "docid": "b5d4224122a95fee641111e54b867ad2", "score": "0.46585572", "text": "def add_component_to_skymodel(sm: SkyModel, comp: SkyComponent):\n sm.components.append(comp)\n return sm", "title": "" }, { "docid": "28149eb10f1ec09b7c4f0d0206de5074", "score": "0.46506104", "text": "def provide(self, typ: Type[T], inst: T):\n self._instances[typ] = inst", "title": "" }, { "docid": "5d17fde5c15b1eb590666033e4bce0dd", "score": "0.46450153", "text": "def Variable(self, variables, workSpace):", "title": "" }, { "docid": "2b884a4f3754b090de619b0001e86300", "score": "0.4641824", "text": "def add_random_variable(self, var):\r\n self.named_vars[var.name] = var\r\n if not hasattr(self, var.name):\r\n setattr(self, var.name, var)", "title": "" }, { "docid": "6d888912c210c69e4fc5f7f10eed23ca", "score": "0.46408108", "text": "def make_var(step, var_type, options):\n switch = {\"linear\": make_linear_var, \"exponential\": make_exponential_var}\n return switch[var_type](step=step, **options)", "title": "" }, { "docid": "3783e07dfbaf9f1ee738e37d858513ad", "score": "0.46239263", "text": "def req_var(self, var_name, dtype=None, dim=None, units=None):\n self.variables[var_name] = Variable(dtype, dim, units)\n self.required_vars.append(var_name)", "title": "" }, { "docid": "1cc61cc199f2c634257e661fb5ed7322", "score": "0.46196917", "text": "def addFeatureSpec(self, name, spec):\n spec = dict(spec)\n typeName = spec.pop('type', None)\n featureCls = self.getFeatureClass(typeName)\n self._features[name] = featureCls(**spec)", "title": "" }, { "docid": "f384b2f19e43a291fab1ecdc762c8035", "score": "0.4615893", "text": "def Deterministic(name, var, model=None):\r\n var.name = name\r\n modelcontext(model).deterministics.append(var)\r\n 
modelcontext(model).add_random_variable(var)\r\n return var", "title": "" }, { "docid": "b31a73c73065b6d3b29698a4d041edb1", "score": "0.4614608", "text": "def get_trained_variables(self, Session,atom_types=[]):", "title": "" }, { "docid": "4a02774a85dda2b741bbe669873dae17", "score": "0.46116033", "text": "def service_add(self, **args) -> \"Service\":\n proto = self.bundle().service_prototype(**args)\n with allure_step(f\"Add service {proto.name} to cluster {self.name}\"):\n data = self._subcall(\"service\", \"create\", prototype_id=proto.id, cluster_id=self.id)\n return Service(self._api, id=data['id'])", "title": "" }, { "docid": "6dcb00737cc618fffe02427e40f5ce16", "score": "0.4608607", "text": "def add_feat( self, feat ) :\n self.features.add( feat.name, feat.value )", "title": "" }, { "docid": "9f685857a5e4295baacff6a0b90be24d", "score": "0.4605806", "text": "def add_servicepoint(self, identifier, callback):\n self._services[identifier] = callback", "title": "" }, { "docid": "51a939c890994fe01e003ece3f5e371b", "score": "0.46022916", "text": "def add_triple(domain_name,objectproprety,range_name,path):\n onto = get_ontology(path).load()\n with onto:\n exec('onto.{0}.{1} = [onto.{2}]'.format(domain_name,objectproprety,range_name))\n # onto.test1.has_for_ingredient = [onto.acetaminophen]\n onto.save(path)", "title": "" }, { "docid": "050ab371dc3ef3d31b4a907fadbd7500", "score": "0.45820224", "text": "def addSpeciesType(self, *args):\n return _libsbml.Model_addSpeciesType(self, *args)", "title": "" }, { "docid": "2423f46247ca24f3df7dd52ca5107061", "score": "0.45807213", "text": "def add_service(self,service):\n if service in self.services:\n pass #service already exists\n else:\n self.services.append(service)", "title": "" }, { "docid": "b5b31a2384dfa09acd7ef2d25ee1101c", "score": "0.45603275", "text": "def __init__(\n self_,\n name: str,\n type: SyntheticsConfigVariableType,\n example: Union[str, UnsetType] = unset,\n id: Union[str, UnsetType] = unset,\n pattern: Union[str, UnsetType] = unset,\n secure: Union[bool, UnsetType] = unset,\n **kwargs,\n ):\n if example is not unset:\n kwargs[\"example\"] = example\n if id is not unset:\n kwargs[\"id\"] = id\n if pattern is not unset:\n kwargs[\"pattern\"] = pattern\n if secure is not unset:\n kwargs[\"secure\"] = secure\n super().__init__(kwargs)\n\n self_.name = name\n self_.type = type", "title": "" }, { "docid": "26d7714c1c17b8c30d6beaf0b459e65e", "score": "0.45563242", "text": "def _set_service_config_var(service, var_name, value):\n run(\n \"echo '%s: %s' | sudo tee -a /var/opt/magma/configs/%s.yml\" % (\n var_name, str(value), service,\n ),\n )", "title": "" }, { "docid": "51bc5594f03c033631733987a8b94c67", "score": "0.4554668", "text": "def type(self, type):\n\n self.container['type'] = type", "title": "" }, { "docid": "3d5183973eee60784baa08a7d0ea35e1", "score": "0.45537454", "text": "def _hook_variable(self):\n # Overload 'special' methods here\n self._hook___new__(torch.autograd.variable.Variable)\n self._hook_var_contents()\n self._hook_var_owners()\n\n for attr in dir(torch.autograd.variable.Variable):\n\n # Conditions for inclusion/exclusion\n if attr in self.exclude + self.var_exclude:\n continue\n lit = getattr(torch.autograd.variable.Variable, attr)\n is_base = attr in dir(object)\n is_desc = inspect.ismethoddescriptor(lit)\n # is_func = isinstance(type(lit), types.FunctionType)\n is_func = isinstance(lit, types.FunctionType)\n try:\n is_service_func = 'HookService' in lit.__qualname__\n except:\n is_service_func = False\n is_old 
= re.match('old*', attr) is not None\n\n # Where the overloading happens\n if ((is_desc or (is_func and not is_service_func)) and not is_base and not is_old):\n passer = utils.pass_method_args(lit)\n new_attr = self._get_overload_method_in_tensor_or_var(passer)\n setattr(torch.autograd.variable.Variable,\n 'old_{}'.format(attr), lit)\n setattr(torch.autograd.variable.Variable, attr, new_attr)\n\n self._hook_send_(torch.autograd.variable.Variable)\n self._hook_get_(torch.autograd.variable.Variable)\n self._hook_var_serde()", "title": "" }, { "docid": "4e929d8b87b5e081f80a4ee8f822f056", "score": "0.45482057", "text": "def model(self, name, model):\n\n self.models[name] = model", "title": "" }, { "docid": "954250acfbdac177818e95e9c1eda8ed", "score": "0.45444962", "text": "def add_modelservice_to_registry(self, modelservice_name, ModelServiceClassDef):\n self.modelservice_registry[modelservice_name] = ModelServiceClassDef", "title": "" }, { "docid": "1b13ea8eba5ea76709b973cd73e7f7e4", "score": "0.45395696", "text": "def add_variable(nc, var):\n if nc is None:\n raise AttributeError(\"No file was specified\")\n if isinstance(nc, netCDF4._netCDF4.Dataset):\n pass\n else:\n nc = netCDF4.Dataset(nc, \"a\")\n\n # Handle the dimensions by enforcing a tuple list rather\n # than a list of strings, then add whatever we have\n try:\n dims = var['dims'].replace(\" \", \"\").split(',')\n except:\n dims = var['dims']\n try:\n nvar = nc.createVariable(var[\"name\"], var[\"type\"], dims)\n except:\n nvar = nc.createVariable(var[\"name\"], var[\"type\"])\n\n try:\n for key in var[\"attr\"]:\n # Check if it is a number and convert\n astr = __number_or_string(var[\"attr\"][key])\n setattr(nvar, key, astr)\n except KeyError:\n pass\n\n return nc", "title": "" }, { "docid": "e2007eed04f9d79d37cb2d12f40eaa1c", "score": "0.45244172", "text": "def addVariable(self, name : str, value) -> LookUpTable:\n # Check if variable exists \n result = self.modVariable(name, value) \n if result is None:\n newItem = LookUpItem(name, value)\n return LookUpTable(self.items + [newItem])\n else:\n return result", "title": "" }, { "docid": "6fe30ca8aac6e53b9cfcfa2c78760ec5", "score": "0.45215234", "text": "def seed_type(self, name, typ):\n self.typevars[name].lock(typ)", "title": "" }, { "docid": "1a554f5e1ad78308080dd2e8dcf41c03", "score": "0.4517167", "text": "def AddSymbol(self,name,stype,body=None):\n if self.IsSymbol(name):\n raise Exception('Program Bug -- name \"%s\" already exists is symbols' % name);\n self.symbols['list'].append(name);\n self.symbols['type'].append(stype);\n self.symbols['body'].append(body);", "title": "" }, { "docid": "afd0af436c849e3a11d8e676caf08cf9", "score": "0.45027423", "text": "def _add_weight(self, name, initial_value, dtype=None):\n variable = variable_v1.VariableV1(\n initial_value=initial_value,\n name=name,\n dtype=dtype,\n trainable=False,\n use_resource=True,\n synchronization=variables.VariableSynchronization.AUTO,\n # Set aggregation to NONE, as loss scaling variables should never be\n # aggregated.\n aggregation=variables.VariableAggregation.NONE)\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key # pylint: disable=protected-access\n\n key = (name, graph_key)\n if self._weights.get(key, None) is not None:\n raise RuntimeError('Duplicate variables detected. 
{}'.format(key))\n self._weights[key] = variable\n self._handle_deferred_dependencies(name=name, trackable=variable)\n return variable", "title": "" }, { "docid": "19c9f61ffb201c3cd36364280d3f3321", "score": "0.44942945", "text": "def _add_to_dict(self, name, attr):\n if name not in self._dict:\n logger.debug(\"Added variable '%s' to collection\", name)\n self._dict[name] = attr", "title": "" }, { "docid": "1d64d7b1e9d7dcd83285585c625cbce8", "score": "0.44941947", "text": "def AddVariable(self, variable):\n callResult = self._Call(\"AddVariable\", variable)", "title": "" }, { "docid": "4387e584cf37f38ca7b3b40f91b7d189", "score": "0.4493591", "text": "def _add_ttl_ns(self, line):\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\n lg.setLevel(self.log_level)\n\n lg.debug(\"line:\\n%s\", line)\n line = str(line).strip()\n # if the line is not a prefix line exit\n if line is None or line == 'none' or line == '' \\\n or not line.lower().startswith('@prefix'):\n return\n # parse the turtle line\n line = line.replace(\"@prefix\",\"\",1).strip()\n if line.endswith(\".\"):\n line = line[:-1]\n prefix = line[:line.find(\":\")].strip()\n uri = self.clean_iri(line[line.find(\":\")+1:].strip())\n # add the namespace to the class\n lg.debug(\"\\nprefix: %s uri: %s\", prefix, uri)\n self.bind(prefix, uri, override=False, calc=False)", "title": "" }, { "docid": "e51362f31c5b35dd0258c125ae5c7aa3", "score": "0.44895974", "text": "def service(self, service):\n\n self._service = service", "title": "" }, { "docid": "7c3493e6d4ddd2c8d1d1c25202443291", "score": "0.44788188", "text": "def __setitem__(self, name: str, value):\n attribute = self.model[name]\n if not isinstance(value, attribute.type):\n value = attribute.type(value)\n super().__setitem__(name, value)", "title": "" }, { "docid": "4e05e4f277b531c1119e4daec0dc6084", "score": "0.44782597", "text": "def add_vars(self, **names):\n for (name, attr) in names.items():\n if isinstance(attr, Variable):\n attr_var = attr\n else:\n attr_var = Variable(\n name,\n attr.get(n.STANDARD_NAME, DEFAULT_INFO),\n attr.get(n.LONG_NAME, DEFAULT_INFO),\n attr.get(n.UNITS, DEFAULT_INFO))\n self._add_to_dict(name, attr_var)", "title": "" }, { "docid": "75ae13c69cf34e6bee58d23bb49ea474", "score": "0.44745833", "text": "def add(self, val: object) -> None:\n self._ctx[type(val)] = val", "title": "" }, { "docid": "65ef7ee407a390011f398cc52e7e352c", "score": "0.4469828", "text": "def put_variable(self, col: str, name: str, value: Any):\n if self.scope is None:\n raise ValueError(\"Can't access variables on unbound modules\")\n self.scope.put_variable(col, name, value)", "title": "" }, { "docid": "f15198a08547e33a19f010e2898717e3", "score": "0.445989", "text": "def add_variable(self, name, description, default=None, min=None, max=None, edit_method=\"\"):\n if type(default) == int:\n if edit_method == \"\":\n self.add(name, \"int\", 0, description, default, min, max)\n else: # enum\n self.add(name, \"int\", 0, description, default, min, max, edit_method)\n elif type(default) == float:\n self.add(name, \"double\", 0, description, default, min, max)\n elif type(default) == str:\n self.add(name, \"str\", 0, description, default)\n elif type(default) == bool:\n self.add(name, \"bool\", 0, description, default)\n\n return default", "title": "" }, { "docid": "07789a59bae2a38738250a04f45259e9", "score": "0.44525698", "text": "def add_auto_stkpnt(*args):\r\n return _idaapi.add_auto_stkpnt(*args)", "title": "" }, { "docid": "0156454b525cdc3e2964299e2ec54f9b", "score": 
"0.44474766", "text": "def substitutetype(ty):\n\n ty = ty.replace(\"std::\", \"\")\n ty = re.sub(r\"(.*)<(.*)>\",\"\\\\1< \\\\2 >\",ty)\n ty = re.sub(r\"glm::tvec(.*)< T, P >\", \"glm::vec\\\\1\", ty)\n ty = re.sub(r\"tvec(.*)< T, P >\", \"glm::vec\\\\1\", ty)\n ty = re.sub(r\"glm::tmat(.*)< T, P >\", \"glm::mat\\\\1\", ty)\n ty = re.sub(r\"tmat(.*)< T, P >\", \"glm::mat\\\\1\", ty)\n ty = ty.replace(\"glm::mat2x2\", \"glm::mat2\")\n ty = ty.replace(\"glm::mat3x3\", \"glm::mat3\")\n ty = ty.replace(\"glm::mat4x4\", \"glm::mat4\")\n ty = re.sub(r\"glm::tquat(.*)< T, P >\", \"glm::quat\\\\1\", ty)\n ty = re.sub(r\"tquat(.*)< T, P >\", \"glm::quat\\\\1\", ty)\n return ty", "title": "" }, { "docid": "614db44ba135598f6aa013d6d3e24fbf", "score": "0.44427925", "text": "def add_feature(self, word, feature_name, is_test):\n self.features[word][feature_name] = 1\n if not is_test:\n self.feat_freq[feature_name] += 1", "title": "" }, { "docid": "3cc64afd02b922e3516aaa2d0f8e9d57", "score": "0.44409594", "text": "def define(self, key, value):\n self.bindings[key] = value", "title": "" }, { "docid": "a29d39a72159d29329b007249c0ff8a1", "score": "0.44374675", "text": "def add_type(type, ext, strict=True):\n if _db is None:\n init()\n return _db.add_type(type, ext, strict)", "title": "" }, { "docid": "507292c0496fcaa2f2d177e85e5dcf6a", "score": "0.443726", "text": "def add_variable(self):\n\n self._max_var_id += 1\n\n return CNFExprVar(self._max_var_id)", "title": "" }, { "docid": "193fce564413aab280711e344891165c", "score": "0.44345352", "text": "def __setattr__(self, name, value):\n self.scope[name] = value", "title": "" }, { "docid": "520d0e4dc24e362c3395271899874cc0", "score": "0.44314107", "text": "def add(self, name, value):\n self.local_dictionary[name] = value", "title": "" } ]
16d45c9acf46f40f244ccc434e3d5dfb
Adds liquid fuel or atomic fuel tanks to design
[ { "docid": "6723dea64db5b09e54a14db3774b557c", "score": "0.69049513", "text": "def add_conventional_tanks(self, lf):\n if self.mainengine.name == \"LFB Twin-Boar\":\n lf = max(lf, 36000)\n self.notes.append(\"6400 units of liquid fuel are already included in the engine\")\n if self.fueltype is parts.FuelTypes.LiquidFuel:\n smalltankcount = ceil(lf / parts.RocketFuelTanks[parts.SmallestTank[self.size]].m_full)\n else:\n # atomic fuel\n # Adomic Fuel is liquid fuel without oxidizer.\n smalltankcount = ceil(lf / (parts.RocketFuelTanks[parts.SmallestTank[self.size]].m_full *\n parts.AtomicTankFactor))\n self.notes.append(\"Atomic fuel is regular liquid fuel w/out oxidizer (remove oxidizer in VAB!)\")\n # Fuel tank calculation:\n # We use that\n # - Tank size is 2^n times the size of smallest tank with that radius\n # - It is cheapest to use the biggest tank possible\n for i in range(parts.SmallestTank[self.size], parts.BiggestTank[self.size]+1):\n if i != parts.BiggestTank[self.size]:\n if smalltankcount % 2 != 0:\n self.fueltanks.append((1, parts.RocketFuelTanks[i]))\n smalltankcount = smalltankcount // 2\n else:\n if self.mainengine.name == \"LFB Twin-Boar\":\n smalltankcount -= 1\n if smalltankcount > 0:\n self.fueltanks.append((smalltankcount, parts.RocketFuelTanks[i]))\n self.fueltanks.append((1, parts.TwinBoarPseudoTank))\n else:\n if smalltankcount > 0:\n self.fueltanks.append((smalltankcount, parts.RocketFuelTanks[i]))", "title": "" } ]
[ { "docid": "c4376dbfe4ecc36c450fad9f25cf7233", "score": "0.63634956", "text": "def add_special_tanks(self, xf, tank):\n tankcount = ceil(xf / tank.m_full)\n if tank.size == parts.RadialSize.RadiallyMounted:\n tankcount = max(tankcount, 2)\n self.requiredscience.add(tank.level)\n self.fueltanks.append((tankcount, tank))", "title": "" }, { "docid": "082cfc2b4b7fa373b344bef0cb13906c", "score": "0.5959416", "text": "def build(self):\n\n # Create a custom grid, fe_set \n # why create an fe_set instead of using Transformation \n # factory?\n nfe = 6\n fe_a = 1/4.0\n fe_b = 0.2\n fe_set = [0, 0.004]\n for i in range(1,nfe+1):\n if i < nfe*fe_a:\n fe_set.append(i*fe_b/(nfe*fe_a))\n elif i == nfe: \n fe_set.append(1)\n else:\n fe_set.append(fe_b + (i-nfe*fe_a)*(1-fe_b)/(nfe*(1-fe_a)))\n\n \"\"\"\n Args: (to MB_CLC_fuel object, as defined in model file)\n dae_method = method to use for calcuating derivatives (default = OCLR)\n - BFD1 - 1st order backwards finite difference\n - OCLR - Orthogonal collocation, Lagrange-Radau\n - OCLL - Orthogonal collocation, Lagrange-Legendre\n press_drop = Pressure drop correlation for superficial velocity calc.\n - SimplifiedP - simplified pressure correlations \n - Ergun - Ergun equation\n fe_set = set of normalised finite element locations\n nfe = number of finite elements for bed discretization (default = 15)\n (not used if fe_set specified)\n ncp = number of collocation points (OCLR or OCLL only, default = 3)\n\n fe_set_t\n nfe_t \n ^ adding time set as a model-level continuous set...\n will change when moving to new framework\n \"\"\" \n # Create unit model for fuel reactor\n # unit model - an attribute of the flowsheet model\n # \"a block within a block\"\n #self.MB_fuel = MB_CLC_fuel.MB(\n # parent=self,\n # dae_method = 'OCLR',\n # press_drop = 'Ergun',\n # fe_set = fe_set,\n # ncp = 3)\n\n # need to add time set to the above\n # open question still:\n # how long of a horizon should I simulate?\n #\n # why is nfe in z-dimension not an input here?\n self.MB_fuel = MB_CLC_fuel.MB(\n parent=self,\n dae_method = 'OCLR',\n press_drop = 'Ergun',\n fe_set = fe_set,\n ncp = 3,\n horizon = 1, # was 10\n nfe_t = 1, # \" \"\n ncp_t = 3)", "title": "" }, { "docid": "fe2ee9896aa4f33c95c28e736bdbce24", "score": "0.57530856", "text": "def create_lf_design(payload, pressure, dv, acc, eng,\n size=None, count=1, fueltype=parts.FuelTypes.LiquidFuel, tank=None):\n if size is None:\n size = eng.size\n design = Design(payload, eng, count, size, fueltype)\n if fueltype is parts.FuelTypes.LiquidFuel:\n f_e = 1 / 8\n elif fueltype is parts.FuelTypes.AtomicFuel:\n f_e = parts.AtomicTank_f_e\n else:\n f_e = tank.f_e\n m_p = payload + count*eng.m\n lf = physics.lf_needed_fuel(dv, physics.engine_isp(eng, pressure), m_p, f_e)\n if lf is None:\n return None\n if fueltype is parts.FuelTypes.LiquidFuel or fueltype is parts.FuelTypes.AtomicFuel:\n design.add_conventional_tanks((1 + f_e) * lf)\n else:\n design.add_special_tanks((1 + f_e) * lf, tank)\n design.calculate_performance(dv, pressure)\n if not design.has_enough_acceleration(acc):\n return None\n return design", "title": "" }, { "docid": "3426d73dc82411122530ba3c274ebc77", "score": "0.5537668", "text": "def add_credit_calculation_fuel_types_0111(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n\n approved_fuel = apps.get_model('api', 'ApprovedFuel')\n\n approved_fuel.objects.using(db_alias).filter(\n credit_calculation_only=True\n ).delete()\n\n approved_fuel.objects.using(db_alias).bulk_create([\n approved_fuel(\n 
name=\"Petroleum-based diesel\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n ),\n approved_fuel(\n name=\"Petroleum-based gasoline\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n )\n ])", "title": "" }, { "docid": "4fa7dc54cbd0731fe1fd8be6249b9b1e", "score": "0.55242157", "text": "def add_design(design, eosscontext: EOSSContext, active: bool):\n\n if active:\n Design.objects.create(activecontext=eosscontext.activecontext,\n id=design['id'],\n inputs=json.dumps(design['inputs']),\n outputs=json.dumps(design['outputs']))\n else:\n Design.objects.create(eosscontext=eosscontext,\n id=design['id'],\n inputs=json.dumps(design['inputs']),\n outputs=json.dumps(design['outputs']))\n eosscontext.added_archs_count += 1\n\n eosscontext.last_arch_id += 1\n\n if eosscontext.added_archs_count >= 5:\n eosscontext.added_archs_count = 0\n activate_diversifier(eosscontext)\n\n eosscontext.save()", "title": "" }, { "docid": "3ff7db4149d11baca3408731d4d1b4d1", "score": "0.54325074", "text": "def fuel(self, fuel):\n\n self._fuel = fuel", "title": "" }, { "docid": "521030d08546c9b055941c622782262f", "score": "0.5401777", "text": "def fuel(self, f: float):\n if isinstance(f, float) is False:\n raise Warning(\"[WARNING]Fuel is not Float type.\")\n if f < 0: # <= ?\n raise Warning(\"[WARNING]Fuel have to Positive.\")\n self.__gas_remained += f\n if self.__gas_remained > self.__GAS_CAPACITY:\n # 최대 용량까지는 채우고 넘친건 안채우고\n self.__gas_remained = self.__GAS_CAPACITY\n raise Warning(\"[WARNING]Fuel is overfilled.\")", "title": "" }, { "docid": "466f2aecdee189b7b185bc345ef5fbbf", "score": "0.5339595", "text": "def create_model(\n time_set=None,\n time_units=pyo.units.s,\n nfe=5,\n tee=False,\n calc_integ=True,\n):\n fs_cfg = {\"dynamic\": True, \"time_set\": time_set, \"time_units\": time_units}\n model_name = \"Steam Tank, Dynamic\"\n\n if time_set is None:\n time_set = [0, 3]\n\n m = pyo.ConcreteModel(name=model_name)\n m.fs = FlowsheetBlock(**fs_cfg)\n # Create a property parameter block\n m.fs.prop_water = iapws95.Iapws95ParameterBlock(\n phase_presentation=iapws95.PhaseType.LG\n )\n # Create the valve and tank models\n m.fs.valve_1 = Valve(\n dynamic=False,\n has_holdup=False,\n pressure_flow_callback=_valve_pressure_flow_cb,\n material_balance_type=MaterialBalanceType.componentTotal,\n property_package=m.fs.prop_water,\n )\n m.fs.tank = Heater(\n has_holdup=True,\n material_balance_type=MaterialBalanceType.componentTotal,\n property_package=m.fs.prop_water,\n )\n m.fs.valve_2 = Valve(\n dynamic=False,\n has_holdup=False,\n pressure_flow_callback=_valve_pressure_flow_cb,\n material_balance_type=MaterialBalanceType.componentTotal,\n property_package=m.fs.prop_water,\n )\n # Add a controller\n m.fs.ctrl = PIDController(\n process_var=m.fs.tank.control_volume.properties_out[:].pressure,\n manipulated_var=m.fs.valve_1.valve_opening,\n calculate_initial_integral=calc_integ,\n mv_bound_type=ControllerMVBoundType.SMOOTH_BOUND,\n controller_type=ControllerType.PI,\n )\n\n # The control volume block doesn't assume the two phases are in equilibrium\n # by default, so I'll make that assumption here, I don't actually expect\n # liquid to form but who knows. 
The phase_fraction in the control volume is\n # volumetric phase fraction hence the densities.\n @m.fs.tank.Constraint(m.fs.time)\n def vol_frac_vap(b, t):\n return (\n b.control_volume.properties_out[t].phase_frac[\"Vap\"]\n * b.control_volume.properties_out[t].dens_mol\n / b.control_volume.properties_out[t].dens_mol_phase[\"Vap\"]\n ) == (b.control_volume.phase_fraction[t, \"Vap\"])\n\n # Connect the models\n m.fs.v1_to_tank = Arc(source=m.fs.valve_1.outlet, destination=m.fs.tank.inlet)\n m.fs.tank_to_v2 = Arc(source=m.fs.tank.outlet, destination=m.fs.valve_2.inlet)\n\n # Add the stream constraints and do the DAE transformation\n pyo.TransformationFactory(\"network.expand_arcs\").apply_to(m.fs)\n pyo.TransformationFactory(\"dae.finite_difference\").apply_to(\n m.fs, nfe=nfe, wrt=m.fs.time, scheme=\"BACKWARD\"\n )\n\n # Fix the derivative variables to zero at time 0 (steady state assumption)\n m.fs.fix_initial_conditions()\n\n # Fix the input variables\n m.fs.valve_1.inlet.enth_mol.fix(50000)\n m.fs.valve_1.inlet.pressure.fix(5e5)\n m.fs.valve_2.outlet.pressure.fix(101325)\n m.fs.valve_1.Cv.fix(0.001)\n m.fs.valve_2.Cv.fix(0.001)\n m.fs.valve_1.valve_opening.fix(1)\n m.fs.valve_2.valve_opening.fix(1)\n m.fs.tank.heat_duty.fix(0)\n m.fs.tank.control_volume.volume.fix(2.0)\n\n # Fix controller settings\n m.fs.ctrl.gain_p.fix(1e-6)\n m.fs.ctrl.gain_i.fix(1e-5)\n # m.fs.ctrl.gain_d.fix(1e-6)\n # m.fs.ctrl.derivative_of_error[m.fs.time.first()].fix(0)\n m.fs.ctrl.setpoint.fix(3e5)\n m.fs.ctrl.mv_ref.fix(0)\n m.fs.ctrl.mv_lb = 0.0\n m.fs.ctrl.mv_ub = 1.0\n\n for t in m.fs.time:\n m.fs.valve_1.inlet.flow_mol[t] = 100 # initial guess on flow\n # simple initialize\n m.fs.valve_1.initialize()\n propagate_state(m.fs.v1_to_tank)\n m.fs.tank.initialize()\n propagate_state(m.fs.tank_to_v2)\n # Can't specify both flow and outlet pressure so free the outlet pressure\n # for initialization and refix it after. 
Inlet flow gets fixed in init, but\n # is unfixed for the final problem\n m.fs.valve_2.outlet.pressure.unfix()\n m.fs.valve_2.initialize()\n m.fs.valve_2.outlet.pressure.fix(101325)\n m.fs.valve_1.valve_opening.unfix()\n m.fs.valve_1.valve_opening[m.fs.time.first()].fix()\n # Return the model and solver\n return m", "title": "" }, { "docid": "90134edba142744be072caf15ee8cd90", "score": "0.53340214", "text": "def create_sfb_design(payload, pressure, dv, acc, sfb_allowed, eng, eng_F_percentage, size, count, sfb, sfbcount):\n design = Design(payload, eng, count, size, parts.FuelTypes.LiquidFuel)\n design.add_sfb(sfb, sfbcount)\n # lpsr = Fl * I_sps / Fs / I_spl\n lpsr = count * eng.F_vac * sfb.isp_vac / sfbcount / sfb.F_vac / eng.isp_vac\n design.eng_F_percentage = eng_F_percentage\n m_p = payload + count*eng.m\n lf = physics.sflf_concurrent_needed_fuel(dv, physics.engine_isp(eng, pressure),\n physics.engine_isp(sfb, pressure),\n m_p,\n design.get_sfbmountmass(), sfbcount * sfb.m_full, sfbcount * sfb.m_empty,\n lpsr * eng_F_percentage)\n if lf is None:\n return None\n design.add_conventional_tanks(9 / 8 * lf)\n design.calculate_performance(dv, pressure)\n if not design.has_enough_acceleration(acc):\n return None\n if not design.sfb_burning_when_allowed(sfb_allowed):\n return None\n if sfbcount != 1:\n design.notes.append(\"Set liquid fuel engine thrust to {:.0%} while SFB are burning\".format(eng_F_percentage))\n return design", "title": "" }, { "docid": "e1d92090255f7c6be24bcf6ab4eef980", "score": "0.5296159", "text": "def buildFuelModels(allowDynamicModels=True,allowNonBurningModels=False):\n fuelModels = dict()\n \n # Code FMx: Original 13 Fuel Models\n fuelModels[\"FM1\"] = FuelModel(\n 1, \"FM1\", \"Short grass [1]\",\n 1.0, 0.12, 8000, 8000,\n 0.034, 0, 0, 0, 0,\n 3500, 1500, 1500,\n False, True)\n fuelModels[\"FM2\"] = FuelModel(\n 2, \"FM2\", \"Timber grass and understory [2]\",\n 1.0, 0.15, 8000, 8000,\n 0.092, 0.046, 0.023, 0.023,\n 0,3000, 1500, 1500,\n False, True)\n fuelModels[\"FM3\"] = FuelModel(\n 3, \"FM3\", \"Tall grass [3]\",\n 2.5, 0.25, 8000, 8000,\n 0.138, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n fuelModels[\"FM4\"] = FuelModel(\n 4, \"FM4\", \"Chaparral [4]\",\n 6.0, 0.2, 8000, 8000,\n 0.230, 0.184, 0.092, 0, 0.230,\n 2000, 1500, 1500,\n False, True)\n fuelModels[\"FM5\"] = FuelModel(\n 5, \"FM5\", \"Brush [5]\",\n 2.0, 0.20, 8000, 8000,\n 0.046, 0.023, 0, 0, 0.092,\n 2000, 1500, 1500,\n False, True)\n fuelModels[\"FM6\"] = FuelModel(\n 6, \"FM6\", \"Dormant brush, hardwood slash [6]\",\n 2.5, 0.25, 8000, 8000,\n 0.069, 0.115, 0.092, 0, 0,\n 1750, 1500, 1500,\n False, True)\n fuelModels[\"FM7\"] = FuelModel(\n 7, \"FM7\", \"Southern rough [7]\",\n 2.5, 0.40, 8000, 8000,\n 0.052, 0.086, 0.069, 0, 0.017,\n 1750, 1500, 1500,\n False, True)\n fuelModels[\"FM8\"] = FuelModel(\n 8, \"FM8\", \"Short needle litter [8]\",\n 0.2, 0.3, 8000, 8000,\n 0.069, 0.046, 0.115, 0, 0,\n 2000, 1500, 1500,\n False, True)\n fuelModels[\"FM9\"] = FuelModel(\n 9, \"FM9\", \"Long needle or hardwood litter [9]\",\n 0.2, 0.25, 8000, 8000,\n 0.134, 0.019, 0.007, 0, 0,\n 2500, 1500, 1500,\n False, True)\n fuelModels[\"FM10\"] = FuelModel(\n 10, \"FM10\", \"Timber litter & understory [10]\",\n 1.0, 0.25, 8000, 8000,\n 0.138, 0.092, 0.230, 0, 0.092,\n 2000, 1500, 1500,\n False, True)\n fuelModels[\"FM11\"] = FuelModel(\n 11, \"FM11\", \"Light logging slash [11]\",\n 1.0, 0.15, 8000, 8000,\n 0.069, 0.207, 0.253, 0, 0,\n 1500, 1500, 1500,\n False, True)\n fuelModels[\"FM12\"] = FuelModel(\n 12, 
\"FM12\", \"Medium logging slash [12]\",\n 2.3, 0.20, 8000, 8000,\n 0.184, 0.644, 0.759, 0, 0,\n 1500, 1500, 1500,\n False, True)\n fuelModels[\"FM13\"] = FuelModel(\n 13, \"FM13\", \"Heavy logging slash [13]\",\n 3.0, 0.25, 8000, 8000,\n 0.322, 1.058, 1.288, 0, 0,\n 1500, 1500, 1500,\n False, True)\n \n if not allowDynamicModels:\n return fuelModels\n else:\n pass\n # 14-89 Available for custom models\n\n if allowNonBurningModels:\n # Code NBx: Non-burnable\n # 90 Available for custom NB model \n fuelModels[\"NB1\"] = FuelModel(\n 91, \"NB1\", \"Urban, developed [91]\",\n 1.0, 0.10, 8000, 8000,\n 0, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n fuelModels[\"NB2\"] = FuelModel(\n 92, \"NB2\", \"Snow, ice [92]\",\n 1.0, 0.10, 8000, 8000,\n 0, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n fuelModels[\"NB3\"] = FuelModel(\n 93, \"NB3\", \"Agricultural [93]\",\n 1.0, 0.10, 8000, 8000,\n 0, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n \n # Indices 94-95 Reserved for future standard non-burnable models\n \n fuelModels[\"NB4\"] = FuelModel(\n 94, \"NB4\", \"Future standard non-burnable [94]\",\n 1.0, 0.10, 8000, 8000,\n 0, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n fuelModels[\"NB5\"] = FuelModel(\n 95, \"NB5\", \"Future standard non-burnable [95]\",\n 1.0, 0.10, 8000, 8000,\n 0, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n \n # Indices 96-97 Available for custom NB model\n \n fuelModels[\"NB8\"] = FuelModel(\n 98, \"NB8\", \"Open water [98]\",\n 1.0, 0.10, 8000, 8000,\n 0, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n fuelModels[\"NB9\"] = FuelModel(\n 99, \"NB9\", \"Bare ground [99]\",\n 1.0, 0.10, 8000, 8000,\n 0, 0, 0, 0, 0,\n 1500, 1500, 1500,\n False, True)\n\n # Code GRx: Grass\n # Index 100 Available for custom GR model\n f = 2000.0 / 43560.0\n fuelModels[\"GR1\"] = FuelModel(\n 101, \"GR1\", \"Short, sparse, dry climate grass (D)\",\n 0.4, 0.15, 8000, 8000,\n 0.10*f, 0, 0, 0.30*f, 0,\n 2200, 2000, 1500,\n True, True)\n fuelModels[\"GR2\"] = FuelModel(\n 102, \"GR2\", \"Low load, dry climate grass (D)\",\n 1.0, 0.15, 8000, 8000,\n 0.10*f, 0, 0, 1.0*f, 0,\n 2000, 1800, 1500,\n True, True)\n fuelModels[\"GR3\"] = FuelModel(\n 103, \"GR3\", \"Low load, very coarse, humid climate grass (D)\",\n 2.0, 0.30, 8000, 8000,\n 0.10*f, 0.40*f, 0, 1.50*f, 0,\n 1500, 1300, 1500,\n True, True)\n fuelModels[\"GR4\"] = FuelModel(\n 104, \"GR4\", \"Moderate load, dry climate grass (D)\",\n 2.0, 0.15, 8000, 8000,\n 0.25*f, 0, 0, 1.9*f, 0,\n 2000, 1800, 1500,\n True, True)\n fuelModels[\"GR5\"] = FuelModel(\n 105, \"GR5\", \"Low load, humid climate grass (D)\",\n 1.5, 0.40, 8000, 8000,\n 0.40*f, 0.0, 0.0, 2.50*f, 0.0,\n 1800, 1600, 1500,\n True, True)\n fuelModels[\"GR6\"] = FuelModel(\n 106, \"GR6\", \"Moderate load, humid climate grass (D)\",\n 1.5, 0.40, 9000, 9000,\n 0.10*f, 0, 0, 3.4*f, 0,\n 2200, 2000, 1500,\n True, True)\n fuelModels[\"GR7\"] = FuelModel(\n 107, \"GR7\", \"High load, dry climate grass (D)\",\n 3.0, 0.15, 8000, 8000,\n 1.0*f, 0, 0, 5.4*f, 0,\n 2000, 1800, 1500,\n True, True)\n fuelModels[\"GR8\"] = FuelModel(\n 108, \"GR8\", \"High load, very coarse, humid climate grass (D)\",\n 4.0, 0.30, 8000, 8000,\n 0.5*f, 1.0*f, 0, 7.3*f, 0,\n 1500, 1300, 1500,\n True, True)\n fuelModels[\"GR9\"] = FuelModel(\n 109, \"GR9\", \"Very high load, humid climate grass (D)\",\n 5.0, 0.40, 8000, 8000,\n 1.0*f, 1.0*f, 0, 9.0*f, 0,\n 1800, 1600, 1500,\n True, True)\n # 110-112 are reserved for future standard grass models\n # 113-119 are available for custom grass models\n\n # Code 
GSx: Grass and shrub\n # 120 available for custom grass and shrub model\n fuelModels[\"GS1\"] = FuelModel(\n 121, \"GS1\", \"Low load, dry climate grass-shrub (D)\",\n 0.9, 0.15, 8000, 8000,\n 0.2*f, 0, 0, 0.5*f, 0.65*f,\n 2000, 1800, 1800,\n True, True)\n fuelModels[\"GS2\"] = FuelModel(\n 122, \"GS2\", \"Moderate load, dry climate grass-shrub (D)\",\n 1.5, 0.15, 8000, 8000,\n 0.5*f, 0.5*f, 0, 0.6*f, 1.0*f,\n 2000, 1800, 1800,\n True, True)\n fuelModels[\"GS3\"] = FuelModel(\n 123, \"GS3\", \"Moderate load, humid climate grass-shrub (D)\",\n 1.8, 0.40, 8000, 8000,\n 0.3*f, 0.25*f, 0, 1.45*f, 1.25*f,\n 1800, 1600, 1600,\n True, True)\n fuelModels[\"GS4\"] = FuelModel(\n 124, \"GS4\", \"High load, humid climate grass-shrub (D)\",\n 2.1, 0.40, 8000, 8000,\n 1.9*f, 0.3*f, 0.1*f, 3.4*f, 7.1*f,\n 1800, 1600, 1600,\n True, True)\n # 125-130 reserved for future standard grass and shrub models\n # 131-139 available for custom grass and shrub models\n\n # Shrub\n # 140 available for custom shrub model\n fuelModels[\"SH1\"] = FuelModel(\n 141, \"SH1\", \"Low load, dry climate shrub (D)\",\n 1.0, 0.15, 8000, 8000,\n 0.25*f, 0.25*f, 0, 0.15*f, 1.3*f,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"SH2\"] = FuelModel(\n 142, \"SH2\", \"Moderate load, dry climate shrub (S)\",\n 1.0, 0.15, 8000, 8000,\n 1.35*f, 2.4*f, 0.75*f, 0, 3.85*f,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"SH3\"] = FuelModel(\n 143, \"SH3\", \"Moderate load, humid climate shrub (S)\",\n 2.4, 0.40, 8000., 8000.,\n 0.45*f, 3.0*f, 0, 0, 6.2*f,\n 1600, 1800, 1400,\n True, True)\n fuelModels[\"SH4\"] = FuelModel(\n 144, \"SH4\", \"Low load, humid climate timber-shrub (S)\",\n 3.0, 0.30, 8000, 8000,\n 0.85*f, 1.15*f, 0.2*f, 0, 2.55*f,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"SH5\"] = FuelModel(\n 145, \"SH5\", \"High load, dry climate shrub (S)\",\n 6.0, 0.15, 8000, 8000,\n 3.6*f, 2.1*f, 0, 0, 2.9*f,\n 750, 1800, 1600,\n True, True)\n fuelModels[\"SH6\"] = FuelModel(\n 146, \"SH6\", \"Low load, humid climate shrub (S)\",\n 2.0, 0.30, 8000, 8000,\n 2.9*f, 1.45*f, 0, 0, 1.4*f,\n 750, 1800, 1600,\n True, True)\n fuelModels[\"SH7\"] = FuelModel(\n 147, \"SH7\", \"Very high load, dry climate shrub (S)\",\n 6.0, 0.15, 8000, 8000,\n 3.5*f, 5.3*f, 2.2*f, 0, 3.4*f,\n 750, 1800, 1600,\n True, True)\n fuelModels[\"SH8\"] = FuelModel(\n 148, \"SH8\", \"High load, humid climate shrub (S)\",\n 3.0, 0.40, 8000, 8000,\n 2.05*f, 3.4*f, 0.85*f, 0, 4.35*f,\n 750, 1800, 1600,\n True, True)\n fuelModels[\"SH9\"] = FuelModel(\n 149, \"SH9\", \"Very high load, humid climate shrub (D)\",\n 4.4, 0.40, 8000, 8000,\n 4.5*f, 2.45*f, 0, 1.55*f, 7.0*f,\n 750, 1800, 1500,\n True, True)\n # 150-152 reserved for future standard shrub models\n # 153-159 available for custom shrub models\n\n # Timber and understory\n # 160 available for custom timber and understory model\n fuelModels[\"TU1\"] = FuelModel(\n 161, \"TU1\", \"Light load, dry climate timber-grass-shrub (D)\",\n 0.6, 0.20, 8000, 8000,\n 0.2*f, 0.9*f, 1.5*f, 0.2*f, 0.9*f,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TU2\"] = FuelModel(\n 162, \"TU2\", \"Moderate load, humid climate timber-shrub (S)\",\n 1.0, 0.30, 8000, 8000,\n 0.95*f, 1.8*f, 1.25*f, 0, 0.2*f,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TU3\"] = FuelModel(\n 163, \"TU3\", \"Moderate load, humid climate timber-grass-shrub (D)\",\n 1.3, 0.30, 8000, 8000,\n 1.1*f, 0.15*f, 0.25*f, 0.65*f, 1.1*f,\n 1800, 1600, 1400,\n True, True)\n fuelModels[\"TU4\"] = FuelModel(\n 164, \"TU4\", \"Dwarf conifer understory (S)\",\n 0.5, 0.12, 
8000, 8000,\n 4.5*f, 0, 0, 0, 2.0*f,\n 2300, 1800, 2000,\n True, True)\n fuelModels[\"TU5\"] = FuelModel(\n 165, \"TU5\", \"Very high load, dry climate timber-shrub (S)\",\n 1.0, 0.25, 8000, 8000,\n 4.0*f, 4.0*f, 3.0*f, 0, 3.0*f,\n 1500, 1800, 750,\n True, True)\n # 166-170 reserved for future standard timber and understory models\n # 171-179 available for custom timber and understory models\n # Timber and litter\n # 180 available for custom timber and litter models\n fuelModels[\"TL1\"] = FuelModel(\n 181, \"TL1\", \"Low load, compact conifer litter (S)\",\n 0.2, 0.30, 8000, 8000,\n 1.0*f, 2.2*f, 3.6*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TL2\"] = FuelModel(\n 182, \"TL2\", \"Low load broadleaf litter (S)\",\n 0.2, 0.25, 8000, 8000,\n 1.4*f, 2.3*f, 2.2*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TL3\"] = FuelModel(\n 183, \"TL3\", \"Moderate load conifer litter (S)\",\n 0.3, 0.20, 8000, 8000,\n 0.5*f, 2.2*f, 2.8*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TL4\"] = FuelModel(\n 184, \"TL4\", \"Small downed logs (S)\",\n 0.4, 0.25, 8000, 8000,\n 0.5*f, 1.5*f, 4.2*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TL5\"] = FuelModel(\n 185, \"TL5\", \"High load conifer litter (S)\",\n 0.6, 0.25, 8000, 8000,\n 1.15*f, 2.5*f, 4.4*f, 0, 0,\n 2000, 1800, 160,\n True, True)\n fuelModels[\"TL6\"] = FuelModel(\n 186, \"TL6\", \"High load broadleaf litter (S)\",\n 0.3, 0.25, 8000, 8000,\n 2.4*f, 1.2*f, 1.2*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TL7\"] = FuelModel(\n 187, \"TL7\", \"Large downed logs (S)\",\n 0.4, 0.25, 8000, 8000,\n 0.3*f, 1.4*f, 8.1*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"TL8\"] = FuelModel(\n 188, \"TL8\", \"Long-needle litter (S)\",\n 0.3, 0.35, 8000, 8000,\n 5.8*f, 1.4*f, 1.1*f, 0, 0,\n 1800, 1800, 1600,\n True, True)\n fuelModels[\"TL9\"] = FuelModel(\n 189, \"TL9\", \"Very high load broadleaf litter (S)\",\n 0.6, 0.35, 8000, 8000,\n 6.65*f, 3.30*f, 4.15*f, 0, 0,\n 1800, 1800, 1600,\n True, True)\n # 190-192 reserved for future standard timber and litter models\n # 193-199 available for custom timber and litter models\n # Slash and blowdown\n # 200 available for custom slash and blowdown model\n fuelModels[\"SB1\"] = FuelModel(\n 201, \"SB1\", \"Low load activity fuel (S)\",\n 1.0, 0.25, 8000, 8000,\n 1.5*f, 3.0*f, 11.0*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"SB2\"] = FuelModel(\n 202, \"SB2\", \"Moderate load activity or low load blowdown (S)\",\n 1.0, 0.25, 8000, 8000,\n 4.5*f, 4.25*f, 4.0*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"SB3\"] = FuelModel(\n 203, \"SB3\", \"High load activity fuel or moderate load blowdown (S)\",\n 1.2, 0.25, 8000, 8000,\n 5.5*f, 2.75*f, 3.0*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n fuelModels[\"SB4\"] = FuelModel(\n 204, \"SB4\", \"High load blowdown (S)\",\n 2.7, 0.25, 8000, 8000,\n 5.25*f, 3.5*f, 5.25*f, 0, 0,\n 2000, 1800, 1600,\n True, True)\n \n return fuelModels", "title": "" }, { "docid": "0ba14d04319d1cc764dd5c167c56f100", "score": "0.5286508", "text": "def addFacility():\n\tname = getInput(lambda x: x != 'Admin' and x not in facilityList.keys(), \"Enter name of new facility: \")\n\tcapacity = getInput(isPositive, \"Enter capacity: \")\n\t\n\tMedicalFacility(name, None, int(capacity))", "title": "" }, { "docid": "0ff787ddc530d02f3867535c70b2e31b", "score": "0.525206", "text": "def parse_bouquet_design(self, bouquet_design):\n\n match = re.match(self.bouquet_design_pattern, bouquet_design)\n\n if not match:\n 
print(f'{bouquet_design} is not a valid design')\n\n bouquet_name, size, flowers_str, total = match.groups()\n\n flowers = re.findall(self.flowers_pattern, flowers_str)\n flowers_dict = {}\n\n certain_amount = 0\n for amount, name in flowers:\n amount = int(amount)\n flowers_dict[name] = amount\n certain_amount += amount\n\n total = int(total)\n any_amount = total - certain_amount\n\n self.bouquet_designs[size].append({\n 'flowers': Counter(flowers_dict),\n 'total': total,\n 'any': any_amount,\n 'name': bouquet_name,\n })", "title": "" }, { "docid": "a56b3b65feed66b8d5147fda5ec1b31e", "score": "0.5206796", "text": "def design(self, lch, w_dict, th_dict, seg_dict, stack_dict, dum_info):\n # design dummies\n w_tail = w_dict['tail']\n w_in = w_dict['in']\n w_load = w_dict['load']\n th_tail = th_dict['tail']\n th_in = th_dict['in']\n th_load = th_dict['load']\n\n # design main transistors\n seg_tail1 = seg_dict['tail1']\n seg_tail2 = seg_dict['tail2']\n seg_tailcm = seg_dict['tailcm']\n seg_in = seg_dict['in']\n seg_ref = seg_dict['ref']\n seg_diode1 = seg_dict['diode1']\n seg_ngm1 = seg_dict['ngm1']\n seg_diode2 = seg_dict['diode2']\n seg_ngm2 = seg_dict['ngm2']\n\n stack_tail = stack_dict['tail']\n stack_in = stack_dict['in']\n stack_diode = stack_dict['diode']\n stack_ngm = stack_dict['ngm']\n\n self.instances['XTAIL'].design(w=w_tail, l=lch, seg=seg_tail1 * 2, intent=th_tail,\n stack=stack_tail)\n self.instances['XTAIL2L'].design(w=w_tail, l=lch, seg=seg_tail2, intent=th_tail,\n stack=stack_tail)\n self.instances['XTAIL2R'].design(w=w_tail, l=lch, seg=seg_tail2, intent=th_tail,\n stack=stack_tail)\n self.instances['XCML'].design(w=w_tail, l=lch, seg=seg_tailcm, intent=th_tail,\n stack=stack_tail)\n self.instances['XCMR'].design(w=w_tail, l=lch, seg=seg_tailcm, intent=th_tail,\n stack=stack_tail)\n self.instances['XREF'].design(w=w_tail, l=lch, seg=seg_ref, intent=th_tail,\n stack=stack_tail)\n self.instances['XINL'].design(w=w_in, l=lch, seg=seg_in, intent=th_in, stack=stack_in)\n self.instances['XINR'].design(w=w_in, l=lch, seg=seg_in, intent=th_in, stack=stack_in)\n self.instances['XRES'].design(w=w_in, l=lch, seg=seg_ref, intent=th_in, stack=stack_in)\n self.instances['XDIOL'].design(w=w_load, l=lch, seg=seg_diode1, intent=th_load,\n stack=stack_diode)\n self.instances['XDIOR'].design(w=w_load, l=lch, seg=seg_diode1, intent=th_load,\n stack=stack_diode)\n self.instances['XNGML'].design(w=w_load, l=lch, seg=seg_ngm1, intent=th_load,\n stack=stack_ngm)\n self.instances['XNGMR'].design(w=w_load, l=lch, seg=seg_ngm1, intent=th_load,\n stack=stack_ngm)\n self.instances['XDIO2L'].design(w=w_load, l=lch, seg=seg_diode2, intent=th_load,\n stack=stack_diode)\n self.instances['XDIO2R'].design(w=w_load, l=lch, seg=seg_diode2, intent=th_load,\n stack=stack_diode)\n self.instances['XNGM2L'].design(w=w_load, l=lch, seg=seg_ngm2, intent=th_load,\n stack=stack_ngm)\n self.instances['XNGM2R'].design(w=w_load, l=lch, seg=seg_ngm2, intent=th_load,\n stack=stack_ngm)\n\n # design dummies\n self.design_dummy_transistors(dum_info, 'XDUM', 'VDD', 'VSS')", "title": "" }, { "docid": "3c39be295a4a95228c591f714729ad29", "score": "0.520205", "text": "def stoich_flue_gas(self, nw):\n lamb = 1\n n_fuel = 1\n m_fuel = 1 / molar_mass_flow(self.fuel.val) * n_fuel\n m_fuel_fg = m_fuel\n m_co2 = 0\n m_h2o = 0\n molar_masses[self.h2o] = CP.PropsSI('M', self.h2o)\n molar_masses[self.co2] = CP.PropsSI('M', self.co2)\n molar_masses[self.o2] = CP.PropsSI('M', self.o2)\n\n self.fg = {}\n self.fg[self.co2] = 0\n 
self.fg[self.h2o] = 0\n\n for f, x in self.fuel.val.items():\n fl = set(list(self.fuels())).intersection(\n set([a.replace(' ', '') for a in CP.get_aliases(f)]))\n\n if len(fl) == 0:\n if f in self.fg.keys():\n self.fg[f] += x * m_fuel\n else:\n self.fg[f] = x * m_fuel\n else:\n n_fluid = x * m_fuel / molar_masses[f]\n m_fuel_fg -= n_fluid * molar_masses[f]\n structure = fluid_structure(f)\n n = {}\n for el in ['C', 'H', 'O']:\n if el in structure.keys():\n n[el] = structure[el]\n else:\n n[el] = 0\n\n m_co2 += n_fluid * n['C'] * molar_masses[self.co2]\n m_h2o += n_fluid * n['H'] / 2 * molar_masses[self.h2o]\n\n self.fg[self.co2] += m_co2\n self.fg[self.h2o] += m_h2o\n\n n_o2 = (m_co2 / molar_masses[self.co2] +\n 0.5 * m_h2o / molar_masses[self.h2o]) * lamb\n m_air = n_o2 * molar_masses[self.o2] / self.air.val[self.o2]\n\n self.air_min = m_air / m_fuel\n\n for f, x in self.air.val.items():\n if f != self.o2:\n if f in self.fg.keys():\n self.fg[f] += m_air * x\n else:\n self.fg[f] = m_air * x\n\n m_fg = m_fuel + m_air\n\n for f in self.fg.keys():\n self.fg[f] /= m_fg\n\n if not self.path.is_set:\n self.path.val = None\n tespy_fluid(self.fuel_alias.val, self.fuel.val,\n [1000, nw.p_range_SI[1]], nw.T_range_SI,\n path=self.path.val)\n tespy_fluid(self.fuel_alias.val + '_fg', self.fg,\n [1000, nw.p_range_SI[1]], nw.T_range_SI,\n path=self.path.val)\n msg = ('Generated lookup table for ' + self.fuel_alias.val +\n ' and for stoichiometric flue gas at stoichiometric '\n 'combustion chamber ' + self.label + '.')\n logging.debug(msg)\n\n if self.air_alias.val not in ['Air', 'air']:\n tespy_fluid(self.air_alias.val, self.air.val,\n [1000, nw.p_range_SI[1]], nw.T_range_SI,\n path=self.path.val)\n msg = ('Generated lookup table for ' + self.air_alias.val +\n ' at stoichiometric combustion chamber ' + self.label + '.')\n else:\n msg = ('Using CoolProp air at stoichiometric combustion chamber ' +\n self.label + '.')\n logging.debug(msg)", "title": "" }, { "docid": "318dd7356e6b6227392f6d735ba6f03d", "score": "0.51707864", "text": "def fuelProduction(self):\r\n try:\r\n for bid in self.bids:\r\n if bid.plant.source == \"Gas\":\r\n self.gas_fuel += bid.sold_amount * self.hours_for_bidround / bid.plant.efficiency\r\n self.total_gas_production += bid.sold_amount * self.hours_for_bidround\r\n elif bid.plant.source == \"Coal\":\r\n self.coal_fuel += bid.sold_amount * self.hours_for_bidround / bid.plant.efficiency\r\n self.total_coal_production += bid.sold_amount*self.hours_for_bidround\r\n elif bid.plant.source == \"PV\":\r\n self.total_pv_production += bid.sold_amount * self.hours_for_bidround\r\n except Exception as e:\r\n print(\"Exception in fuelProduction()\")\r\n print(e)", "title": "" }, { "docid": "ff358730aa52bfa6675dfc42dc466f56", "score": "0.516541", "text": "def fill_gas_tank(self):\n\t\tprint(\"This car does not need a gas tank!\")", "title": "" }, { "docid": "20c7d8c865f35c319dc225d3491f80ec", "score": "0.5164768", "text": "def create_bouquet(self, design, size):\n \n bouquet = Counter(design['flowers'])\n flowers = self.flowers[size]\n flowers -= bouquet\n\n left = design['any']\n if left:\n for flower_name, amount in flowers.items():\n if amount >= left:\n bouquet[flower_name] += left\n flowers[flower_name] = amount - left\n break\n else:\n bouquet[flower_name] += amount\n flowers[flower_name] = 0\n left -= amount\n\n self.total_count[size] -= design['total']\n\n bouquet_flowers = ''.join(\n [f'{name}{amount}' for name, amount in bouquet.items()],\n )\n bouquet_str = design['name'] + size + 
bouquet_flowers\n\n return bouquet_str", "title": "" }, { "docid": "a6cb6a7348f3f06072c4d3add05a91d6", "score": "0.5162991", "text": "def add_technology(self, name):\n if name in self.technologies:\n logging.debug('Warning: Technology {} was added twice to supply node {}'.format(name, self.name))\n return\n self.technologies[name] = SupplyTechnology(name, self.cost_of_capital, self.scenario)\n self.tech_names.append(name)", "title": "" }, { "docid": "4affdcd64d064bb2bdd05616ff703ec1", "score": "0.51550865", "text": "def add_technology(self, name):\n if name in self.technologies:\n logging.debug(\"Warning, storage tech {} was added twice to node {}\".format(name, self.name))\n return\n self.technologies[name] = StorageTechnology(name, self.cost_of_capital, self.scenario)\n self.tech_names.append(name)", "title": "" }, { "docid": "545bac376e6696710cb0c912734d1716", "score": "0.5147059", "text": "def fill_gas_tank(self):\r\n print(\"This car doesn't need a gas tank!\")", "title": "" }, { "docid": "3d247b02eb9d3ca679fadf6738181db5", "score": "0.5142591", "text": "def __init__(self, name, fuel, fanciness):\n super().__init__(name, fuel)\n self.price_per_km *= fanciness", "title": "" }, { "docid": "e03ff0af14f3cd9139524ca593c9de9d", "score": "0.51414496", "text": "def add_stock(self):\n self.stock = SupplyStock(supply_node=self.name, scenario=self.scenario)\n self.stock.unit = cfg.calculation_energy_unit + \"/\" + cfg.getParam('time_step')", "title": "" }, { "docid": "1e641585a8d590730cd937fe17c420e1", "score": "0.51261896", "text": "def initialise_fluids(self, nw):\n if self.air_alias.val in ['air', 'Air']:\n air = self.air_alias.val\n else:\n air = 'TESPy::' + self.air_alias.val\n flue_gas = 'TESPy::' + self.fuel_alias.val + \"_fg\"\n\n for c in nw.comps.loc[self].o:\n if not c.fluid.val_set[air]:\n c.fluid.val[air] = 0.8\n if not c.fluid.val_set[flue_gas]:\n c.fluid.val[flue_gas] = 0.2", "title": "" }, { "docid": "7d00d428a9f0692471e23a9d5ad41c68", "score": "0.5125038", "text": "def addObjects(self):\n\n self.normal = self.guide.blades[\"blade\"].z * -1\n self.binormal = self.guide.blades[\"blade\"].x\n\n self.WIP = self.options[\"mode\"]\n\n if self.negate and self.settings[\"overrideNegate\"]:\n self.negate = False\n self.n_factor = 1\n\n # FK controllers ------------------------------------\n self.fk_npo = []\n self.fk_ctl = []\n self.tweak_npo = []\n self.tweak_ctl = []\n self.curv_pos = []\n self.upv_curv_pos = []\n self.upv_curv_lvl = []\n self.tangentsCtl = []\n t = self.guide.tra[\"root\"]\n\n parent = self.root\n tOld = False\n fk_ctl = None\n self.previusTag = self.parentCtlTag\n for i, t in enumerate(transform.getChainTransform(self.guide.apos,\n self.normal,\n self.negate)):\n self.dist = vector.getDistance(self.guide.apos[i],\n self.guide.apos[i + 1])\n if self.settings[\"neutralpose\"] or not tOld:\n tnpo = t\n else:\n tnpo = transform.setMatrixPosition(\n tOld,\n transform.getPositionFromMatrix(t))\n\n fk_npo = primitive.addTransform(\n parent, self.getName(\"fk%s_npo\" % i), tnpo)\n\n fk_ctl = self.addCtl(\n fk_npo,\n \"fk%s_ctl\" % i,\n t,\n self.color_fk,\n \"cube\",\n w=self.dist,\n h=self.size * .1,\n d=self.size * .1,\n po=datatypes.Vector(self.dist * .5 * self.n_factor, 0, 0),\n tp=self.previusTag)\n\n tweak_npo = primitive.addTransform(\n parent, self.getName(\"tweak%s_npo\" % i), tnpo)\n\n self.tweak_npo.append(tweak_npo)\n\n tweak_ctl = self.addCtl(\n tweak_npo,\n \"tweak%s_ctl\" % i,\n t,\n self.color_ik,\n \"cube\",\n w=self.size * .15,\n h=self.size * .05,\n 
d=self.size * .15,\n ro=datatypes.Vector([0, 0, 1.5708]),\n tp=self.previusTag)\n\n upv_curv_lvl = primitive.addTransform(\n tweak_ctl, self.getName(\"upv%s_lvl\" % i), t)\n upv_curv_lvl.attr(\"tz\").set(.01)\n\n self.fk_npo.append(fk_npo)\n self.fk_ctl.append(fk_ctl)\n self.tweak_ctl.append(tweak_ctl)\n self.upv_curv_lvl.append(upv_curv_lvl)\n tOld = t\n self.previusTag = fk_ctl\n parent = fk_ctl\n\n # TANGENTS\n tangents = []\n tangents_npo = []\n tangents_upv = []\n\n if not i:\n letters = \"A\"\n else:\n letters = \"AB\"\n\n for tang in letters:\n tang_npo = primitive.addTransform(\n tweak_ctl,\n self.getName(\"tng{}{}_npo\".format(tang, str(i))),\n t)\n\n tangents_npo.append(tang_npo)\n\n tang_ctl = self.addCtl(\n tang_npo,\n \"tng{}{}_ctl\".format(tang, str(i)),\n t,\n self.color_ik,\n \"square\",\n w=self.size * .07,\n h=self.size * .07,\n d=self.size * .07,\n ro=datatypes.Vector([0, 0, 1.5708]),\n tp=self.previusTag)\n\n upv_tang_curv_lvl = primitive.addTransform(\n tang_ctl,\n self.getName(\"tngUpv{}{}_lvl\".format(tang, str(i))),\n t)\n upv_tang_curv_lvl.attr(\"tz\").set(.01)\n tangents_upv.append(upv_tang_curv_lvl)\n\n tangents.append(tang_ctl)\n\n tangents_npo[0].attr(\"tx\").set(self.dist * .3333)\n\n # delete the first B tangent\n if not i:\n self.curv_pos.append(tweak_ctl)\n self.curv_pos.append(tangents[0])\n self.upv_curv_pos.append(upv_curv_lvl)\n self.upv_curv_pos.append(tangents_upv[0])\n else:\n self.curv_pos.append(tangents[1])\n self.curv_pos.append(tweak_ctl)\n self.curv_pos.append(tangents[0])\n self.upv_curv_pos.append(tangents_upv[1])\n self.upv_curv_pos.append(upv_curv_lvl)\n self.upv_curv_pos.append(tangents_upv[0])\n tangents_npo[1].attr(\"tx\").set(self.dist * -.3333)\n\n self.tangentsCtl.extend(tangents)\n\n # ==========\n\n # self.jnt_pos.append([fk_ctl, i, None, False])\n\n # add end control\n tweak_npo = primitive.addTransform(\n fk_ctl, self.getName(\"tweakEnd_npo\"), t)\n tweak_ctl = self.addCtl(\n tweak_npo,\n \"tweakEnd_ctl\",\n t,\n self.color_ik,\n \"cube\",\n w=self.size * .15,\n h=self.size * .05,\n d=self.size * .15,\n ro=datatypes.Vector([0, 0, 1.5708]),\n tp=self.previusTag)\n\n upv_curv_lvl = primitive.addTransform(\n tweak_ctl, self.getName(\"upvEnd_lvl\"), t)\n upv_curv_lvl.attr(\"tz\").set(.01)\n\n if self.negate:\n self.off_dist = self.dist * -1\n else:\n self.off_dist = self.dist\n tweak_npo.attr(\"tx\").set(self.off_dist)\n\n self.tweak_ctl.append(tweak_ctl)\n self.upv_curv_lvl.append(upv_curv_lvl)\n\n # tangent END\n tang_npo = primitive.addTransform(\n tweak_ctl,\n self.getName(\"tngEnd{}_npo\".format(tang, str(i))),\n t)\n\n tang_ctl = self.addCtl(\n tang_npo,\n \"tngEnd{}_ctl\".format(tang, str(i)),\n t,\n self.color_ik,\n \"square\",\n w=self.size * .07,\n h=self.size * .07,\n d=self.size * .07,\n ro=datatypes.Vector([0, 0, 1.5708]),\n tp=self.previusTag)\n\n upv_tang_curv_lvl = primitive.addTransform(\n tang_ctl,\n self.getName(\"tngUpv{}{}_lvl\".format(tang, str(i))),\n t)\n upv_tang_curv_lvl.attr(\"tz\").set(.01)\n tangents_upv.append(upv_tang_curv_lvl)\n\n tang_npo.attr(\"tx\").set(self.dist * -.3333)\n\n self.curv_pos.append(tang_ctl)\n self.curv_pos.append(tweak_ctl)\n self.upv_curv_pos.append(tang_ctl)\n self.upv_curv_pos.append(upv_curv_lvl)\n\n self.tangentsCtl.append(tang_ctl)\n\n # add length offset control if keep length\n # This option will be added only if keep length is active\n if self.settings[\"keepLength\"]:\n self.tweakTip_npo = primitive.addTransform(\n tweak_ctl, self.getName(\"tweakTip_npo\"), t)\n tweak_ctl = 
self.addCtl(\n self.tweakTip_npo,\n \"tweakTip_ctl\",\n t,\n self.color_fk,\n \"square\",\n w=self.size * .1,\n h=self.size * .1,\n d=self.size * .1,\n ro=datatypes.Vector([0, 0, 1.5708]),\n tp=self.previusTag)\n\n upv_curv_lvl = primitive.addTransform(\n tweak_ctl, self.getName(\"upvTip_lvl\"), t)\n upv_curv_lvl.attr(\"tz\").set(.01)\n\n # move to align with the parent\n self.tweakTip_npo.attr(\"tx\").set(0)\n\n self.tweak_ctl.append(tweak_ctl)\n self.upv_curv_lvl.append(upv_curv_lvl)\n\n # add visual reference\n self.line_ref = icon.connection_display_curve(\n self.getName(\"visualRef\"),\n [self.tweakTip_npo.getParent(), tweak_ctl])\n\n # Curves -------------------------------------------\n self.mst_crv = curve.addCnsCurve(self.root,\n self.getName(\"mst_crv\"),\n self.curv_pos,\n 3)\n\n self.slv_crv = curve.addCurve(self.root, self.getName(\"slv_crv\"),\n [datatypes.Vector()] * 32,\n False,\n 3)\n\n self.upv_crv = curve.addCnsCurve(self.root,\n self.getName(\"upv_crv\"),\n self.upv_curv_pos,\n 3)\n\n self.slv_upv_crv = curve.addCurve(self.root,\n self.getName(\"slv_upv_crv\"),\n [datatypes.Vector()] * 32,\n False,\n 3)\n\n self.mst_crv.setAttr(\"template\", True)\n self.slv_crv.setAttr(\"visibility\", False)\n self.upv_crv.setAttr(\"visibility\", False)\n self.slv_upv_crv.setAttr(\"visibility\", False)\n\n # Divisions\n self.div_cns = []\n self.upv_cns = []\n\n tagP = self.parentCtlTag\n self.Extra_tweak_npo = []\n self.Extra_tweak_ctl = []\n\n if self.settings[\"overrideJntNb\"]:\n self.def_number = self.settings[\"jntNb\"]\n else:\n self.def_number = len(self.guide.apos)\n\n for i in range(self.def_number):\n # References\n div_cns = primitive.addTransform(self.root,\n self.getName(\"%s_cns\" % i))\n\n pm.setAttr(div_cns + \".inheritsTransform\", False)\n self.div_cns.append(div_cns)\n\n upv_cns = primitive.addTransform(self.root,\n self.getName(\"%s_upv\" % i))\n\n pm.setAttr(upv_cns + \".inheritsTransform\", False)\n self.upv_cns.append(upv_cns)\n\n t = transform.getTransform(div_cns)\n\n extraTweak_npo = primitive.addTransform(\n div_cns,\n self.getName(\"extraTweak{}_npo\".format(tang, str(i))),\n t)\n self.Extra_tweak_npo.append(extraTweak_npo)\n Extra_tweak_ctl = self.addCtl(extraTweak_npo,\n \"extraTweak%s_ctl\" % i,\n t,\n self.color_fk,\n \"circle\",\n w=self.size * .15,\n d=self.size * .15,\n ro=datatypes.Vector([0, 0, 1.5708]),\n tp=tagP)\n attribute.setKeyableAttributes(Extra_tweak_ctl)\n self.Extra_tweak_ctl.append(Extra_tweak_ctl)\n self.jnt_pos.append([Extra_tweak_ctl, i])", "title": "" }, { "docid": "4122803c711ee914e60ec481e887655d", "score": "0.5112155", "text": "def add_unit_of_measures_0117(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n\n approved_fuel = apps.get_model('api', 'ApprovedFuel')\n unit_of_measure = apps.get_model('api', 'UnitOfMeasure')\n\n uom_l = unit_of_measure.objects.using(db_alias).create(\n name=\"L\",\n effective_date=\"2017-01-01\"\n )\n uom_kg = unit_of_measure.objects.using(db_alias).create(\n name=\"kg\",\n effective_date=\"2017-01-01\"\n )\n uom_kwh = unit_of_measure.objects.using(db_alias).create(\n name=\"kWh\",\n effective_date=\"2017-01-01\"\n )\n uom_m3 = unit_of_measure.objects.using(db_alias).create(\n name=\"m³\",\n effective_date=\"2017-01-01\"\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name__in=[\n \"Biodiesel\", \"Ethanol\", \"HDRD\", \"Natural gas-based gasoline\",\n \"Petroleum-based diesel\", \"Petroleum-based gasoline\",\n \"Propane\", \"Renewable diesel\", \"Renewable gasoline\"\n ]\n 
).update(\n unit_of_measure=uom_l\n )\n approved_fuel.objects.using(db_alias).filter(\n name__in=[\n \"Hydrogen\", \"LNG\"\n ]\n ).update(\n unit_of_measure=uom_kg\n )\n approved_fuel.objects.using(db_alias).filter(\n name=\"Electricity\"\n ).update(\n unit_of_measure=uom_kwh\n )\n approved_fuel.objects.using(db_alias).filter(\n name=\"CNG\"\n ).update(\n unit_of_measure=uom_m3\n )", "title": "" }, { "docid": "3314b2ee6dbd4e64347b914b83ad4c42", "score": "0.50818443", "text": "def determine_features(self, designs, preferredsize, bestgimbal, prefergenerators,\n prefershortengines, prefermonopropellant):\n lowest_mass = True\n lowest_cost = True\n lowest_requirements = True\n best_gimbal_range = True\n shortest_engine = True\n for e in designs:\n if not e.is_best:\n continue\n if lowest_mass and e.get_mass() < self.get_mass():\n lowest_mass = False\n if lowest_cost and e.get_cost() < self.get_cost():\n lowest_cost = False\n if lowest_requirements and e.requiredscience.is_easier_than(self.requiredscience):\n lowest_requirements = False\n if best_gimbal_range and e.mainengine.tvc > self.mainengine.tvc:\n best_gimbal_range = False\n if shortest_engine and e.mainengine.length < self.mainengine.length:\n shortest_engine = False\n if lowest_mass:\n self.features.add(Features.mass)\n if lowest_cost:\n self.features.add(Features.cost)\n if lowest_requirements:\n # this extra condition is false, but it looks strange if requiring 'only'\n # VeryHeavRocketry is presented as something good\n if (techtree.Node.VeryHeavyRocketry not in self.requiredscience.nodes and\n techtree.Node.HypersonicFlight not in self.requiredscience.nodes and\n techtree.Node.IonPropulsion not in self.requiredscience.nodes):\n self.features.add(Features.low_requirements)\n if prefershortengines and shortest_engine:\n self.features.add(Features.short_engine)\n if ((bestgimbal == 1 and self.mainengine.tvc > 0.0) or\n (bestgimbal == 2 and best_gimbal_range)):\n self.features.add(Features.gimbal)\n if prefermonopropellant and self.fueltype is parts.FuelTypes.Monopropellant:\n self.features.add(Features.monopropellant)\n if prefergenerators and self.mainengine.electricity:\n self.features.add(Features.generator)\n if preferredsize is not None and \\\n (self.size is preferredsize or self.size is parts.RadialSize.RadiallyMounted):\n self.features.add(Features.radial_size)", "title": "" }, { "docid": "13f28693e051937cf3c4d5a99e2bf4fa", "score": "0.50801796", "text": "def unit_techs(name, build_time, metal, energy, zetium, volume_coefs,\n points_mult, max_planets_required,\n attack_mod, crit_abs_coef,\n is_space, unlocker, npc=False):\n build_time = mins2sec(build_time)\n\n specs = get_specs(name, is_space)\n\n ### Base technology\n\n num_of_base_levels = max_lvl_for_unit_tech(name, unlocker)\n\n start_mult = unit_tech_start_multiplier(npc)\n end_mult = start_mult if num_of_base_levels == 1 \\\n else unit_tech_end_multiplier(npc)\n\n list = tuple()\n list += unit_tech_base(\n name, build_time, start_mult, end_mult, num_of_base_levels,\n metal, energy, zetium, volume_coefs, points_mult, max_planets_required, npc\n )\n\n for spec_name, func in specs:\n # Pass attack_mod to function, because it expects it!\n max = func(attack_mod, crit_abs_coef) * base_proportion * 100\n coefs = lin_dep_raw(0, max, num_of_base_levels) if unlocker \\\n else lin_dep_raw_with_first_lvl(max, num_of_base_levels)\n\n list += (name, spec_name.lower() + \" mod\") + coefs + (0, 1),\n list += empty_row\n\n ### Specialization technologies\n\n for spec_name, func in 
specs:\n tech_name = name + \" \" + spec_name\n list += unit_tech_base(\n tech_name, build_time, start_mult, end_mult, 10,\n metal, energy, zetium, volume_coefs, points_mult, max_planets_required,\n npc\n )\n\n # Pass attack_mod to function, because it expects it!\n max_value = func(attack_mod, crit_abs_coef) * specialized_proportion * 100\n coefs = lin_dep_raw_with_first_lvl(max_value, 10)\n list += (tech_name, spec_name.lower() + \" mod\") + coefs + (0, 1),\n list += empty_row\n\n return list", "title": "" }, { "docid": "b972d67f55845810dda3970c430b7271", "score": "0.5071852", "text": "def enrich_fuel(enrich, fuel):\n fuel_comp = {}\n # build fuel comp. starting with bounded compound\n for isotope in mats[fuel]['comp']:\n if isotope == 92000:\n # add enriched Uranium to fuel composition\n fuel_comp.update({92235 : mats[fuel]['comp'][92000]*enrich,\n 92238 : mats[fuel]['comp'][92000]*(1-enrich) })\n else:\n fuel_comp.update({isotope : mats[fuel]['comp'][isotope]})\n \n mmO = 32\n mmN = 14.0067\n mmU25 = 235.04\n mmU28 = 238.05\n\n if fuel == 'UN':\n mmfuel = mmN + 1 / ((enrich / mmU25) + ((1-enrich) / mmU28))\n if fuel == 'UO2':\n mmfuel = mmO + 1 / ((enrich / mmU25) + ((1-enrich) / mmU28))\n \n return fuel_comp, mmfuel", "title": "" }, { "docid": "cb1924df1adaa539fe9f8312c33f6485", "score": "0.5071585", "text": "def fly(self):\n if self.fuel > 1:\n self.fuel = self.fuel - 1\n else:\n print('Not enough fuel!')", "title": "" }, { "docid": "76a562111b5675a76dcf0d2b8b9f37cb", "score": "0.5070485", "text": "def fill_gas_tank(self):\n print(\"This car doesn't need a gas tank!\")", "title": "" }, { "docid": "76a562111b5675a76dcf0d2b8b9f37cb", "score": "0.5070485", "text": "def fill_gas_tank(self):\n print(\"This car doesn't need a gas tank!\")", "title": "" }, { "docid": "76a562111b5675a76dcf0d2b8b9f37cb", "score": "0.5070485", "text": "def fill_gas_tank(self):\n print(\"This car doesn't need a gas tank!\")", "title": "" }, { "docid": "336ef0b924a50959bd07d3c4ad4c3ec0", "score": "0.50611955", "text": "def fill_gas_tank(self):\n print(\"No need for gas in an electric car!\")", "title": "" }, { "docid": "3528219963d4da17b5b9d15fd41c6142", "score": "0.5050947", "text": "def _setup(self,\n loglevel=10,\n electronic_conductance='electronic_conductance',\n occupancy='occupancy',\n voltage='voltage',\n **params):\n self._fluid = params['active_fluid']\n try: self._fluid = self.find_object_by_name(self._fluid) \n except: pass #Accept object\n self._X_name = voltage\n self._boundary_conditions_setup()\n g = self._fluid.get_throat_data(prop=electronic_conductance)\n s = self._fluid.get_throat_data(prop=occupancy)\n self._conductance = g*s+g*(-s)/1e3", "title": "" }, { "docid": "beb93b038c111e1bd728757ba264fc08", "score": "0.5036853", "text": "def reaction_balance(self, fluid):\n if isinstance(self, combustion_engine):\n inl = self.inl[2:]\n outl = self.outl[2:]\n else:\n inl = self.inl\n outl = self.outl\n\n ######################################################################\n # molar mass flow for fuel and oxygen\n n_fuel = {}\n n_oxy_stoich = {}\n n_h = 0\n n_c = 0\n for f in self.fuel_list:\n n_fuel[f] = 0\n for i in inl:\n n = i.m.val_SI * i.fluid.val[f] / molar_masses[f]\n n_fuel[f] += n\n n_h += n * self.fuels[f]['H']\n n_c += n * self.fuels[f]['C']\n\n # stoichiometric oxygen requirement for each fuel\n n_oxy_stoich[f] = n_fuel[f] * (self.fuels[f]['H'] / 4 +\n self.fuels[f]['C'])\n\n n_oxygen = 0\n for i in inl:\n n_oxygen += (i.m.val_SI * i.fluid.val[self.o2] /\n molar_masses[self.o2])\n\n 
######################################################################\n # calculate stoichiometric oxygen\n n_oxygen_stoich = n_h / 4 + n_c\n\n ######################################################################\n # calculate lambda if not set\n if not self.lamb.is_set:\n self.lamb.val = n_oxygen / n_oxygen_stoich\n\n ######################################################################\n # calculate excess fuel if lambda is lower than 1\n if self.lamb.val < 1:\n n_h_exc = (n_oxygen_stoich - n_oxygen) * 4\n n_c_exc = (n_oxygen_stoich - n_oxygen)\n else:\n n_h_exc = 0\n n_c_exc = 0\n\n ######################################################################\n # equation for carbondioxide\n if fluid == self.co2:\n dm = (n_c - n_c_exc) * molar_masses[self.co2]\n\n ######################################################################\n # equation for water\n elif fluid == self.h2o:\n dm = (n_h - n_h_exc) / 2 * molar_masses[self.h2o]\n\n ######################################################################\n # equation for oxygen\n elif fluid == self.o2:\n if self.lamb.val < 1:\n dm = -n_oxygen * molar_masses[self.o2]\n else:\n dm = -n_oxygen / self.lamb.val * molar_masses[self.o2]\n\n ######################################################################\n # equation for fuel\n elif fluid in self.fuel_list:\n if self.lamb.val < 1:\n n_fuel_exc = (-(n_oxygen / n_oxygen_stoich - 1) *\n n_oxy_stoich[fluid] *\n (self.fuels[f]['H'] / 4 + self.fuels[f]['C']))\n else:\n n_fuel_exc = 0\n dm = -(n_fuel[fluid] - n_fuel_exc) * molar_masses[fluid]\n\n ######################################################################\n # equation for other fluids\n else:\n dm = 0\n\n res = dm\n for i in inl:\n res += i.fluid.val[fluid] * i.m.val_SI\n for o in outl:\n res -= o.fluid.val[fluid] * o.m.val_SI\n return res", "title": "" }, { "docid": "8fae5051ef38731e65efa832af478585", "score": "0.5029067", "text": "def fill_gas_tank(self):\n print(\"This car doesn't have a gas tank!\")", "title": "" }, { "docid": "2c97738b9001ff6d453fe3ce8a118cb5", "score": "0.5022136", "text": "def add_unit_models(m):\n fs = m.fs_main.fs_stc\n prop_water = m.fs_main.prop_water\n\n # Custom valve function for throttle valve\n def throttle_valve_function(blk):\n blk.Cv.fix(1)\n a = blk.vfa = pyo.Var(initialize=2.8904e-02, doc=\"Valve function parameter a\")\n b = blk.vfb = pyo.Var(initialize=3.3497e-02, doc=\"Valve function parameter b\")\n c = blk.vfc = pyo.Var(initialize=1.4514e-02, doc=\"Valve function parameter c\")\n d = blk.vfd = pyo.Var(initialize=1.4533e-03, doc=\"Valve function parameter d\")\n a.fix()\n b.fix()\n c.fix()\n d.fix()\n o = blk.valve_opening\n\n @blk.Expression(m.fs_main.time)\n def valve_function(bd, t):\n return a * o[t] ** 3 - b * o[t] ** 2 + c * o[t] - d\n\n # Unit model for multistage turbine including throttle valve\n fs.turb = TurbineMultistage(\n dynamic=False,\n property_package=prop_water,\n num_parallel_inlet_stages=1,\n throttle_valve_function=ValveFunctionType.custom,\n throttle_valve_function_callback=throttle_valve_function,\n num_hp=14,\n num_ip=9,\n num_lp=5,\n hp_split_locations=[14],\n ip_split_locations=[6, 9],\n lp_split_locations=[2, 4, 5],\n hp_disconnect=[14],\n hp_split_num_outlets={14: 2},\n ip_split_num_outlets={9: 3},\n )\n\n # Unit model for regulating valve of BFPT (boiler feed pump turbine)\n fs.bfp_turb_valve = Valve(dynamic=False, property_package=prop_water)\n\n # Unit model for main stage of BFPT\n fs.bfp_turb = TurbineStage(dynamic=False, property_package=prop_water)\n\n # 
Unit model for outlet stage of BFPT\n fs.bfp_turb_os = TurbineOutletStage(dynamic=False, property_package=prop_water)\n\n # Unit model for main condenser\n fs.condenser = Condenser(\n dynamic=False,\n shell={\"has_pressure_change\": False, \"property_package\": prop_water},\n tube={\"has_pressure_change\": False, \"property_package\": prop_water},\n )\n\n # Unit model for auxiliary condenser\n fs.aux_condenser = Condenser(\n dynamic=False,\n shell={\"has_pressure_change\": False, \"property_package\": prop_water},\n tube={\"has_pressure_change\": False, \"property_package\": prop_water},\n )\n\n # Unit model for condenser hotwell (hotwell tank modeled separately)\n # Modeled as a mixer of makeup, main, and auxiliary condenser water streams\n # Set momentum_mixing_type to none since aux_condenser outlet pressure\n # is usually not equal to the main condenser outlet pressure\n # We impose the constraints to let the mixed pressure equal to\n # the main condenser pressure and makeup water pressure\n fs.condenser_hotwell = Mixer(\n dynamic=False,\n momentum_mixing_type=MomentumMixingType.none,\n inlet_list=[\"main_condensate\", \"makeup\", \"aux_condensate\"],\n property_package=prop_water,\n )\n\n # Unit model for water control valve between makeup tank and hotwell\n fs.makeup_valve = Valve(\n dynamic=False, has_holdup=False, phase=\"Liq\", property_package=prop_water\n )\n\n # Unit model for hotwell tank with holdup for dynamic model\n # Modeled as a simple tank with constant cross section area and tank level\n fs.hotwell_tank = WaterTank(\n tank_type=\"simple_tank\", has_holdup=True, property_package=prop_water\n )\n\n # Unit model for condensate pump\n fs.cond_pump = WaterPump(dynamic=False, property_package=prop_water)\n\n # Unit model for water control valve after hotwell tank\n # Used to control deaerator level\n fs.cond_valve = Valve(\n dynamic=False, has_holdup=False, phase=\"Liq\", property_package=prop_water\n )\n\n # Unit model for feed water heater 1\n fs.fwh1 = FWH0D(\n has_desuperheat=False,\n has_drain_cooling=False,\n has_drain_mixer=True,\n condense={\n \"tube\": {\"has_pressure_change\": True},\n \"shell\": {\"has_pressure_change\": True},\n \"has_holdup\": True,\n },\n property_package=prop_water,\n )\n\n # Unit model for drain pump of FWH1\n fs.fwh1_drain_pump = WaterPump(dynamic=False, property_package=prop_water)\n\n # Unit model for mixer of FWH1 drain and condensate\n fs.fwh1_drain_return = Mixer(\n dynamic=False,\n inlet_list=[\"feedwater\", \"fwh1_drain\"],\n property_package=prop_water,\n momentum_mixing_type=MomentumMixingType.equality,\n )\n\n # Unit model for feed water heater 2\n fs.fwh2 = FWH0D(\n has_desuperheat=False,\n has_drain_cooling=True,\n has_drain_mixer=True,\n condense={\n \"tube\": {\"has_pressure_change\": True},\n \"shell\": {\"has_pressure_change\": True},\n \"has_holdup\": True,\n },\n cooling={\"dynamic\": False, \"has_holdup\": False},\n property_package=prop_water,\n )\n\n # Unit model for water control valve between drain of fwh2 and fwh1\n fs.fwh2_valve = Valve(\n dynamic=False, has_holdup=False, phase=\"Liq\", property_package=prop_water\n )\n\n # Unit model for feed water heater 3\n fs.fwh3 = FWH0D(\n has_desuperheat=False,\n has_drain_cooling=True,\n has_drain_mixer=False,\n condense={\n \"tube\": {\"has_pressure_change\": True},\n \"shell\": {\"has_pressure_change\": True},\n \"has_holdup\": True,\n },\n cooling={\"dynamic\": False, \"has_holdup\": False},\n property_package=prop_water,\n )\n\n # Unit model for control valve between 
drain of fwh3 and fwh2\n fs.fwh3_valve = Valve(\n dynamic=False, has_holdup=False, phase=\"Liq\", property_package=prop_water\n )\n\n # Unit model for deaerator also known as fwh4\n # Modeled as mixer to mix extracted steam with condensate and drain\n # from fwh5\n # Using MomentumMixingType.equality for momentum_mixing_type\n # deaerator tank modeled separately for holdup in dyyamic model\n fs.fwh4_deair = Mixer(\n dynamic=False,\n momentum_mixing_type=MomentumMixingType.equality,\n inlet_list=[\"steam\", \"drain\", \"feedwater\"],\n property_package=prop_water,\n )\n\n # Unit model for deaerator water tank\n # Modeled as a horizontal cylindrical tank\n fs.da_tank = WaterTank(\n tank_type=\"horizontal_cylindrical_tank\",\n has_holdup=True,\n property_package=prop_water,\n )\n\n # Unit model for electrical feedwater booster pump\n fs.booster = WaterPump(dynamic=False, property_package=prop_water)\n\n # Unit model for main boiler feed water pump driven by steam turbine\n fs.bfp = WaterPump(dynamic=False, property_package=prop_water)\n\n # Unit model for splitter for spray water stream for main attemperator\n fs.split_attemp = Separator(\n dynamic=False, property_package=prop_water, outlet_list=[\"FeedWater\", \"Spray\"]\n )\n\n # Unit model for attemperator spray control valve\n fs.spray_valve = Valve(\n dynamic=False, has_holdup=False, phase=\"Liq\", property_package=prop_water\n )\n\n # Unit model for feed water heater 5\n fs.fwh5 = FWH0D(\n has_desuperheat=True,\n has_drain_cooling=True,\n has_drain_mixer=True,\n condense={\n \"tube\": {\"has_pressure_change\": True},\n \"shell\": {\"has_pressure_change\": True},\n \"has_holdup\": True,\n },\n desuperheat={\"dynamic\": False},\n cooling={\"dynamic\": False, \"has_holdup\": False},\n property_package=prop_water,\n )\n\n # Unit model for water control valve drain of fwh5 and deaerator\n fs.fwh5_valve = Valve(\n dynamic=False, has_holdup=False, phase=\"Liq\", property_package=prop_water\n )\n\n # Unit model for feed water heater 6\n fs.fwh6 = FWH0D(\n has_desuperheat=True,\n has_drain_cooling=True,\n has_drain_mixer=False,\n condense={\n \"tube\": {\"has_pressure_change\": True},\n \"shell\": {\"has_pressure_change\": True},\n \"has_holdup\": True,\n },\n desuperheat={\"dynamic\": False},\n cooling={\"dynamic\": False, \"has_holdup\": False},\n property_package=prop_water,\n )\n\n # Unit model for water control valve between drain of fwh6 and fwh5\n fs.fwh6_valve = Valve(\n dynamic=False, has_holdup=False, phase=\"Liq\", property_package=prop_water\n )\n\n # Important process variables, declared and used in PID controllers\n # Variable for main steam temperature\n fs.temperature_main_steam = pyo.Var(fs.time, initialize=810)\n\n # Constraint to calculate main steam temperature\n @fs.Constraint(fs.time)\n def temperature_main_steam_eqn(b, t):\n return (\n b.temperature_main_steam[t]\n == b.turb.throttle_valve[1].control_volume.properties_in[t].temperature\n )\n\n # Variable for gross power output in MW\n fs.power_output = pyo.Var(fs.time, initialize=300, doc=\"gross power output in MW\")\n\n # Constraint to calculate gross power output\n @fs.Constraint(fs.time)\n def power_output_eqn(b, t):\n return b.power_output[t] == -b.turb.power[t] / 1e6\n\n if m.dynamic is True:\n # Add PID controllers if the flowsheet model is a dynamic model\n # PI controller to control level of fwh2\n fs.fwh2_ctrl = PIDController(\n process_var=fs.fwh2.condense.level,\n manipulated_var=fs.fwh2_valve.valve_opening,\n controller_type=ControllerType.PI,\n 
calculate_initial_integral=False,\n )\n\n # PI controller to control level of fwh3\n fs.fwh3_ctrl = PIDController(\n process_var=fs.fwh3.condense.level,\n manipulated_var=fs.fwh3_valve.valve_opening,\n controller_type=ControllerType.PI,\n calculate_initial_integral=False,\n )\n\n # PI controller to control level of fwh5\n fs.fwh5_ctrl = PIDController(\n process_var=fs.fwh5.condense.level,\n manipulated_var=fs.fwh5_valve.valve_opening,\n controller_type=ControllerType.PI,\n calculate_initial_integral=False,\n )\n\n # PI controller to control level of fwh6\n fs.fwh6_ctrl = PIDController(\n process_var=fs.fwh6.condense.level,\n manipulated_var=fs.fwh6_valve.valve_opening,\n controller_type=ControllerType.PI,\n calculate_initial_integral=False,\n )\n\n # PI controller to control level of deaerator tank\n fs.da_ctrl = PIDController(\n process_var=fs.da_tank.tank_level,\n manipulated_var=fs.cond_valve.valve_opening,\n controller_type=ControllerType.PI,\n calculate_initial_integral=False,\n )\n\n # PI controller to control level of hotwell tank\n fs.makeup_ctrl = PIDController(\n process_var=fs.hotwell_tank.tank_level,\n manipulated_var=fs.makeup_valve.valve_opening,\n controller_type=ControllerType.PI,\n mv_bound_type=ControllerMVBoundType.SMOOTH_BOUND,\n calculate_initial_integral=False,\n )\n\n # PID controller to control main steam temperature\n fs.spray_ctrl = PIDController(\n process_var=fs.temperature_main_steam,\n manipulated_var=fs.spray_valve.valve_opening,\n controller_type=ControllerType.PID,\n mv_bound_type=ControllerMVBoundType.SMOOTH_BOUND,\n calculate_initial_integral=False,\n )\n\n return m", "title": "" }, { "docid": "ef423f73af216d7275e52a7b54e355d7", "score": "0.5018522", "text": "def update_technology_dispatch(self, year):\n self.stock.act_tech = self.stock.technology.loc[:,year].to_frame()\n self.stock.act_rem = (self.stock.remaining.loc[:,year].to_frame().groupby(level=util.ix_incl(self.stock.act_tech,self.stock.act_tech.index.names)).sum())\n self.stock.act_tech_or_rem = self.stock.act_tech.fillna(self.stock.act_rem)\n self.stock.act_tech_or_rem = DfOper.add([self.stock.act_tech_or_rem,util.remove_df_levels(self.stock.dispatch_cap.loc[:,year].to_frame(),'vintage')])", "title": "" }, { "docid": "998cd84bba23c8645a73a206dd061b5b", "score": "0.50183886", "text": "def calculate_input_stock(self):\n levels = self.stock.rollover_group_levels + [self.years] + [self.tech_names]\n names = self.stock.rollover_group_names + ['year'] + ['supply_technology']\n index = pd.MultiIndex.from_product(levels,names=names)\n if self.stock._has_data is True and 'supply_technology' in self.stock.raw_values.index.names:\n #remap to technology stocks\n self.stock.years = self.years\n self.stock.remap(map_from='raw_values', map_to='technology', converted_geography=GeoMapper.supply_primary_geography, fill_timeseries=True, fill_value=np.nan)\n #TODO add to clean timeseries. 
Don't allow filling of timseries before raw values.\n self.stock.technology[self.stock.technology.index.get_level_values('year')<min(self.stock.raw_values.index.get_level_values('year'))] = np.nan\n self.convert_stock('stock', 'technology')\n self.stock.technology = self.stock.technology.reorder_levels(names)\n self.stock.technology = self.stock.technology.reindex(index)\n #if there's case_specific stock data, we must use that to replace reference technology stocks\n if hasattr(self.case_stock,'technology'):\n # if there are levels in the case specific stock that are not in the reference stock, we must remove that level from the case stock\n mismatched_levels = [x for x in self.case_stock.technology.index.names if x not in self.stock.technology.index.names]\n if len(mismatched_levels):\n self.case_stock.technology= util.remove_df_levels(self.case_stock.technology,mismatched_levels)\n #if there are still level mismatches, it means the reference stock has more levels, which returns an error\n if np.any(util.difference_in_df_names(self.case_stock.technology, self.stock.technology,return_bool=True)):\n raise ValueError(\"technology stock indices in node %s do not match input energy system stock data\" %self.name)\n else:\n #if the previous test is passed, we use the reference stock to fill in the Nans of the case stock\n self.case_stock.technology = self.case_stock.technology.reorder_levels(names)\n self.case_stock.technology = self.case_stock.technology.reindex(index)\n self.stock.technology = self.case_stock.technology.fillna(self.stock.technology)\n self.stock.technology = self.stock.technology.unstack('year')\n self.stock.technology.columns = self.stock.technology.columns.droplevel()\n self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'supply_technology',self.tech_names)\n elif hasattr(self.case_stock,'technology'):\n # if there are levels in the case specific stock that are not in the rollover groups, we must remove that level from the case stock\n mismatched_levels = [x for x in self.case_stock.technology.index.names if x not in names]\n if len(mismatched_levels):\n self.case_stock.technology = util.remove_df_levels(self.case_stock.technology,mismatched_levels)\n #if there are still level mismatches, it means the rollover has more levels, which returns an error\n if len([x for x in self.stock.rollover_group_names if x not in self.case_stock.technology.index.names]) :\n raise ValueError(\"technology stock levels in node %s do not match other node input data\" %self.name)\n else:\n #if the previous test is passed we reindex the case stock for unspecified technologies\n self.case_stock.technology = self.case_stock.technology.reorder_levels(names)\n structure_df = pd.DataFrame(1,index=index,columns=['value'])\n self.case_stock.technology = self.case_stock.technology.reindex(index)\n self.stock.technology = self.case_stock.technology\n self.stock.technology = self.stock.technology.unstack('year')\n self.stock.technology.columns = self.stock.technology.columns.droplevel()\n self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'supply_technology',self.tech_names)\n else:\n levels = self.stock.rollover_group_levels + [self.tech_names]\n names = self.stock.rollover_group_names + ['supply_technology']\n index = pd.MultiIndex.from_product(levels,names=names)\n self.stock.technology = util.empty_df(index=index,columns=self.years,fill_value=np.NaN)\n\n if self.stock._has_data is True and 'supply_technology' not in 
self.stock.raw_values.index.names:\n levels = self.stock.rollover_group_levels + [self.years]\n names = self.stock.rollover_group_names + ['year']\n index = pd.MultiIndex.from_product(levels,names=names)\n structure_df = pd.DataFrame(1,index=index,columns=['value'])\n self.stock.remap(map_from='raw_values', map_to='total', converted_geography=GeoMapper.supply_primary_geography, time_index = self.years,fill_timeseries=True, fill_value=np.nan)\n #TODO add to clean timeseries. Don't allow filling of timseries before raw values.\n self.stock.total[self.stock.total.index.get_level_values('year')<min(self.stock.raw_values.index.get_level_values('year'))] = np.nan\n self.stock.total = DfOper.mult([self.stock.total,structure_df],fill_value=np.nan)\n self.convert_stock('stock', 'total')\n if hasattr(self.case_stock,'total'):\n mismatched_levels = [x for x in self.case_stock.total.index.names if x not in names]\n if len(mismatched_levels):\n self.case_stock.total = util.remove_df_levels(self.case_stock.total,mismatched_levels)\n #if there are still level mismatches, it means the reference stock has more levels, which returns an error\n if np.any(util.difference_in_df_names(self.case_stock.total, self.stock.total,return_bool=True)):\n raise ValueError(\"total stock indices in node %s do not match input energy system stock data\" %self.name)\n else:\n #if the previous test is passed, we use the reference stock to fill in the Nans of the case stock\n self.case_stock.total= self.case_stock.total.reorder_levels(names)\n self.stock.total = self.stock.total.reorder_levels(names)\n structure_df = pd.DataFrame(1,index=index,columns=['value'])\n self.case_stock.total = DfOper.mult([self.case_stock.total,structure_df],fill_value=np.nan)\n self.stock.total = DfOper.mult([self.stock.total,structure_df],fill_value=np.nan)\n self.stock.total = self.case_stock.total.fillna(self.stock.total)\n\n self.stock.total = self.stock.total.unstack('year')\n self.stock.total.columns = self.stock.total.columns.droplevel()\n elif hasattr(self.case_stock,'total'):\n levels = self.stock.rollover_group_levels + [self.years]\n names = self.stock.rollover_group_names + ['year']\n index = pd.MultiIndex.from_product(levels,names=names)\n # if there are levels in the case specific stock that are not in the rollover groups, we must remove that level from the case stock\n mismatched_levels = [x for x in self.case_stock.total.index.names if x not in names]\n if len(mismatched_levels):\n self.case_stock.total = util.remove_df_levels(self.case_stock.total,mismatched_levels)\n #if there are still level mismatches, it means the rollover has more levels, which returns an error\n if len([x for x in names if x not in self.case_stock.total.index.names]) :\n raise ValueError(\"total stock levels in node %s do not match other node input data\" %self.name)\n else:\n self.case_stock.total= self.case_stock.total.reorder_levels(names)\n self.case_stock.total = self.case_stock.total.reindex(index)\n self.stock.total = self.case_stock.total\n self.stock.total = self.stock.total.unstack('year')\n self.stock.total.columns = self.stock.total.columns.droplevel()\n else:\n index = pd.MultiIndex.from_product(self.stock.rollover_group_levels,names=self.stock.rollover_group_names)\n self.stock.total = util.empty_df(index=index,columns=self.years,fill_value=np.NaN)\n if self.stock._has_data or hasattr(self.case_stock,'data') and self.case_stock._has_data == True:\n self.stock._has_data = True\n self.max_total()\n if cfg.rio_supply_run and self.name not in 
cfg.rio_excluded_nodes:\n self.stock.technology.loc[:, cfg.supply_years] = self.stock.technology.loc[:, cfg.supply_years].fillna(0)\n self.format_rollover_stocks()", "title": "" }, { "docid": "d04f362ef14e86d3c526cf128b1c32d4", "score": "0.5014126", "text": "def __init__(self, name, fuel, fanciness):\n super().__init__(name, fuel)\n self.price_per_km *= fanciness\n self.current_fare_distance = 0", "title": "" }, { "docid": "58f96e662628372d9a0614b07194af13", "score": "0.5006129", "text": "def add_categories_0116(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n\n approved_fuel = apps.get_model('api', 'ApprovedFuel')\n default_carbon_intensity_category = apps.get_model(\n 'api', 'DefaultCarbonIntensityCategory')\n energy_density_category = apps.get_model(\n 'api', 'EnergyDensityCategory')\n energy_effectiveness_ratio_category = apps.get_model(\n 'api', 'EnergyEffectivenessRatioCategory')\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Biodiesel\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Renewable Fuel in relation to diesel class fuel\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Biodiesel\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel fuel or renewable fuel in relation \"\n \"to diesel class fuel\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"CNG\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"CNG\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"CNG\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"CNG\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Electricity\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Electricity\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Electricity\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Electricity\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Ethanol\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Renewable Fuel in relation to gasoline class fuel\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Ethanol\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"renewable fuel in relation to gasoline class fuel\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"HDRD\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Renewable Fuel in relation to diesel class fuel\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Hydrogenation-derived renewable diesel fuel\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel fuel or renewable fuel in relation \"\n \"to diesel class 
fuel\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Hydrogen\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Hydrogen\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Hydrogen\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Hydrogen\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"LNG\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"LNG\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"LNG\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"LNG\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Natural gas-based gasoline\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Natural gas-based gasoline\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"gasoline produced from biomass\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"renewable fuel in relation to gasoline class fuel\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Petroleum-based diesel\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel fuel or diesel fuel produced from \"\n \"biomass\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel fuel or renewable fuel in relation \"\n \"to diesel class fuel\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Petroleum-based gasoline\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"gasoline produced from biomass\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"renewable fuel in relation to gasoline class fuel\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Propane\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Propane\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Propane\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Propane\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Renewable diesel\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel\"\n ),\n 
energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel fuel or diesel fuel produced from \"\n \"biomass\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based diesel fuel or renewable fuel in relation \"\n \"to diesel class fuel\"\n )\n )\n\n approved_fuel.objects.using(db_alias).filter(\n name=\"Renewable gasoline\"\n ).update(\n default_carbon_intensity_category=default_carbon_intensity_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline\"\n ),\n energy_density_category=energy_density_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"gasoline produced from biomass\"\n ),\n energy_effectiveness_ratio_category=energy_effectiveness_ratio_category\n .objects.using(db_alias).get(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"renewable fuel in relation to gasoline class fuel\"\n )\n )", "title": "" }, { "docid": "85b57ef7b85f54516d9e56610bb0104f", "score": "0.49957237", "text": "def update_technology(self, year):\n self.stock.act_tech_energy = util.DfOper.mult([self.stock.technology.loc[:,year].to_frame(), self.stock.act_energy_capacity_ratio],fill_value=np.nan)\n self.stock.act_tech_or_rem_energy = self.stock.act_tech_energy.fillna(self.stock.act_rem_energy.groupby(level=self.stock.act_tech_energy.index.names).sum())\n self.stock.act_tech_or_rem = util.remove_df_levels(util.DfOper.divi([self.stock.act_tech_or_rem_energy, self.stock.act_energy_capacity_ratio]),'supply_technology')", "title": "" }, { "docid": "5fd420f76875d9e99ab84a88224acfa5", "score": "0.49942207", "text": "def add_concrete_material(self, name, fc, fu, Ec, eu):\n if name in self.materials_dictionary:\n return\n else:\n self.materials_dictionary[name] = MF.Concrete_Material(name, fc, fu, Ec, eu)\n self.Lb.insert(tk.END, name)\n self.controller.update_material_dropdown()", "title": "" }, { "docid": "4109f63c0f39b8f4c8c1dae29e129450", "score": "0.49941784", "text": "def main():\n ss_flowsheet = ss_sim.main()\n flowsheet = Flowsheet(name='MB_Model') \n\n # fill in values of IC parameters from steady state solve\n setICs(flowsheet,ss_flowsheet)\n \n # Fix variables\n setInputs(flowsheet) \n\n # Initialize at steady state\n initialize_ss(flowsheet,ss_flowsheet)\n\n mb = flowsheet.MB_fuel\n\n # Then perturb\n # ^ that function should go in this file, probably\n # input: dict mapping state names (strings) to new values\n\n # this seems like as much work as doing the perturbation...\n # maybe just make a self contained function\n #input_perturbation = { ('Solid_In_x',mb.t) : { ['Fe2O3'] : 0.25 },\n # ('Solid_In_x',mb.t) : { ['Al2O3'] : 0.75 } }\n\n perturbInputs(flowsheet)\n\n # perturb states\n\n # should put this in a dedicated ~intialize~ function\n # that also intelligently initializes the model after perturbation\n mb.eq_d4.deactivate()\n mb.eq_d5.deactivate()\n mb.eq_d8.deactivate()\n mb.eq_d9.deactivate()\n mb.eq_d10.deactivate()\n mb.eq_g7.deactivate()\n mb.eq_g8.deactivate()\n mb.eq_g10.deactivate()\n mb.eq_g11.deactivate()\n mb.eq_g12.deactivate()\n mb.eq_g13.deactivate()\n mb.eq_g14.deactivate()\n mb.eq_g4.deactivate()\n mb.eq_g5.deactivate()\n mb.eq_g2.deactivate()\n mb.Tg_GW.fix(0.0)\n mb.Tg_refractory.fix(0.0)\n mb.Tw_Wamb.fix()\n mb.Tw.fix()\n mb.Nuw.fix()\n mb.Nu_ext.fix()\n mb.hw.fix()\n mb.hext.fix()\n mb.hext2.fix()\n mb.U.fix()\n mb.Uw.fix()\n mb.Pr_ext.fix()\n 
mb.Ra.fix()\n mb.Re.fix()\n ###\n \n\n '''\n ts = time.time() \n # Initialize fuel reactor\n flowsheet.MB_fuel._initialize(outlvl=1,\n optarg={\"tol\" : 1e-8,\n \"max_cpu_time\" : 600,\n \"print_level\" : 5,\n \"halt_on_ampl_error\": 'yes'}) \n '''\n\n # Create a solver\n opt = SolverFactory('ipopt')\n opt.options = {'tol': 1e-8,\n 'linear_solver' : 'ma27',\n 'bound_push': 1e-8,\n 'max_cpu_time': 600,\n 'print_level': 5,\n 'halt_on_ampl_error': 'yes'}\n flowsheet.write('fs.nl')\n\n with open('dyn_fs.txt','w') as f:\n flowsheet.display(ostream=f)\n\n print('Constraints violated pre-solve:')\n for const in flowsheet.MB_fuel.component_objects(Constraint,active=True):\n if not isinstance(const,SimpleConstraint):\n for idx in const:\n if (value(const[idx].body) > value(const[idx].upper) + 1.0e-7) or \\\n (value(const[idx].body) < value(const[idx].lower) - 1.0e-7):\n print(const.name,idx)\n else:\n if (value(const.body) > value(const.upper) + 1.0e-7) or \\\n (value(const.body) < value(const.lower) - 1.0e-7):\n print(const.name)\n print('- - -\\n')\n\n print('Variable bounds violated pre-solve:')\n for var in flowsheet.MB_fuel.component_objects(Var,active=True):\n # don't use IndexedComponent here, variables are always indexed components \n # could also do this by iterating over component_objects(SimpleVar)...?\n if not isinstance(var,SimpleVar):\n for idx in var:\n if not (var[idx].lb is None):\n if (var[idx].value < var[idx].lb - 1.0e-7):\n pdb.set_trace()\n print(var.name,idx)\n if not (var[idx].ub is None):\n if (var[idx].value > var[idx].ub + 1.0e-7):\n pdb.set_trace()\n print(var.name,idx)\n else:\n if (var.value > var.ub + 1.0e-7) or \\\n (var.value < var.lb - 1.0e-7):\n print(var.name)\n print('- - -\\n')\n\n # initialized at steady state, works regardless:\n flowsheet.strip_bounds()\n\n for z in mb.z:\n for t in mb.t:\n mb.Cg[z,'CH4',t].setlb(1e-6)\n\n\n # want a function to integrate the model one step from a specified time point\n # will call for all t\n integrate(flowsheet,0)\n \n #results = opt.solve(flowsheet,tee=True,symbolic_solver_labels=False,\n # keepfiles=False)\n\n #with open('dyn_fs_sol.txt','w') as f:\n # flowsheet.display(ostream=f)\n\n\n \n '''\n \n print(\"\\n\")\n print(\"----------------------------------------------------------\")\n print('Total simulation time: ', value(time.time() - ts), \" s\")\n print(\"----------------------------------------------------------\")\n\n \n # Print some variables \n print_summary_fuel_reactor(flowsheet) \n\n # Plot some variables \n #results_plot_fuel_reactor(flowsheet) \n\n #with open('m_fs.txt','w') as f:\n # flowsheet.display(ostream=f)\n\n # Store the flowsheet \n '''\n return flowsheet", "title": "" }, { "docid": "03b4e4ce6255aeb472f7ce4bfbf83a15", "score": "0.4981729", "text": "def fuel_for_fuel(fuel: int) -> int:\n fuel_needed = 0\n additional_fuel = fuel\n\n while True:\n additional_fuel = launch_fuel_required(additional_fuel)\n if additional_fuel <= 0:\n break\n \n fuel_needed += additional_fuel\n \n return fuel_needed", "title": "" }, { "docid": "54aa4b9cdbe040d77b4b7f7e7035ba0e", "score": "0.49801105", "text": "def testAddTax(self):\n\tdates = pd.date_range(dt.datetime(2015,01,01), dt.datetime(2025,01,01), freq = 'D')\n\ta = range(0,len(dates))\n\ta1 = np.array([100*1.0001**i for i in a])\n\tb = df.DataFrame({'income':a1}, index = dates)\n\ta2 = a1*.05\n\ta3 = a1*.03\n\td = df.DataFrame({'depreciation':a2,'interest':a3}, index = dates)\n\tr = 0.35\n\tkw2 = {'basis':b}\n\tcr = pf.TaxCredit(name = 'ITC', refundable = False, 
kind = 'Fractional', rate = 0.15, **kw2)\n\tc = [cr]\n\n\tkwargs = {}\n\tkwargs['name'] = 'fed1'\n\tkwargs['rate'] = r\n\tt = pf.FractionalTax(**kwargs)\n\tmanager = pf.TaxManager()\n\tmanager.create_tax(kind = 'Fractional', **kwargs)\n\t\n\tself.assertTrue(t.rate == manager.taxes['fed1'].rate)\n\tself.assertTrue(t.name == manager.taxes['fed1'].name)\n\n\n\tmanager= pf.TaxManager()\n\tmanager.create_tax(kind= 'Fractional', **kwargs)\n\tmanager.add_revenue(b, name = 'US')\n\tmanager.add_deductions(d, name = 'US')\t\n\n\tmanager.associate_revenue('fed1', ['US_income'])\n\tmanager.associate_deductions('fed1', ['US_depreciation','US_interest'])\n\tkwargs2 = {}\n\tkwargs2['name'] = 'fed1'\n\tkwargs2['rate'] = r\n\tkwargs2['basis'] = b\n\tkwargs2['deductions'] = d\n\t\n\tt = pf.FractionalTax(**kwargs2)\n\tt.basis.rename(columns = lambda x: \"US_%s\" % x, inplace = True)\n\tt.deductions.rename(columns = lambda x: \"US_%s\" % x, inplace = True)\n\tmanager.build_tax_schedule()\n\tself.assertTrue((t.basis == manager.taxes['fed1'].basis).all().all())\n\tself.assertTrue((t.deductions == manager.taxes['fed1'].deductions).all().all())\n\tself.assertTrue(t.rate == manager.taxes['fed1'].rate)\n\tself.assertTrue(t.name == manager.taxes['fed1'].name)\n\n\t#Test that fractional tax is created correctly\n\t\n\n\t#Test that tax is created correctly with associated income, depreciation columns", "title": "" }, { "docid": "b175bbeb09505ebd751e60f0ceb93f8a", "score": "0.49752685", "text": "def plant(self, *legumes: veg.Vegetable):\n legumes = [self.__get_vegetable(dict) for dict in legumes ]\n\n for leg in legumes:\n\n reste = self.mygarden.add(leg)\n print(f\"Gardener plants {leg.get_nb_graine()} {type(leg).__name__}.s\")\n\n if reste:\n legumes.insert(legumes.index(leg) + 1, reste)\n self.__next_garden()\n continue", "title": "" }, { "docid": "1ea6775487bbea1d069597779ace8883", "score": "0.49738714", "text": "def add_transport_modes_and_approved_fuels_0093(apps, schema_editor):\n\n db_alias = schema_editor.connection.alias\n\n # By retrieving the models via apps.get_model, we get one appropriately\n # versioned for this migration (so this shouldn't ever need to be\n # maintained if fields change)\n transport_mode = apps.get_model('api', 'TransportMode')\n transport_mode.objects.using(db_alias).bulk_create([\n transport_mode(\n name='Truck',\n effective_date='2017-01-01'\n ),\n transport_mode(\n name='Rail',\n effective_date='2017-01-01'\n ),\n transport_mode(\n name='Marine',\n effective_date='2017-01-01'\n ),\n transport_mode(\n name='Adjacent',\n effective_date='2017-01-01'\n ),\n transport_mode(\n name='Pipeline',\n effective_date='2017-01-01'\n )\n ])\n\n approved_fuel = apps.get_model('api', 'ApprovedFuel')\n approved_fuel.objects.using(db_alias).bulk_create(\n map(lambda af: approved_fuel(\n name=af,\n effective_date='2017-01-01'\n ), approved_fuel_names_0093)\n )", "title": "" }, { "docid": "8ef4ee47703a673fc606c21b38202c26", "score": "0.49727106", "text": "def Fuel_type_reefer_DEF(self):\n\n #Input constants\n\n generic_trans_amount = \"$5.00\" # any value that gets an approval from the host\n card_to_use_NGFC = 'NGFC' # using this card to get all commercial prompts\n \n amounts_to_dispense = {\n \"buffer_1\":{\n \"grade\": 3,\n \"value\": \"3\"\n },\n \"buffer_2\":{\n \"grade\": 1,\n \"value\": \"2\"\n }\n } \n\n prompts = {\n \"Additional Products Y/N?\": { \n \"buttons\": [\"Yes\"]\n },\n \"Do you want to print a receipt?\": {\n \"buttons\": [\"No\"]\n },\n \"Select fuel products to dispense\": {\n 
\"buttons\": [\"Reefer fuel\"]\n },\n \"DEF?\": {\n \"buttons\": [\"Yes\"]\n }\n }\n\n messages_to_verify = {\n 'preauth_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'preauth_response_verifications': {\n 'Response Code': '2', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 0.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00', \n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING DENVER CO234W987'\n },\n 'capture_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'capture_response_verifications': {\n 'Response Code': '0', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 20.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING DENVER CO234W987'}\n }\n\n #Output verificacions\n\n receipt_data = [f\"{self.helpers.grade_1_Name_reefer} CA PUMP# 1\",\n \"2.000 GAL @ $1.000/GAL $2.00 99\",\n f\"{self.helpers.grade_3_Name} CA PUMP# 1\",\n \"3.000 GAL @ $1.000/GAL $3.00 99\",\n \"Subtotal = $5.00\",\n \"Tax = $0.00\",\n \"Total = $5.00\",\n \"Change Due = $0.00\",\n \"Credit $5.00\"]\n\n self.helpers.prepay_transaction(\n card=card_to_use_NGFC,\n prepay_amount=generic_trans_amount,\n prompts=prompts,\n amounts_to_dispense=amounts_to_dispense,\n messages_to_verify=messages_to_verify,\n receipt_data=receipt_data\n )", "title": "" }, { "docid": "69ba4745de36cf5acc1b2bf59f3f30b1", "score": "0.4964572", "text": "def add_stock(self):\n self.stock = SupplyStock(supply_node=self.name, scenario=self.scenario)", "title": "" }, { "docid": "0ce187e0acf4d0801e01fc0f66411853", "score": "0.49607182", "text": "def addFleet(self, fleet:Fleet):\n print (\"ADDING FLEET\")\n self.fleet = fleet", "title": "" }, { "docid": "2cc26d2414953d16f8db2422d86f4f7e", "score": "0.4944704", "text": "def __init__(self, name, fuel, reliability):\n super().__init__(name, fuel)\n self.reliability = reliability", "title": "" }, { "docid": "2cc26d2414953d16f8db2422d86f4f7e", "score": "0.4944704", "text": "def __init__(self, name, fuel, reliability):\n super().__init__(name, fuel)\n self.reliability = reliability", "title": "" }, { "docid": "7d01c2e4ccd3531f3eb532b265db7a39", "score": "0.49305838", "text": "def _add_species_basket(self, weight, count):\n self._logger.debug('Add species basket. 
wt: {}, ct: {}'.format(weight, count))\n if self._current_species_comp_item is None:\n self._logger.error('Species ID / Current Species Comp ID is None')\n return\n\n try:\n new_basket = SpeciesCompositionBaskets.create(\n species_comp_item=self._current_species_comp_item,\n basket_weight_itq=weight,\n fish_number_itq=count,\n created_by=ObserverDBUtil.get_current_user_id(),\n # FIELD-2087: Using default dateformat for time display; reformat before sync later\n created_date=ObserverDBUtil.get_arrow_datestr(date_format=ObserverDBUtil.default_dateformat),\n is_fg_tally_local=1 if self._is_fixed_gear else None,\n is_subsample=None\n )\n self._baskets_model.add_basket(new_basket)\n self._logger.info(f'Added basket wt: {weight} ct: {count}')\n\n finally:\n self._calculate_totals()\n self.basketAdded.emit()\n # self.dataExistsChanged.emit()", "title": "" }, { "docid": "36e7bc57113f2a17351d5bfe5b5d9052", "score": "0.49286556", "text": "def __init__(self, name, color, max_size=2, fuel=10):\n Backpack.__init__(self,name,color,max_size)\n self.fuel = fuel", "title": "" }, { "docid": "fbcd41a83b91d34ba26cdcd9f66eae16", "score": "0.49237648", "text": "def __init__(self,name,color,max_size=2,fuel=10):\n Backpack.__init__(self,name,color,max_size)\n self.fuel=fuel", "title": "" }, { "docid": "e8dacbd577ced23b408179ae46706c72", "score": "0.49079818", "text": "def fly(self,fuel):\n if self.fuel-fuel>=0:\n self.fuel-=fuel\n else:\n print(\"Not enough fuel!\")", "title": "" }, { "docid": "bda09fcd2f27d0272d4d2a849788262c", "score": "0.48983583", "text": "def calc_proposed_HF_consumption (self):\n building_data = self.comp_specs['building inventory']\n percent_savings = self.comp_specs['cohort savings percent'] / 100.0\n #~ fuel_type = 'Fuel Oil'\n #~ try:\n fuel_types = ['Fuel Oil',\n 'Natural Gas',\n 'HW District',\n 'Propane',\n 'Biomass']\n for fuel_type in fuel_types:\n fuel_vals = building_data[fuel_type + ' Post']\n idx = fuel_vals.isnull()\n fuel_vals[idx] = building_data[fuel_type][idx] *\\\n (1 - percent_savings)\n\n #~ except TypeError:\n\n\n\n #~ self.comp_specs['building inventory'][\"Fuel Oil Post\"] = \\\n #~ self.comp_specs['building inventory'][\"Fuel Oil\"]*\\\n #~ self.comp_specs['cohort savings multiplier']\n\n\n\n\n # by fuel type\n self.proposed_fuel_Hoil_consumption = \\\n building_data['Fuel Oil Post'].sum()\n self.proposed_fuel_lng_consumption = \\\n building_data['Natural Gas Post'].fillna(0).sum()\n self.proposed_fuel_hr_consumption = \\\n building_data['HW District Post'].fillna(0).sum()\n self.proposed_fuel_propane_consumption = \\\n building_data['Propane Post'].fillna(0).sum()\n self.proposed_fuel_biomass_consumption = \\\n building_data['Biomass Post'].fillna(0).sum()\n\n # mmbtu\n self.proposed_HF_consumption = \\\n self.proposed_fuel_Hoil_consumption / constants.mmbtu_to_gal_HF +\\\n self.proposed_fuel_hr_consumption/constants.mmbtu_to_gal_HF+\\\n self.proposed_fuel_lng_consumption/constants.mmbtu_to_Mcf+\\\n self.proposed_fuel_propane_consumption/constants.mmbtu_to_gal_LP+\\\n self.proposed_fuel_biomass_consumption/constants.mmbtu_to_cords", "title": "" }, { "docid": "0eba9dbb71df991f6d1d8c0e75cdfb7d", "score": "0.48977476", "text": "def vessel_bloodflow(bf_cr, bf_ms, bf_fat, bf_sk, bf_ava_hand, bf_ava_foot):\n xbf = bf_cr + bf_ms + bf_fat + bf_sk\n\n bf_art = np.zeros(17)\n bf_vein = np.zeros(17)\n\n #Head\n bf_art[0] = xbf[0]\n bf_vein[0] = xbf[0]\n\n #Neck (+Head)\n bf_art[1] = xbf[1] + xbf[0]\n bf_vein[1] = xbf[1] + xbf[0]\n\n #Chest\n bf_art[2] = xbf[2]\n bf_vein[2] = 
xbf[2]\n\n #Back\n bf_art[3] = xbf[3]\n bf_vein[3] = xbf[3]\n\n #Pelvis (+Thighs, Legs, Feet, AVA_Feet)\n bf_art[4] = xbf[4] + xbf[11:17].sum() + 2*bf_ava_foot\n bf_vein[4] = xbf[4] + xbf[11:17].sum() + 2*bf_ava_foot\n\n #L.Shoulder (+Arm, Hand, (arteryのみAVA_Hand))\n bf_art[5] = xbf[5:8].sum() + bf_ava_hand\n bf_vein[5] = xbf[5:8].sum()\n\n #L.Arm (+Hand)\n bf_art[6] = xbf[6:8].sum() + bf_ava_hand\n bf_vein[6] = xbf[6:8].sum()\n\n #L.Hand\n bf_art[7] = xbf[7] + bf_ava_hand\n bf_vein[7] = xbf[7]\n\n #R.Shoulder (+Arm, Hand, (arteryのみAVA_Hand))\n bf_art[8] = xbf[8:11].sum() + bf_ava_hand\n bf_vein[8] = xbf[8:11].sum()\n\n #R.Arm (+Hand)\n bf_art[9] = xbf[9:11].sum() + bf_ava_hand\n bf_vein[9] = xbf[9:11].sum()\n\n #R.Hand\n bf_art[10] = xbf[10] + bf_ava_hand\n bf_vein[10] = xbf[10]\n\n #L.Thigh (+Leg, Foot, (arteryのみAVA_Foot))\n bf_art[11] = xbf[11:14].sum() + bf_ava_foot\n bf_vein[11] = xbf[11:14].sum()\n\n #L.Leg (+Foot)\n bf_art[12] = xbf[12:14].sum() + bf_ava_foot\n bf_vein[12] = xbf[12:14].sum()\n\n #L.Foot\n bf_art[13] = xbf[13] + bf_ava_foot\n bf_vein[13] = xbf[13]\n\n #R.Thigh (+Leg, Foot, (arteryのみAVA_Foot))\n bf_art[14] = xbf[14:17].sum() + bf_ava_foot\n bf_vein[14] = xbf[14:17].sum()\n\n #R.Leg (+Foot)\n bf_art[15] = xbf[15:17].sum() + bf_ava_foot\n bf_vein[15] = xbf[15:17].sum()\n\n #R.Foot\n bf_art[16] = xbf[16] + bf_ava_foot\n bf_vein[16] = xbf[16]\n\n return bf_art, bf_vein", "title": "" }, { "docid": "3e17c90bb505b040d69dd15f68570980", "score": "0.4893315", "text": "def fill_gas_tank():\n print(\"This car doesn't need a gas tank!\")", "title": "" }, { "docid": "c892f382168fd2d1e7ff5ba96624b188", "score": "0.48805892", "text": "def calculate_fuel_costs(self):\r\n self.coal_price = self.coal_cost_fixed + self.coal_cost_variable*self.coal_fuel\r\n self.gas_price= self.gas_cost_fixed+self.gas_cost_variable*self.gas_fuel", "title": "" }, { "docid": "81c9bebc65222ad959a0d1280b3e8ac9", "score": "0.48712495", "text": "def berth_invest(self, year, handysize, handymax, panamax):\n\n # report on the status of all berth elements\n self.report_element(Berth, year)\n self.report_element(Quay_wall, year)\n self.report_element(Cyclic_Unloader, year)\n self.report_element(Continuous_Unloader, year)\n self.report_element(Conveyor_Quay, year)\n self.report_element(Storage, year)\n self.report_element(Conveyor_Hinter, year)\n self.report_element(Unloading_station, year)\n if self.debug:\n print('')\n print(' Start analysis:')\n\n # calculate berth occupancy\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n factor, waiting_time_occupancy = self.waiting_time(year)\n if self.debug:\n print(' Berth occupancy planned (@ start of year): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (@ start of year): {}'.format(berth_occupancy_online))\n print(' Crane occupancy planned (@ start of year): {}'.format(crane_occupancy_planned))\n print(' Crane occupancy online (@ start of year): {}'.format(crane_occupancy_online))\n print(' waiting time factor (@ start of year): {}'.format(factor))\n print(' waiting time occupancy (@ start of year): {}'.format(waiting_time_occupancy))\n\n while berth_occupancy_planned > self.allowable_berth_occupancy:\n\n # add a berth when no crane slots are available\n if not (self.check_crane_slot_available()):\n if self.debug:\n print(' *** add Berth to elements')\n berth = Berth(**agribulk_defaults.berth_data)\n berth.year_online = year + 
berth.delivery_time\n self.elements.append(berth)\n\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n if self.debug:\n print(' Berth occupancy planned (after adding berth): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (after adding berth): {}'.format(berth_occupancy_online))\n\n # check if a quay is needed\n berths = len(self.find_elements(Berth))\n quay_walls = len(self.find_elements(Quay_wall))\n if berths > quay_walls:\n length_v = max(agribulk_defaults.handysize_data[\"LOA\"],agribulk_defaults.handymax_data[\"LOA\"],\n agribulk_defaults.panamax_data[\"LOA\"]) # average size\n draft = max(agribulk_defaults.handysize_data[\"draft\"],agribulk_defaults.handymax_data[\"draft\"],\n agribulk_defaults.panamax_data[\"draft\"])\n # apply PIANC 2014:\n # see Ijzermans, 2019 - infrastructure.py line 107 - 111\n if quay_walls == 0:\n # - length when next quay is n = 1\n length = length_v + 2 * 15 # ref: PIANC 2014\n elif quay_walls == 1:\n # - length when next quay is n > 1\n length = 1.1 * berths * (length_v + 15) - (length_v + 2 * 15) # ref: PIANC 2014\n else:\n length = 1.1 * berths * (length_v + 15) - 1.1 * (berths - 1) * (length_v + 15)\n\n # - depth\n quay_wall = Quay_wall(**agribulk_defaults.quay_wall_data)\n depth = np.sum([draft, quay_wall.max_sinkage, quay_wall.wave_motion, quay_wall.safety_margin])\n self.quay_invest(year, length, depth)\n\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n if self.debug:\n print(' Berth occupancy planned (after adding quay): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (after adding quay): {}'.format(berth_occupancy_online))\n\n # check if a crane is needed\n if self.check_crane_slot_available():\n self.crane_invest(year)\n\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n if self.debug:\n print(' Berth occupancy planned (after adding crane): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (after adding crane): {}'.format(berth_occupancy_online))", "title": "" }, { "docid": "be0ed39569cb97ebe043e32bbf2808bc", "score": "0.4857554", "text": "def refill(self, fuel):\n if self.fuel_amount == self.fuel_capacity:\n raise ToMuchFuel('You already have full tank!')\n\n self._fuel_amount += fuel\n\n if self.fuel_amount > self.fuel_capacity:\n self._fuel_amount = self.fuel_capacity", "title": "" }, { "docid": "7c310cb5f6a3542e5fec828b27279159", "score": "0.4856016", "text": "def remove_credit_calculation_fuel_types_0111(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n\n approved_fuel = apps.get_model('api', 'ApprovedFuel')\n\n approved_fuel.objects.using(db_alias).bulk_create([\n approved_fuel(\n name=\"Petroleum-based diesel fuel or renewable fuel in relation \"\n \"to diesel class fuel\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n ),\n approved_fuel(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"renewable fuel in relation to gasoline class fuel\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n ),\n approved_fuel(\n name=\"Petroleum-based diesel fuel or diesel fuel produced from \"\n \"biomass\",\n effective_date=\"2017-01-01\",\n 
credit_calculation_only=True\n ),\n approved_fuel(\n name=\"Petroleum-based gasoline, natural gas-based gasoline or \"\n \"gasoline produced from biomass\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n ),\n approved_fuel(\n name=\"Hydrogenation-derived renewable diesel fuel\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n ),\n approved_fuel(\n name=\"Renewable fuel in relation to diesel class fuel\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n ),\n approved_fuel(\n name=\"Renewable fuel in relation to gasoline class fuel\",\n effective_date=\"2017-01-01\",\n credit_calculation_only=True\n )\n ])", "title": "" }, { "docid": "bb1821a0dbd0c9c1f90f2f8d46a3bf54", "score": "0.48557195", "text": "def test_additional_fuel_requirements():\n assert list(day_1.additional_fuel_requirements(2)) == []\n assert list(day_1.additional_fuel_requirements(654)) == [216, 70, 21, 5]\n assert list(day_1.additional_fuel_requirements(33584)) == [\n 11192, 3728, 1240, 411, 135, 43, 12, 2\n ]", "title": "" }, { "docid": "338e251f70d29585f913f8c0635001e6", "score": "0.48507738", "text": "def initialise_fluids(self, nw):\n N_2 = 0.7655\n O_2 = 0.2345\n\n n_fuel = 1\n lamb = 3\n\n fact_fuel = {}\n sum_fuel = 0\n for f in self.fuel_list:\n fact_fuel[f] = 0\n for i in self.inl:\n fact_fuel[f] += i.fluid.val[f] / 2\n sum_fuel += fact_fuel[f]\n\n for f in self.fuel_list:\n fact_fuel[f] /= sum_fuel\n\n m_co2 = 0\n m_h2o = 0\n m_fuel = 0\n for f in self.fuel_list:\n m_co2 += (n_fuel * self.fuels[f]['C'] * molar_masses[self.co2] *\n fact_fuel[f])\n m_h2o += (n_fuel * self.fuels[f]['H'] /\n 2 * molar_masses[self.h2o] * fact_fuel[f])\n m_fuel += n_fuel * molar_masses[f] * fact_fuel[f]\n\n n_o2 = (m_co2 / molar_masses[self.co2] +\n 0.5 * m_h2o / molar_masses[self.h2o]) * lamb\n\n m_air = n_o2 * molar_masses[self.o2] / O_2\n m_fg = m_air + m_fuel\n\n m_o2 = n_o2 * molar_masses[self.o2] * (1 - 1 / lamb)\n m_n2 = N_2 * m_air\n\n fg = {\n self.n2: m_n2 / m_fg,\n self.co2: m_co2 / m_fg,\n self.o2: m_o2 / m_fg,\n self.h2o: m_h2o / m_fg\n }\n\n o = self.outl[2]\n for fluid, x in o.fluid.val.items():\n if not o.fluid.val_set[fluid] and fluid in fg.keys():\n o.fluid.val[fluid] = fg[fluid]", "title": "" }, { "docid": "85cbeb6d0afecead5d1523bd689337f2", "score": "0.48417068", "text": "def calculate_storage_tank_obj(self, tsm, hk_inp_temp, hk_inp_volfl_m3s, hk_out_temp, chp_inp_temp, chp_out_temp,\n chp_inp_volfl_m3s, gb_inp_temp, gp_out_temp, gb_inp_volfl_m3s, dhw_inp_temp, dhw_out_temp, dhw_inp_volfl_m3s, \n el_heat_status, current_time, t_ambient):\n # inp enters unit & leaves storage tank, out leaves unit & enters storage tank\n # hk - heating circuit, chp - chp unit, gb - gas boiler\n # temp - temperature; volfl - volume flow\n \n # heating circuit - output from the system - water taken out of the storage tank by the heating system: hk_inp_temp\n hk_rho = utils.rho_fluid_water(hk_inp_temp, self.p_atm, 1)\n hk_cp = utils.cp_fluid_water(hk_inp_temp, self.p_atm, 1)\n hk_mstr = hk_rho * hk_inp_volfl_m3s # in kg/s = kg/m3 * m3/s \n # combined power and heat unit - input to the storage tank: chp_out_temp\n chp_rho = utils.rho_fluid_water(chp_out_temp, self.p_atm, 1)\n chp_cp = utils.cp_fluid_water(chp_out_temp, self.p_atm, 1)\n chp_mstr = chp_rho * chp_inp_volfl_m3s # in kg/s = kg/m3 * m3/s \n # gas boiler unit - input to the storage tank\n gb_rho = utils.rho_fluid_water(gp_out_temp, self.p_atm, 1)\n gb_cp = utils.cp_fluid_water(gp_out_temp, self.p_atm, 1)\n gb_mstr = gb_rho * 
gb_inp_volfl_m3s # in kg/s = kg/m3 * m3/s \n\n # mixing of streams from chp and gas boiler units - input to the storage tank\n mix_up_mstr = chp_mstr + gb_mstr\n if(mix_up_mstr != 0.0):\n t_mix_up = (gb_mstr * gb_cp * gp_out_temp + chp_mstr * chp_cp * chp_out_temp) / (mix_up_mstr * hk_cp)\n # iterate out the temperature of the mixed flow of hot water\n mix_cp = utils.cp_fluid_water(t_mix_up, self.p_atm, 1)\n t_1 = (gb_mstr * gb_cp * gp_out_temp + chp_mstr * chp_cp * chp_out_temp) / (mix_up_mstr * mix_cp)\n else:\n t_mix_up = min(gp_out_temp, chp_out_temp)\n mix_cp = utils.cp_fluid_water(t_mix_up, self.p_atm, 1)\n t_1 = t_mix_up\n anz_iter = 0\n while ((abs(t_1 - t_mix_up) > 0.000001) and (anz_iter < 1000)):\n t_mix_up = t_1\n mix_cp = utils.cp_fluid_water(t_mix_up, self.p_atm, 1)\n t_1 = (gb_mstr * gb_cp * gp_out_temp + chp_mstr * chp_cp * chp_out_temp) / (mix_up_mstr * mix_cp)\n anz_iter = anz_iter + 1\n if (anz_iter == 1000):\n print('actual time = {}'.format(self.acttime))\n print('error = {}'.format(t_mix_up - t_1))\n print(' | t in C | mstr in kg/s ')\n print(' mixed | {} | {} '.format(t_mix_up,mix_up_mstr))\n print(' chp | {} | {} '.format(chp_out_temp,chp_mstr))\n print(' gas boi | {} | {} '.format(gp_out_temp,gb_mstr))\n print('---------+----------+-----------------')\n \n # balance of the hot water in the upper part of the storage tank - plus means input of hot water, minus means more hot water being taken out from the tank\n netto_hot_mstr = mix_up_mstr - hk_mstr # in kg/s = kg/m3 * m3/s \n netto_cold_mstr = hk_mstr - mix_up_mstr # in kg/s = kg/m3 * m3/s \n #print('mix_up_mstr = {}; hk_mstr = {}'.format(mix_up_mstr, hk_mstr))\n # netto gain of hot mass in the upper part of storage\n hot_gain = netto_hot_mstr * self.tsm.get_timestep() # in kg = kg/s * s\n hot_volume = hot_gain / utils.rho_fluid_water(t_mix_up, self.p_atm, 1) # in m3 = kg / kg/m3\n # misc number of slices that are fully filled hot water being pumped in m3\n #print('hot vol = {}; sliceVol = {}: mix_mstr = {}; hk_mstr = {};chp_mstr = {}; gb_mstr = {};hk_inp_volfl_m3s = {}, typ ={}'.format(hot_volume, self.slice_volume,mix_up_mstr , hk_mstr, chp_mstr, gb_mstr, hk_inp_volfl_m3s, type(hk_inp_volfl_m3s)))\n anz_slices = int(hot_volume / self.slice_volume)\n # height_slices --> self.effective_heigth\n # hot_volume --> self.effective_volume\n height_slices = hot_volume * self.effective_height / self.effective_volume\n #print('hot_gain = {}; hot_volume = {}; anz_slices = {}; height_slices = {}'.format(hot_gain,hot_volume,anz_slices,height_slices))\n # hk_inp_temp = self.t24\n cold_cp = utils.cp_fluid_water(hk_inp_temp, self.p_atm, 1)\n \n \n \n mix_down_mstr = hk_mstr + netto_hot_mstr # = mix_up_mstr = chp_mstr + gb_mstr\n t_st = self.get_output_temperature()\n cptst = utils.cp_fluid_water(t_st, self.p_atm, 1)\n hk_cp_out = utils.cp_fluid_water(hk_out_temp, self.p_atm, 1)\n if(mix_down_mstr != 0.0):\n t_mix_down = (t_st * cptst * self.mstr_hw + hk_out_temp * hk_cp_out * hk_mstr) / (mix_down_mstr * cptst)\n mix_cp = utils.cp_fluid_water(t_mix_down, self.p_atm, 1)\n t_2 = (t_st * cptst * self.mstr_hw + hk_out_temp * hk_cp_out * hk_mstr) / (mix_down_mstr * mix_cp)\n else:\n # whole flow comes from from heating system - chp and boiler are most likely off\n t_mix_down = hk_out_temp\n mix_cp = utils.cp_fluid_water(t_mix_down, self.p_atm, 1)\n t_2 = t_mix_down\n anz_iter = 0\n while((abs(t_2 - t_mix_down) > 0.000001) and (anz_iter < 1000)):\n t_mix_down = t_2\n mix_cp = utils.cp_fluid_water(t_mix_down, self.p_atm, 1)\n t_2 = (t_st 
* cptst * self.mstr_hw + hk_out_temp * hk_cp_out * hk_mstr) / (mix_down_mstr * mix_cp)\n anz_iter = anz_iter + 1\n if (anz_iter == 1000):\n print('actual time = {}'.format(self.acttime))\n print('error = {}'.format(t_mix_down - t_2))\n print(' | t in C | mstr in kg/s ')\n print(' mixed | {} | {} '.format(t_mix_down,mix_down_mstr))\n print(' st.tank | {} | {} '.format(t_st,mix_down_mstr))\n print(' heating | {} | {} '.format(hk_out_temp,hk_mstr))\n print('---------+----------+-----------------')\n \n \n\n #print('t_mix_up = {}; mstr = {}; wechsel = {}'.format(t_mix_up,netto_cold_mstr,self.slice_volume/(netto_cold_mstr*3.6),))\n\n # heat flow from heating appliances to the storage tank - convective influx with t_1, in W\n Q_c_hot_inp = netto_hot_mstr * mix_cp * (t_mix_up - self.temp[0])\n # heat flow from storage tank to the heating appliances to the heating appliances balanced with the input from heating system - convective influx\n Q_c_cold_inp = netto_hot_mstr * cold_cp * (hk_inp_temp - self.temp[self.nr_calc - 1])\n#\n# ================================================================\n # DOMESTIC HOT WATER - always from 0 to nr_calc-1 - only one flow direction possible\n #tinp_dhw = dhw_inp_temp\n #in_idx_dhw = 0\n #out_idx_dhw = self.nr_calc - 1\n dhw_mstr = dhw_inp_volfl_m3s * utils.rho_fluid_water(dhw_inp_temp, self.p_atm, 1)\n#\n# ----------------------------------------------------------------\n\n [tinp_hw, in_idx_hw, out_idx_hw] = self.calc_inputs_hw(netto_cold_mstr, hk_out_temp, t_mix_up)\n# ----------------------------------------------------------------\n self.calc_init_hydraulik(current_time, netto_cold_mstr, dhw_mstr, tinp_hw, dhw_inp_temp, in_idx_hw, out_idx_hw)\n# ----------------------------------------------------------------\n # domestic hot water\n t_new_dhw = []\n # heating water\n t_new_hw = []\n # \n [t_new_hw, t_new_dhw] = self.solve_hydraulic(t_new_dhw, t_new_hw, tinp_hw, dhw_inp_temp, in_idx_hw, 0, out_idx_hw, self.nr_calc - 1, el_heat_status, t_ambient) \n\n# ----------------------------------------------------------------\n # balances for all elements and heat exchange between them\n\n # \n#\n self.temp_dhw = t_new_dhw\n for elem in self.t_dhw:\n elem.set_average_temp(t_new_dhw[elem.get_id()])\n self.temp = t_new_hw\n\n for elem in self.t_hw:\n elem.set_average_temp(t_new_hw[elem.get_id()])\n #print(' {:8.3f} {} {}; hk_inp_volfl_m3s = {}; gb_inp_volfl_m3s = {}; chp_inp_volfl_m3s = {}; dhw_inp_volfl_m3s = {}'.format(self.acttime, t_new_hw[6], self.t_hw[6].get_mstr(), hk_inp_volfl_m3s, gb_inp_volfl_m3s, chp_inp_volfl_m3s, dhw_inp_volfl_m3s))\n #print(' {:8.3f} {} {} {} {} {}'.format(self.acttime, t_new_dhw[0], self.t_dhw[0].get_mstr(), self.t_dhw[0].get_mstr(), self.t_dhw[0].get_input_temp(), self.t_dhw[0].get_average_temp(), self.t_dhw[0].get_output_temp()))\n#\n# ----------------------------------------------------------------\n # OUTPUTS\n self.mstr_hw = netto_cold_mstr\n self.mstr_dhw = dhw_mstr\n self.out_idx_hw = out_idx_hw\n if(self.mstr_hw !=0.0):\n self.wechsel_zeit_in_h = self.slice_volume * utils.rho_fluid_water(tinp_hw, self.p_atm, 1) / (self.mstr_hw * 3600.0) # h = kg / kg/h = m3 * kg/m3 / (kg/s * 3600 s/h)\n else:\n self.wechsel_zeit_in_h = 0.0", "title": "" }, { "docid": "68d31bf48746d89f05abf28f44abe8d8", "score": "0.48378253", "text": "def setUp(self,):\n\n\t\tself.make_constraint()\n\n\t\tself.set_types()\n\t\tself.feasible = 1 # 1 = infeasible, 0 = feasible - store the value here because we'll reference it layer in a 
closure\n\n\t\tavailable_species = {}\n\t\tfor huc in self.hucs: # prepopulate all the species so we can skip a condition later - don't use all species because it's possible that some won't be present. Only use the species in all the hucs\n\t\t\tfor species in huc.assemblage.all():\n\t\t\t\tavailable_species[species.common_name] = 1\n\n\t\tself.available_species = available_species.keys()\n\t\tlog.debug(\"Total Species in Area: {}\".format(len(available_species.keys())))\n\n\t\tself.eflows_nfe = 0", "title": "" }, { "docid": "c5afd3a65bdacc695828d423a78707bf", "score": "0.48360804", "text": "def upadate_leverage(self):\n \n all_asset = [0 for _ in range(self.n)] \n all_liability = [0 for _ in range(self.n)] \n all_equity = [0 for _ in range(self.n)] \n for bank_index in range(self.n):\n all_asset[bank_index] += self.AssetMat.get_int_asset(bank_index)\n all_asset[bank_index] += self.ext_asset[bank_index]\n \n all_liability[bank_index] += self.AssetMat.get_int_liability(bank_index)\n all_liability[bank_index] += self.ext_asset[bank_index]\n \n equity = all_asset[bank_index] - all_liability[bank_index] \n\n if equity < 0 : \n self.default_set.add(bank_index)\n equity = 0\n # \n \n all_equity[bank_index] = equity\n # \n \n for bank_idx_i in range(self.n):\n for bank_index_j in range(self.n):\n val = min(self.AssetMat.get_mat()[bank_idx_i, bank_index_j] , 0) / equity[bank_index_j]\n self.LevMat.update_element(bank_idx_i, bank_index_j, val)\n #\n #\n pass", "title": "" }, { "docid": "2860a835873ec7e19fad60f2f0cc5aa5", "score": "0.48321462", "text": "def Fuel_type_both_NotDEF(self):\n \n #Input constants\n\n generic_trans_amount = \"$5.00\" # any value that gets an approval from the host \n card_to_use_NGFC = 'NGFC' # using this card to get all commercial prompts\n \n amounts_to_dispense = {\n \"buffer_1\":{\n \"grade\": 1,\n \"value\": \"2\"\n },\n \"buffer_2\":{\n \"grade\": 1,\n \"value\": \"3\"\n }\n \n }\n prompts = {\n \"Additional Products Y/N?\": { \n \"buttons\": [\"Yes\"]\n },\n \"Do you want to print a receipt?\": {\n \"buttons\": [\"No\"]\n },\n \"Select fuel products to dispense\": {\n \"buttons\": [\"Both fuels\"]\n },\n \"DEF?\": {\n \"buttons\": [\"No\"]\n }\n }\n\n #Output verifications\n\n messages_to_verify = {\n 'preauth_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'preauth_response_verifications': {\n 'Response Code': '2', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 0.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING DENVER CO234W987'\n },\n 'capture_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'capture_response_verifications': {\n 'Response Code': '0', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 20.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00',\n f'003 - Wex OTR 
Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING DENVER CO234W987'\n }\n }\n \n receipt_data = [f\"{self.helpers.grade_1_Name} CA PUMP# 1\",\n \"2.000 GAL @ $1.000/GAL $2.00 99\",\n f\"{self.helpers.grade_1_Name_reefer} CA PUMP# 1\",\n \"3.000 GAL @ $1.000/GAL $3.00 99\", \n \"Subtotal = $5.00\",\n \"Tax = $0.00\",\n \"Total = $5.00\",\n \"Change Due = $0.00\",\n \"Credit $5.00\"]\n \n self.helpers.prepay_transaction(\n card=card_to_use_NGFC,\n prepay_amount=generic_trans_amount,\n prompts=prompts,\n amounts_to_dispense=amounts_to_dispense,\n messages_to_verify=messages_to_verify,\n receipt_data=receipt_data\n )", "title": "" }, { "docid": "479e595ee4bd9ccd2266755690e50ae8", "score": "0.48295414", "text": "def calc_baseline_HF_consumption (self):\n HDD_ests = self.comp_specs[\"consumption estimates\"][\"HDD\"]\n gal_sf_ests = self.comp_specs[\"consumption estimates\"][\"Gal/sf\"]\n\n measure = \"Fuel Oil\"\n data = self.comp_specs['building inventory']\n keys = data.T.keys()\n keys = set(keys)\n data[\"HDD ESTS\"] = 0\n data[\"GAL/SF\"] = 0\n for key in keys:\n #~ print key\n try:\n data[\"HDD ESTS\"].ix[key] = \\\n self.cd[\"heating degree days\"]/HDD_ests[key]\n data[\"GAL/SF\"].ix[key] = gal_sf_ests.ix[key]\n except KeyError:\n data[\"HDD ESTS\"].ix[key] = \\\n self.cd[\"heating degree days\"]/HDD_ests.ix['Other']#unitless\n data[\"GAL/SF\"].ix[key] = gal_sf_ests.ix['Other'] # (gal)/sqft\n\n\n idx = data[['Fuel Oil', \"Natural Gas\",\n 'HW District','Propane',\"Biomass\"]].isnull().all(1)\n if not self.cd['natural gas used']:\n data['Fuel Oil'].ix[idx] = data[idx]['Square Feet'] * \\\n data[idx]['HDD ESTS'] * \\\n data[idx]['GAL/SF']\n else:\n data['Natural Gas'].ix[idx] = data[idx]['Square Feet'] * \\\n data[idx]['HDD ESTS'] * \\\n data[idx]['GAL/SF'] / \\\n constants.mmbtu_to_gal_HF *\\\n constants.mmbtu_to_Mcf\n del data[\"GAL/SF\"]\n del data[\"HDD ESTS\"]\n\n\n\n self.baseline_fuel_Hoil_consumption = data['Fuel Oil'].fillna(0).sum()\n self.baseline_fuel_lng_consumption = data['Natural Gas'].fillna(0).sum()\n self.baseline_fuel_hr_consumption = data['HW District'].fillna(0).sum()\n self.baseline_fuel_propane_consumption = data['Propane'].fillna(0).sum()\n self.baseline_fuel_biomass_consumption = data['Biomass'].fillna(0).sum()\n\n\n self.baseline_HF_consumption = \\\n self.baseline_fuel_Hoil_consumption / constants.mmbtu_to_gal_HF +\\\n self.baseline_fuel_hr_consumption/constants.mmbtu_to_gal_HF+\\\n self.baseline_fuel_lng_consumption/constants.mmbtu_to_Mcf+\\\n self.baseline_fuel_propane_consumption/constants.mmbtu_to_gal_LP+\\\n self.baseline_fuel_biomass_consumption/constants.mmbtu_to_cords", "title": "" }, { "docid": "b4429dc783690bfcec9cc7f15d16b710", "score": "0.4825849", "text": "def calculate_storage_tank(self, Timestep, hk_inp_temp, hk_inp_volfl_m3s, hk_out_temp, chp_inp_temp, chp_out_temp,\n chp_inp_volfl_m3s, gb_inp_temp, gp_out_temp, gb_inp_volfl_m3s, dhw_inp_temp, dhw_out_temp, dhw_inp_volfl_m3s):\n # inp enters unit & leaves storage tank, out leaves unit & enters storage tank\n # hk - heating circuit, chp - chp unit, gb - gas boiler\n # temp - temperature; volfl - volume flow\n \n # heating circuit - output from the system - water taken out of the 
storage tank by the heating system\n hk_rho = utils.rho_fluid_water(hk_inp_temp, self.p_atm, 1)\n hk_cp = utils.cp_fluid_water(hk_inp_temp, self.p_atm, 1)\n hk_mstr = hk_rho * hk_inp_volfl_m3s # in kg/s = kg/m3 * m3/s \n # combined power and heat unit - input to the storage tank\n chp_rho = utils.rho_fluid_water(chp_out_temp, self.p_atm, 1)\n chp_cp = utils.cp_fluid_water(chp_out_temp, self.p_atm, 1)\n chp_mstr = chp_rho * chp_inp_volfl_m3s # in kg/s = kg/m3 * m3/s \n # gas boiler unit - input to the storage tank\n gb_rho = utils.rho_fluid_water(gb_inp_temp, self.p_atm, 1)\n gb_cp = utils.cp_fluid_water(gb_inp_temp, self.p_atm, 1)\n gb_mstr = gb_rho * gb_inp_volfl_m3s # in kg/s = kg/m3 * m3/s \n # mixing of streams from chp and gas boiler units - input to the storage tank\n mix_up_mstr = chp_mstr + gb_mstr\n if(mix_up_mstr != 0.0):\n t_mix_up = (gb_mstr * gb_cp * gb_inp_temp + chp_mstr * chp_cp * chp_out_temp) / (mix_up_mstr * hk_cp)\n # iterate out the temperature of the mixed flow of hot water\n mix_cp = utils.cp_fluid_water(t_mix_up, self.p_atm, 1)\n t_1 = (gb_mstr * gb_cp * gb_inp_temp + chp_mstr * chp_cp * chp_out_temp) / (mix_up_mstr * mix_cp)\n else:\n t_mix_up = 0.5 * (gb_inp_temp + chp_out_temp)\n anz_iter = 0\n while ((abs(t_1 - t_mix_up) > 0.000001) and (anz_iter < 1000)):\n t_mix_up = t_1\n mix_cp = utils.cp_fluid_water(t_mix_up, self.p_atm, 1)\n t_1 = (gb_mstr * gb_cp * gb_inp_temp + chp_mstr * chp_cp * chp_out_temp) / (mix_up_mstr * mix_cp)\n anz_iter = anz_iter + 1\n if (anz_iter == 1000):\n print('actual time = {}'.format(self.acttime))\n print('error = {}'.format(t_mix_up - t_1))\n print(' | t in C | mstr in kg/s ')\n print(' mixed | {} | {} '.format(t_mix_up,mix_up_mstr))\n print(' chp | {} | {} '.format(chp_out_temp,chp_mstr))\n print(' gas boi | {} | {} '.format(gb_out_temp,gb_mstr))\n print('---------+----------+-----------------')\n \n # balance of the hot water in the upper part of the storage tank - plus means input of hot water, minus means more hot water being taken out from the tank\n netto_hot_mstr = mix_up_mstr - hk_mstr # in kg/s = kg/m3 * m3/s \n # netto gain of hot mass in the upper part of storage\n hot_gain = netto_hot_mstr * Timestep # in kg = kg/s * s\n hot_volume = hot_gain / utils.rho_fluid_water(t_mix_up, self.p_atm, 1) # in m3 = kg / kg/m3\n # misc number of slices that are fully filled hot water being pumped in m3\n anz_slices = int(hot_volume / self.slice_volume)\n # height_slices --> self.effective_heigth\n # hot_volume --> self.effective_volume\n height_slices = hot_volume * self.effective_height / self.effective_volume\n #print('hot_gain = {}; hot_volume = {}; anz_slices = {}; height_slices = {}'.format(hot_gain,hot_volume,anz_slices,height_slices))\n # hk_out_temp = self.t24\n cold_cp = utils.cp_fluid_water(hk_out_temp, self.p_atm, 1)\n \n\n # heat flow from heating appliances to the storage tank - convective influx with t_1, in W\n Q_c_hot_inp = netto_hot_mstr * mix_cp * (t_mix_up - self.temp[0])\n # heat flow from storage tank to the heating appliances to the heating appliances balanced with the input from heating system - convective influx\n Q_c_cold_inp = netto_hot_mstr * cold_cp * (hk_out_temp - self.temp[self.nr_calc - 1])\n \n if (netto_hot_mstr >= 0.0): # flow direction from above down\n tinp = t_mix_up\n downwards = True\n else: # flow direction from below up\n tinp = hk_out_temp\n downwards = False\n\n tinp_dhw = dhw_inp_temp\n\n t_new_dhw = []\n t_new = []\n # \n # calculate output temperatures\n for ii in range(self.nr_calc - 2):\n 
self.tout_dhw[ii] = 0.5 * (self.temp_dhw[ii] + self.temp_dhw[ii + 1])\n self.tout[ii] = 0.5 * (self.temp[ii] + self.temp[ii + 1])\n self.tout_dhw[self.nr_calc - 1] = self.temp_dhw[self.nr_calc - 1]\n self.tout[self.nr_calc - 1] = self.temp[self.nr_calc - 1]\n# for ii in range(self.nr_calc):\n# if (ii < (self.nr_calc - 1)):\n# self.tout_dhw[ii] = 0.5 * (self.temp_dhw[ii] + self.temp_dhw[ii + 1])\n# else:\n# self.tout_dhw[ii] = self.temp_dhw[ii]\n#\n# if downwards: # flow direction from above down\n# if (ii == 0): # outflow temp - first element is the lowerst one\n# self.tout[ii] = self.temp[ii]\n# else:\n# self.tout[ii] = 0.5 * (self.temp[ii] + self.temp[ii - 1])\n# \n# else: # flow direction from below up\n# if (ii < (self.nr_calc - 1)):\n# self.tout[ii] = 0.5 * (self.temp[ii] + self.temp[ii + 1])\n# else:\n# self.tout[ii] = self.temp[ii]\n\n # dhw calculation\n for ii in range(self.nr_calc):\n # domestic hot water calculation\n if(ii == 0):\n Qdhw_from_below = utils.cp_fluid_water(tinp_dhw, self.p_atm, 1) * netto_hot_mstr * tinp_dhw # kW = kJ/kg/K * kg/s * K\n else:\n Qdhw_from_below = utils.cp_fluid_water(self.tout_dhw[ii - 1], self.p_atm, 1) * netto_hot_mstr * self.tout_dhw[ii - 1] # kW = kJ/kg/K * kg/s * K\n Qdhw_from_above = utils.cp_fluid_water(self.tout_dhw[ii], self.p_atm, 1) * netto_hot_mstr * self.tout_dhw[ii] # kW = kJ/kg/K * kg/s * K\n # interface between domestic hot water and heating water \n Rwall = (1.0 / self.alpha_dhw + 1.0 / self.k_coef + 1.0 / self.alpha_tank) / self.pipe_surf_slice # K/W = m2*K/W / m2\n Q_wall = (self.temp[ii] - self.temp_dhw[ii]) / Rwall # in W = K * W/K\n Q_netto_dhw = Q_wall - Qdhw_from_below + Qdhw_from_above\n \n # heating water calculation\n Q_loss = self.alpha_loss * self.red_out_surf_slice * (self.t_outside - self.temp[ii])\n if(downwards): # flow direction from above down\n if(ii == self.nr_calc):\n Qhk_from_above = utils.cp_fluid_water(tinp, self.p_atm, 1) * netto_hot_mstr * tinp # kW = kJ/kg/K * kg/s * K\n else:\n Qhk_from_above = utils.cp_fluid_water(self.tout[ii], self.p_atm, 1) * netto_hot_mstr * self.tout[ii] # kW = kJ/kg/K * kg/s * K\n Qhk_from_below = utils.cp_fluid_water(self.tout[ii - 1], self.p_atm, 1) * netto_hot_mstr * self.tout[ii - 1] # kW = kJ/kg/K * kg/s * K\n # balance for domestic hot water\n \n Q_netto_heatWater = Q_loss + Qhk_from_above - Qhk_from_below - Q_wall\n twyn = self.temp[0]\n \n else: # flow direction from below up\n if(ii == 0):\n Qhk_from_below = utils.cp_fluid_water(tinp, self.p_atm, 1) * netto_hot_mstr * tinp # kW = kJ/kg/K * kg/s * K\n else:\n Qhk_from_below = utils.cp_fluid_water(self.tout[ii - 1], self.p_atm, 1) * netto_hot_mstr * self.tout[ii - 1] # kW = kJ/kg/K * kg/s * K\n Qhk_from_above = utils.cp_fluid_water(self.tout[ii - 1], self.p_atm, 1) * netto_hot_mstr * self.tout[ii] # kW = kJ/kg/K * kg/s * K\n # balance for domestic hot water\n Q_netto_heatWater = Q_loss + Qhk_from_above - Qhk_from_below - Q_wall\n twyn = self.temp[self.nr_calc - 1]\n \n \n \n # balance for storage tank water\n Q_netto_heatWater = Q_loss + Qhk_from_above - Qhk_from_below - Q_wall\n \n\n t_new_dhw.append(self.temp_dhw[ii] + Timestep * Q_netto_dhw / (\n self.pipe_volume_slice \n * utils.rho_fluid_water(self.temp_dhw[ii], self.p_atm, 1) \n * utils.cp_fluid_water(self.temp_dhw[ii], self.p_atm, 1)))\n t_new.append(self.temp[ii] - Timestep * Q_netto_heatWater / (\n (self.slice_volume - self.pipe_volume_slice) \n * utils.rho_fluid_water(self.temp[ii], self.p_atm, 1)\n * utils.cp_fluid_water(self.temp[ii],self.p_atm, 1)))\n \n 
self.temp = t_new\n self.temp_dhw = t_new_dhw\n \n \n #print('time = {}; ii = {}; Q_netto_dhw ={}; Q_netto_heatWater = {}'.format(self.acttime, ii, Q_netto_dhw, Q_netto_heatWater))\n #print('time = {}; ii = {}; Q_wall ={}; self.temp[99] = {}; self.temp_dhw[99] = {}'.format(self.acttime, ii, Q_wall, self.temp[99], self.temp_dhw[99]))\n #print('time = {}; ii = {}; Q_wall ={}; Q_loss = {};'.format(self.acttime, ii, Q_wall, Q_loss))\n #print('time = {}; ii = {}; Q_c_hot_inp ={}; Q_c_cold_inp = {}; netto_hot_mstr = {}'.format(self.acttime, ii, Q_c_hot_inp, Q_c_cold_inp, netto_hot_mstr))\n #print('time = {}; ii = {}; t_mix_up ={}; hk_out_temp = {}; netto_hot_mstr = {}; self.temp[self.nr_calc - 1] = {}'.format(self.acttime, ii, t_mix_up, hk_out_temp, netto_hot_mstr,self.temp[self.nr_calc - 1]))\n #print('time = {}; ii = {}; self.temp[0] ={}; self.temp[1] = {}; netto_hot_mstr = {}; self.temp_dhw[0] = {}'.format(self.acttime, ii, self.temp[0], self.temp[1], netto_hot_mstr,self.temp_dhw[0]))\n #print('time = {}; ii = {}; n[0] ={}; self.temp[0] = {}; netto_hot_mstr = {}; self.temp_dhw[0] = {}'.format(self.acttime, ii, hot_volume/self.pipe_volume_slice, self.temp[0], netto_hot_mstr,self.temp_dhw[0]))\n #sleep(1)", "title": "" }, { "docid": "379f15768dc6fec75c4f842c47d730de", "score": "0.4823592", "text": "def setUp(self):\n self.system1 = seapy.system.System()\n self.system1.frequency.center = [500.0, 1000.0, 2000.0, 4000.0, 8000.0]\n self.steel = self.system1.addMaterial(\n \"steel\",\n \"MaterialSolid\",\n loss_factor=np.ones(len(self.system1.frequency.center)) * 0.2,\n )", "title": "" }, { "docid": "5b9c030135c2b1eea66402d0b11de258", "score": "0.48214135", "text": "def tank_with_field(tank: Tank.Tank) -> None:\n arr_x, arr_y = Rectangle.points_rectangle(tank.body.rectangle)\n tank_max_x = FIELD_X + FIELD_WIDTH - BORDER_WIDTH - max(arr_x)\n tank_max_y = FIELD_Y + FIELD_HEIGHT - BORDER_WIDTH - max(arr_y)\n tank_min_x = FIELD_X + BORDER_WIDTH + max(arr_x)\n tank_min_y = FIELD_Y + BORDER_WIDTH + max(arr_y)\n touch = contact_with_field_boundaries(tank.body)\n if touch == \"x\":\n new_x = -tank.body.rectangle.centre.x + max(tank_min_x, min(tank.body.rectangle.centre.x, tank_max_x))\n Tank.move_tank(tank, Rectangle.create_point(new_x, 0))\n tank.speed.x = 0\n elif touch == \"y\":\n new_y = -tank.body.rectangle.centre.y + max(tank_min_y, min(tank.body.rectangle.centre.y, tank_max_y))\n Tank.move_tank(tank, Rectangle.create_point(0, new_y))\n tank.speed.y = 0", "title": "" }, { "docid": "79f89dbcc7db479d9243123baf9b8b27", "score": "0.48129183", "text": "def initialise_fluids(self, nw):\n N_2 = 0.7655\n O_2 = 0.2345\n\n n_fuel = 1\n lamb = 3\n\n fact_fuel = {}\n sum_fuel = 0\n for f in self.fuel_list:\n fact_fuel[f] = 0\n for i in self.inl:\n fact_fuel[f] += i.fluid.val[f] / 2\n sum_fuel += fact_fuel[f]\n\n for f in self.fuel_list:\n fact_fuel[f] /= sum_fuel\n\n m_co2 = 0\n m_h2o = 0\n m_fuel = 0\n for f in self.fuel_list:\n m_co2 += (n_fuel * self.fuels[f]['C'] * molar_masses[self.co2] *\n fact_fuel[f])\n m_h2o += (n_fuel * self.fuels[f]['H'] / 2 *\n molar_masses[self.h2o] * fact_fuel[f])\n m_fuel += n_fuel * molar_masses[f] * fact_fuel[f]\n\n n_o2 = (m_co2 / molar_masses[self.co2] +\n 0.5 * m_h2o / molar_masses[self.h2o]) * lamb\n\n m_air = n_o2 * molar_masses[self.o2] / O_2\n m_fg = m_air + m_fuel\n\n m_o2 = n_o2 * molar_masses[self.o2] * (1 - 1 / lamb)\n m_n2 = N_2 * m_air\n\n fg = {\n self.n2: m_n2 / m_fg,\n self.co2: m_co2 / m_fg,\n self.o2: m_o2 / m_fg,\n self.h2o: m_h2o / m_fg\n }\n\n for o in 
self.outl:\n for fluid, x in o.fluid.val.items():\n if not o.fluid.val_set[fluid] and fluid in fg.keys():\n o.fluid.val[fluid] = fg[fluid]", "title": "" }, { "docid": "8dd910b1051fa60aed1b8b0b58895061", "score": "0.47961774", "text": "def prepare_electricity_storage_nodes(self,year,loop):\n self.storage_efficiency_dict = util.recursivedict()\n self.storage_capacity_dict = util.recursivedict()\n for node in [x for x in self.nodes.values() if x.supply_type == 'Storage']:\n for zone in self.dispatch_zones:\n node.calculate_dispatch_coefficients(year, loop)\n if hasattr(node,'active_dispatch_coefficients'):\n storage_node_location = list(set(node.active_dispatch_coefficients.index.get_level_values('supply_node')))\n if len(storage_node_location)>1:\n raise ValueError('StorageNode %s has technologies with two different supply node locations' %node.name)\n if storage_node_location[0] in self.electricity_nodes[zone]+[zone]:\n capacity = node.stock.values.loc[:,year].to_frame().groupby(level=[GeoMapper.supply_primary_geography,'supply_technology']).sum()\n efficiency = copy.deepcopy(node.active_dispatch_coefficients)\n if 'demand_sector' not in capacity.index.names and zone == self.distribution_node_name:\n capacity = pd.concat([capacity]*len(self.demand_sectors), keys=self.demand_sectors, names=['demand_sector']) / len(self.demand_sectors)\n efficiency = pd.concat([efficiency]*len(self.demand_sectors), keys=self.demand_sectors, names=['demand_sector'])\n if GeoMapper.dispatch_geography != GeoMapper.supply_primary_geography:\n geography_map_key = node.geography_map_key if hasattr(node, 'geography_map_key') and node.geography_map_key is not None else GeoMapper.default_geography_map_key\n map_df = GeoMapper.get_instance().map_df(GeoMapper.supply_primary_geography,GeoMapper.dispatch_geography, normalize_as='total', map_key=geography_map_key, eliminate_zeros=False)\n capacity = DfOper.mult([capacity, map_df],fill_value=0.0)\n efficiency = DfOper.divi([util.remove_df_levels(DfOper.mult([efficiency, capacity]), GeoMapper.supply_primary_geography),util.remove_df_levels(capacity,GeoMapper.supply_primary_geography)]).fillna(0)\n capacity = util.remove_df_levels(capacity, GeoMapper.supply_primary_geography)\n # creates an empty database to fill with duration values, which are a technology parameter\n duration = copy.deepcopy(capacity) * 0\n duration = duration.sort_index()\n for tech in node.technologies.values():\n for geography in GeoMapper.supply_geographies:\n tech_indexer = util.level_specific_indexer(duration,['supply_technology',GeoMapper.supply_primary_geography], [tech.name,geography])\n year_indexer = util.level_specific_indexer(tech.duration.values,['year',GeoMapper.supply_primary_geography],[year,geography])\n duration.loc[tech_indexer,:] = tech.duration.values.loc[year_indexer,:].values[0]\n efficiency = util.remove_df_levels(efficiency,'supply_node') \n if zone == self.distribution_node_name:\n indexer = util.level_specific_indexer(self.dispatch_feeder_allocation, 'year', year)\n capacity = util.DfOper.mult([capacity, self.dispatch_feeder_allocation.loc[indexer, ]])\n duration = DfOper.divi([util.remove_df_levels(DfOper.mult([duration, capacity]),'demand_sector'),util.remove_df_levels(capacity,'demand_sector')]).fillna(0)\n efficiency = DfOper.divi([util.remove_df_levels(DfOper.mult([efficiency, capacity]),'demand_sector'),util.remove_df_levels(capacity,'demand_sector')]).fillna(1)\n capacity = util.remove_df_levels(capacity,'demand_sector')\n for geography in GeoMapper.dispatch_geographies:\n 
for dispatch_feeder in self.dispatch_feeders:\n for technology in node.technologies.keys():\n indexer = util.level_specific_indexer(efficiency, [GeoMapper.dispatch_geography, 'dispatch_feeder','supply_technology'],[geography,dispatch_feeder,technology])\n self.storage_efficiency_dict[geography][zone][dispatch_feeder][technology] = efficiency.loc[indexer,:].values[0][0]\n indexer = util.level_specific_indexer(capacity, [GeoMapper.dispatch_geography, 'dispatch_feeder','supply_technology'],[geography,dispatch_feeder,technology])\n self.storage_capacity_dict['power'][geography][zone][dispatch_feeder][technology] = capacity.loc[indexer,:].values[0][0]\n indexer = util.level_specific_indexer(duration, [GeoMapper.dispatch_geography, 'dispatch_feeder','supply_technology'],[geography,dispatch_feeder,technology])\n self.storage_capacity_dict['duration'][geography][zone][dispatch_feeder][technology] = duration.loc[indexer,:].values[0][0]\n else:\n for geography in GeoMapper.dispatch_geographies:\n for technology in node.technologies.keys():\n indexer = util.level_specific_indexer(capacity, [GeoMapper.dispatch_geography, 'supply_technology'],[geography,technology])\n tech_capacity = self.ensure_frame(util.remove_df_levels(capacity.loc[indexer,:], 'demand_sector'))\n indexer = util.level_specific_indexer(duration, [GeoMapper.dispatch_geography,'supply_technology'],[geography,technology])\n tech_duration = self.ensure_frame(util.remove_df_levels(duration.loc[indexer,:], 'demand_sector'))\n indexer = util.level_specific_indexer(efficiency, [GeoMapper.dispatch_geography, 'supply_technology'],[geography,technology])\n tech_efficiency = self.ensure_frame(util.remove_df_levels(efficiency.loc[indexer,:], 'demand_sector'))\n if tech_capacity.values[0][0] == 0:\n continue\n else:\n self.storage_capacity_dict['power'][geography][zone]['bulk'][technology] = tech_capacity.values[0][0]\n self.storage_capacity_dict['duration'][geography][zone]['bulk'][technology] = tech_duration.values[0][0]\n self.storage_efficiency_dict[geography][zone]['bulk'][technology] = tech_efficiency.values[0][0]", "title": "" }, { "docid": "e9ff8083d62b246131cc5e5dee8c4849", "score": "0.4795844", "text": "def reaction_balance(self, fluid):\n if self.air_alias.val in ['air', 'Air']:\n air = self.air_alias.val\n else:\n air = 'TESPy::' + self.air_alias.val\n fuel = 'TESPy::' + self.fuel_alias.val\n flue_gas = 'TESPy::' + self.fuel_alias.val + '_fg'\n\n ######################################################################\n # calculate fuel and air mass flow\n m_fuel = 0\n for i in self.inl:\n m_fuel += i.m.val_SI * i.fluid.val[fuel]\n\n m_air = 0\n for i in self.inl:\n m_air += i.m.val_SI * i.fluid.val[air]\n\n m_air_min = self.air_min * m_fuel\n\n ######################################################################\n # calculate lambda if not specified\n if not self.lamb.is_set:\n self.lamb.val = m_air / (self.air_min * m_fuel)\n\n ######################################################################\n # calculate excess fuel if lambda is smaller than 1\n m_fuel_exc = 0\n if self.lamb.val < 1:\n m_fuel_exc = m_fuel - m_air / (self.lamb.val * self.air_min)\n\n ######################################################################\n # equation for air\n if fluid == air:\n if self.lamb.val >= 1:\n dm = -m_air_min\n else:\n dm = -m_air\n\n ######################################################################\n # equation for fuel\n elif fluid == fuel:\n dm = -(m_fuel - m_fuel_exc)\n\n 
######################################################################\n # equation for flue gas\n elif fluid == flue_gas:\n dm = m_air_min + m_fuel\n\n ######################################################################\n # equation for other components\n else:\n dm = 0\n\n res = dm\n for i in self.inl:\n res += i.fluid.val[fluid] * i.m.val_SI\n for o in self.outl:\n res -= o.fluid.val[fluid] * o.m.val_SI\n return res", "title": "" }, { "docid": "7f67f994547c8bc87f79d72fd2f745ef", "score": "0.47861287", "text": "def module_fuel_required(mass: int) -> int:\n module_fuel = launch_fuel_required(mass)\n fuel_fuel = fuel_for_fuel(module_fuel)\n\n return module_fuel + fuel_fuel", "title": "" }, { "docid": "80ffa279879c68d92ea3b4f58763a5de", "score": "0.4780072", "text": "def add_steel_material(self, name, fy, fu, Es, ey, eu):\n if name in self.materials_dictionary:\n return\n else:\n self.materials_dictionary[name] = MF.Steel_Material(name, fy, fu, Es, ey, eu)\n self.Lb.insert(tk.END, name)\n self.controller.update_material_dropdown()", "title": "" }, { "docid": "a031256b7bf2f6dda4f6263a6f46c089", "score": "0.47720638", "text": "def Fuel_type_both_DEF(self):\n \n #Input constants\n\n generic_trans_amount = \"$5.00\" # any value that gets an approval from the host\n card_to_use_NGFC = 'NGFC' # using this card to get all commercial prompts\n \n amounts_to_dispense = {\n \"buffer_1\":{\n \"grade\": 1,\n \"value\": \"2\"\n },\n \"buffer_2\":{\n \"grade\": 2,\n \"value\": \"1\"\n },\n \"buffer_3\":{\n \"grade\": 1,\n \"value\": \"2\"\n }\n }\n \n prompts = {\n \"Additional Products Y/N?\": { \n \"buttons\": [\"Yes\"]\n },\n \"Do you want to print a receipt?\": {\n \"buttons\": [\"No\"]\n },\n \"Select fuel products to dispense\": {\n \"buttons\": [\"Both fuels\"]\n },\n \"DEF?\": {\n \"buttons\": [\"Yes\"]\n }\n }\n #Output verifications\n\n messages_to_verify = {\n 'preauth_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'preauth_response_verifications': {\n 'Response Code': '2', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 0.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING DENVER CO234W987'\n },\n 'capture_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'capture_response_verifications': {\n 'Response Code': '0', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 20.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING 
DENVER CO234W987'\n }\n }\n\n receipt_data = [f\"{self.helpers.grade_1_Name} CA PUMP# 1\",\n \"2.000 GAL @ $1.000/GAL $2.00 99\",\n f\"{self.helpers.grade_2_Name} CA PUMP# 1\",\n \"1.000 GAL @ $1.000/GAL $1.00 99\",\n f\"{self.helpers.grade_1_Name_reefer} CA PUMP# 1\",\n \"2.000 GAL @ $1.000/GAL $2.00 99\",\n \"Subtotal = $5.00\",\n \"Tax = $0.00\",\n \"Total = $5.00\",\n \"Change Due = $0.00\",\n \"Credit $5.00\"]\n \n self.helpers.prepay_transaction(\n card=card_to_use_NGFC,\n prepay_amount=generic_trans_amount,\n prompts=prompts,\n amounts_to_dispense=amounts_to_dispense,\n messages_to_verify=messages_to_verify,\n receipt_data=receipt_data\n )", "title": "" }, { "docid": "134cb08469e6040ec67750aa4bb34996", "score": "0.4766695", "text": "def tank_up(self, amount):\r\n if amount <= 0:\r\n raise FuelError(amount)\r\n if self.fuel + amount > self.tank_volume:\r\n raise TankUpError(self.tank_volume, self.fuel, amount)\r\n self.fuel += amount\r\n print(\"The {} has been fueled up.\".format(self.__class__.__name__.lower()))", "title": "" }, { "docid": "c9b168dcd959437cb4231bbcaf25cc62", "score": "0.47617945", "text": "def design(self, clk_inv_params, drv_tinv_params, ff_inv_params, fb_tinv_params,\n dum_info, debug, **kwargs):\n self.instances['INV0'].design(**ff_inv_params)\n self.instances['INV1'].design(**ff_inv_params)\n self.instances['INV2'].design(**clk_inv_params)\n self.instances['INV3'].design(**clk_inv_params)\n\n self.instances['TINV0'].design(**drv_tinv_params)\n self.instances['TINV1'].design(**drv_tinv_params)\n self.instances['TINV2'].design(**fb_tinv_params)\n self.instances['TINV3'].design(**fb_tinv_params)\n\n self.design_dummy_transistors(dum_info, 'XDUM', 'VDD', 'VSS')\n\n if debug is False:\n self.remove_pin('mem1')\n self.remove_pin('mem2')\n self.remove_pin('latch')\n self.remove_pin('iclk')\n self.remove_pin('iclkb')", "title": "" }, { "docid": "af6bc6c18a7c8095162973f603f44fac", "score": "0.4755927", "text": "def __init__(self, species, qty):\n\n # user super method to use superclass init on species and qty\n super(DomesticMelonOrder, self).__init__(species, qty, 'domestic', 0.08, None)", "title": "" }, { "docid": "21cc92a9c234e33cec66c176148f1934", "score": "0.47447366", "text": "def setup_gratingcoupler_3d_base_project(self, fdtd): \n \n ## CLEAR SESSION\n #fdtd.clear()\n fdtd.newproject()\n \n ## Start adding base components\n fdtd.redrawoff()\n\n ## Set FDTD properties\n props = OrderedDict([\n (\"dimension\", \"3D\"),\n (\"x min\", self.x_min),\n (\"x max\", self.x_max), \n (\"y min\", self.y_min), \n (\"y max\", self.y_max), \n (\"z min\", self.z_min), \n (\"z max\", self.z_max), \n (\"background material\", self.mat_bg),\n (\"y min bc\", \"anti-symmetric\"),\n (\"simulation time\", 5000e-15), \n (\"auto shutoff min\", 1e-6),\n (\"mesh refinement\", \"conformal variant 0\"),\n (\"meshing tolerance\", 1.2e-15), \n (\"use legacy conformal interface detection\", False)\n ])\n\n if self.mat_bg == \"<Object defined dielectric>\":\n props[\"index\"] = self.n_bg\n\n if self.optim:\n props[\"mesh refinement\"] = \"precise volume average\"\n props[\"meshing refinement\"] = 11\n\n if self.pol_angle == 0:\n props[\"y min bc\"] = \"symmetric\"\n\n fdtd.addfdtd(properties=props)\n \n fdtd.addgaussian(name=\"source\", injection_axis=\"z-axis\", direction=\"backward\", polarization_angle=self.pol_angle, x=self.x_fib, x_span=self.x_fib_span, y_min=self.y_min, y_max=self.y_max, z=self.z_fib,\n beam_parameters=\"Waist size and position\", waist_radius_w0=5.2e-6, 
distance_from_waist=0.0, angle_theta=self.theta_fib_mat,\n center_wavelength=self.lambda0, wavelength_span=0.1e-6, optimize_for_short_pulse=False)\n \n fdtd.setglobalsource(\"center wavelength\",self.lambda0)\n fdtd.setglobalsource(\"wavelength span\",0.1e-6)\n fdtd.setglobalsource(\"optimize for short pulse\",False)\n fdtd.setglobalmonitor(\"frequency points\",11)\n fdtd.setglobalmonitor(\"use wavelength spacing\",True)\n \n fdtd.addmesh(name=\"source_mesh\",x=self.x_fib, x_span=24e-6, y_min=self.y_min, y_max=self.y_max, z=self.z_fib, z_span=2*self.dz, override_x_mesh=False, override_y_mesh=False, override_z_mesh=True, dz=self.dz)\n fdtd.setnamed(\"source_mesh\",\"enabled\",False) #< Disable by default but need to check the effect\n \n if self.material_name == \"<Object defined dielectric>\":\n fdtd.addrect(name=\"substrate\", x_min=(self.x_min-2e-6), x_max=(self.x_max+2e-6), y_min=(self.y_min-2e-6), y_max=(self.y_max+2e-6), z_min=-4e-6, z_max=-2e-6, material=self.material_name, index=self.index_wg, alpha=0.1)\n else:\n fdtd.addrect(name=\"substrate\", x_min=(self.x_min-2e-6), x_max=(self.x_max+2e-6), y_min=(self.y_min-2e-6), y_max=(self.y_max+2e-6), z_min=-4e-6, z_max=-2e-6, material=self.material_name, alpha=0.1)\n\n \n fdtd.addpower(name=\"fom\", monitor_type=\"2D X-normal\", x=self.mode_pos_x, y=0, y_span=self.mode_span_y, z=0, z_span=self.mode_span_z)\n fdtd.addmesh(name=\"fom_mesh\", x=self.mode_pos_x, x_span=2*self.dx, y=0, y_span=self.mode_span_y, z=0, z_span=self.mode_span_z, override_x_mesh=True, dx=self.dx, override_y_mesh=False, override_z_mesh=False )\n \n fdtd.addpower(name=\"opt_fields\",monitor_type=\"3D\", x_min=self.x_min_opt_region, x_max=self.x_max, y_min=self.y_min, y_max=self.y_max, z_min=self.wg_height-self.etch_depth, z_max=self.wg_height,\n output_Hx=False, output_Hy=False, output_Hz=False, output_power=False)\n fdtd.addmesh(name=\"opt_fields_mesh\", x_min=self.x_min_opt_region, x_max=self.x_max, y_min=self.y_min, y_max=self.y_max, z_min=self.wg_height-self.etch_depth, z_max=self.wg_height, dx=self.dx, dy=self.dy, dz=self.dz)\n \n fdtd.addindex(name=\"index_xy\", monitor_type=\"2D Z-normal\", x_min=self.x_min, x_max=self.x_max, y_min=self.y_min, y_max=self.y_max, z=self.wg_height-(self.etch_depth/2.), spatial_interpolation='none', enabled=False)\n fdtd.addindex(name=\"index_xz\", monitor_type=\"2D Y-normal\", x_min=self.x_min, x_max=self.x_max, y=0, z_min=self.z_min, z_max=self.z_max, spatial_interpolation='none', enabled=False)\n \n \n if self.material_name == \"<Object defined dielectric>\":\n fdtd.addrect(name='wg', x_min=(self.x_min-2e-6), x_max=2e-6, y=0, y_span=self.wg_width, z_min=0, z_max=self.wg_height, material=self.material_name, index=self.index_wg)\n else:\n fdtd.addrect(name='wg', x_min=(self.x_min-2e-6), x_max=2e-6, y=0, y_span=self.wg_width, z_min=0, z_max=self.wg_height, material=self.material_name)\n\n\n theta_start = self.initial_theta_taper\n theta_stop = 360.0 - theta_start\n\n if self.material_name == \"<Object defined dielectric>\":\n fdtd.addring(name='silicon', x=0, y=0, z_min=0, z_max=self.wg_height, inner_radius=0, outer_radius=60e-6, theta_start=theta_stop, theta_stop=theta_start, material=self.material_name, index=self.index_wg)\n else:\n fdtd.addring(name='silicon', x=0, y=0, z_min=0, z_max=self.wg_height, inner_radius=0, outer_radius=60e-6, theta_start=theta_stop, theta_stop=theta_start, material=self.material_name)\n\n \n fdtd.redrawon()", "title": "" }, { "docid": "97df60dae26b41b9302a3ee0fc182462", "score": "0.47366068", "text": 
"def prepare_simulation(self, components):\n # Check level of destination storage component: if it is below specified threshold,\n # implement low artificial costs (to encourage system to fill it)\n # Check level of all non-central storage component and use the one with the\n # highest amount of h2:\n # if it is below specified threshold, the trailer cannot take any hydrogen from it\n\n # In the model definition, the foreign states must be defined in the following order:\n # 1) all origin storage levels [kg]\n # 2) all origin minimum storage levels [kg]\n # 3) all origin capacities [kg]\n # 4) destination storage level [kg]\n # 5) destination scapacity [kg]\n # The order for each of these in terms of production sites must also be the same e.g.\n # the first entry relates to the first site, the second entry relates\n # to the second site etc.\n if self.fs_component_name is not None:\n\n # n is the number of production sites that the trailer is connected to\n n = int((len(self.fs_component_name) - 2) / 3)\n # Creates an index list for the number of foreign states considered\n index_list = list(range(0, len(self.fs_component_name)))\n # List containing the origin storage levels [kg]\n fs_origin_storage_levels = []\n # List containing the origin minimum storage levels [kg]\n fs_origin_min_storage_levels = []\n # List containing the origin capacities [kg]\n fs_origin_capacities = []\n # List containing the origin available masses [kg]\n fs_origin_available_masses = []\n\n # Obtains a list of the origin storage levels for n sites\n for i in index_list[0:n]:\n this_origin_storage_level = self.get_foreign_state_value(components, index=i)\n fs_origin_storage_levels.append(this_origin_storage_level)\n\n # Obtains a list of the origin minimum storage levels for n sites\n for i in index_list[n:2 * n]:\n this_min_storage_level = self.get_foreign_state_value(components, index=i)\n fs_origin_min_storage_levels.append(this_min_storage_level)\n\n # Obtains a list of the origin capacity levels for n sites\n for i in index_list[2 * n:3 * n]:\n this_capacity = self.get_foreign_state_value(components, index=i)\n fs_origin_capacities.append(this_capacity)\n\n # Obtains a list for the available masses that can be taken from the\n # origin storage [kg].\n # It cannot take more than half of the capacity into account\n for i in range(int(n)):\n this_available_kg = min((fs_origin_storage_levels[i]\n - fs_origin_min_storage_levels[i]),\n fs_origin_capacities[i] / 2)\n fs_origin_available_masses.append(this_available_kg)\n\n # Get the availability mass of hydrogen of the fullest origin storage\n self.fs_origin_available_kg = max(fs_origin_available_masses)\n # Obtains the destination storage level [kg]\n fs_destination_storage_level = \\\n self.get_foreign_state_value(components, index=index_list[-2])\n # Obtains the destination storage capacity [kg]\n fs_destination_capacity = \\\n self.get_foreign_state_value(components, index=index_list[-1])\n # Obtains the available mass that can be delivered to the destination storage [kg]\n fs_destination_available_storage = \\\n fs_destination_capacity - fs_destination_storage_level\n\n # Checks if the destination storage level is below the threshold:\n # if yes, delivery possible\n\n # todo: implement multiple storage delivery in one time step from different wind\n # parks - low priority\n\n if fs_destination_storage_level \\\n < self.fs_destination_storage_threshold * fs_destination_capacity:\n\n # If the available mass [kg] in the destination storage and the amount of\n # 
available hydrogen [kg] in the origin storage exceed the trailer capacity,\n # the trailer should be completely filled\n if fs_destination_available_storage >= self.trailer_capacity \\\n and self.fs_origin_available_kg >= self.trailer_capacity:\n self.hydrogen_needed = self.trailer_capacity\n # If the available mass [kg] in the destination storage exceeds the trailer\n # capacity but the amount of available hydrogen in the origin storage is less\n # than the trailer capacity, the trailer should be filled with the maximum amount\n # of available hydrogen from the origin storage\n elif fs_destination_available_storage > \\\n self.trailer_capacity > self.fs_origin_available_kg:\n self.hydrogen_needed = self.fs_origin_available_kg\n # Else, the trailer should deliver the maximum amount of hydrogen that can fit\n # into the destination storage\n else:\n self.hydrogen_needed = fs_destination_available_storage\n # If the destination storage level is not below the threshold no delivery possible\n else:\n self.hydrogen_needed = 0\n\n self.current_ac = self.get_costs_and_art_costs()", "title": "" }, { "docid": "2e8ce950ec3f60a3c592b78c62649117", "score": "0.47321063", "text": "def createGases(self):\n #Building objects for fuel and air\n self.fuel = ct.Solution(self.mechs[0])\n self.atmosphere = ct.Solution(self.mechs[1])\n self.exhausts = [ct.Solution(mech) for mech in self.mechs[2:]]", "title": "" }, { "docid": "bfcb772888cc842bd209911b20bcad52", "score": "0.47316712", "text": "def add_liquidity(self, add_liquidity):\n\n self._add_liquidity = add_liquidity", "title": "" }, { "docid": "66bbc098093daf9dc728ffaeb907d685", "score": "0.47211936", "text": "def define_components(mod):\n\n # TODO: maybe rename fuel_cost_per_timepoint component and/or .csv file to fuel_cost\n # (but that could cause confusion in the documentation?)\n mod.ZONE_FUEL_TIMEPOINTS = Set(\n dimen=3,\n validate=lambda m, z, f, p: (\n z in m.LOAD_ZONES and f in m.FUELS and p in m.TIMEPOINTS\n ),\n )\n mod.fuel_cost_per_timepoint = Param(\n mod.ZONE_FUEL_TIMEPOINTS, within=NonNegativeReals\n )\n mod.min_data_check(\"ZONE_FUEL_TIMEPOINTS\", \"fuel_cost_per_timepoint\")\n\n # don't allow use of a fuel when no cost has been specified\n mod.GEN_TP_FUELS_UNAVAILABLE = Set(\n dimen=3,\n initialize=mod.GEN_TP_FUELS,\n filter=lambda m, g, t, f: (m.gen_load_zone[g], f, t)\n not in m.ZONE_FUEL_TIMEPOINTS,\n )\n mod.Enforce_Fuel_Unavailability = Constraint(\n mod.GEN_TP_FUELS_UNAVAILABLE,\n rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0,\n )\n\n # Summarize total fuel costs in each timepoint for the objective function\n def FuelCostsPerTP_rule(m, t):\n if not hasattr(m, \"FuelCostsPerTP_dict\"):\n # cache all Fuel_Cost_TP values in a dictionary (created in one pass)\n m.FuelCostsPerTP_dict = {t2: 0.0 for t2 in m.TIMEPOINTS}\n for (g, t2, f) in m.GEN_TP_FUELS:\n if (g, t2, f) not in m.GEN_TP_FUELS_UNAVAILABLE:\n m.FuelCostsPerTP_dict[t2] += (\n m.GenFuelUseRate[g, t2, f]\n * m.fuel_cost_per_timepoint[m.gen_load_zone[g], f, t2]\n )\n # return a result from the dictionary and pop the element each time\n # to release memory\n return m.FuelCostsPerTP_dict.pop(t)\n\n mod.FuelCostsPerTP = Expression(mod.TIMEPOINTS, rule=FuelCostsPerTP_rule)\n mod.Cost_Components_Per_TP.append(\"FuelCostsPerTP\")", "title": "" }, { "docid": "0ca4d83062a265d26174cd8194f4be6d", "score": "0.47184825", "text": "def start_engine(self):\r\n if self.fuel == 0:\r\n raise FuelError(self.fuel)\r\n print(\"Starting the engine...\")", "title": "" }, { "docid": 
"d1ca68fdc819c414de5c5a717d62bb91", "score": "0.47082067", "text": "def accept(self, button=None):\n self.getParallelRebarsData()\n self.getCrossRebarsData()\n self.getColumnsData()\n self.getTiesData()\n self.getMainRebarsData()\n self.getSecondaryRebarsData()\n if not self.FootingReinforcementGroup:\n FootingReinforcementGroup = makeFootingReinforcement(\n parallel_rebar_type=self.parallel_rebars_type,\n parallel_front_cover=self.parallel_front_cover,\n parallel_rear_cover=self.parallel_rear_cover,\n parallel_left_cover=self.parallel_left_cover,\n parallel_right_cover=self.parallel_right_cover,\n parallel_top_cover=self.parallel_top_cover,\n parallel_bottom_cover=self.parallel_bottom_cover,\n parallel_diameter=self.parallel_diameter,\n parallel_amount_spacing_check=self.parallel_amount_spacing_check,\n parallel_amount_spacing_value=self.parallel_amount_spacing_value,\n cross_rebar_type=self.cross_rebars_type,\n cross_front_cover=self.cross_front_cover,\n cross_rear_cover=self.cross_rear_cover,\n cross_left_cover=self.cross_left_cover,\n cross_right_cover=self.cross_right_cover,\n cross_top_cover=self.cross_top_cover,\n cross_bottom_cover=self.cross_bottom_cover,\n cross_diameter=self.cross_diameter,\n cross_amount_spacing_check=self.cross_amount_spacing_check,\n column_front_spacing=self.column_front_spacing,\n column_left_spacing=self.column_left_spacing,\n column_right_spacing=self.column_right_spacing,\n column_rear_spacing=self.column_rear_spacing,\n tie_top_cover=self.tie_top_cover,\n tie_bottom_cover=self.tie_bottom_cover,\n tie_bent_angle=self.tie_bent_angle,\n tie_extension_factor=self.tie_extension_factor,\n tie_diameter=self.tie_diameter,\n tie_number_spacing_check=self.tie_number_spacing_check,\n tie_number_spacing_value=self.tie_number_spacing_value,\n column_main_rebar_diameter=self.column_main_rebar_diameter,\n column_main_rebars_t_offset=self.column_main_rebars_t_offset,\n cross_amount_spacing_value=self.cross_amount_spacing_value,\n column_width=self.column_width,\n column_length=self.column_length,\n xdir_column_amount_spacing_check=self.xdir_column_amount_spacing_check,\n xdir_column_amount_spacing_value=self.xdir_column_amount_spacing_value,\n ydir_column_amount_spacing_check=self.ydir_column_amount_spacing_check,\n ydir_column_amount_spacing_value=self.ydir_column_amount_spacing_value,\n parallel_rounding=self.parallel_rounding,\n parallel_l_shape_hook_orintation=self.parallel_l_shape_hook_orintation,\n cross_rounding=self.cross_rounding,\n cross_l_shape_hook_orintation=self.cross_l_shape_hook_orintation,\n column_main_rebars_type=self.column_main_rebars_type,\n column_main_hook_orientation=self.column_main_hook_orientation,\n column_main_hook_extend_along=self.column_main_hook_extend_along,\n column_l_main_rebar_rounding=self.column_l_main_rebar_rounding,\n column_main_hook_extension=self.column_main_hook_extension,\n column_sec_rebar_check=self.column_sec_rebar_check,\n column_sec_rebars_t_offset=self.column_sec_rebars_t_offset,\n column_sec_rebars_number_diameter=self.column_sec_rebars_number_diameter,\n column_sec_rebars_type=self.column_sec_rebars_type,\n column_sec_hook_orientation=self.column_sec_hook_orientation,\n column_l_sec_rebar_rounding=self.column_l_sec_rebar_rounding,\n column_sec_hook_extension=self.column_sec_hook_extension,\n mesh_cover_along=self.mesh_cover_along,\n structure=self.SelectedObj,\n facename=self.FaceName,\n )\n else:\n FootingReinforcementGroup = editFootingReinforcement(\n self.FootingReinforcementGroup,\n 
parallel_rebar_type=self.parallel_rebars_type,\n parallel_front_cover=self.parallel_front_cover,\n parallel_rear_cover=self.parallel_rear_cover,\n parallel_left_cover=self.parallel_left_cover,\n parallel_right_cover=self.parallel_right_cover,\n parallel_top_cover=self.parallel_top_cover,\n parallel_bottom_cover=self.parallel_bottom_cover,\n parallel_diameter=self.parallel_diameter,\n parallel_amount_spacing_check=self.parallel_amount_spacing_check,\n parallel_amount_spacing_value=self.parallel_amount_spacing_value,\n cross_rebar_type=self.cross_rebars_type,\n cross_front_cover=self.cross_front_cover,\n cross_rear_cover=self.cross_rear_cover,\n cross_left_cover=self.cross_left_cover,\n cross_right_cover=self.cross_right_cover,\n cross_top_cover=self.cross_top_cover,\n cross_bottom_cover=self.cross_bottom_cover,\n cross_diameter=self.cross_diameter,\n cross_amount_spacing_check=self.cross_amount_spacing_check,\n column_front_spacing=self.column_front_spacing,\n column_left_spacing=self.column_left_spacing,\n column_right_spacing=self.column_right_spacing,\n column_rear_spacing=self.column_rear_spacing,\n tie_top_cover=self.tie_top_cover,\n tie_bottom_cover=self.tie_bottom_cover,\n tie_bent_angle=self.tie_bent_angle,\n tie_extension_factor=self.tie_extension_factor,\n tie_diameter=self.tie_diameter,\n tie_number_spacing_check=self.tie_number_spacing_check,\n tie_number_spacing_value=self.tie_number_spacing_value,\n column_main_rebar_diameter=self.column_main_rebar_diameter,\n column_main_rebars_t_offset=self.column_main_rebars_t_offset,\n cross_amount_spacing_value=self.cross_amount_spacing_value,\n column_width=self.column_width,\n column_length=self.column_length,\n xdir_column_amount_spacing_check=self.xdir_column_amount_spacing_check,\n xdir_column_amount_spacing_value=self.xdir_column_amount_spacing_value,\n ydir_column_amount_spacing_check=self.ydir_column_amount_spacing_check,\n ydir_column_amount_spacing_value=self.ydir_column_amount_spacing_value,\n parallel_rounding=self.parallel_rounding,\n parallel_l_shape_hook_orintation=self.parallel_l_shape_hook_orintation,\n cross_rounding=self.cross_rounding,\n cross_l_shape_hook_orintation=self.cross_l_shape_hook_orintation,\n column_main_rebars_type=self.column_main_rebars_type,\n column_main_hook_orientation=self.column_main_hook_orientation,\n column_main_hook_extend_along=self.column_main_hook_extend_along,\n column_l_main_rebar_rounding=self.column_l_main_rebar_rounding,\n column_main_hook_extension=self.column_main_hook_extension,\n column_sec_rebar_check=self.column_sec_rebar_check,\n column_sec_rebars_t_offset=self.column_sec_rebars_t_offset,\n column_sec_rebars_number_diameter=self.column_sec_rebars_number_diameter,\n column_sec_rebars_type=self.column_sec_rebars_type,\n column_sec_hook_orientation=self.column_sec_hook_orientation,\n column_l_sec_rebar_rounding=self.column_l_sec_rebar_rounding,\n column_sec_hook_extension=self.column_sec_hook_extension,\n mesh_cover_along=self.mesh_cover_along,\n structure=self.SelectedObj,\n facename=self.FaceName,\n )\n\n self.FootingReinforcementGroup = FootingReinforcementGroup\n if (\n self.form.standardButtonBox.buttonRole(button)\n != QtWidgets.QDialogButtonBox.ApplyRole\n ):\n self.form.close()", "title": "" }, { "docid": "c79e3f5389f572ebcc61dcdb92976a8e", "score": "0.47073346", "text": "def crane_invest(self, year):\n if self.debug:\n print(' *** add Harbour crane to elements')\n # add unloader object\n if (self.crane_type_defaults[\"crane_type\"] == 'Gantry crane' or\n 
self.crane_type_defaults[\"crane_type\"] == 'Harbour crane' or\n self.crane_type_defaults[\"crane_type\"] == 'Mobile crane'):\n crane = Cyclic_Unloader(**self.crane_type_defaults)\n elif self.crane_type_defaults[\"crane_type\"] == 'Screw unloader':\n crane = Continuous_Unloader(**self.crane_type_defaults)\n\n # - capex\n unit_rate = crane.unit_rate\n mobilisation = unit_rate * crane.mobilisation_perc\n crane.capex = int(unit_rate + mobilisation)\n\n # - opex\n crane.insurance = unit_rate * crane.insurance_perc\n crane.maintenance = unit_rate * crane.maintenance_perc\n\n # labour\n labour = Labour(**agribulk_defaults.labour_data)\n '''old formula --> crane.labour = crane.crew * self.operational_hours / labour.shift_length '''\n crane.shift = ((crane.crew * self.operational_hours) / (\n labour.shift_length * labour.annual_shifts))\n crane.labour = crane.shift * labour.operational_salary\n\n # apply proper timing for the crane to come online (in the same year as the latest Quay_wall)\n years_online = []\n for element in self.find_elements(Quay_wall):\n years_online.append(element.year_online)\n crane.year_online = max([year + crane.delivery_time, max(years_online)])\n\n # add cash flow information to quay_wall object in a dataframe\n crane = self.add_cashflow_data_to_element(crane)\n\n # add object to elements\n self.elements.append(crane)", "title": "" }, { "docid": "4e1f22653df3a6afced7978588cef094", "score": "0.46998996", "text": "def simulate_scheil_solidification(dbf, comps, phases, composition,\n start_temperature, step_temperature=1.0,\n liquid_phase_name='LIQUID', eq_kwargs=None,\n stop=0.0001, verbose=False, adaptive=True):\n eq_kwargs = eq_kwargs or dict()\n STEP_SCALE_FACTOR = 1.2 # How much to try to adapt the temperature step by\n MAXIMUM_STEP_SIZE_REDUCTION = 5.0\n T_STEP_ORIG = step_temperature\n phases = filter_phases(dbf, unpack_components(dbf, comps), phases)\n models = instantiate_models(dbf, comps, phases)\n if verbose:\n print('building callables... 
', end='')\n cbs = build_callables(dbf, comps, phases, models, additional_statevars={v.P, v.T, v.N}, build_gradients=True, build_hessians=True)\n if verbose:\n print('done')\n solid_phases = sorted(set(phases) - {liquid_phase_name})\n temp = start_temperature\n independent_comps = sorted([str(comp)[2:] for comp in composition.keys()])\n x_liquid = {comp: [composition[v.X(comp)]] for comp in independent_comps}\n fraction_solid = [0.0]\n temperatures = [temp]\n phase_amounts = {ph: [0.0] for ph in solid_phases}\n ord_disord_dict = order_disorder_dict(dbf, comps, phases)\n\n if adaptive and ('points' in eq_kwargs.get('calc_opts', {})):\n # Dynamically add points as the simulation runs\n species = unpack_components(dbf, comps)\n dof_dict = {ph: generate_dof(dbf.phases[ph], species)[1] for ph in phases}\n else:\n adaptive = False\n\n converged = False\n phases_seen = {liquid_phase_name, ''}\n liquid_comp = composition\n while fraction_solid[-1] < 1:\n conds = {v.T: temp, v.P: 101325.0, v.N: 1.0}\n comp_conds = liquid_comp\n fmt_comp_conds = ', '.join([f'{c}={val:0.2f}' for c, val in comp_conds.items()])\n conds.update(comp_conds)\n eq = equilibrium(dbf, comps, phases, conds, callables=cbs, model=models, **eq_kwargs)\n if adaptive:\n # Update the points dictionary with local samples around the equilibrium site fractions\n points_dict = eq_kwargs['calc_opts']['points']\n for vtx in range(eq.vertex.size):\n masked = eq.isel(vertex=vtx)\n ph = str(masked.Phase.values.squeeze())\n pts = points_dict.get(ph)\n if pts is not None:\n if verbose:\n print(f'Adding points to {ph}. ', end='')\n dof = dof_dict[ph]\n points_dict[ph] = np.concatenate([pts, local_sample(masked.Y.values.squeeze()[:sum(dof)].reshape(1, -1), dof, pdens=20)], axis=0)\n\n eq_phases = order_disorder_eq_phases(eq, ord_disord_dict)\n num_eq_phases = np.nansum(np.array([str(ph) for ph in eq_phases]) != '')\n new_phases_seen = set(eq_phases).difference(phases_seen)\n if len(new_phases_seen) > 0:\n if verbose:\n print(f'New phases seen: {new_phases_seen}. ', end='')\n phases_seen |= new_phases_seen\n if liquid_phase_name not in eq[\"Phase\"].values.squeeze():\n found_ph = set(eq_phases) - {''}\n if verbose:\n print(f'No liquid phase found at T={temp:0.3f}, {fmt_comp_conds}. (Found {found_ph}) ', end='')\n if len(found_ph) == 0:\n # No phases found in equilibrium. Just continue on lowering the temperature without changing anything\n if verbose:\n print(f'(Convergence failure) ', end='')\n if T_STEP_ORIG / step_temperature > MAXIMUM_STEP_SIZE_REDUCTION:\n # Only found solid phases and the step size has already been reduced. Stop running without converging.\n if verbose:\n print('Maximum step size reduction exceeded. Stopping.')\n converged = False\n break\n else:\n # Only found solid phases. 
Try reducing the step size to zero-in on the correct phases\n if verbose:\n print(f'Stepping back and reducing step size.')\n temp += step_temperature\n step_temperature /= STEP_SCALE_FACTOR\n continue\n # TODO: Will break if there is a liquid miscibility gap\n liquid_vertex = sorted(np.nonzero(eq[\"Phase\"].values.squeeze().flat == liquid_phase_name))[0]\n liquid_comp = {}\n for comp in independent_comps:\n x = float(eq[\"X\"].isel(vertex=liquid_vertex).squeeze().sel(component=comp).values)\n x_liquid[comp].append(x)\n liquid_comp[v.X(comp)] = x\n np_liq = np.nansum(eq.where(eq[\"Phase\"] == liquid_phase_name).NP.values)\n current_fraction_solid = float(fraction_solid[-1])\n found_phase_amounts = [(liquid_phase_name, np_liq)] # tuples of phase name, amount\n for solid_phase in solid_phases:\n if solid_phase not in eq_phases:\n phase_amounts[solid_phase].append(0.0)\n continue\n np_tieline = np.nansum(eq.isel(vertex=eq_phases.index(solid_phase))[\"NP\"].values.squeeze())\n found_phase_amounts.append((solid_phase, np_tieline))\n delta_fraction_solid = (1 - current_fraction_solid) * np_tieline\n current_fraction_solid += delta_fraction_solid\n phase_amounts[solid_phase].append(delta_fraction_solid)\n fraction_solid.append(current_fraction_solid)\n temperatures.append(temp)\n NL = 1 - fraction_solid[-1]\n if verbose:\n phase_amnts = ' '.join([f'NP({ph})={amnt:0.3f}' for ph, amnt in found_phase_amounts])\n if NL < 1.0e-3:\n print(f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:.2E}, {phase_amnts} ', end='')\n else:\n print(f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:0.3f}, {phase_amnts} ', end='')\n if NL < stop:\n if verbose:\n print(f'Liquid fraction below criterion {stop} . Stopping at {fmt_comp_conds}')\n converged = True\n break\n if verbose:\n print() # add line break\n temp -= step_temperature\n\n if fraction_solid[-1] < 1:\n for comp in independent_comps:\n x_liquid[comp].append(np.nan)\n fraction_solid.append(1.0)\n temperatures.append(temp)\n # set the final phase amount to the phase fractions in the eutectic\n # this method gives the sum total phase amounts of 1.0 by construction\n for solid_phase in solid_phases:\n if solid_phase in eq_phases:\n amount = np.nansum(eq.isel(vertex=eq_phases.index(solid_phase))[\"NP\"].values.squeeze())\n phase_amounts[solid_phase].append(float(amount) * (1 - current_fraction_solid))\n else:\n phase_amounts[solid_phase].append(0.0)\n\n return SolidificationResult(x_liquid, fraction_solid, temperatures, phase_amounts, converged, \"scheil\")", "title": "" }, { "docid": "a4f42a97b845fb2ecc0c9406e3dbab64", "score": "0.4697101", "text": "def placeBets(self):\n raise NotImplementedError", "title": "" }, { "docid": "bd91de486ae217716303491368e6e8ca", "score": "0.46919805", "text": "def fuel_required_for_part1(self) -> int:\n return self.mass//3 - 2", "title": "" }, { "docid": "47824ae1f4bd2b5705be6f1c75a20c83", "score": "0.46808332", "text": "def fill_gas_tank(self):\n print(f\"This {self.make.title()} now has a full tank of gas!\")", "title": "" }, { "docid": "17f94e9d43a9c0e15882d8974ca9afc4", "score": "0.46776524", "text": "def Calculate_Defect_Carrier_Concentration(self):\r\n\t\t\r\n\t\t# Initialize\r\n\t\tintrinsic_defect_carrier_concentration_temperature = {}\r\n\t\textrinsic_defect_carrier_concentration_temperature = {}\r\n\t\tfor temperature in self.temperature_array:\r\n\t\t\tintrinsic_defect_carrier_concentration_temperature[temperature] = 
np.zeros(len(self.fermi_energy_array))\r\n\t\t\textrinsic_defect_carrier_concentration_temperature[temperature] = np.zeros(len(self.fermi_energy_array))\r\n\t\t\r\n\t\t# Obtain intrinsic defect carrier concentration\r\n\t\tfor defect in self.ternary_defects_data.keys():\r\n\t\t\t\r\n\t\t\t# Check that item is truly a defect\r\n\t\t\tif \"_\" not in defect:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif self.ternary_defects_data[defect][\"Extrinsic\"] == \"No\":\r\n\t\t\t\t\r\n\t\t\t\tfor charge in self.ternary_defects_data[defect][\"charge\"].keys():\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Formation energy\r\n\t\t\t\t\t\"\"\"\r\n\t\t\t\t\tdefect_formation_enthalpy = self.ternary_defects_data[defect][\"charge\"][charge][\"Energy\"] \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[\"supercellsize\"] * self.main_compound_total_energy \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[defect][\"n_\"+self.species_a] * ( self.first_element_mu0 + float(dmu_a) ) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[defect][\"n_\"+self.species_b] * ( self.second_element_mu0 + float(dmu_b) ) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[defect][\"n_\"+self.species_c] * ( self.third_element_mu0 + float(dmu_c) ) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ float(charge) * self.fermi_energy_array \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ self.ternary_defects_data[defect][\"charge\"][charge][\"ECorr\"]\r\n\t\t\t\t\t\"\"\"\r\n\t\t\t\t\tdefect_formation_enthalpy = self.ternary_defects_data[defect][\"charge\"][charge][\"Energy\"] \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[\"supercellsize\"] * self.main_compound_total_energy \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ float(charge) * self.fermi_energy_array \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ self.ternary_defects_data[defect][\"charge\"][charge][\"ECorr\"]\r\n\t\t\t\t\tfor element in self.elements_list:\r\n\t\t\t\t\t\tdefect_formation_enthalpy -= self.ternary_defects_data[defect][\"n_\"+element] * ( self.mu_elements[element][\"mu0\"] + self.mu_elements[element][\"deltamu\"] )\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Defect concentration\r\n\t\t\t\t\tif self.first_element == defect.split(\"_\")[-1]:\r\n\t\t\t\t\t\tN = self.main_compound_number_first_specie/self.vol\r\n\t\t\t\t\telif self.second_element == defect.split(\"_\")[-1]:\r\n\t\t\t\t\t\tN = self.main_compound_number_second_specie/self.vol\r\n\t\t\t\t\telif self.third_element == defect.split(\"_\")[-1]:\r\n\t\t\t\t\t\tN = self.main_compound_number_third_specie/self.vol\r\n\t\t\t\t\t\r\n\t\t\t\t\tfor temperature in self.temperature_array:\r\n\t\t\t\t\t\tdefect_carrier_concentration = float(charge) * N * np.exp(-defect_formation_enthalpy / (self.k * temperature) )\r\n\t\t\t\t\t\tintrinsic_defect_carrier_concentration_temperature[temperature] += defect_carrier_concentration\r\n\t\t\r\n\t\t# Check that user-selected extrinsic defect is not \"None\"\r\n\t\tif self.extrinsic_defect == \"None\":\r\n\t\t\treturn intrinsic_defect_carrier_concentration_temperature, extrinsic_defect_carrier_concentration_temperature\r\n\t\t\r\n\t\t# Obtain extrinsic defect carrier concentration\r\n\t\tfor charge in self.ternary_defects_data[self.extrinsic_defect][\"charge\"].keys():\r\n\t\t\t\r\n\t\t\t# Formation energy\r\n\t\t\t\"\"\"\r\n\t\t\textrinsic_defect_formation_enthalpy = self.ternary_defects_data[self.extrinsic_defect][\"charge\"][charge][\"Energy\"] \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[\"supercellsize\"] * self.main_compound_total_energy \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- 
self.ternary_defects_data[self.extrinsic_defect][\"n_\"+self.species_a] * ( self.first_element_mu0 + float(dmu_a) ) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[self.extrinsic_defect][\"n_\"+self.species_b] * ( self.second_element_mu0 + float(dmu_b) ) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[self.extrinsic_defect][\"n_\"+self.species_c] * ( self.third_element_mu0 + float(dmu_c) ) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- (self.extrinsic_defect_mu0 + self.extrinsic_defect_deltamu) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ float(charge) * self.fermi_energy_array \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ self.ternary_defects_data[self.extrinsic_defect][\"charge\"][charge][\"ECorr\"]\r\n\t\t\t\"\"\"\r\n\t\t\textrinsic_defect_formation_enthalpy = self.ternary_defects_data[self.extrinsic_defect][\"charge\"][charge][\"Energy\"] \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- self.ternary_defects_data[\"supercellsize\"] * self.main_compound_total_energy \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t- (self.extrinsic_defect_mu0 + self.extrinsic_defect_deltamu) \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ float(charge) * self.fermi_energy_array \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t+ self.ternary_defects_data[self.extrinsic_defect][\"charge\"][charge][\"ECorr\"]\r\n\t\t\tfor element in self.elements_list:\r\n\t\t\t\textrinsic_defect_formation_enthalpy -= self.ternary_defects_data[self.extrinsic_defect][\"n_\"+element] * ( self.mu_elements[element][\"mu0\"] + self.mu_elements[element][\"deltamu\"] )\r\n\t\t\t\r\n\t\t\t# Extrinsic defect concentration\r\n\t\t\tif self.first_element == defect.split(\"_\")[-1]:\r\n\t\t\t\tN = self.main_compound_number_first_specie/self.vol\r\n\t\t\telif self.second_element == defect.split(\"_\")[-1]:\r\n\t\t\t\tN = self.main_compound_number_second_specie/self.vol\r\n\t\t\telif self.third_element == defect.split(\"_\")[-1]:\r\n\t\t\t\tN = self.main_compound_number_third_specie/self.vol\r\n\t\t\t\r\n\t\t\tfor temperature in self.temperature_array:\r\n\t\t\t\textrinsic_defect_carrier_concentration = float(charge) * N * np.exp(-extrinsic_defect_formation_enthalpy / (self.k * temperature) )\r\n\t\t\t\textrinsic_defect_carrier_concentration_temperature[temperature] += extrinsic_defect_carrier_concentration\r\n\t\t\r\n\t\treturn intrinsic_defect_carrier_concentration_temperature, extrinsic_defect_carrier_concentration_temperature\t\t# Returns a dictionary with temperature as keys and array of defect-induced carrier concentrations (not defect concentrations) as values\r", "title": "" }, { "docid": "1f3df972e7d3930717bf34d2d564ee3c", "score": "0.46776056", "text": "def __init__(self, name, fuel, price_per_km):\n super().__init__(name, fuel)\n self.price_per_km = price_per_km\n self.current_fare_distance = 0", "title": "" }, { "docid": "8a4d2db6eeeaa9890edc5bf449d25db9", "score": "0.4672864", "text": "def fuel_required_for_part2(self) -> int:\n total_fuel_required = added_fuel_required = self.mass//3 - 2\n while (added_fuel_required := added_fuel_required//3 - 2) > 0:\n total_fuel_required += added_fuel_required\n return total_fuel_required", "title": "" }, { "docid": "847ffdc16f66838897c4995bddee8f9c", "score": "0.46675283", "text": "def Fuel_type_reefer_NotDEF(self):\n \n #Input constants\n\n generic_trans_amount = \"$5.00\" # any value that gets an approval from the host\n card_to_use_NGFC = 'NGFC' # using this card to get all commercial prompts\n \n prompts = {\n \"Additional Products Y/N?\": { \n \"buttons\": [\"Yes\"]\n },\n \"Do you want to print a receipt?\": {\n \"buttons\": [\"No\"]\n },\n \"Select fuel products to 
dispense\": {\n \"buttons\": [\"Reefer fuel\"]\n },\n \"DEF?\": {\n \"buttons\": [\"No\"]\n }\n }\n\n messages_to_verify = {\n 'preauth_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'preauth_response_verifications': {\n 'Response Code': '2', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 0.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING DENVER CO234W987'\n },\n 'capture_request_verifications': {\n 'Fuel Purchase': '500', \n '001 - Wex OTR Flags': 'C - Commercial'\n },\n 'capture_response_verifications': {\n 'Response Code': '0', \n 'Approved Amount': '000000500',\n '002 - Wex OTR Non-Fuel Product Data - Product code: CADV (Company funds cash advance)': 'Amount: 20.00',\n '002 - Wex OTR Non-Fuel Product Data - Product code: MERC (Default category for merchandise)': 'Amount: 30.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_1_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_2_Product }': 'Product limit: 50.00',\n f'003 - Wex OTR Fuel Product Limits - Product Code: {self.helpers.grade_3_Product }': 'Product limit: 50.00',\n '005 - Wex OTR Customer Information': 'ABC TRUCKING DENVER CO234W987'\n }\n }\n\n #Output verifications\n\n receipt_data = [f\"{self.helpers.grade_1_Name_reefer} CA PUMP# 1\",\n \"5.000 GAL @ $1.000/GAL $5.00 99\",\n \"Subtotal = $5.00\",\n \"Tax = $0.00\",\n \"Total = $5.00\",\n \"Change Due = $0.00\",\n \"Credit $5.00\"]\n\n self.helpers.prepay_transaction(\n card=card_to_use_NGFC,\n prepay_amount=generic_trans_amount,\n prompts=prompts,\n messages_to_verify=messages_to_verify,\n receipt_data=receipt_data\n )", "title": "" }, { "docid": "9163deaae2f5ae34d9616a47b87654a0", "score": "0.46662685", "text": "def fk_system(self):\n\n self.fk_controls = []\n \n self.fk_chain = joints_utils.related_clean_joint_chain(self.main_chain, self.side, \"fk\", True)\n self.fk_system_objs.append(self.fk_chain[0])\n\n # cmds.error(\"number of joint: {}\".format(len(self.fk_chain)))\n\n for i, jnt in enumerate(self.fk_chain):\n \n if i == (len(self.fk_chain) - 1):\n break\n ctrl = None\n if i != 0:\n ctrl = controller.Control(\"{}\".format(jnt[:len(jnt)-4]), 5.0, 'circle', jnt, jnt, self.fk_controls[i-1].get_control(), ['v'], '', True, True, False)\n elif i == 0:\n ctrl = controller.Control(\"{}\".format(jnt[:len(jnt)-4]), 5.0, 'circle', jnt, jnt, '', ['v'], '', True, True, False)\n else:\n ctrl = controller.Control(\"{}\".format(jnt[:len(jnt)-4]), 5.0, 'circle', jnt, jnt, '', ['v'], '', True, True, False)\n self.fk_controls.append(ctrl)\n \n cmds.parentConstraint(ctrl.get_control(), jnt, maintainOffset=True)\n cmds.scaleConstraint(ctrl.get_control(), jnt, maintainOffset=True)\n\n self.fk_system_grp = cmds.group(empty=True, name=\"{}_{}_fkSystem_GRP\".format(self.side, self.name))\n cmds.parent(self.fk_system_objs, self.fk_system_grp)\n cmds.group(empty=True, name=self.fk_ctrls_main_grp)\n 
transforms_utils.align_objs(self.clavicle_jnt, self.fk_ctrls_main_grp)\n cmds.parentConstraint(self.clavicle_jnt, self.fk_ctrls_main_grp, maintainOffset=True)\n # scale fix\n for axis in [\"X\", \"Y\", \"Z\"]:\n cmds.connectAttr(\"{}.scale{}\".format(self.clavicle_jnt, axis), \"{}.scale{}\".format(self.fk_ctrls_main_grp, axis), force=True)\n cmds.parent(self.fk_controls[0].get_offset_grp(), self.fk_ctrls_main_grp)\n self.module_main_grp(self.fk_system_grp)\n\n return True", "title": "" } ]
53115a545456a4f0c2aade0d3a8c94ed
Loads the specified image.
[ { "docid": "49beec7092879a0b08ea6d7fed006276", "score": "0.0", "text": "def __load__(self, item):\n \n # the name for parent of parent directory where the image is located and the name of the image are same.\n # an example directory breakup is shown below -\n # - data-science-bowl-2018/\n # - stage1_train/\n # - abc\n # - image\n # - abc\n # - mask\n full_image_path = os.path.join(self.path, item, \"images\", item) + \".png\"\n mask_dir_path = os.path.join(self.path, item, \"masks/\")\n all_masks = os.listdir(mask_dir_path)\n \n # load the images\n image = cv2.imread(full_image_path, 1)\n image = cv2.resize(image, (self.image_size, self.image_size))\n \n masked_img = np.zeros((self.image_size, self.image_size, 1))\n \n # load and prepare the corresponding mask.\n for mask in all_masks:\n fullPath = mask_dir_path + mask\n _masked_img = cv2.imread(fullPath, -1)\n _masked_img = cv2.resize(_masked_img, (self.image_size, self.image_size))\n _masked_img = np.expand_dims(_masked_img, axis = -1)\n masked_img = np.maximum(masked_img, _masked_img)\n \n # mormalize the mask and the image. \n image = image/255.0\n masked_img = masked_img/255.0\n \n return image, masked_img", "title": "" } ]
[ { "docid": "8738269cbd579759308a2a386d6ce210", "score": "0.77812254", "text": "def load_image(self, image_id):\n info = self.image_info[image_id]\n image = info['img']\n return image", "title": "" }, { "docid": "a978904aa9088dba70ec448b95ab2849", "score": "0.7713859", "text": "def load_image(self):\n self.load('Load image',self.place_image)", "title": "" }, { "docid": "56fd55d3e32390ff34cff6d91cf8d4ec", "score": "0.7456635", "text": "def load_image(self, image_id):\n return self.image_info[image_id][\"image\"]", "title": "" }, { "docid": "459cbeebfc55aa7463f304ec9ba50945", "score": "0.7322855", "text": "def load_image(self, image: Union[widgets.Image, str, pathlib.Path]):\n image = load_img(image)\n self.current_image = image\n self._display_image()\n self.init_empty_data()", "title": "" }, { "docid": "862d16b5b7c544e8c666c6a0b9e7c558", "score": "0.729733", "text": "def load_image(self, image_id):\n # Load image\n path = self.image_info[image_id]['path']\n url = self.image_info[image_id]['url']\n\n is_url = urllib.request.urlopen(url).getcode() == 200\n\n if is_url:\n image = skimage.io.imread(url)\n else:\n image = skimage.io.imread(path)\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "title": "" }, { "docid": "f5a722d84b84784e9f67de7dff96dc8f", "score": "0.72675246", "text": "def load_image(namespace, filename):\n try:\n file_obj = HTTPLoader.get_fileobj(namespace, filename)\n img = Image.open(file_obj)\n\n except NotFoundError as e:\n raise NotFoundError(\"An error occurred: '%s'\" % str(e))\n\n return image", "title": "" }, { "docid": "f2f7e9d9d4c51963558c77bfde2b267b", "score": "0.72297585", "text": "def load_image(filename):\n return load('image', filename)", "title": "" }, { "docid": "506ff2394e31bfd629ebe307a65485cc", "score": "0.71853673", "text": "def load_image(image_path):\n with open(image_path, \"rb\") as image_file:\n return load_image_from_bytes(image_file.read())", "title": "" }, { "docid": "1966443bd0c4e8281ddc0f0f2b8aa437", "score": "0.71544623", "text": "def load_image(self, image_id):\n # Load image\n image = cv2.imread(self.image_info[image_id]['path'])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n if self.use_rgbd:\n # TODO: think about using the loading of open3d\n depth = cv2.imread(self.image_info[image_id]['depth'], -1)\n image = np.concatenate((image, np.expand_dims(depth / 10000., 2)), axis=2)\n return image", "title": "" }, { "docid": "73fdaf195e2454197c512d03295bc5c8", "score": "0.7046602", "text": "def loadImage(self, inDirectory, inFilename):\n success = self.load(inDirectory + inFilename)\n self.imageLoaded = success", "title": "" }, { "docid": "e260af3ad6ad71732aa720463dabfd25", "score": "0.6961212", "text": "def loadImage(self, imagePath=None):\n if imagePath is None:\n imagePath = self.getModel().getImagePath()\n if not self.__imageName: \n self.__imageName = self.getModel().getSpriteName()\n imageName = os.path.join(imagePath, self.__imageName) \n self.imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(imageName) \n self.anchor = guiobjects.getOffset(self.imgPath)\n self.topAnchor = guiobjects.getTopOffset(self.anchor, imageName)\n pos = self.__position\n scrPos = GG.utils.p3dToP2d(pos, self.anchor)\n zOrder = (pow(pos[0], 2) + pow(pos[1], 2))*10\n self.__img = guiobjects.getSprite(imageName, scrPos, zOrder)", "title": "" }, { "docid": "ca931d033556fdc01dccc714498da3b4", "score": "0.6921727", "text": "def load_image(self, image_id, cache=True):\n full_image = self.frames.load_image(image_id, cache=cache)\n return full_image", "title": "" }, { "docid": "7c3f9bcf359379c5bae2ac6d1e46d14e", "score": "0.6917364", "text": "def load_image_async(self, image_url, *params):\n print \"reading image from url %s\" % image_url\n rb_utils.get_url_async(image_url, self.load_image_cb, *params)", "title": "" }, { "docid": "8947aaec0c5427dbabbcf9789f089a19", "score": "0.69082767", "text": "def load_image(self, image_path=None):\n if image_path is None:\n # Prompt user to choose an image\n image_path, sel_filter = QtWidgets.QFileDialog.getOpenFileName(\n self,\n \"Load image\"\n )\n if not image_path:\n return\n\n self.image_plane.load_image(image_path)\n\n # Input color space or the current view could be changed by file and\n # viewing rules on image load. Update the GUI without triggering a\n # re-render.\n with self._ocio_signals_blocked():\n self.set_input_color_space(self.image_plane.input_colorspace())\n self.set_display_view(\n self.image_plane.display(),\n self.image_plane.view()\n )", "title": "" }, { "docid": "6479bd2bb00ad482508c65c82360a6ac", "score": "0.6821809", "text": "def load_image(self):\r\n try:\r\n image_file, _ = QtWidgets.QFileDialog.getOpenFileName(parent=self, caption=\"Load image\")\r\n if image_file == '':\r\n return\r\n with open(image_file, \"rb\") as f:\r\n buffer = BytesIO(f.read())\r\n image = Image.open(buffer).convert(\"RGB\")\r\n imgarray = np.array(image)\r\n imgdata = (imgarray, buffer)\r\n self.process_new_image(imgdata)\r\n\r\n except Exception as e:\r\n self.statusbar.showMessage(\"[Error] {}: {}\".format(type(e).__name__, str(e)))\r\n return", "title": "" }, { "docid": "c8604f57578844e7ff4a8b5eae93b9c8", "score": "0.677367", "text": "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n return image", "title": "" }, { "docid": "6a4f97fd482f36dfd666400ba3f16ec9", "score": "0.6760915", "text": "def load_image(namespace, filename):\n # Path to the great grandparent directory of this file\n try:\n image_bytes = FileSystemLoader.get_fileobj(namespace, filename)\n image = Image.open(image_bytes)\n except FileNotFoundError as e:\n raise NotFoundError(\"File Not Found at %s\" % (Path(namespace + \"/\" + filename)))\n\n return image", "title": "" }, { "docid": "32d280ecc7da2e00d1b6ecb065ca6633", "score": "0.67478627", "text": "def load_image(path):\n try:\n canonical_path = str(path).replace(\"/\", os.sep).replace(\"\\\\\", os.sep)\n if canonical_path in ImageManager._images_dict.keys():\n # load image from img_dict\n _image = ImageManager._images_dict[canonical_path]\n else:\n try:\n _image = pygame.image.load(canonical_path).convert_alpha()\n ImageManager._images_dict[canonical_path] = _image\n except pygame.error:\n raise FileExistsError(\"File '{0}' does not exist. Check your path to the image.\".format(path))\n return _image\n except FileExistsError:\n raise FileExistsError(\"File '{0}' does not exist. Check your path to the image.\".format(path))", "title": "" }, { "docid": "f85c9d69dc48b6be9f99f1c67f269dee", "score": "0.67160517", "text": "def load_img(path, img_id):\n return imread(path + '/images/' + img_id + '.png')", "title": "" }, { "docid": "db00023f1545e7799f11d43deda99482", "score": "0.6700112", "text": "def load_image(fname):\n\n if not os.path.isfile(fname):\n raise FileNotFoundError(fname)\n\n try:\n import freeimgldr\n return _load_image_frimgldr(fname)\n except ImportError:\n pass\n\n typ = _detect_format(fname)\n if typ is None:\n return None #unknown type of image file\n typ = typ.lower()\n if typ in _image_loaders:\n return _image_loaders[typ](fname)\n\n return None #we don't have loader for requested type of image file", "title": "" }, { "docid": "7b2d58b40db3432159585d79e7c86c2e", "score": "0.66812533", "text": "def loadImage(self, imagePath: str):\n\n # Load image. Converts to QPixmap automatically.\n self.imageView.setImage(imagePath)\n\n # Check fit status\n self.fitImage()\n\n # This will run only when the first image is loaded.\n if self.currentImage is None:\n # Replace the intro text with the image.\n self.mainLayout.replaceWidget(self.introLabel, self.imageView)\n\n # Enable buttons. 
They will be hidden/shown later.\n self.buttonPrevious.setEnabled(True)\n self.buttonNext.setEnabled(True)\n\n # Enable action.\n self.fitToWindowAct.setEnabled(True)\n\n # Store the current image.\n self.currentImage = imagePath\n\n # Scan the current image's directory.\n self.scanDir(os.path.dirname(self.currentImage))\n\n # Update the index\n self.updateIndex()", "title": "" }, { "docid": "32006ada3795acb0b04bf5112741be10", "score": "0.66740733", "text": "def load_sample_image(image_name):\n ...", "title": "" }, { "docid": "bad76fdb0c358caf6e263a0f7c410253", "score": "0.664944", "text": "def load_image(path,image_name) :\n data_path = os.path.join(path)\n image = skimage.io.imread(os.path.join(data_path, image_name))\n \n return image", "title": "" }, { "docid": "bfc64962a14b5a3d6d205a5e24a29647", "score": "0.6573593", "text": "def load(self, filepath):\n image = loader.load_data(filepath, logger=self.logger)\n self.set_image(image)", "title": "" }, { "docid": "849bc063050a8d33fe77dc8c1e4bf05c", "score": "0.6561718", "text": "def load_img(path):\n try:\n return Image.open(path)\n except IOError:\n return None", "title": "" }, { "docid": "225c5ca3e728716e2e420bcb9fff9b9e", "score": "0.65608877", "text": "def image_load(path):\n from PIL import Image\n image = Image.open(path)\n\n #\n # import cv2\n # image = cv2.imread(path)\n return image", "title": "" }, { "docid": "6fca5189e24afcc1d2cb32dbfc40c178", "score": "0.6538374", "text": "def image_loader(image_name):\r\n image = Image.open(img_path)\r\n image = loader(image).float()\r\n image = Variable(image, requires_grad=True)\r\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\r\n return image.cuda() #assumes that you're using GPU\r", "title": "" }, { "docid": "7dc7ba0da1702b832c3f1a1f44470f2c", "score": "0.6520938", "text": "def load_image(img_path):\n image = image_to_tensor(Image.open(img_path).convert('RGB'))\n return image", "title": "" }, { "docid": "abebecf4ac59e891945db31f87ea34f5", "score": "0.6464524", "text": "def load_image(image):\r\n if isinstance(image, pygame.Surface):\r\n return image\r\n\r\n try:\r\n surface = pygame.image.load(image)\r\n return surface\r\n except pygame.error as message:\r\n print(\"Cannot load : \", image)\r\n print(\"Reason : \", message)\r\n\r\n return pygame.Surface((0, 0))", "title": "" }, { "docid": "361f465558c1a2c6ab789d9df56eb77e", "score": "0.6419893", "text": "def loadImage(imagePath):\n return cv2.imread(imagePath)", "title": "" }, { "docid": "7507358536e4de90b135962e8c855294", "score": "0.6398999", "text": "def image_loader(self,image_path):\n image = Image.open(image_path).convert(\"RGB\")\n image = loader(image).float()\n image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return image", "title": "" }, { "docid": "c3c20ad3648e6c1e221c7ae7af7c3efe", "score": "0.63935006", "text": "def loadReferenceImage(self):\n refFileName = QtWidgets.QFileDialog.getOpenFileName(self, caption = \"Open reference image\", filter=\"Image Files (*.png *.jpg)\")[0]\n try:\n self.refImage.load(refFileName)\n self.redisplayAnsi()\n except:\n pass", "title": "" }, { "docid": "43b46e64cafcbb49987fe2fc68b98c61", "score": "0.63855404", "text": "def image_loader(img):\n loader = transforms.Compose([transforms.Resize((256, 256), Image.BICUBIC), \n transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n \n image = loader(img).float()\n image = image.unsqueeze(0) #this is for VGG, may not be 
needed for ResNet\n return image #assumes that you're using GPU", "title": "" }, { "docid": "1691766d8b3532a2c5376a7ef9586927", "score": "0.638254", "text": "def load_image(directory, image_file):\n return mpimg.imread(os.path.join(directory, image_file.strip()))", "title": "" }, { "docid": "1ca1049d6c735c6794b30e568b961b8e", "score": "0.6379491", "text": "def loadImage(cls, image_data_stream):\n raise NotImplementedError()", "title": "" }, { "docid": "f5a0da8d07279b27fa99a1348f1a9e39", "score": "0.636756", "text": "def load_image(data_dir, image_file):\n return mpimg.imread(os.path.join(data_dir, image_file.strip()))", "title": "" }, { "docid": "c386d8322d7bfe873d27645811ead625", "score": "0.6356187", "text": "def load_image(self,image_id):\r\n image = skimage.io.imread(self.image_info[image_id][\"path\"])\r\n if image.ndim != 3:\r\n image = skimage.color.gray2rgb(image)\r\n if image.shape[-1] == 4:\r\n image = image[:,:,:3]\r\n return image", "title": "" }, { "docid": "aa873429e0be8dfd1be7b2276ca27963", "score": "0.63477254", "text": "def image_loader(image_name):\n image = Image.open(image_name).convert(\"RGB\")\n image = loader(image).float()\n image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return image", "title": "" }, { "docid": "ac242ced77ac1dd11ae83ce5bd43aa10", "score": "0.63471216", "text": "def load_image(self):\n img = wx.Image(self.current_image, wx.BITMAP_TYPE_ANY)\n\n # scale the image, preserving the aspect ratio\n W = img.GetWidth()\n H = img.GetHeight()\n if W > H:\n new_w = self.max_size\n new_h = int(self.max_size * H / W)\n else:\n new_h = self.max_size\n new_w = int(self.max_size * W / H)\n img = img.Scale(new_w, new_h)\n\n self.image_ctrl.SetBitmap(wx.Bitmap(img))\n self.Refresh()", "title": "" }, { "docid": "f9aa5749aa3d8a1ff57470f174317f7e", "score": "0.6313478", "text": "def load_image(name):\r\n fullname = os.path.join('data/images', name)\r\n try:\r\n image = pygame.image.load(fullname).convert_alpha()\r\n except pygame.error, message:\r\n print 'Cannot load image:', name\r\n raise SystemExit, message\r\n return image", "title": "" }, { "docid": "bfdee46e4189d22fabafa73808aa0ce4", "score": "0.6292713", "text": "def load_full_image(image_tag_string, image_type='oci', load_until_layer=0):\n if image_type == 'oci':\n image = OCIImage(image_tag_string.replace('docker-daemon:', ''))\n elif image_type == 'docker':\n image = DockerImage(image_tag_string.replace('docker-daemon:', ''))\n failure_origin = formats.image_load_failure.format(\n testimage=image.repotag)\n try:\n image.load_image(load_until_layer)\n except (NameError,\n subprocess.CalledProcessError,\n IOError,\n docker.errors.APIError,\n ValueError,\n EOFError) as error:\n logger.warning('Error in loading image: %s', str(error))\n image.origins.add_notice_to_origins(\n failure_origin, Notice(str(error), 'error'))\n return image", "title": "" }, { "docid": "423dcb12a40c43fa77814d8512417c75", "score": "0.62877816", "text": "def load_img(img_path):\r\n assert os.path.isfile(img_path), f'img file not exist: {img_path}'\r\n img = Image.open(img_path)\r\n return img.convert('RGB')", "title": "" }, { "docid": "7c5f5cf10a9bf4a4a1327709d38d05ac", "score": "0.62840444", "text": "def LoadImage(self, path):\n # load the image\n bmp = wx.Bitmap(path)\n self.SetImage(bmp)\n self.SetScale(self.GetScale())\n\n # hide the button\n if self.btn:\n self.btn.Hide()\n self.btn = None\n\n # set members\n self.path = path\n self.orig = bmp\n 
self.GetParent().SetFocus()", "title": "" }, { "docid": "fb2883457f5b09adf74434051ef1a8c2", "score": "0.62824696", "text": "def checkLoadImage(self):\n try:\n self.imageHyper.loadImage(self.imageFile)\n except Warning:\n self.msgError = \"Please select a correct npy file that contains image data\"\n QMessageBox.warning(self, \"Wrong npy file\", self.msgError)\n return\n\n self.boolImageLoaded = True", "title": "" }, { "docid": "90129f3ad2ad70f4281dc952ceedd376", "score": "0.6280957", "text": "def loadSourceImage( self ):\n imgPath = os.path.join(self.directory, self.imageID + \".tif\")\n img = ImagePlus(imgPath)\n if img is None:\n raise ValueError(\"Couldn't find the image\")\n self.sourceImage = img", "title": "" }, { "docid": "72939799ab3431634d37996cfa58e1f7", "score": "0.62765235", "text": "def imgLoadPil(imgPath):\n # load\n from PIL import Image\n img = Image.open(imgPath)\n return img", "title": "" }, { "docid": "c516df5c7c03ec3e02a0f134d350cbc1", "score": "0.62756073", "text": "def load_image(self, image_id):\n\n info = self.image_info[image_id]\n\n # If not a WAD dataset image, delegate to parent class\n if info[\"source\"] != 'WAD':\n return super(self.__class__, self).load_image(image_id)\n\n # Load image\n path = join(self.root_dir + '_color', info['path'])\n image = skimage.io.imread(path)\n\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n\n return image", "title": "" }, { "docid": "e5dded7c9cfc976e96996cf99a1956bb", "score": "0.6262443", "text": "def load_image(filename, url=None):\n img = None\n\n if url:\n img = Image.open(StringIO(urllib2.urlopen(url).read()))\n\n if filename:\n img = Image.open(filename)\n\n return img", "title": "" }, { "docid": "ccedc097e769459652207cba3203abe3", "score": "0.62621444", "text": "def load_image(self, image_id):\n info = self.image_info[image_id]\n image = np.load(info['path'])\n image = image[:, :, 0] # To make image 2D for padding\n image = np.concatenate([np.zeros((256, 42)), image], axis=1)\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n return image", "title": "" }, { "docid": "c75f54bacbd282c97e68a98b544e9a42", "score": "0.62394977", "text": "def load_image(data_dir, image_path):\n image_path = os.path.join(data_dir, image_path)\n if not os.path.isfile(image_path):\n raise Exception('{} is not a file, check your data_dir argument'.format(data_dir))\n image = cv2.imread(image_path)\n return image", "title": "" }, { "docid": "f4628ceddf5fad659e4c8e7eb34f3900", "score": "0.62305474", "text": "def load_image(path): \n return imread(path, mode=\"L\") / 255", "title": "" }, { "docid": "3334fdb484fd86ef3d2d938328c0404b", "score": "0.62272096", "text": "def __read_image(self, filename):\n\n with urllib.request.urlopen(filename) as url:\n s = url.read()\n\n resp = urllib.request.urlopen(filename)\n img = np.asarray(bytearray(s), dtype=\"uint8\")\n\n self.image = cv2.imdecode(img, cv2.IMREAD_COLOR)\n\n self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)\n\n self.im_copy = self.image.copy()\n\n self.height, self.width = self.image.shape[:2]\n\n self.debug = 0", "title": "" }, { "docid": "c57e8c9f1fab3229dc81799521b2a0a5", "score": "0.6222295", "text": "def image_loader(img):\r\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n image = Image.fromarray(img)\r\n image = loader(image).float()\r\n image = image.unsqueeze(0)\r\n return image", "title": "" }, { "docid": "258f7ecab574749c7811a4ac7e33eb0f", "score": "0.62199837", "text": "def open(self, image_path=None):\n 
try:\n self.image = imread(image_path, mode='RGB')\n self.path = image_path\n except (FileNotFoundError, AttributeError):\n self.image = None\n print(f'image {image_path} not found !!!')", "title": "" }, { "docid": "6d723b3976c3122da0df31e17d98203d", "score": "0.6210197", "text": "def load_image(type):\n st.write(f\"### Upload {type} Image\")\n f = st.file_uploader(f\"Upload {type} Image\")\n img = None\n path = \"\"\n file_name = \"\"\n if f is None:\n return \"\", \"\", \"\"\n if f is not None:\n file_name = f.name\n format_file = file_name.split(\".\")\n if len(format_file) == 1:\n return \"_\", \"_\", \"_\"\n elif format_file[1] not in [\"png\", \"jpg\", \"jpeg\"]:\n return \"_\", \"_\", \"_\"\n tfile = tempfile.NamedTemporaryFile(delete=False)\n tfile.write(f.read())\n path = tfile.name\n img = img_as_float(io.imread(tfile.name))\n st.write(f\"### {type} Image\")\n st.write(f\"Shape of {type} Image: \", img.shape)\n im = st.image(img)\n return img, path, file_name", "title": "" }, { "docid": "39ce9e29bca85025edc3e3544b980a97", "score": "0.62019885", "text": "def load_img(self, images_input_path):\n self.img_name = self.queue.pop()\n img_path = os.path.join(images_input_path, self.img_name)\n\n self.img = cv2.imread(img_path)\n return self.img, self.img_name", "title": "" }, { "docid": "ee68a31b0910c3bcd12d6282eba3b950", "score": "0.61911076", "text": "def load(self, *latgs):\n content = LoadDialog(load=self._load_helper, cancel=self._dismiss_popup)\n self._popup = Popup(title=\"Load image\", content=content, size_hint=(0.9, 0.9))\n self._popup.open()", "title": "" }, { "docid": "9046d5a165360b30d2f0cc65de332101", "score": "0.6184551", "text": "def load_img(path):\n return cv2.imread(path)", "title": "" }, { "docid": "1066fccb62f61f8f3304730788711b73", "score": "0.6183681", "text": "def load_image(image_path):\n image = Image.open(image_path)\n image = np.array(image)\n return image", "title": "" }, { "docid": "bac3b6226b4b8733339848902e13bf27", "score": "0.618037", "text": "def image_loader(image):\n my_image = Image.open(os.path.realpath(image))\n return extract_features(my_image)", "title": "" }, { "docid": "d5a626b875781e6b90d74227fc451091", "score": "0.6177321", "text": "def image_loader(image_name):\r\n image = Image.open(image_name).convert('RGB') #load an image\r\n image = transform(image).float() #apply transformation\r\n image = Variable(image, requires_grad=True) #Convert to tensor\r\n image = image.unsqueeze(0)\r\n return image", "title": "" }, { "docid": "b61cee7f16723260c45b0470b2f7e5f9", "score": "0.61764014", "text": "def load_image(path):\n print(\"Loading an image from :\", path)\n\n #Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array\n transformations = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n img = Image.open(path)\n img_tensor = transformations(img)\n img_tensor.unsqueeze_(0)\n\n print(\"Image loaded...\")\n\n return im_tensor", "title": "" }, { "docid": "7b4f2fba1c4ca4cf7147f32efa019f7d", "score": "0.6133668", "text": "def loadImage(self, imgNo, dataset='train'):\n if dataset == 'train':\n image = self.dh.train[imgNo]\n elif dataset == 'test':\n image = self.dh.test[imgNo]\n else:\n logger.error('[loadImage] Only \\'test\\' or \\'train\\' datasets can be used')\n raise ValueError\n\n if image.shape != self.v.shape:\n logger.error('[loadImage] Size of provided image does not match v layer size!')\n 
raise ValueError\n self.v = image\n self.imgInfo = (dataset, imgNo)", "title": "" }, { "docid": "9285b6a3b10ece80782f6a23ee014afa", "score": "0.61222655", "text": "def load_image(self, image_path):\n config = ocio.GetCurrentConfig()\n\n # Get input color space (file rule)\n cs_name, rule_idx = config.getColorSpaceFromFilepath(image_path)\n if not cs_name:\n # Use previous or config default\n if self._ocio_input_cs:\n cs_name = self._ocio_input_cs\n else:\n cs_name = ocio.ROLE_DEFAULT\n self._ocio_input_cs = cs_name\n\n # Get default view for input color space (viewing rule)\n self._ocio_view = config.getDefaultView(\n self._ocio_display,\n self._ocio_input_cs\n )\n\n buf = oiio.ImageBuf(image_path)\n spec = buf.spec()\n\n # Convert to RGBA, filling missing color channels with 0.0, and a\n # missing alpha with 1.0.\n if spec.nchannels < 4:\n buf = oiio.ImageBufAlgo.channels(\n buf,\n tuple(\n list(range(spec.nchannels))\n + ([0.0] * (4 - spec.nchannels - 1))\n + [1.0]\n ),\n newchannelnames=(\"R\", \"G\", \"B\", \"A\")\n )\n elif spec.nchannels > 4:\n buf = oiio.ImageBufAlgo.channels(\n buf,\n (0, 1, 2, 3),\n newchannelnames=(\"R\", \"G\", \"B\", \"A\")\n )\n\n # Get pixels as 32-bit float NumPy array\n data = buf.get_pixels(oiio.FLOAT)\n\n # Stash image size for pan/zoom calculations\n self._image_pos.x = spec.x\n self._image_pos.y = spec.y\n self._image_size.x = spec.width\n self._image_size.y = spec.height\n\n # Load image data into texture\n self.makeCurrent()\n\n GL.glBindTexture(GL.GL_TEXTURE_2D, self._image_tex)\n GL.glTexImage2D(\n GL.GL_TEXTURE_2D,\n 0,\n GL.GL_RGBA32F,\n spec.width,\n spec.height,\n 0,\n GL.GL_RGBA,\n GL.GL_FLOAT,\n data.ravel()\n )\n\n self._update_model_view_mat(update=False)\n\n self.update_ocio_proc(\n input_cs=self._ocio_input_cs,\n view=self._ocio_view\n )", "title": "" }, { "docid": "9da15bdccbce1cbd3aebfedbf5d35501", "score": "0.6121519", "text": "def load_person_image(self, person):\n media_list = person.get_media_list()\n if media_list:\n media_ref = media_list[0]\n object_handle = media_ref.get_reference_handle()\n obj = self.dbstate.db.get_object_from_handle(object_handle)\n full_path = media_path_full(self.dbstate.db, obj.get_path())\n mime_type = obj.get_mime_type()\n if mime_type and mime_type.startswith(\"image\"):\n self.photo.set_image(full_path, mime_type,\n media_ref.get_rectangle())\n self.photo.set_uistate(self.uistate, object_handle)\n else:\n self.photo.set_image(None)\n self.photo.set_uistate(None, None)\n else:\n self.photo.set_image(None)\n self.photo.set_uistate(None, None)", "title": "" }, { "docid": "d8c5f35230b75651a2017c3199f90a98", "score": "0.61072826", "text": "def load_sample_images():\n ...", "title": "" }, { "docid": "2505bd4fce44e630ef2b26e5b96e1460", "score": "0.61036724", "text": "def load(self):\n img = Image.open(self.path).convert(\"RGBA\")\n img_data = img.tobytes()\n width, height = img.size\n self.texture = gl.glGenTextures(1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.texture)\n gl.glTexParameterf(\n gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)\n gl.glTexParameterf(\n gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, width, height, 0,\n gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, img_data)\n gl.glEnable(gl.GL_TEXTURE_2D)\n self.loaded = True", "title": "" }, { "docid": "57009f4934b881dd9e9de8b1973dc6c9", "score": "0.60727423", "text": "def loadSourceImage(self,srcImg):\n self.srcImg = srcImg # File basename, without full path\n self.imgOrig = 
mpimg.imread(os.path.join(projRoot,srcImg))\n imgDims = np.shape(self.imgOrig)\n self.imgDimX = imgDims[1] # Yeah, this isn't IDL, deal with it\n self.imgDimY = imgDims[0] # Yeah, this still isn't IDL, deal with it\n self.numChan = imgDims[2]\n print(\"Loaded source image: %s\" % self.srcImg)", "title": "" }, { "docid": "b9f94654a4a7fcfc2ab94967eb23edc6", "score": "0.60721624", "text": "def load_image(file_path):\n assert os.path.exists(file_path), 'File does NOT exist! (' + file_path + ')'\n return cv2.imread(file_path)", "title": "" }, { "docid": "f36e7b21b2cd837a9d2848114f31188d", "score": "0.60709524", "text": "def reload_nonauto(self):\n self.get_img()\n self.load_img()", "title": "" }, { "docid": "3da04338bd86a8924d290e315857eaef", "score": "0.6063735", "text": "def loadimage(self, uuid: str):\n image_path = \"{}/images/{}.jpg\".format(self.storage_path, uuid)\n return img_to_array(load_img(image_path), dtype=np.float32)", "title": "" }, { "docid": "8221e538bf4aae2a7c002263c91f3903", "score": "0.6063564", "text": "def load_image(data_dir, image_file):\n path = os.path.join(data_dir,\n image_file.strip().split(\"\\\\\")[-3],\n image_file.strip().split(\"\\\\\")[-2],\n image_file.strip().split(\"\\\\\")[-1])\n # Transform to RGB, so that preprocessing is same for train and realtime (unity sends rgb image)\n return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)", "title": "" }, { "docid": "2543c8ee46c097fe4aa1ab8fd2e6bba6", "score": "0.60453176", "text": "def read_image(image_dir):\n pass", "title": "" }, { "docid": "e7e9db8d648300104114f46eda83b01a", "score": "0.60251856", "text": "def loadImage(self, imageFilePath, flag=cv2.IMREAD_UNCHANGED):\n\n self.original_img = cv2.imread(imageFilePath, flag)\n # check for an error opening the file: this openCV function returns none if the file did not load\n\n if self.original_img is None:\n raise IOError(\"OpenCV failed to load an image using this file path: \" + imageFilePath)", "title": "" }, { "docid": "bbb92a2f69ea502a81ea08c4d05caa44", "score": "0.6018281", "text": "def load_image(path):\n max_dim=512 # To make a perfect view\n img = Image.open(path) # Opening image\n print(img.size)\n long = max(img.size) # Max Dimension of image\n scale = max_dim/long # scale is made so that image can be scaled down in both directions appropriately\n img = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.ANTIALIAS)\n img = img_to_array(img) # For converting image to array\n # We need to broadcast the image array such that it has a batch dimension \n img = np.expand_dims(img, axis=0)\n return img", "title": "" }, { "docid": "445270903caad4ed5f13dac6f42fec34", "score": "0.6017126", "text": "def load_image(filename=None):\n if filename is None:\n raise ValueError(\"filename is None\")\n return cv2.imread(filename, 0)", "title": "" }, { "docid": "3401b5d772aaea62780693286b89f6fb", "score": "0.60169935", "text": "def genericLoader(self, fileName, flag=0):\n\n fif = self.genericFileDeducer(fileName)\n\n #If a type is found, return the loaded image\n if (fif != FIF_UNKNOWN) and self.FIFSupportsReading(fif):\n dib = self.Load(fif, fileName, flag);\n return dib\n #else return none\n return None", "title": "" }, { "docid": "7297fe469a24f524e83284487460365b", "score": "0.5994823", "text": "def load_img(self,image_name):\n path_to_project = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..')\n image = cv2.imread(os.path.join(path_to_project,\"assets\", \"towers\", image_name))\n # image [height, width]\n scale = 
image.shape[0]//self.height\n tower_img = pygame.image.load(os.path.join(path_to_project,\"assets\", \"towers\", image_name))\n # pygame image [width, height]\n tower_img = pygame.transform.scale(tower_img, (image.shape[1]//scale, image.shape[0]//scale))\n return tower_img", "title": "" }, { "docid": "2d9fd8bd2a5507cb0d65c47080f53d43", "score": "0.59943676", "text": "def load_image(image_file: str):\n try:\n image = resize(pygame.image.load(image_file).convert())\n except pygame.error as message: # pylint: disable=no-member\n print('Image at', image_file, 'unreachable')\n raise SystemExit(message)\n\n return image", "title": "" }, { "docid": "05d59aec3ff2502ae8ec3c645183b4b8", "score": "0.5994102", "text": "def loadImage(self):\n print(\"here\")\n self.filePath, self.format = QtWidgets.QFileDialog.getOpenFileName(\n None,\n \"Load Image\",\n \"\",\n \"Images (*.png *.xpm *.jpg);;\",\n options=QtWidgets.QFileDialog.DontUseNativeDialog,\n )\n self.inputCornersImage = cv2.imread(self.filePath)\n self.cornersInput.setImage(self.inputCornersImage.T)", "title": "" }, { "docid": "e355caef48973c54a4ebdd04df500f7c", "score": "0.59938085", "text": "def load_single_image(imdb_title, attempt_download=False):\r\n\r\n import imageio\r\n import os.path\r\n\r\n image_path = 'data/images/{}.jpg'.format(imdb_title)\r\n\r\n if not os.path.isfile(image_path):\r\n if not attempt_download:\r\n return None\r\n\r\n status_code = download_image(imdb_title)\r\n if status_code != 0:\r\n return None\r\n\r\n img_array = imageio.imread(image_path)\r\n return img_array", "title": "" }, { "docid": "c97609315a077fc81f10c50e580518a7", "score": "0.5987319", "text": "def load_image(cls, name):\r\n image = cls._images.get(name)\r\n if image is None:\r\n image = QImage(':dock_images/%s.png' % name)\r\n cls._images[name] = image\r\n return image", "title": "" }, { "docid": "1cdb02277fbb22a38e19b0e25e07a351", "score": "0.59783894", "text": "def _load_raw_image(filename:str):\n\n # WARNING: THIS ONE IS BAD, something strange happens when we load binary images: its non-detemrinsitc, smthimes the loaded image is crap!!\n # img = scipy.misc.imread(filename) \n return skimage.io.imread(filename)", "title": "" }, { "docid": "25c1ad4a00e6770d7f97f85618cb8216", "score": "0.5976029", "text": "def load_image(self, image_id):\n image = skimage.io.imread(self.image_info[image_id]['path'])\n y,x,i = image.shape\n c = random.randint(0,2)\n D = image[:,:,c].reshape((y,x,1))\n image = np.concatenate((image, D), axis=2)\n return image", "title": "" }, { "docid": "5ade7c3c18c0e002d077f73ed62e2ecf", "score": "0.59722924", "text": "def load_gifti(img):\n\n try:\n img = nib.load(img)\n except (ImageFileError, TypeError) as err:\n # it's gzipped, so read the gzip and pipe it in\n if isinstance(err, ImageFileError) and str(err).endswith('.gii.gz\"'):\n with gzip.GzipFile(img) as gz:\n img = nib.GiftiImage.from_bytes(gz.read())\n # it's not a pre-loaded GiftiImage so error out\n elif (isinstance(err, TypeError)\n and not str(err) == 'stat: path should be string, bytes, os.'\n 'PathLike or integer, not GiftiImage'):\n raise err\n\n return img", "title": "" }, { "docid": "96783b9d396e01f502db134c11fddeb6", "score": "0.59709334", "text": "def imread(url):\n return Image.open(url)", "title": "" }, { "docid": "50f1e2c9a592e68f61a6b7f4a7d1561f", "score": "0.5969777", "text": "def load(url):\n response = requests.get(url)\n pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 
0]]\n return image", "title": "" }, { "docid": "d0fbc5a17738217f4e6e3ff69714fd33", "score": "0.59675467", "text": "def image_load(target):\r\n image = cv2.imread(target, 0)\r\n image = image[75:135, :]\r\n image = cv2.resize(image, (64,64))\r\n return image", "title": "" }, { "docid": "57e38cd63e3a7a53582ed3dce329457a", "score": "0.59487796", "text": "def load(self, begin=None, end=None):\n self.img_dict.clear()\n if begin == None:\n start = (\n self.list_idx\n if self.list_idx < self.maxLen\n else self.list_idx - self.maxLen\n )\n load_len = (\n self.maxLen + start\n if self.maxLen + start < self.vidLen\n else self.vidLen\n )\n elif begin != None:\n start = begin\n if self.list_idx >= self.vidLen:\n logging.info(\n \"Skip Load {0} {1} {2}\".format(self.list_idx, self.vidLen, start)\n )\n\n load_len = (\n start + self.maxLen + 1\n if start + self.maxLen + 1 < self.vidLen\n else self.vidLen\n )\n self.list_idx = start\n img = Image.open(self.img_list[start])\n img = self.prepareImg(img)\n self.current = img\n logging.info(\"Loading... {0} {1}\".format(start, load_len))\n for i in range(start, load_len):\n worker = ImageLoader.Worker(\n self.loadImg, self.img_list[self.list_idx], self.list_idx % self.maxLen\n ) # Any other args, kwargs are passed to the run function\n self.threadpool.start(worker)\n self.list_idx += 1", "title": "" }, { "docid": "d8d66dd735fe1e4cd8a8365ba8855a24", "score": "0.59438246", "text": "def get_image(self, image_id):\n res = self.do_request(\"GET\", \"/images/%s\" % image_id)\n data = json.loads(res.read())['image']\n return data", "title": "" }, { "docid": "b61d2a8f126e517af3ef81454bec1968", "score": "0.59420204", "text": "def load_image(file_path, verbose=0):\n check_path(file_path)\n im = Image.open(file_path).convert(mode=\"RGB\")\n if verbose:\n print('image {} loaded'.format(file_path))\n print(im.format, im.size, im.mode)\n return im", "title": "" }, { "docid": "fe46384270ee092b38082d91f1071b0e", "score": "0.59418887", "text": "def load_image(img_path: str) -> Image:\n original_image = PILImage.open(img_path)\n\n exif_data = original_image.getexif()\n if exif_data is not None:\n parsed_data = {}\n for tag, value in exif_data.items():\n if tag in TAGS:\n parsed_data[TAGS.get(tag)] = value\n elif tag in GPSTAGS:\n parsed_data[GPSTAGS.get(tag)] = value\n else:\n parsed_data[tag] = value\n\n exif_data = parsed_data\n\n return Image(np.asarray(original_image), exif_data)", "title": "" }, { "docid": "59cdc1647d34236867f876694b6fa68c", "score": "0.59353197", "text": "def load_image(image_url):\n try:\n image_get_response = urllib.request.urlopen(image_url)\n image_array = np.asarray(bytearray(image_get_response.read()), dtype=np.uint8)\n image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)\n image = cv2.resize(image, (224, 224))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image / 255\n except HTTPError as e:\n log.error('Server couldn\\'t fulfill the request. For url:' + image_url)\n return []\n except URLError as e:\n log.error('We failed to reach the server. 
For url:' + image_url)\n return []\n else:\n return image", "title": "" }, { "docid": "ffbc4c58bebe99b3ab1786e3fa926ee6", "score": "0.5927383", "text": "def load_image(image_path):\n image = None\n if image_path.endswith(\"jpg\") or image_path.endswith(\"jpeg\"):\n image = Image.open(image_path)\n pixels = list(image.getdata())\n width, height = image.size\n pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]\n size = image.size\n image = np.asarray(pixels)\n image = image * (10**19)\n elif image_path.endswith(\"mat\"):\n image = loadmat(image_path, appendmat=False)['dxImage']['img'][0][0]\n image_rescaled = image\n return image_rescaled", "title": "" }, { "docid": "fe66656262021980d0f5945bb73c200d", "score": "0.59122163", "text": "def load_image(self, image):\n\n # Make sure it's an RGB with correct width and height\n\t\timage = image.resize((self.width, self.height), Image.ANTIALIAS)\n\t\timage = image.convert(\"RGB\")\n\n # Extract the pixels\n\t\tpix = image.load()\n\n\t\t# Add each pixel to the buffer\n\t\ti = 0\n\t\tw, h = image.size\n\t\tfor col in xrange(0, w):\n\t\t\tfor row in xrange(0, h):\n\t\t\t\tr,g,b = pix[col, row]\n\t\t\t\tcolor = color565(r, g, b)\n\t\t\t\tself._buffer[i] = color\n\t\t\t\ti += 1", "title": "" }, { "docid": "bb683be3ba4f28a13d7aa4b967975fc5", "score": "0.5903647", "text": "def load_image(name):\n try:\n image = pygame.image.load(name)\n except pygame.error:\n print('Cannot load image:', name)\n return None\n return image.convert(), image.get_rect()", "title": "" }, { "docid": "9f3fe6eace255b17a4c86ccccaf3b079", "score": "0.59035844", "text": "def load(self):\n for image in self:\n image.load()", "title": "" }, { "docid": "422c776dfc8676bd01d5baf5cdc60605", "score": "0.58976287", "text": "def imread(self):\n try:\n return imageio.imread(self.local)\n except OSError:\n print(\n \"You need to download this image first. \"\n \"Use this_image.download(local_directory).\"\n )\n raise", "title": "" }, { "docid": "99eb6eb86a8d91863e8d23333e24dbaa", "score": "0.5889638", "text": "def load_image(name, colorkey=None):\r\n fullname = os.path.join('images', name)\r\n try:\r\n image = pygame.image.load(fullname)\r\n except pygame.error, message:\r\n print 'Cannot load image:', fullname\r\n raise SystemExit, message\r\n image = image.convert()\r\n if colorkey is not None:\r\n if colorkey is -1:\r\n colorkey = image.get_at((0,0))\r\n image.set_colorkey(colorkey)\r\n return image", "title": "" }, { "docid": "7a31bfd5ed891cd6a33db9b638069f58", "score": "0.58886814", "text": "def loadImg(self, img_path, idx):\n img = Image.open(img_path)\n img = self.prepareImg(img)\n logging.info(\"Added: {0}\".format(idx))\n self.img_dict[idx] = (img, idx)", "title": "" }, { "docid": "fbf51920fe46c82f72b6ecc1a400e8e5", "score": "0.5888152", "text": "def read_image(path):\n try:\n image = Image.open(path)\n return image\n except Exception as e:\n print(e)", "title": "" } ]
06fa4b50e745e149999779d1fefdcc29
Get this user's given name.
[ { "docid": "a9bedb947418ec71ca1ce03e332b80e8", "score": "0.7923014", "text": "def get_given_name(self):\r\n return self.__given_name", "title": "" } ]
[ { "docid": "d3e35db1a2327055baa7cf8197681877", "score": "0.85153365", "text": "def get_name(self):\n return self._user.name", "title": "" }, { "docid": "cd962d543d325b6cb86da8222ad995fe", "score": "0.84088993", "text": "def get_user_name(self):\n user = User.by_id(self.user_id)\n return user.name", "title": "" }, { "docid": "cd962d543d325b6cb86da8222ad995fe", "score": "0.84088993", "text": "def get_user_name(self):\n user = User.by_id(self.user_id)\n return user.name", "title": "" }, { "docid": "c38c7a91662edff55079ca9d841f1a65", "score": "0.83776337", "text": "def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "f7bab5c0a5b6f6e241debda84af97c71", "score": "0.83414084", "text": "def get_user_name(self):\n return self._user_name", "title": "" }, { "docid": "fe817f7455c01924b62a4c06ff8cf370", "score": "0.8308692", "text": "def get_user_name(self):\n return self.user.get_full_name()", "title": "" }, { "docid": "75ffdb0315385c149937b055f51ad796", "score": "0.82164556", "text": "def user_name(self) -> Optional[str]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "cd78d8a314967636155e57271b078b67", "score": "0.81838244", "text": "def user_name(self) -> str:\n return self._user_name", "title": "" }, { "docid": "d1a78600d0b4f612c20b3e5cbaed86e7", "score": "0.8176067", "text": "def name(self):\n if self.user is not None:\n return self.user.get_full_name()\n else:\n return self.user_name", "title": "" }, { "docid": "487b7448bc34fa92e472a685658555ed", "score": "0.81500876", "text": "def username(self):\n name = self['user'].get('name', None)\n if name:\n return name\n else:\n return self['user'].get('username', None)", "title": "" }, { "docid": "b7afbe8548d6c6124d2b87297dfa8928", "score": "0.81100714", "text": "def get_full_name(self):\n return self.username", "title": "" }, { "docid": "b7afbe8548d6c6124d2b87297dfa8928", "score": "0.81100714", "text": "def get_full_name(self):\n return self.username", "title": "" }, { "docid": "b7afbe8548d6c6124d2b87297dfa8928", "score": "0.81100714", "text": "def get_full_name(self):\n return self.username", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def 
user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e5120174d6c1af71c91b2dc402a16ab5", "score": "0.8067261", "text": "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "2eed35af81cd943dae63c5ad4b1b6f24", "score": "0.80647093", "text": "def user_name(self):\n return self._user_name", "title": "" }, { "docid": "e195f343b3f09c9f7c3d90450c15f36c", "score": "0.80296254", "text": "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "e195f343b3f09c9f7c3d90450c15f36c", "score": "0.80296254", "text": "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "17c744ece37ee95e4966af26d33877e1", "score": "0.8010694", "text": "def name(self) -> Optional[str]:\n return self._user", "title": "" }, { "docid": "11ef98b342879ff22bc36ee8764234c4", "score": "0.8006749", "text": "def user_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "11ef98b342879ff22bc36ee8764234c4", "score": "0.8006749", "text": "def user_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_name\")", "title": "" }, { "docid": "2209fa258ff098919b22ccc5670c9c73", "score": "0.7958158", "text": "def get_full_name(self):\n return six.text_type(self.get_username())", "title": "" }, { "docid": "8b7e2af122fcd9bdbd43f77500e4a5d2", "score": "0.780032", "text": "def get_username():\n return user", "title": "" }, { "docid": "e3aafb6a92f942cc513ec0d3a01dc742", "score": "0.777432", "text": "def getUserName(self):\n return self.getId()", "title": "" }, { "docid": "ee064f3d24b3ab0154b6c10107fa98b1", "score": "0.7714885", "text": "def get_username(self) -> str:\n try:\n user = self.user.username\n except ObjectDoesNotExist:\n user = \"\"\n return user", "title": "" }, { "docid": "a0a5f5c8881b0276540c99ac64b6318b", "score": "0.7699066", "text": "def full_name(self):\n return self.user.get_full_name()", "title": "" }, { "docid": "3caacaad65f672faf9d1a77a9d60675f", "score": "0.76468724", "text": "def _get_user_name(**kwargs):\n return kwargs['user'].user_name", "title": "" }, { "docid": "c3fa275510b41c0afd0dca7c00638721", "score": "0.7643679", "text": "def getUserName():", "title": "" }, { "docid": "bbf8426ada05e08426d5efbbaab67419", "score": "0.76348865", "text": "def given_name(self):\n return self.getattr('given_name')", "title": "" }, { "docid": "425379f12160c4d8be11e9515768ffea", "score": "0.7619373", "text": "def username(self):\n return self.person.usernames[0] if self.person and len(self.person.usernames) > 0 else None", "title": "" }, { "docid": "92e04997314ffdf726ce2ee97ac1f6a2", "score": "0.7575072", "text": "def getUserName(self):\n return self.context.getUserName()", "title": "" }, { "docid": "9cc1066737f4579f46b8c123d7f734e5", "score": "0.75701725", "text": "def user_name(self):\r\n return self.update.effective_user.full_name", "title": "" }, { "docid": "7bb843d9693ecc4234421ed1920cfe1d", "score": "0.75541955", "text": "def 
get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip() or self.username", "title": "" }, { "docid": "7730103ff724d43f61b294833b2801ee", "score": "0.75505465", "text": "def user_profile_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_profile_name\")", "title": "" }, { "docid": "8e817303567f35fd453c9bf2b94dcecb", "score": "0.7534759", "text": "def full_name(self) -> str:\n if self.name or self.family_name:\n return f\"{self.name or ''} {self.family_name or ''}\".strip()\n return self.username", "title": "" }, { "docid": "91e255011c16a0b6118a9ef0019dc136", "score": "0.7512216", "text": "def get_short_name(self):\n return self.username", "title": "" }, { "docid": "91e255011c16a0b6118a9ef0019dc136", "score": "0.7512216", "text": "def get_short_name(self):\n return self.username", "title": "" }, { "docid": "91e255011c16a0b6118a9ef0019dc136", "score": "0.7512216", "text": "def get_short_name(self):\n return self.username", "title": "" }, { "docid": "91e255011c16a0b6118a9ef0019dc136", "score": "0.7512216", "text": "def get_short_name(self):\n return self.username", "title": "" }, { "docid": "91e255011c16a0b6118a9ef0019dc136", "score": "0.7512216", "text": "def get_short_name(self):\n return self.username", "title": "" }, { "docid": "91e255011c16a0b6118a9ef0019dc136", "score": "0.7512216", "text": "def get_short_name(self):\n return self.username", "title": "" }, { "docid": "91e255011c16a0b6118a9ef0019dc136", "score": "0.7512216", "text": "def get_short_name(self):\n return self.username", "title": "" }, { "docid": "0060f4227c0e0906f8da6878e69e5274", "score": "0.7488508", "text": "def get_user_name(self, user_id: str) -> str:\n return self.SqlData.get_user_name(user_id)", "title": "" }, { "docid": "7516dd39e058547ce68b2a17044d0820", "score": "0.7442402", "text": "def user_profile_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_profile_name\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "b9f3d48e59dd013ff027376c19cf5463", "score": "0.7441075", "text": "def username(self) -> str:\n return 
pulumi.get(self, \"username\")", "title": "" }, { "docid": "6bf051bacff7fd330da9e23971b9e9bd", "score": "0.7416411", "text": "def getUserName(self):\n return None", "title": "" }, { "docid": "358cec07721aedec626d26dbed2f5ef0", "score": "0.7408613", "text": "def get_user_name(self, userid):\n name_info = 'firstname, lastname, username'\n name = self.sql('select {} from ssismdl_user where id = {}'.format(name_info, userid))()[0]\n firstname, lastname, username = name\n email = '<a href=\"mailto:{username}@ssis-suzhou.net\">email</a>'.format(**dict(username=username))\n name = \"{} {}\".format(firstname, lastname, email)\n self.verbose and print(\"Name: {}\".format(name))\n return name", "title": "" }, { "docid": "26e3e3c3c5930dfca3927a8f61de932b", "score": "0.74084187", "text": "def get_user_name(user):\n return user['UserName']", "title": "" }, { "docid": "3a2847773cee2d5bde79266cddb6eac3", "score": "0.7404665", "text": "def display_name(self):\n user_name = self.username if self.username else \"Unknown user\"\n user_full_name = (\n (\n (self.first_name + \" \" if self.first_name else \"\")\n + (self.last_name if self.last_name else \"\")\n )\n if (self.first_name or self.last_name)\n else user_name\n )\n return user_full_name.strip()", "title": "" }, { "docid": "7f763fff6792411d207e88ff82c2a8b6", "score": "0.7379036", "text": "def full_name(self) -> str:\n if self.name or self.family_name:\n return f\"{self.name or ''} {self.family_name or ''}\".strip()\n return self.username", "title": "" }, { "docid": "69087b9ac9c8487d332b74092bd688e8", "score": "0.7371651", "text": "def user_name(self, obj):\n return \"{} <{}>\".format(obj.user.get_full_name(), obj.user.email)", "title": "" }, { "docid": "45cf3421ab1d4e80ad1c24358fe7164e", "score": "0.7344011", "text": "def getusername(self, id):\n user = self.getuser(id)\n return (\"%s %s (%s)\" % (\n user[1],\n user[2],\n user[0]))", "title": "" }, { "docid": "2546486f561693145982afcb28541217", "score": "0.7343832", "text": "def get_name(self):\r\n return self.profile().get('name','none')", "title": "" }, { "docid": "da23b9d09c536e1c114314e540202478", "score": "0.7303339", "text": "def user_profile_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_profile_name\")", "title": "" }, { "docid": "92d8eb5409e1443d39a743937025d14f", "score": "0.72995764", "text": "def get_name_from_user(user_name):\n cur.execute(\"\"\"SELECT nombre, apellido_1, apellido_2\n FROM personas\n WHERE usuario = %s\"\"\", (user_name,))\n name = cur.fetchone()\n return name", "title": "" }, { "docid": "791fd5981ffa92f0157b683762792762", "score": "0.72916895", "text": "def _get_username(self):\n return self.generator.get_username()", "title": "" }, { "docid": "56ad4eaa5e2a324172fcd7bbc4d6dbce", "score": "0.7282299", "text": "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "56ad4eaa5e2a324172fcd7bbc4d6dbce", "score": "0.7282299", "text": "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return 
pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a0575d47090a001929f97914106bb611", "score": "0.72822803", "text": "def username(self) -> Optional[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "6c9a5e356b2681216a5795b4b2a7659e", "score": "0.72795564", "text": "def get_full_name_or_username(self):\n user = self.user\n if user.first_name or user.last_name:\n # We will return this as translated string. Maybe there are some\n # countries that first display the last name.\n name = _(\"%(first_name)s %(last_name)s\") % \\\n {'first_name': user.first_name,\n 'last_name': user.last_name}\n else:\n # Fallback to the username if usernames are used\n if not userena_settings.USERENA_WITHOUT_USERNAMES:\n name = \"%(username)s\" % {'username': user.username}\n else:\n name = \"%(email)s\" % {'email': user.email}\n return name.strip()", "title": "" }, { "docid": "17dfb8a4b7531df0223bcbe642857878", "score": "0.7236698", "text": "def get_username(self) -> str:\n return self._oidc.user_getfield('preferred_username')", "title": "" }, { "docid": "5c6e0ccc4ededb62e83d1eaba9bd7fbe", "score": "0.7226644", "text": "def get_name(self, id):\n vals = self.c.execute(\"SELECT name FROM users WHERE id = '%s'\" % id)\n names = [x for x in vals]\n if len(names) == 1:\n return str(names[0][0])\n return None", "title": "" }, { "docid": "1767f2b141950170cce6808003b36ad5", "score": "0.7222824", "text": "def personaname(self):\n return self._personaname", "title": "" }, { "docid": "7682487e28b513b989054b2b83199b10", "score": "0.7221455", "text": "def username(self) -> str:\n return self._username", "title": "" }, { "docid": "0ce38b2426152ddcaa5191345460f073", "score": "0.7219045", "text": "def getUsername():\n return username", "title": "" }, { "docid": "ecde48856f59d3ef8b25a8d47c631fbf", "score": "0.72167003", "text": "def get_user_display(user) -> str:\n full_name = user.get_full_name()\n return full_name if len(full_name.strip()) else user.username", "title": "" }, { "docid": "879819bc5bafdd12ea338949b0a03a42", "score": "0.7216359", "text": "def get_username(self):\n raise NotImplementedError", "title": "" }, { "docid": "8a2de4d87c37eeae572b3865c415b327", "score": "0.72132367", "text": "def get_full_name(self):\n\t\treturn self.user.first_name + \" \" + self.user.last_name", "title": "" }, { "docid": "1e6c43cf4bf93b2cac85c3d93abb7fda", "score": "0.72052646", "text": "def get_username(self):\n username = self.get_value('username')\n if username:\n return username\n else:\n return getpass.getuser()\n \n if self.cf.has_option(self.heading, 'username'):\n return self.cf.get(self.heading, 'username')", "title": "" }, { "docid": "bdfaaf54d71d0d8eac9e9b78040b62bc", "score": "0.71873975", "text": "def _getUserName(self):\n return getpass.getuser()", "title": "" }, { "docid": "a85da6a45e001af42805dbf54255224a", 
"score": "0.71776205", "text": "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a85da6a45e001af42805dbf54255224a", "score": "0.71776205", "text": "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "a85da6a45e001af42805dbf54255224a", "score": "0.71776205", "text": "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "title": "" }, { "docid": "5e0e7bc71f8ba11560030703db15123b", "score": "0.71574616", "text": "async def get_full_name(self) -> Optional[str]:\n return pwd.getpwnam(self.user).pw_gecos.split(\",\")[0]", "title": "" }, { "docid": "52f98cd6daf6d43607be397056304b2e", "score": "0.7153635", "text": "def name(self):\n return self.given_name + ' ' + self.surname", "title": "" } ]
7b22a96ecec5b39ecad79489fa76e28b
Given a dictionary and a set of valid keys, return a dictionary which only contains keys found in `valid`.
[ { "docid": "08a3ed9cbce96c06a4e9829a12d1274d", "score": "0.65921986", "text": "def _strip_keys(obj, valid):\n return {key: obj[key] for key in obj if key in valid}", "title": "" } ]
[ { "docid": "e7640a96ac8c550bd84f13f830e1a7f1", "score": "0.6878941", "text": "def subset_dict(dictionary, keys):\n\n result = dictionary.copy()\n for original_key in dictionary:\n if original_key not in keys:\n del result[original_key]\n\n return result", "title": "" }, { "docid": "2f25e2e7be737b9ccce4a27f3aed3633", "score": "0.6434246", "text": "def check_keys(dict_, keys):\n\n for k in keys:\n if k not in dict_:\n return False\n\n return True", "title": "" }, { "docid": "0e649c27571867ce1d5f10139409b167", "score": "0.6394572", "text": "def dict_contains_only(dct, allowed, allow_mpp=True):\n\n for key in dct.keys():\n if allow_mpp and key.startswith(\"mpp-\"):\n continue\n if key in allowed:\n continue\n return False\n return True", "title": "" }, { "docid": "04c826035c6321df2b2e7c312f1485b9", "score": "0.634918", "text": "def filter_dict(data, *keys):\n d = {}\n for k in keys:\n val = data.get(k)\n if val:\n d[k] = val\n return d", "title": "" }, { "docid": "1426726677cda6202e28cf687a776626", "score": "0.63116944", "text": "def dict_has_keys(d: Mapping, *keys):\n dk = d.keys()\n for k in keys:\n if k not in dk:\n logging.info(f'key {k} is not included in the dict')\n return False\n return True", "title": "" }, { "docid": "2157fce2288d15155a0c8ecc6ef1f189", "score": "0.620142", "text": "def _validate_dict_keys(dict_to_check, required_keys, optional_keys):\n assert set(required_keys) <= set(dict_to_check.keys()), (\n 'Missing keys: %s' % dict_to_check)\n assert set(dict_to_check.keys()) <= set(required_keys + optional_keys), (\n 'Extra keys: %s' % dict_to_check)", "title": "" }, { "docid": "f48beab27f1c77267bee696bbb538858", "score": "0.61690384", "text": "def prune_dict(self, d, keys):\n return {k: v for k, v in d.items() if k not in keys}", "title": "" }, { "docid": "1732dad975dbd5d6f76bf511199ef0e1", "score": "0.6159179", "text": "def sub_dict(d, keep):\n return {\n key: value\n for key, value in d.items()\n if key in keep\n }", "title": "" }, { "docid": "a0b375d6ef4be10e3386dfd7cdcc3ae2", "score": "0.61275613", "text": "def filter_kwargs(kwargs, list_of_keys, out=False):\n new_kwargs = {}\n for key in kwargs:\n if (key not in list_of_keys and out) or \\\n (key in list_of_keys and not out):\n new_kwargs[key] = kwargs[key]\n return new_kwargs", "title": "" }, { "docid": "d34977374fd9ed515cb8cf80ee88dbb1", "score": "0.6120651", "text": "def _filter_dict(dict_to_filter, subset):\n if dict_to_filter is None or not subset:\n return dict_to_filter\n\n return {\n k: d for k, d in dict_to_filter.iteritems()\n if _dict_has_subset(d, subset)\n }", "title": "" }, { "docid": "4d532239a0f944d508ca4c2b631bdd24", "score": "0.6110579", "text": "def _filter_dict(old_dict, cb):\n return {k: v for k, v in old_dict.items() if cb(k)}", "title": "" }, { "docid": "213d24a0a00b9c504b54deb028ee0d25", "score": "0.609966", "text": "def subdict(d, expected_dict):\n return {k: v for k, v in d.items() if k in expected_dict}", "title": "" }, { "docid": "3e3eedb235d1c0210d083fb559027ff3", "score": "0.6074622", "text": "def reduced_params(params, unwanted_keys):\n if not isinstance(unwanted_keys, set):\n unwanted_keys = set(unwanted_keys)\n\n return {k: params[k] for k in params.keys() - unwanted_keys}", "title": "" }, { "docid": "e8a7152d190ae52b29dbab694d07a7f7", "score": "0.6065296", "text": "def filterkeys(predicate, dict_):\n predicate = bool if predicate is None else ensure_callable(predicate)\n ensure_mapping(dict_)\n return dict_.__class__((k, v) for k, v in iteritems(dict_) if predicate(k))", "title": "" }, 
{ "docid": "a79e5473fad817dbae7055be04ff4b95", "score": "0.60485023", "text": "def dict_without_keys(d, *omitkeys):\n return {key: d[key] for key in d.keys() if key not in omitkeys}", "title": "" }, { "docid": "c342b009c2758d7defe243b23cceacec", "score": "0.60253656", "text": "def dictconfig_filter_keys(d: DictConfig, fn: Optional[Callable] = None) -> DictConfig:\n if fn is None:\n fn = lambda _: True\n return DictConfig(\n {\n k: dictconfig_filter_keys(v, fn) if isinstance(v, DictConfig) else v\n for k, v in d.items()\n if fn(k)\n }\n )", "title": "" }, { "docid": "d96681beb8eba4d9fcc7e7396482cb13", "score": "0.59987354", "text": "def select_keys(d, keys):\n return {key: d.get(key) for key in keys}", "title": "" }, { "docid": "f6e9e7636ac33a2a65b519bfed88933e", "score": "0.5987169", "text": "def select_keys(mapping, keys, **kwargs):\n # type: (Mapping[str, Any], Iterable[str], **Any) -> Dict[str, Any]\n result = {key: mapping[key] for key in keys if key in mapping}\n result.update(**kwargs)\n return result", "title": "" }, { "docid": "7c7a36acd96421dbc8c9e1cda8cce62d", "score": "0.5970194", "text": "def pick(keys, dict):\n picked_dict = {}\n for k in dict.keys():\n if k in keys:\n picked_dict[k] = dict[k]\n return picked_dict", "title": "" }, { "docid": "f268ae385399533ab489bed5047bf48f", "score": "0.59593", "text": "def pick(base: Dict[str, Any], *keys: str) -> Dict[str, Any]:\n\n return {key: base[key] for key in keys if key in base and base[key] is not None}", "title": "" }, { "docid": "ff82ea51153f55f0b2808313c9103e34", "score": "0.5865604", "text": "def subdict(d, keys):\r\n return dict((k, d[k]) for k in keys if k in d)", "title": "" }, { "docid": "aa64b369fe97d509343743ff3a9d88a3", "score": "0.58567107", "text": "def extract_keys(d: dict[str, Any], keys: Iterable[str]) -> dict[str, Any]:\n\n return {k: d[k] for k in keys}", "title": "" }, { "docid": "1bf6ed268c4faa02881ebfb07950b046", "score": "0.5846738", "text": "def remove_dict_keys(d, keys):\n return {k: deepcopy(v) for k, v in d.items() if k not in keys}", "title": "" }, { "docid": "43bb2f5e9da9e96b00b106d8d397b6bc", "score": "0.58441204", "text": "def find_keys(form, keys):\n assert isinstance(form, dict) and isinstance(keys, list)\n data = form.copy()\n # add keys if they are missing\n for key in keys:\n if key not in data:\n data[key] = None\n # ensure there are not any extra keys\n if len(keys) != len(form.keys()):\n raise KeyError('invalid fields')", "title": "" }, { "docid": "50e9fbf1d3b0b9c9cbdf3424f9508c55", "score": "0.5831708", "text": "def find_all_by_key(d, key, ignore_keys=None):\n if ignore_keys is None:\n ignore_keys = default_ignore_keys\n\n acc = []\n for g in dict_generator(d):\n # Ignore paths 'g' in 'd' that contain ignored keys\n if len([e for e in g if e in ignore_keys]) == 0 and key in g:\n acc.append(g)\n return acc", "title": "" }, { "docid": "73b81e49ca9337532d6ae39978953472", "score": "0.581862", "text": "def get_valid_request_keys(keys, type_map):\n return set(key for key in keys if key in type_map)", "title": "" }, { "docid": "73b1754f706b3208af57aee1e96a7455", "score": "0.57385033", "text": "def clean_empty_keyvalues_from_dict(d):\n if not isinstance(d, (dict, list)):\n return d\n if isinstance(d, list):\n return [v for v in (clean_empty_keyvalues_from_dict(v) for v in d) if v]\n return {k: v for k, v in ((k, clean_empty_keyvalues_from_dict(v)) for k, v in d.items()) if v}", "title": "" }, { "docid": "4c89053a6d3cbc3b1829c27f0c32629c", "score": "0.57359403", "text": "def 
check_valid_keys(in_dict):\n\n valid_keys = set([\n 'Vertical_Coordinate',\n 'Duration',\n 'Source',\n 'LeadTime',\n 'property',\n 'Procedure'\n ])\n\n for k in list(in_dict.keys()):\n try: \n assert k in valid_keys\n except AssertionError:\n logging.error( str(k) + \" is not a valid key\")\n logging.error(\"valid keys are:\\n\" + \",\\n\".join(valid_keys))\n raise", "title": "" }, { "docid": "d328b797ac3ab2f79d27869813df3876", "score": "0.5717519", "text": "def key_intersection(*dicts: dict) -> dict:\n return functools.reduce(lambda a, b: a & b.keys(), dicts)", "title": "" }, { "docid": "074145eb6044d7c1b71e2ce4060e39e7", "score": "0.56937224", "text": "def _cleanse_dict(original):\n return dict((k, v) for k, v in six.iteritems(original) if \"_pass\" not in k)", "title": "" }, { "docid": "6ac550213b0c34260682eb7ccc031f7c", "score": "0.56740266", "text": "def starfilteritems(predicate, dict_):\n ensure_mapping(dict_)\n\n if predicate is None:\n predicate = lambda k, v: all((k, v))\n else:\n ensure_callable(predicate)\n\n return dict_.__class__((k, v) for k, v in iteritems(dict_)\n if predicate(k, v))", "title": "" }, { "docid": "b12cf0c1b2747e055924f8f32cace65f", "score": "0.5632604", "text": "def subdict_by_filter(dct, func, remove_original=False):\n keylist = list(filter(func, dct))\n subdict = dict((k, dct[k]) for k in keylist)\n if remove_original:\n for k in keylist:\n del dct[k]\n return subdict", "title": "" }, { "docid": "2ed2d9c70d56fc7c7db23728303e961c", "score": "0.56311554", "text": "def items_with_keys(d, keys):\n for key, value in d.items():\n if key in keys:\n yield key, value", "title": "" }, { "docid": "45493e8b93e530c038b6bd96eced3701", "score": "0.5623356", "text": "def required(mapping, keys):\n assert(isinstance(mapping,collections.Mapping)), \"Not a mapping.\"\n for key in keys:\n if key not in mapping:\n raise KeyError(key)", "title": "" }, { "docid": "de6bc6de2bbbe76ec101c5e2b60b77f8", "score": "0.55767024", "text": "def remove_from_key_rd_dicts( key_rd_dict, key_rd_dicts ):\n k = key_rd_dict.keys()[ 0 ]\n v = key_rd_dict[ k ]\n clean_key_rd_dicts = []\n for krd_dict in key_rd_dicts:\n key = krd_dict.keys()[ 0 ]\n val = krd_dict[ key ]\n if key == k and val == v:\n continue\n clean_key_rd_dicts.append( krd_dict )\n return clean_key_rd_dicts", "title": "" }, { "docid": "0869e192529d967740251290936cce04", "score": "0.5546951", "text": "def verify(self, dictionary):\n if isinstance(dictionary, dict):\n if len(dictionary) == len(self.get_accepted_keys()):\n for k, v in dictionary.items():\n if k in self.get_accepted_keys():\n pass\n else:\n return False\n return True\n return False", "title": "" }, { "docid": "f874b92cb8c20d46e2d7ca5d813854cd", "score": "0.5545825", "text": "def all_keys_known(dictionary, known_keys, logger=None):\n\n unknown_keys = [k for k in dictionary if k not in known_keys]\n\n if unknown_keys:\n if logger:\n logger.error('The following keys are unknown: {}.\\n'.\n format(','.join([\"'{}'\".format(x) for x in unknown_keys])))\n else:\n print('ERROR: The following keys are unknown: {}.\\n'.\n format(','.join([\"'{}'\".format(x) for x in unknown_keys])))\n return False\n else:\n return True", "title": "" }, { "docid": "a14d5a4dd8c93321feea705847663f3f", "score": "0.5537621", "text": "def prune_dict(d, predicate):\n\n keys = [k for k, v in d.items() if predicate(k, v)]\n for k in keys:\n del d[k]", "title": "" }, { "docid": "b8b36c84915644b8961b4011bf12cf08", "score": "0.55073327", "text": "def assert_keys_exist(keys: List[Hashable], dictionary: Dict) 
-> None:\n\n # iterate over every key and ensure that it is contained in the dictionary\n for key in keys:\n assert key in dictionary, f\"Missing key {key} in {dictionary}.\"", "title": "" }, { "docid": "c0fedde16a2faa2c283379804a34f456", "score": "0.5491811", "text": "def dictionary_compress(\n dictionary_to_compress: dict[str, Any],\n keys_to_keep: set,\n) -> dict[str, Any]:\n return {dict_key: dictionary_to_compress[dict_key] for dict_key in keys_to_keep}", "title": "" }, { "docid": "86e639be526f92b4fcfdeeea3164a0af", "score": "0.54584986", "text": "def filteritems(predicate, dict_):\n predicate = all if predicate is None else ensure_callable(predicate)\n ensure_mapping(dict_)\n return dict_.__class__(ifilter(predicate, iteritems(dict_)))", "title": "" }, { "docid": "cf3876a6c5e316db3acc3f322d307d22", "score": "0.5456094", "text": "def dic_remove_entries(in_dic, filter_dic):\n # Checks.\n assert in_dic, \"given dictionary in_dic empty\"\n assert filter_dic, \"given dictionary filter_dic empty\"\n # Filter.\n for filter_id in filter_dic:\n if filter_id in in_dic:\n del in_dic[filter_id]\n return in_dic", "title": "" }, { "docid": "3f71eb0df9839861f28868172fccf88c", "score": "0.54447484", "text": "def test_in_dict_false(self):\n my_dict = {'A': 3, 'B': 44, 'C': 8}\n self.assertFalse(in_dict(my_dict, 'D'))", "title": "" }, { "docid": "68deb2ca92c36a3739bfb0fa04499042", "score": "0.544466", "text": "def asselect(item:Dict, keys:List, *, excludes:List=None) -> Dict:\n keys = keys or askeys(item)\n keys = [\n key for key in keys\n if key not in (excludes or [])\n ]\n return {\n key:item[key] for key in keys if key in item\n }", "title": "" }, { "docid": "b1919774647b1bbef1b3e3bdd2ba5f71", "score": "0.5439396", "text": "def contains(hidict, keys):\n assert 'Mary' in hidict\n assert ('Mary', 'likes', 'lot') in hidict\n assert ('Mary', 'likes') in hidict\n assert ('Marvin', 'likes') not in hidict\n assert ('Mary', 'likes', 'lot', 'Bob') in hidict", "title": "" }, { "docid": "8eb2537d16f184a17fc857ab189e7c2c", "score": "0.5386315", "text": "def ensure_dict_key(\n in_dict: Dict, keys: str, delimiter: str = DEFAULT_TARGET_DELIM\n) -> Dict:\n if delimiter in keys:\n a_keys = keys.split(delimiter)\n else:\n a_keys = [keys]\n dict_pointer = in_dict\n while a_keys:\n current_key = a_keys.pop(0)\n if current_key not in dict_pointer or not isinstance(\n dict_pointer[current_key], dict\n ):\n dict_pointer[current_key] = {}\n dict_pointer = dict_pointer[current_key]\n return in_dict", "title": "" }, { "docid": "6831771ee081a117fddea51781e3edc5", "score": "0.5372264", "text": "def in_key_rd_dicts( key_rd_dict, key_rd_dicts ):\n k = key_rd_dict.keys()[ 0 ]\n v = key_rd_dict[ k ]\n for key_rd_dict in key_rd_dicts:\n for key, val in key_rd_dict.items():\n if key == k and val == v:\n return True\n return False", "title": "" }, { "docid": "7be4cf0471ddbe45b473f4cf8d71b81d", "score": "0.5337741", "text": "def subset(self, keys):\r\n sub = self.__class__()\r\n\r\n self_keys = set(self.store.keys())\r\n subset_keys = self_keys.intersection(keys)\r\n removed_keys = self_keys.difference(subset_keys)\r\n\r\n sub.store = {k: self.store[k] for k in subset_keys}\r\n for loc, sorted_items in six.iteritems(self.sorted):\r\n loc_keys = copy(self.sorted[loc][0])\r\n loc_values = copy(self.sorted[loc][1])\r\n for k in removed_keys:\r\n i = loc_keys.index(k)\r\n del loc_keys[i]\r\n del loc_values[i]\r\n sub.sorted[loc] = (loc_keys, loc_values)\r\n return sub", "title": "" }, { "docid": "b08e17b9a12c048b775e4da2793e5177", 
"score": "0.53296703", "text": "def filter_options(options, desired_key_set):\n return filter_dict_none_values(filter_dict_by_key_set(options, desired_key_set))", "title": "" }, { "docid": "c7bc5848b14770deb327f218ed0e6654", "score": "0.53145516", "text": "def validateMapKeys(dataDict):\n neededKeys = set(['DN', 'ID', 'LOGIN', 'NAME', 'ROLES'])\n for entry in dataDict:\n if not all(elem in list(entry.keys()) for elem in neededKeys):\n print(\"Needed keys not found in response: %s\" % (neededKeys - set(entry.keys())), file=sys.stderr)\n print(\"It needs to be reported to the CRIC developers!\", file=sys.stderr)\n exit(3)\n for key in set(entry.keys()) - neededKeys:\n entry.pop(key)", "title": "" }, { "docid": "cf906482d9714d895053739c1c9e06d8", "score": "0.5309691", "text": "def _dict_has_subset(dict_to_check, subset):\n if not subset: return True\n\n for k, v in subset.iteritems():\n if dict_to_check.get(k) is None:\n return False\n elif (dict_to_check[k] != v and str(dict_to_check[k]) != str(v)):\n return False\n return True", "title": "" }, { "docid": "b14f87772a0a671310f5e9c876b7b9a6", "score": "0.530853", "text": "def dict_by(keys: Set[str], f: Callable[[str], Any]) -> Dict[str, Any]:\n return {k: f(k) for k in keys}", "title": "" }, { "docid": "cf954a6442363c9df7930e89ab61e9f6", "score": "0.5306642", "text": "def required_keys(obj, keys, sentinel=object()):\n missing_keys = list(filter(lambda k: deepgetattr(obj, k, sentinel) == sentinel, keys))\n\n if len(missing_keys):\n raise ValueError(f\"Required parameters: {', '.join(missing_keys)}\")", "title": "" }, { "docid": "1a97847e127751da4f4870b4d6e4f25d", "score": "0.52912146", "text": "def _prune_dict(dict_, on_keys, for_values, ignore_paths, path):\r\n pruned = {}\r\n for k, v in dict_.items():\r\n if k in on_keys and v in for_values:\r\n # current `obj` matches pruning criteria: do *not* return it\r\n return None\r\n elif v:\r\n # recursively prune current key's value\r\n path.append(k)\r\n pruned_v = prune_obj(\r\n obj=v,\r\n on_keys=on_keys,\r\n for_values=for_values,\r\n ignore_paths=ignore_paths,\r\n path=path,\r\n )\r\n if pruned_v:\r\n pruned[k] = pruned_v\r\n path.pop()\r\n else:\r\n # catch and return empty value; it may be meaningful\r\n pruned[k] = v\r\n return pruned", "title": "" }, { "docid": "4b809f58bb3b2a5293c42b9cc7dceeb0", "score": "0.5271548", "text": "def _has_keys(self, keys, d):\n for key in keys:\n if not key in d:\n # self.logger.debug(\"Key missing %s: %s\" % (key, d))\n self.logger.error(\"ERROR: Key is missing %s\" % key)\n return False\n for key in d.keys():\n if not key in keys:\n self.logger.error(\"ERROR: Dictionary has an extra key %s\" % key)\n return False\n\n return True", "title": "" }, { "docid": "55ca1310041ff5f53723f40f7e2f486b", "score": "0.527063", "text": "def _filter_params(cls, params):\n return dict((k, v) for (k, v) in params.iteritems() if k in cls.fields)", "title": "" }, { "docid": "340fb9e33920a625570cfd0e2dcf5b7c", "score": "0.52670044", "text": "def _filter_hidden(dictionary):\n # type: (Attrs) -> Attrs\n return {k: v for k, v in dictionary.viewitems() if not k.startswith('_')}", "title": "" }, { "docid": "b30ec460dfe07c420997ebb40ec343e6", "score": "0.5258602", "text": "def get_missing_keys(data, required_keys):\n return [ key for key in required_keys if not key in data ]", "title": "" }, { "docid": "abaccd5e85651db13509bfa7c678edfe", "score": "0.525216", "text": "def assert_keys(form, keys):\n assert isinstance(form, dict) and isinstance(keys, list)\n # ensure all keys are valid\n 
for key in keys:\n if key not in form:\n raise KeyError('invalid fields')\n # error if number of keys is off\n if len(keys) != len(form.keys()):\n raise KeyError('missing required fields')", "title": "" }, { "docid": "c97ae36364db9e6852feed064bd6a099", "score": "0.52318764", "text": "def filter_dictionaries(*args, **kwargs):\n filtered = []\n\n for item in args[0]:\n for key in kwargs.keys():\n if key in item:\n if item[key] == kwargs[key]:\n filtered.append(item)\n\n return filtered", "title": "" }, { "docid": "d0de68f5fab679484834bfef3be06375", "score": "0.52050763", "text": "def validate_keys(data, required_keys, option_keys=None):\n key_set = set(data.keys())\n required_set = set(required_keys)\n is_ok = (required_set - key_set) == set()\n if is_ok and option_keys:\n option_set = set(option_keys)\n is_ok = (key_set - required_set - option_set) == set()\n return is_ok", "title": "" }, { "docid": "8e8d7dca6efeb8ce993d56478d3ac638", "score": "0.52008206", "text": "def filter_df_dict_col(df, col, keys):\n \n sdf = extract_dict(df, col)\n f = partial(filter_dict, keys = keys)\n return list(map(f, sdf))", "title": "" }, { "docid": "734a6061adb381fa769fe7a63c499513", "score": "0.5193562", "text": "def empty(key, dict):\n if key in dict.keys():\n if dict[key]:\n return False\n return True", "title": "" }, { "docid": "bd9fa2ef496c2571d3864cd683e11e86", "score": "0.5190214", "text": "def dict_update_verify(dict_in, dict_add):\n for key, value_add in dict_add.items():\n if key not in dict_in:\n dict_in[key] = value_add\n else:\n if isinstance(value_add, np.ndarray):\n if np.any(dict_in[key] != value_add):\n msg = \"dict value mismatch for key = %s\" % key\n raise RuntimeError(msg)\n elif dict_in[key] != value_add:\n msg = \"dict value mismatch for key = %s\" % key\n raise RuntimeError(msg)\n return dict_in", "title": "" }, { "docid": "76babaddbb564f86f440dfb8c218ec12", "score": "0.51874703", "text": "def get_multi(self, keys):\n dictionary = self.dictionary\n pairs = ((key, dictionary[key]) for key in keys if key in dictionary)\n now = datetime.datetime.now\n return dict((key, value) for key, (value, exp) in pairs\n if not exp or exp > now())", "title": "" }, { "docid": "a155ed61c099edac4da75344b4bac7b4", "score": "0.5182667", "text": "def _copy_dic(orig_dic, excluded_keys=None):\n if excluded_keys is None:\n excluded_keys = []\n dic = {}\n for k, v in orig_dic.items():\n if k not in excluded_keys:\n dic[k] = copy(v)\n return dic", "title": "" }, { "docid": "2d1c0df3e74a05050e9255d902f81a00", "score": "0.5174145", "text": "def required(mandatory):\n def decorator(f):\n @wraps(f)\n def wrapper(*dicts):\n for key in mandatory:\n if key not in dicts[1]:\n raise ValueError('Key \"%s\" is missing from argument' % (\n key))\n return f(*dicts)\n return wrapper\n return decorator", "title": "" }, { "docid": "a64a8e8debe47821ae3975110eb89469", "score": "0.5167925", "text": "def injective(d):\n v = d.values()\n v_set = set(v)\n return len(v) == len(v_set)", "title": "" }, { "docid": "043b9a174791d1a270f53f78848ab951", "score": "0.51627827", "text": "def _remove_matching_keys(d: Dict[str, Any], regex: str):\n compiled_regex = re.compile(regex)\n keys_to_delete = [k for k in d.keys() if compiled_regex.match(k)]\n for k in keys_to_delete:\n del d[k]", "title": "" }, { "docid": "33e4c6e62c86c7c0505f2450046a2c83", "score": "0.5162098", "text": "def copy_dict(d, *keys):\n return {key: d[key] for key in keys}", "title": "" }, { "docid": "b31f393c7eb3cd64b7b14e9cd4e694f3", "score": "0.51574826", "text": "def 
hide_keys(dictionary, keys_to_hide, new_value='xxx'):\n _new_dict = {}\n for key, value in dictionary.items():\n _new_dict[key] = new_value if key in keys_to_hide else value\n return _new_dict", "title": "" }, { "docid": "c1959b3f36762c168cb2f1db4716e645", "score": "0.51493233", "text": "def _check_dictionary(evaluation_dict):\n\n missing_keys = list(set(REQUIRED_KEYS) - set(evaluation_dict.keys()))\n if len(missing_keys) == 0:\n return\n\n error_string = (\n '\\n{0:s}\\nKeys listed above were expected, but not found, in '\n 'dictionary.'\n ).format(str(missing_keys))\n\n raise ValueError(error_string)", "title": "" }, { "docid": "a67dc6b9f3cd06d6687753529f74c6de", "score": "0.5140313", "text": "def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError(\"`d` and `opt_keys` are both None.\")\n return opt_keys.copy()\n else:\n raise ValueError(\"`d` is None, but `required_keys` is not empty.\")\n\n d = d.copy()\n out = {}\n # Set required keys\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n # Set optional values, if key not given\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n # Extra keys are left in d: Complain, drop or use them\n if d:\n if noleft not in [True, \"drop\", \"use\"]:\n raise ValueError(\"`noleft` can be one of `True`, 'drop', 'use'.\")\n if noleft is True:\n raise KeyError(\"Leftover keys ['{}'].\".format(\n arr2str(list(d.keys()), sep=\"', '\")))\n elif noleft == \"use\":\n for key, val in d.items():\n out[key] = val\n else: # Drop\n pass\n\n return out", "title": "" }, { "docid": "5ad1e57ad3e2ea148615e282fb402a1d", "score": "0.5138393", "text": "def _psd_params_checker(params):\n if params is None:\n return dict()\n elif not isinstance(params, dict):\n raise ValueError('The parameter `psd_params` has type %s. Expected '\n 'dict instead.' % type(params))\n else:\n expected_keys = ['welch_n_fft', 'welch_n_per_seg', 'welch_n_overlap']\n valid_keys = list()\n for n in params:\n if n not in expected_keys:\n raise ValueError('The key %s in `psd_params` is not valid and '\n 'will be ignored. 
Valid keys are: %s' %\n (n, str(expected_keys)))\n else:\n valid_keys.append(n)\n valid_params = {n: params[n] for n in valid_keys}\n return valid_params", "title": "" }, { "docid": "2510d3ab94dff02c9cb91562cb265a53", "score": "0.51352537", "text": "def omegaconf_filter_keys(d, fn=None):\n if fn is None:\n fn = lambda _: True\n if is_list(d):\n return ListConfig([omegaconf_filter_keys(v, fn) for v in d])\n elif is_dict(d):\n return DictConfig(\n {k: omegaconf_filter_keys(v, fn) for k, v in d.items() if fn(k)}\n )\n else:\n return d", "title": "" }, { "docid": "1849691f22caa21cd0acea88c4eedc22", "score": "0.5133167", "text": "def request_keys(keys):\n\n @decorator\n def _request_keys(fn, self, *args):\n json_keys = self.json.keys()\n for each in keys:\n if each not in json_keys:\n raise ResponseException('keys not in list: %s, %s' % (json_keys, keys))\n return fn(self, *args)\n\n return _request_keys", "title": "" }, { "docid": "61007ab96bd217cdad93e12d65d4d498", "score": "0.51303834", "text": "def subdict(d, keys):\r\n return dict((k, d[k]) for k in keys)", "title": "" }, { "docid": "09bfec63d144c94c17ee74027a85ff77", "score": "0.51295996", "text": "def _select_allowed_items(item_dict, allow_patterns, disallow_patterns):\n items_selected = {}\n for item in item_dict:\n select_item = False\n if allow_patterns:\n if allow_patterns[0] is None:\n select_item = True\n else:\n for pattern in allow_patterns:\n if re.search(pattern, item):\n select_item = True\n break\n if select_item:\n if disallow_patterns and (disallow_patterns[0] is not None):\n for pattern in disallow_patterns:\n if re.search(pattern, item):\n select_item = False\n break\n if select_item:\n items_selected[item] = copy.deepcopy(item_dict[item])\n\n return items_selected", "title": "" }, { "docid": "4c39f381e3ec927f5cda5471c78dba2e", "score": "0.51236546", "text": "def sub_dicts_eq(keys: set, *objs: object) -> bool:\n if not objs:\n return True\n\n eq_subset = partial(select_keys, keys)\n return all_eq(*map(eq_subset, objs))", "title": "" }, { "docid": "1f1bf07b1bfc16c4b6c995ba21e4d91a", "score": "0.5111044", "text": "def _verify_config_dict(self, valid, config, dev_os, key_path=None):\n if not key_path:\n key_path = []\n for key, value in valid.items():\n self._verify_config_key(key, value, valid, config, dev_os, key_path)", "title": "" }, { "docid": "6672ce2a7c6adb84bce68e0d0d6f9f59", "score": "0.5104399", "text": "def test_not_in(self):\n cd = TestDict('test')\n cd.clear()\n assert len(list(cd.keys())) == 0\n assert 'bob' not in cd", "title": "" }, { "docid": "6367b8b3e6903129ca3a8de674533fb9", "score": "0.5091958", "text": "def m(dict, *kv):\r\n for (k, v) in kv:\r\n if v:\r\n dict[k] = v\r\n return dict", "title": "" }, { "docid": "2376ea068dfecb1826bfb95edf1fb9fd", "score": "0.5080572", "text": "def contains(d1, d2):\n if any((not hasattr(d1, 'keys'), not hasattr(d2, 'keys'))):\n return d1 == d2\n return all([contains(d1[k], d2[k]) if k in d1.keys() else False for k in d2.keys()])", "title": "" }, { "docid": "f0b1cba0e240a292d44caa0fef113a76", "score": "0.50755644", "text": "def validate_dict_key(input_dict, invalid_chr, suggested_chr):\n invalid_key_list = []\n for k, v in input_dict.items():\n if isinstance(v, dict):\n if isinstance(k, str) and invalid_chr in k:\n invalid_key_list.append(k)\n validate_dict_key(v, invalid_chr, suggested_chr)\n else:\n if isinstance(k, str) and invalid_chr in k:\n invalid_key_list.append(k)\n if invalid_key_list:\n raise RuntimeError(\n \"Sadly our database can't digest periods in \"\n 
\"dictionary keys. We have found a number of \"\n \"entries in your spreadsheet that will \"\n \"violate this. these are listed below:\\n{}\\n\"\n \"As annoying as this is, we suggest you change \"\n \"the sample name to remove the {} characters,\"\n \" for example, you could replace {} with {} in \"\n \"your spreadsheet.\".format(\n invalid_key_list, invalid_chr, invalid_chr, suggested_chr\n )\n )", "title": "" }, { "docid": "a598e36e7681eb8494dc29c157357398", "score": "0.5070648", "text": "def sortedDictKeys(d, onlykeys=[], reverse=False):\n if onlykeys != []:\n keys = intersect(d.keys(), onlykeys)\n else:\n keys = list(d)\n keys.sort()\n if reverse:\n keys.reverse()\n return keys", "title": "" }, { "docid": "041195d415668321ca296024ccc6bec9", "score": "0.5058789", "text": "def in_dict(a_dict, a_key):\n return a_key in a_dict", "title": "" }, { "docid": "4a9e63c7273b4b0c7b772a8dadd9cd76", "score": "0.5049973", "text": "def filtered(self):\n return {k: v for k, v in self.__dict__.items() if not\n k.startswith('_')}", "title": "" }, { "docid": "b326f18f31ab19b7bd3230871ef97964", "score": "0.5045093", "text": "def validate_dictionary_keys(self, dict_key, dict_value, message):\n if dict_key not in [*dict_value]:\n raise serializers.ValidationError(message)", "title": "" }, { "docid": "2bb6aa15130b9f75c60a92feec4f23d6", "score": "0.5041604", "text": "def _check_keys(dict):\n for key in dict:\n if isinstance(dict[key], spio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict", "title": "" }, { "docid": "5238708e8bbaa80d7b7aa4c53f4fe52f", "score": "0.50398624", "text": "def ensure_dict(d: Mapping[K, V], *, copy: bool = False) -> dict[K, V]:\n if type(d) is dict:\n return d.copy() if copy else d\n try:\n layers = d.layers # type: ignore\n except AttributeError:\n return dict(d)\n\n result = {}\n for layer in toolz.unique(layers.values(), key=id):\n result.update(layer)\n return result", "title": "" }, { "docid": "e4a5ace2cfd82469042c3f7f53efbedb", "score": "0.5024665", "text": "def validate_dictionary(cls: NamedTuple, dic: dict):\n valid_dic = {}\n fields_list_lower = [k.lower() for k in cls._fields]\n for k in dic.keys():\n index = fields_list_lower.index(k.lower()) # Compare all in lower case. 
Avoid Caps sensitive.\n safe_key = cls._fields[index]\n\n if cls._field_types.get(safe_key) == int:\n valid_dic[safe_key] = int(dic.get(k))\n elif cls._field_types.get(safe_key) == float:\n valid_dic[safe_key] = float(dic.get(k))\n elif cls._field_types.get(safe_key) == bool:\n if hasattr(dic.get(k), \"capitalize\"):\n valid_dic[safe_key] = True if dic.get(k).capitalize() == \"True\" else False\n else:\n valid_dic[safe_key] = dic.get(k)\n elif cls._field_types.get(safe_key) == str:\n if dic.get(k) == \"null\":\n valid_dic[safe_key] = None\n else:\n valid_dic[safe_key] = str(dic.get(k))\n else:\n valid_dic[safe_key] = dic.get(k)\n return valid_dic", "title": "" }, { "docid": "22a005eb4db709c788936655741d6038", "score": "0.5021864", "text": "def _validate_train_parameters(parameters: dict, keys: dict):\n\n for k, v in keys.items():\n if k not in parameters:\n raise RuntimeError(f'Missing key \"{k}\" in parameters for training')\n if isinstance(v, list):\n for i in v:\n if i not in parameters[k]:\n raise RuntimeError(f'Missing key \"{k}.{i}\" in parameters for training')\n if isinstance(v, dict):\n _validate_train_parameters(parameters[k], v)", "title": "" }, { "docid": "140f406d9f3725e9dd0e9cd617701fc5", "score": "0.50195307", "text": "def filtervalues(predicate, dict_):\n predicate = bool if predicate is None else ensure_callable(predicate)\n ensure_mapping(dict_)\n return dict_.__class__((k, v) for k, v in iteritems(dict_) if predicate(v))", "title": "" }, { "docid": "02c3febd223dfff63524f5d8ee92af53", "score": "0.501811", "text": "def make_query(i, required_keys=None):\r\n query = {}\r\n for k, v in i.items():\r\n if k.startswith('_'):\r\n continue\r\n if required_keys and k not in required_keys:\r\n continue\r\n if v == '':\r\n v = None\r\n query[k] = v\r\n return query", "title": "" }, { "docid": "4cb2b6dfc2060d31e1114d7e7dbf3505", "score": "0.50152856", "text": "def slice_parset(d, key):\n return dict((k, v) for k, v in list(d.items()) if k.startswith(key))", "title": "" }, { "docid": "9ae063870b6c2dd8df7c8f9bfaff4828", "score": "0.5014754", "text": "def filter_lower_case_keys(dict):\n return {key: value for key, value in dict.items() if key.islower()}", "title": "" }, { "docid": "c051be1e9177e25deec8344973268e9d", "score": "0.5013101", "text": "def safeget(dct, keys):\n for key in keys:\n try:\n dct = dct[key]\n except KeyError:\n return None\n return dct", "title": "" }, { "docid": "c051be1e9177e25deec8344973268e9d", "score": "0.5013101", "text": "def safeget(dct, keys):\n for key in keys:\n try:\n dct = dct[key]\n except KeyError:\n return None\n return dct", "title": "" }, { "docid": "5ed3d217e97733da5f3ae642b5bbdff1", "score": "0.50121284", "text": "def filter_only_exposed(data, exposed_keys, additional_information_function=None):\n\n if data is None:\n return None\n\n new_data = [None] * len(data)\n if type(data) == list:\n for i, d in enumerate(data):\n new_data[i] = filter_only_exposed(d, exposed_keys)\n return new_data\n\n new_item = {}\n for k, v in data.items():\n if k in exposed_keys:\n new_item[k] = v\n\n if additional_information_function is not None:\n additional_information_function(new_item)\n\n return new_item\n # return {k: v for k, v in data.items() if k in exposed_keys}", "title": "" }, { "docid": "0bca5e2d8892abeeba98457c132981c9", "score": "0.50117964", "text": "def _check_keys(d):\n for key in d:\n elem = d[key]\n if isinstance(elem,\n scipy.io.matlab.mio5_params.mat_struct):\n d[key] = _todict(elem)\n elif _has_struct(elem):\n d[key] = _tolist(elem)\n return 
d", "title": "" } ]
c35652b26ac9b90ab52b8a7de68039e5
Returns true if there is any boolean True in A
[ { "docid": "488f7cc7ab70b194a5fd819ee1942cf5", "score": "0.7278255", "text": "def any_(A):\n return A.any()", "title": "" } ]
[ { "docid": "2b72524cb32c8aea61e7fb5e34fc692e", "score": "0.7511237", "text": "def __nonzero__(self):\n return any([bool(i) for i in self])", "title": "" }, { "docid": "fb85a89afbc2a2508032f356aedf04dd", "score": "0.6771524", "text": "def all(self):\n if not all(dt == 'bool' for nm, dt, null in self.sdbtype.full_rep):\n raise TypeError(\"any() only valid for boolean arrays\")\n\n return self.aggregate(*('min(%s)' % att for att in self.att_names)) > 0", "title": "" }, { "docid": "f5618de45b1d16b2131d917353c10e46", "score": "0.6705862", "text": "def any_is_true(generator):\n for flag in generator:\n if flag:\n return True\n return False", "title": "" }, { "docid": "cff9184c82b0cbe4a8107643bc3d01fc", "score": "0.6680552", "text": "def any(self):\n if not all(dt == 'bool' for nm, dt, null in self.sdbtype.full_rep):\n raise TypeError(\"any() only valid for boolean arrays\")\n\n return self.aggregate(*('max(%s)' % att for att in self.att_names)) > 0", "title": "" }, { "docid": "f9b5b9992a3f73589b6f089f7e9f9ed4", "score": "0.6645404", "text": "def any(iterable):\n for element in iterable:\n if element:\n return True\n return False", "title": "" }, { "docid": "bbdc733a9b2b85a56adceb165cf8f9ff", "score": "0.6489734", "text": "def _all_zero(self, A):\n for x in A:\n if x != 0:\n return False\n return True", "title": "" }, { "docid": "1882810dab2052f67be27989a6775c22", "score": "0.6469668", "text": "def isTrue(list_truth_values):\n if list_truth_values == []:\n return False\n else:\n return reduce(operator.and_, list_truth_values, True)", "title": "" }, { "docid": "24afb54142bf181356aa23fb935ee4ee", "score": "0.6432742", "text": "def countTruth(boolFunc, aList):\n return len(filter(boolFunc, aList))", "title": "" }, { "docid": "cf039d499d5169151daa5fcc8eb6a465", "score": "0.6395526", "text": "def all(iterable):\n for element in iterable:\n if not element:\n return False\n return True", "title": "" }, { "docid": "1d2df2ca5f4477ee4f16a4cf25b428dd", "score": "0.6394134", "text": "def __bool__(self):\n return self.__nonzero__()", "title": "" }, { "docid": "1d2df2ca5f4477ee4f16a4cf25b428dd", "score": "0.6394134", "text": "def __bool__(self):\n return self.__nonzero__()", "title": "" }, { "docid": "1d2df2ca5f4477ee4f16a4cf25b428dd", "score": "0.6394134", "text": "def __bool__(self):\n return self.__nonzero__()", "title": "" }, { "docid": "c3106680aee5be84134835e6dce9f792", "score": "0.637476", "text": "def any(iterable):\n for element in iterable:\n if element:\n return True\n return False", "title": "" }, { "docid": "fdbe3d0d711045e004d137ab9c8f70b9", "score": "0.6328298", "text": "def all(iterable):\n for element in iterable:\n if not element:\n return False\n return True", "title": "" }, { "docid": "fdbe3d0d711045e004d137ab9c8f70b9", "score": "0.6328298", "text": "def all(iterable):\n for element in iterable:\n if not element:\n return False\n return True", "title": "" }, { "docid": "1d46d699c7d8ed8b9308eba468f47d64", "score": "0.6289138", "text": "def any(iterable):\n return True", "title": "" }, { "docid": "41fe42accd705585f565e5f7e34f19c4", "score": "0.6273393", "text": "def is_all_true(self) -> bool:\r\n return self.pixels_in_mask == 0", "title": "" }, { "docid": "95fe78761d542a3d4b9b9563fa539f1f", "score": "0.6267928", "text": "def myall(iterable):\n \"\"\" Returns True only if all the elements in the iterable are truthy, else returns False \"\"\"\n for item in iterable:\n if not item:\n return False\n return True", "title": "" }, { "docid": "946d3a23a06fc92fa0904852376fbe8d", "score": 
"0.62348896", "text": "def isTrue():", "title": "" }, { "docid": "6f9d25b10913227ccb731cfcbf72820e", "score": "0.6222977", "text": "def __bool__(self):\n\t\treturn self._N != 0", "title": "" }, { "docid": "5b976b6fe901e03f6f6e33ed02213f41", "score": "0.6157532", "text": "def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {self.__class__.__name__} is ambiguous. \"\n + \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )", "title": "" }, { "docid": "6ae58a47fa8333fb529bdce97b6a6040", "score": "0.6095795", "text": "def __nonzero__(self):\n return bool(self._list)", "title": "" }, { "docid": "424696b24f9220eb47bc881718a218ca", "score": "0.6068682", "text": "def turns_true_with(self, var, val):\n for a, v in var.atom_values(val):\n if self.unsatisfied and v in self.atom_index2literals[a.index]:\n return True\n return False", "title": "" }, { "docid": "ba40a9e74948dadf36e45605c7486863", "score": "0.60676944", "text": "def is_true(self, elem_list):\n raise NotImplementedError()", "title": "" }, { "docid": "ae01a7daf6faa679ee9b5aa981f8061b", "score": "0.6043491", "text": "def has_any(self):\n\n return len(self.get_all()) > 0", "title": "" }, { "docid": "a1cafe758ab9eaf5b82e78be7eba1a46", "score": "0.602879", "text": "def __bool__(self):\n return not self.isempty()", "title": "" }, { "docid": "71f2e74ab8f1e111aea9916360816c46", "score": "0.6024775", "text": "def checkState(self, nparray):\n if all(i >= 0 for i in nparray):\n return True\n else:\n return False", "title": "" }, { "docid": "4b8ad2859a11de0940bd8b8d7de83b4a", "score": "0.60147244", "text": "def true(shape):\n return full(shape, True, dtype=bool)", "title": "" }, { "docid": "af7c18ff321aadd7657aa6df465728f6", "score": "0.6014013", "text": "def has_single_liaisons(liaisons):\n return not any(i == j == 1 for i, j in zip(liaisons, liaisons[1:]))", "title": "" }, { "docid": "3e267c350aafc3ee07acd208233660ec", "score": "0.5997192", "text": "def goal_test(self):\n return all(goal in self.initial for goal in self.goals)", "title": "" }, { "docid": "8df81cb23cfc9b10e1ee5f23f987947e", "score": "0.5991744", "text": "def consistent_all(self, A):\n assert len(A) <= self.num_vars\n for i in range(len(A)):\n if not self.consistent_other(i, A):\n return False\n return True", "title": "" }, { "docid": "ddd97887e8da591e40d7b370d53dc5b1", "score": "0.5980589", "text": "def state(self):\r\n return bool(self.state_true_count() != 0)", "title": "" }, { "docid": "e40f150f519022d984c57f270edee7d1", "score": "0.5933973", "text": "def is_flag_set(flag):\n return any_flags_set(flag)", "title": "" }, { "docid": "fa4f1d91c3ae06d16a66e475038a8a50", "score": "0.59121734", "text": "def get_bool_result(self) -> bool:\n return all(passage_result.result for passage_result in self._results)", "title": "" }, { "docid": "3c3764e759ffb1c330eb653ebe8a237c", "score": "0.5902867", "text": "def __bool__(self) -> bool:\n return bool(self.and_ or self.or_ or self.not_)", "title": "" }, { "docid": "b2ae1a2bdeaf33179f4f4cbd49b708e3", "score": "0.587973", "text": "def All(self):\n return self.include is None and not self.exclude", "title": "" }, { "docid": "9414da4b2e3d6a368eaaac47fb076e09", "score": "0.5870207", "text": "def __nonzero__(self):\r\n return bool(self.eval())", "title": "" }, { "docid": "e601585602baa8caab41b84858fdc466", "score": "0.5855268", "text": "def truth(self, *args) -> bool:", "title": "" }, { "docid": "7c07288a31dd6eae0cdeebde63a2e56c", "score": "0.5852711", "text": "def __bool__(self):\n\n return self._n > 0", "title": "" }, { 
"docid": "0a33e2305d91617fee5721271286c4b8", "score": "0.58477974", "text": "def __nonzero__(self):\n return bool(self.oids)", "title": "" }, { "docid": "3d5aee59fb22da88853db14bc61b788c", "score": "0.5837937", "text": "def every(predicate, seq):\n for x in seq:\n if not predicate(x): return False\n return True", "title": "" }, { "docid": "35b8087b3792fd05875f1f49b0d4cf4c", "score": "0.5829817", "text": "def __bool__(self):\n return not self.is_empty()", "title": "" }, { "docid": "9e6c07a55c5faf04680e50487c6ec970", "score": "0.5828629", "text": "def _booleanAnd(self, elem):\n return self.lhs.value(elem) and self.rhs.value(elem)", "title": "" }, { "docid": "f0b87c7cd5f1e7dd30decff3eb754a3a", "score": "0.5824575", "text": "def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if test(x):\n return True\n return False", "title": "" }, { "docid": "9060dc708d83dcd615203e979437faf0", "score": "0.5823648", "text": "def __bool__(self):\n return bool(self._residues)", "title": "" }, { "docid": "c2d63835ff0182dcfa10dc23a6a5e464", "score": "0.5762349", "text": "def test_any(self, *iterable):\r\n for expr in iterable:\r\n if self.test(expr):\r\n return True\r\n return False", "title": "" }, { "docid": "c2d63835ff0182dcfa10dc23a6a5e464", "score": "0.5762349", "text": "def test_any(self, *iterable):\r\n for expr in iterable:\r\n if self.test(expr):\r\n return True\r\n return False", "title": "" }, { "docid": "2822850d6dbdc5e1c923e166b2b988b9", "score": "0.5760255", "text": "def g1(a, b): \r\n if a == True and b == True:\r\n return False\r\n else:\r\n return True", "title": "" }, { "docid": "1e276a8c88efcdac9aaa9745a986b98b", "score": "0.5757271", "text": "def n_all(iterable):\n logger.info(\"In n_all..\")\n\n for element in iterable:\n if element is None:\n return False\n return True", "title": "" }, { "docid": "db07b44c03dc35781454ffab9854d94f", "score": "0.5752071", "text": "def any(self, *names):\n return any(self.__contains__(name) for name in names)", "title": "" }, { "docid": "3001bc030b5fb011873755ec90f8080c", "score": "0.5747407", "text": "def isSatisfiable(self):\n bools = [c.isSatisfiable() for c in self.constraints.values()]\n return all(bools)", "title": "" }, { "docid": "a633ceb0eaa8eda76fed10222bbf50fc", "score": "0.57309043", "text": "def __nonzero__(self):\n return all([self.project])", "title": "" }, { "docid": "f0f283510fbe4f8b90122dc7b4a951ee", "score": "0.57272243", "text": "def any_(obj):\n return any(obj)", "title": "" }, { "docid": "baae751515a73f4ccec86f1c88361e6b", "score": "0.5723565", "text": "def boolean_TTcondition(belong_to_training):\n return any([not b for b in belong_to_training])", "title": "" }, { "docid": "5f38d4e6f8bf7ba0395fd1443599f19e", "score": "0.571831", "text": "def all(self, predicate):\n\n return self.filter(lambda v: not predicate(v)).some().map(lambda b: not b)", "title": "" }, { "docid": "d3d8919da610803f7fd44b594b785b9e", "score": "0.57161695", "text": "def all_(obj):\n return all(obj)", "title": "" }, { "docid": "2f2bf9c872ff12a1aff0810d01b08c9a", "score": "0.5712127", "text": "def any_flags_set(*desired_flags):\n active_flags = get_flags()\n return any(flag in active_flags for flag in desired_flags)", "title": "" }, { "docid": "3e6428d6d93ba716b88b91ecd89e91b0", "score": "0.5710732", "text": "def isActive(self):\n if self.alwaysActive:\n return True\n activeVariables = sum(\n [v.active if v is not None else True for v in self.listOfVariables]\n )\n return activeVariables > 0", "title": "" }, { "docid": 
"9697bb42daa2f41a9a94cdd3072aa984", "score": "0.5706596", "text": "def all_filled(self):\n has_none = np.sum(np.array([v is None for v in self.cache.values()]))\n return False if has_none > 0 else True\n # check_df = self.df.isnull()*1\n # return False if check_df.sum(axis=1).sum(axis=0) > 0 else True", "title": "" }, { "docid": "0f7d8be7496e258186da55df526814c9", "score": "0.57042426", "text": "def truth(a): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "38b3a260b25ec92e5bd1ec01c2527b7d", "score": "0.5698759", "text": "def some(self, func):\n return any(func(value) for value in self.args)", "title": "" }, { "docid": "e6a438574ac5a97f5f8d7c127261d4db", "score": "0.56779104", "text": "def check(a, x):\n return x in a", "title": "" }, { "docid": "b93bf921f2e785d2658b02bcdd3d9e3a", "score": "0.56776375", "text": "def __bool__(self):\n\n return bool(self.__terms)", "title": "" }, { "docid": "8e7ab4918b2b78d416e826b272d7e25e", "score": "0.56757563", "text": "def __nonzero__(self):\r\n return True", "title": "" }, { "docid": "615f7313e2852e5e7ae8b7c0a8357fc7", "score": "0.5667287", "text": "def is_detecting_anything(self):\n nn = lambda x:x is not None\n return len(filter(nn, self._prev_level))>0 or len(filter(nn, self._minmax))>0", "title": "" }, { "docid": "8d046af264f51c584f7a8cdf26be5a52", "score": "0.5660552", "text": "def _HasChanges(args, flags):\n return any(FlagIsExplicitlySet(args, flag) for flag in flags)", "title": "" }, { "docid": "cd373b5a146d6dd6a473d2b882812dfb", "score": "0.5654258", "text": "def is_anulada(self):\n if self.estado == self.__class__.ANULADA or \\\n any(self.historial_set.filter(estado=Historial.RECHAZADA)):\n return True\n return False", "title": "" }, { "docid": "9bb30c2e568193806c115c14387ee862", "score": "0.56444037", "text": "def __bool__(self):\n return self.taxonomy.exists and self.alignment.exists", "title": "" }, { "docid": "4b6773c803f2a1cec0cf524f9284ed66", "score": "0.5640802", "text": "def Any(self):\n return self.include is None or len(self.include)", "title": "" }, { "docid": "cf4b8d7f9b168a4afab79df819ffdea3", "score": "0.5636154", "text": "def __bool__(self) -> bool:\n return len(self) > 0", "title": "" }, { "docid": "7d91173891b285433d9143c96ee7e68b", "score": "0.56340426", "text": "def all_by(pred: Pred, seq: Seq) -> bool:\n pred = to_callable(pred)\n return all(map(pred, seq))", "title": "" }, { "docid": "31cc69a554dd40bb8dc0f188c9f509fe", "score": "0.5616245", "text": "def is_detecting_anything(self):\n nn = lambda x:x is not None\n return len(list(filter(nn, self._prev_level)))>0 or len(list(filter(nn, self._minmax)))>0", "title": "" }, { "docid": "ae1b83317bce7e2bd9517719296bd605", "score": "0.5614786", "text": "def __bool__(self):\n return bool(self.points)", "title": "" }, { "docid": "6675f32f31e9448c338ebe707efe5316", "score": "0.5613473", "text": "def scheme_truep(val):\n return val is not False", "title": "" }, { "docid": "aab0ccd73f3d2b8a42aee91f82d47e3e", "score": "0.5611146", "text": "def empty(self):\n return False if self.q[self.flag] else True", "title": "" }, { "docid": "a6fe27a4d84857083f2b21cd9a82e9fd", "score": "0.5605707", "text": "def any(self, name):\n self._validate_column_name(name)\n dtype = self.get_dtype(name)\n if dtype != 'boolean':\n msg = ('all() can only be called on a boolean column; you '\n 'provided column of dtype={}').format(dtype)\n raise TypeError(msg)\n return self._df[self._df[name].isin([True])].count() > 0", "title": "" }, { "docid": 
"3a0a8848277f48ec91f1e68790d45671", "score": "0.5600929", "text": "def any(fn, *args):\n seen = False\n while True:\n if not fn(*args): break\n seen = True\n return seen", "title": "" }, { "docid": "efbe3362777d87bac43564311ad8d2ba", "score": "0.5595323", "text": "def aggregate(values: Series) -> bool:\n return values.fillna(False).astype(bool).all()", "title": "" }, { "docid": "62485e3299c4cb4b38d5c34a0178a23a", "score": "0.5590884", "text": "def has_fullHouse(self):\n return((self.has_3ofAKind()) and (self.has_pair()))", "title": "" }, { "docid": "af07cc52b13e1f0b5678c51862590772", "score": "0.5587857", "text": "def boolean_TTcondition(belong_to_training):\n return all(belong_to_training)", "title": "" }, { "docid": "36d18cb080c523a46a3bf84926da6cbd", "score": "0.55823314", "text": "def __bool__(self) -> bool:\n return any((self.excluded_ids, self.excluded_auth_ids, self.excluded_auth_roles,\n self.eligible_ids, self.eligible_auth_ids, self.eligible_auth_roles))", "title": "" }, { "docid": "0a08ff0da660d3b83cd99474d0b628a1", "score": "0.55779356", "text": "def __nonzero__(self): #pragma: no cover\n return bool(self())", "title": "" }, { "docid": "38c1741f966a47a1ecc2956ed266e080", "score": "0.5576794", "text": "def all_flags_set(*desired_flags):\n active_flags = get_flags()\n return all(flag in active_flags for flag in desired_flags)", "title": "" }, { "docid": "fc2d1d6fc7d6e67914ffe0505d969f56", "score": "0.5574398", "text": "def __nonzero__ (self) :\n if len(self) :\n return True\n return False", "title": "" }, { "docid": "8a50b68e8d479a6190f3b8581a88bbe3", "score": "0.55677193", "text": "def g3(a, b): \r\n return not (a and b)", "title": "" }, { "docid": "982727ab34038ee4d54492ede864c2e1", "score": "0.55671394", "text": "def resolved(self):\n # type: () -> bool\n accumulator = True\n for item in self:\n accumulator = bool(accumulator and item.resolved)\n return accumulator", "title": "" }, { "docid": "333e2257ead7717dc67108fd43e6c341", "score": "0.55662954", "text": "def __nonzero__(self):\n return True", "title": "" }, { "docid": "333e2257ead7717dc67108fd43e6c341", "score": "0.55662954", "text": "def __nonzero__(self):\n return True", "title": "" }, { "docid": "758bdf5408b794effcb3b911f2c9a778", "score": "0.5563776", "text": "def check_condition(self):\n pop_fitness_list = np.array([ind.fitness for ind in self.population], dtype=int)\n if np.any(pop_fitness_list == 0):\n return True", "title": "" }, { "docid": "3bfe9eb4380a8aed4b1d3a9bac5c56ee", "score": "0.55602986", "text": "def statement3(my_set: set[int],\n my_p: Callable[[int], bool],\n my_q: Callable[[int], bool]) -> bool:\n return all([my_p(x) and my_q(x) for x in my_set])", "title": "" }, { "docid": "96c069147f4ad8c2a00fd6aabfb9177e", "score": "0.5560026", "text": "def __nonzero__(self):\n return bool(self.products)", "title": "" }, { "docid": "2e4629259d346a71c44f09e79e648ca1", "score": "0.55586714", "text": "def match_any(iterable, predicate):\n for x in iterable:\n if predicate(x):\n return True\n return False", "title": "" }, { "docid": "87c49ed0aa20b6439f7401897adc91c9", "score": "0.5557968", "text": "def __nonzero__(self):\n return self.values.__nonzero__()", "title": "" }, { "docid": "9bd0616260f3598a67338ecfb921ee5e", "score": "0.5556244", "text": "def visit_true(self, *_):\n\n return True", "title": "" }, { "docid": "8ed902c1739dd1ed086dffc196346b73", "score": "0.555507", "text": "def isSet(self):\n return self._isSet", "title": "" }, { "docid": "b5d5bc4c628c2c5f7f56c4699447421f", "score": "0.55547386", "text": "def 
containsAny(str, set):\n return True in []", "title": "" }, { "docid": "5e14d820740a0c68dd6e143f506cac45", "score": "0.5550138", "text": "def passed(self):\n values = [self.state[k] for k in self.tests]\n passes = [s['pass'] for s in values if s['pass'] is not None]\n if passes:\n return all(passes)\n return False", "title": "" }, { "docid": "549207deced4879d432bf1f8b5be7e24", "score": "0.5545806", "text": "def aggregate(values: Series) -> bool:\n return values.fillna(False).astype(bool).any()", "title": "" }, { "docid": "0d9449c1173b1c482c8cda85d6757291", "score": "0.5544268", "text": "def __bool__(self):\n self._cache.extend(self._itr)\n return bool(self._cache)", "title": "" }, { "docid": "8492f34b210fc3f2afb0728c8d0eac4a", "score": "0.5543846", "text": "def every(pred, coll):\n pred = _make_pred(pred)\n\n for e in coll:\n if not pred(e):\n return False\n\n return True", "title": "" } ]
3471ed748751f1988b094a1dd75394f2
Allows to get Piano C chord with predetermined octave, harmony and duration
[ { "docid": "19446f336a41d59941282262ac238341", "score": "0.844064", "text": "def create_c_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c.wav\")\n temp1 = c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/c.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/e.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/g.wav\"))\n elif str(harmony) == \"minor\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c.wav\")\n temp1 = c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/c.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/d_sharp.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/g.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" } ]
[ { "docid": "07e77d4f3665179daa18aa85ae1ad833", "score": "0.84081155", "text": "def create_c_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c.wav\")\n temp1 = c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/c.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/e.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/g.wav\"))\n elif str(harmony) == \"minor\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c.wav\")\n temp1 = c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/c.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/d_sharp.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/g.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "289be3303d8348b0f45eea19c1805114", "score": "0.78927076", "text": "def create_a_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/a.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/e.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/e.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/a.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/a.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/e.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/e.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/a.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "d39678cb5bfeb4f5c8bb7db41d4ad3e9", "score": "0.7787941", "text": "def create_c_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c_sharp.wav\")\n temp1 = 
c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/c_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/f.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/g_sharp.wav\"))\n elif str(harmony) == \"minor\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c_sharp.wav\")\n temp1 = c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/c_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/e.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/g_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "32340495e3ac8d5c7baab1fbec59b91d", "score": "0.7779683", "text": "def create_a_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/a.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/e.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/e.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/a.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/a.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/e.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/e.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/a.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "67b79292611f31802ce7a9a41e5a8bb0", "score": "0.77772343", "text": "def create_c_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c_sharp.wav\")\n temp1 = c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/c_sharp.wav\"))\n temp3 = 
temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/f.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/g_sharp.wav\"))\n elif str(harmony) == \"minor\":\n c_note = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c_sharp.wav\")\n temp1 = c_note.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/c_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/e.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/g_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "52fd98755430851a0d8618e034e240cb", "score": "0.7711506", "text": "def create_d_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/d.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/f_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/d.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/f.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "2e205bc4cdf05f5347855f8352a9ac99", "score": "0.7625333", "text": "def create_g_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/g.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/h.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/d.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/g.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/g.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory 
+ \"/samples/piano/\"+str(octave)+\"/a_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/d.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/g.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "7e59019251d97da65f465a714681b46a", "score": "0.76127225", "text": "def create_d_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/d.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/f_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/d.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/f.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "11ba1d5992a328281ce7c884262a2a61", "score": "0.75451344", "text": "def create_e_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/e.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/h.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/e.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/h.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/e.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/e.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/h.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/e.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/h.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/e.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "2c039e7b8735ebc1efce7520cd9159d0", "score": "0.75377893", "text": "def create_h_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = 
AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/h.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/h.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/d_sharp.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/f_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/h.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/h.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/d.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/f_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "85dfd1b3577cb1ddd83ca07cda1fcee8", "score": "0.7478326", "text": "def create_g_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/g.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/h.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/d.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/g.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/g.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/d.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/g.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "52f6fb736ee4755c988dc5be1eedde38", "score": "0.7460553", "text": "def create_h_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/h.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + 
\"/samples/guitar/\"+str(octave)+\"/h.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/d_sharp.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/f_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/h.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/h.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/d.wav\"))\n result = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/f_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "27ac38cd0ed8ad9cb94916e2e4437a75", "score": "0.7444743", "text": "def create_e_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/e.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/h.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/e.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/h.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/e.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/e.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/h.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/e.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/h.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/e.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "1e3f1cbc0378b3297dd61b3e04fbb436", "score": "0.72458655", "text": "def create_f_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/f.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c.wav\"))\n 
result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/f.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/f.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/f.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "2f5ffde54a36090a9c0dab6072b72736", "score": "0.7195761", "text": "def get_default_chord(self, chord, octave, duration, harmony):\n\n result = None\n\n if chord == \"c\":\n result = self.create_c_chord(octave, duration, harmony)\n elif chord == \"c_sharp\":\n result = self.create_c_sharp_chord(octave, duration, harmony)\n elif chord == \"d\":\n result = self.create_d_chord(octave, duration, harmony)\n elif chord == \"d_sharp\":\n result = self.create_d_sharp_chord(octave, duration, harmony)\n elif chord == \"e\":\n result = self.create_e_chord(octave, duration, harmony)\n elif chord == \"f\":\n result = self.create_f_chord(octave, duration, harmony)\n elif chord == \"f_sharp\":\n result = self.create_f_sharp_chord(octave, duration, harmony)\n elif chord == \"g\":\n result = self.create_g_chord(octave, duration, harmony)\n elif chord == \"g_sharp\":\n result = self.create_g_sharp_chord(octave, duration, harmony)\n elif chord == \"a\":\n result = self.create_a_chord(octave, duration, harmony)\n elif chord == \"a_sharp\":\n result = self.create_a_sharp_chord(octave, duration, harmony)\n elif chord == \"h\":\n result = self.create_h_chord(octave, duration, harmony)\n\n return result", "title": "" }, { "docid": "2f5ffde54a36090a9c0dab6072b72736", "score": "0.7195761", "text": "def get_default_chord(self, chord, octave, duration, harmony):\n\n result = None\n\n if chord == \"c\":\n result = self.create_c_chord(octave, duration, harmony)\n elif chord == \"c_sharp\":\n result = self.create_c_sharp_chord(octave, duration, harmony)\n elif chord == \"d\":\n result = self.create_d_chord(octave, duration, harmony)\n elif chord == \"d_sharp\":\n result = self.create_d_sharp_chord(octave, duration, harmony)\n elif chord == \"e\":\n result = self.create_e_chord(octave, duration, harmony)\n elif chord == \"f\":\n result = self.create_f_chord(octave, duration, harmony)\n elif chord == \"f_sharp\":\n result = self.create_f_sharp_chord(octave, duration, harmony)\n elif chord == \"g\":\n result = self.create_g_chord(octave, duration, harmony)\n elif chord == \"g_sharp\":\n result = self.create_g_sharp_chord(octave, duration, harmony)\n elif chord == \"a\":\n result = self.create_a_chord(octave, duration, harmony)\n elif chord == \"a_sharp\":\n result = self.create_a_sharp_chord(octave, duration, harmony)\n elif chord == \"h\":\n result = self.create_h_chord(octave, duration, harmony)\n\n return result", "title": "" }, { "docid": "e6bc55b276a23ddc65fe9cf99a8f3543", "score": "0.714988", "text": "def create_f_chord(self, octave, duration, harmony):\n\n result = 
None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/f.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/f.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/f.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/f.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "06e9c698f54795034de1f480b85cab0a", "score": "0.7058978", "text": "def create_a_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/a_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/d.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/f.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/a_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/a_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/f.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/a_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "f26cbf54727772749225061c103bfdb3", "score": "0.7057801", "text": "def create_g_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) 
== \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/g_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/d_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/g_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/g_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/g_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/h.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/d_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/g_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "82937ac07d95a22e0509d49f5578f09c", "score": "0.703113", "text": "def create_a_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/a_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/d.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/f.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/a_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/a_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/f.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/a_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "5e2dcef98de0c9547d51e4c5feda11fb", "score": "0.7019466", "text": "def create_g_sharp_chord(self, octave, duration, harmony):\n\n 
result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/g_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/d_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/g_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/g_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/g_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/h.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/d_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/g_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "2010714d4a28a28881eb9858934b9cce", "score": "0.69897336", "text": "def create_d_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/d_sharp.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/g.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/d_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/d_sharp.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/f_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "30d9411fd6bcb3057e680eb6f6b0ec02", "score": "0.697627", "text": "def create_d_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/d_sharp.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n 
self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/g.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/d_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/d_sharp.wav\"))\n result = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/f_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "083ed1ba8549f291d403729620d1eaf4", "score": "0.6800724", "text": "def create_f_sharp_chord(self, octave, duration, harmony):\n\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/f_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave+1)+\"/f_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\"+str(octave - 1)+\"/f_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/c_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/f_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave)+\"/a.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/c_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/piano/\"+str(octave + 1)+\"/f_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "158ff374dddf9f64aa35e8426a3896e8", "score": "0.6761683", "text": "def create_f_sharp_chord(self, octave, duration, harmony):\n result = None\n\n if str(harmony) == \"major\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 1)+\"/f_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a_sharp.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave+1)+\"/f_sharp.wav\"))\n elif str(harmony) == \"minor\":\n temp = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\"+str(octave - 
1)+\"/f_sharp.wav\")\n temp1 = temp.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/c_sharp.wav\"))\n temp2 = temp1.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/f_sharp.wav\"))\n temp3 = temp2.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave)+\"/a.wav\"))\n temp4 = temp3.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/c_sharp.wav\"))\n result = temp4.overlay(AudioSegment.from_file(\n self.path_to_directory + \"/samples/guitar/\"+str(octave + 1)+\"/f_sharp.wav\"))\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "c8ec1f7589fa34f4983d85c2a72bf744", "score": "0.6690171", "text": "def music21_to_chord_duration(p, key):\n p_chords = p.chordify()\n p_chords_o = p_chords.flat.getElementsByClass('Chord')\n chord_list = []\n chord_function_list = []\n duration_list = []\n for ch in p_chords_o:\n duration_list.append(ch.duration.quarterLength)\n ch.closedPosition(forceOctave=4, inPlace=True)\n rn = roman.romanNumeralFromChord(ch, key)\n rp = rn.pitches\n rp_names = \",\".join([pi.name + pi.unicodeNameWithOctave[-1] for pi in rp])\n chord_list.append(rp_names)\n chord_function_list.append(rn.figure)\n return chord_list, chord_function_list, duration_list", "title": "" }, { "docid": "46eccdf61ddcb855167f27fd282a92ce", "score": "0.6621113", "text": "def get_not_default_chord(self, notes_, duration):\n\n # Split each elements of input array by symbol,\n # In result we get array of array of string.\n # Each elements of its array is array which contain two elements:\n # first element ([0][0]) - name of note. Second ([0][1]) - octave of corresponding note\n notes = [[]]\n for item in notes_:\n temp = item.split(\",\")\n notes.append(temp)\n\n del notes[0] # Delete first element, because its empty\n i = 2 # i = 2, because before start loop, we concatenate first and second elements of array\n\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + notes[0][1] + \"/\" +\n notes[0][0] + \".wav\")\n result = result.overlay(AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + notes[1][1] +\n \"/\" + notes[1][0] + \".wav\"))\n\n while i < len(notes):\n # Concatenate current and previous element of array in parallel mode\n result = result.overlay(AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" +\n notes[i][1] + \"/\" + notes[i][0] + \".wav\"))\n i += 1\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "fd7518c2200a8daf57ced9e6c4116aa4", "score": "0.66190565", "text": "def generate_chord():\n A = generate_note(440,1)\n C = generate_note(532.25,1)\n E = generate_note(659.25,1)\n B = generate_note(493.88,1)\n D = generate_note(587.33,1)\n F = generate_note(698.46,1)\n G = generate_note(783.99,1)\n ACE = A.wave+C.wave+E.wave\n CEG = C.wave+E.wave+G.wave\n ACE = Signal(44100,ACE)\n CEG = Signal(44100,CEG)\n ACE.write_file('chord1')\n CEG.write_file('chordCEG')\n zeros = np.zeros(len(ACE.wave))\n mary = np.append(E.wave,D.wave)\n mary = np.append(mary,[C.wave,D.wave,E.wave,E.wave,E.wave,D.wave,D.wave,D.wave,E.wave,E.wave,E.wave])\n mary = Signal(44100,mary)\n mary.write_file('chord2')\n pass", "title": "" }, { "docid": "70805818eceeb3ab6bcc3b72a3961142", "score": "0.661035", "text": "def get_not_default_chord(self, notes_, duration):\n\n # Split each elements of input array by symbol 
,\n # In result we get array of array of string.\n # Each elements of its array is array which contain two elements:\n # first element ([0][0]) - name of note. Second ([0][1]) - octave of corresponding note\n notes = [[]]\n for item in notes_:\n temp = item.split(\",\")\n notes.append(temp)\n\n del notes[0] # Delete first element, because its empty\n i = 2 # i = 2, because before start loop, we concatenate first and second elements of array\n\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + notes[0][1] + \"/\" +\n notes[0][0] + \".wav\")\n result = result.overlay(AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + notes[1][1] +\n \"/\" + notes[1][0] + \".wav\"))\n\n while i < len(notes):\n # Concatenate current and previous element of array in parallel mode\n result = result.overlay(AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" +\n notes[i][1] + \"/\" + notes[i][0] + \".wav\"))\n i += 1\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "8349e9c16cea3a078017575741c1874d", "score": "0.63813186", "text": "def playChord(self, notes=('C6', 'E6', 'G6'), durations=0.3):\n\n freqs = [self._pitch_to_freq(p) for p in notes]\n if not isinstance(durations, list):\n durations = [durations]*len(freqs)\n\n for f, d in zip(freqs, durations):\n self.beeper.beep(d, f)", "title": "" }, { "docid": "c1c4d32843a62e482dac86c4830f2e78", "score": "0.62916815", "text": "def getChordSymbol(line):\n\n\t# FIXME : Shouldn't assume sharp note names, but need CO to be extended to allow nameless root notes.\n\tnotes = ['C','C#','D','D#','E','F','F#','G','G#','A','A#','B']\n\t\n\ttry:\n\t\t[crotchetnum, timestamp, rootnum, ctype] = line.split(',')\n\texcept Exception,e :\n\t\tprint \"Exception ! 
Trying to split line : \"+line\n\t\traise\n\n\trootnote = notes[int(rootnum)]\n\t\n\tshorthandmap = {'major' : 'maj', 'minor' : 'min', 'sus4' : 'sus4', 'dim' : 'dim', 'sus9' : '(1,5,9)'}\n\t\n\t\t\n\treturn rootnote+':'+shorthandmap[ctype.strip()]", "title": "" }, { "docid": "86568ba7ddc3f665d2fc45f4b0f931f7", "score": "0.6118687", "text": "def chord(chord):\n if len(chord) == 1:\n root=degrees[chord]\n note_list = [root,root+4,root+7]\n elif chord[-1] =='m':\n root=degrees[chord[0]]\n note_list = [root,root+3,root+7]\n return note_list", "title": "" }, { "docid": "fba7e57ab8ababd57034367e2031b295", "score": "0.6068974", "text": "def play_chord(self, chord):\r\n #Settings are optimized for chords with a long release time\r\n vol = random.randint(30, 70)\r\n self.output.note_on(note, vol, 6)\r\n self.output.note_on(note + chord[0], 50 + vol, 6)\r\n self.output.note_on(note + chord[1], 50 + vol, 6)\r\n if len(chord) > 2:\r\n self.output.note_on(note + chord[2], 50)\r\n \r\n a = random.choice([-5, 0, 7, 12])\r\n b = random.randint(1,3)\r\n \r\n time.sleep(random.randint(1,3))\r\n self.output.note_on(note + a, 50)\r\n time.sleep(random.randint(1,3))\r\n \r\n self.output.note_off(note)\r\n self.output.note_off(note + chord[0])\r\n self.output.note_off(note + chord[1])\r\n self.output.note_off(note + a)\r\n self.output.note_off(note + b)\r\n \r\n if len(chord) > 2:\r\n self.output.note_off(note + chord[2])", "title": "" }, { "docid": "d81baffd411c23e5284c05a59bf79ecd", "score": "0.5919639", "text": "def get_note(self, note, octave, duration):\n\n result = None\n\n if note == \"c\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/c.wav\")\n elif note == \"c_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/c_sharp.wav\")\n elif note == \"d\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/d.wav\")\n elif note == \"d_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/d_sharp.wav\")\n elif note == \"e\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/e.wav\")\n elif note == \"f\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/f.wav\")\n elif note == \"f_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/f_sharp.wav\")\n elif note == \"g\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/g.wav\")\n elif note == \"g_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/g_sharp.wav\")\n elif note == \"a\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/a.wav\")\n elif note == \"a_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/a_sharp.wav\")\n elif note == \"h\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/\" + str(octave) + \"/h.wav\")\n elif note == \"silence\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/guitar/silence.wav\")\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "85dff3cedbaf2755a7b2411487701937", "score": "0.58418316", "text": "def get_playlist(self, chord):\r\n \r\n steps = 
self.profile_manager.logic.bar_division\r\n pulses = random.randint(1,5)\r\n rhythm_list = self.rhythm_gen.bjorklund(pulses, steps)\r\n \r\n for index, item in enumerate(rhythm_list):\r\n if item == 0:\r\n rhythm_list[index] = \"0\"\r\n else:\r\n rhythm_list[index] = self.get_rand_note()\r\n \r\n \"SYNTAX: CHN-NOTE-LEN-VEL eg. 1-50-16-65\"\r\n \r\n return rhythm_list\r\n \r\n \r\n \"\"\"\" TEMPORARY TEST LISTS\r\n lsts = [\r\n [0, 0, \"1-C3-16-60\", 0],\r\n [\"2-D5-4-77\", \"3-D2-8-60\", 0, 0],\r\n [0, 0, 0, \"3-F2-16-80\"],\r\n [0, \"1-C1-2-65\", 0, 0]\r\n ]\r\n \"\"\"", "title": "" }, { "docid": "871706cb668eb9f60a6036b000e6220e", "score": "0.58189493", "text": "def cMaj():\n playNote(60,250,90) #play C note\n playNote(64,250,90) #play E note\n playNote(69,250,90) #play G note ", "title": "" }, { "docid": "3d64be4aae5f30cc5d88ee51e1e2e492", "score": "0.5816853", "text": "def get_note(self, note, octave, duration):\n\n result = None\n\n if note == \"c\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/c.wav\")\n elif note == \"c_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/c_sharp.wav\")\n elif note == \"d\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/d.wav\")\n elif note == \"d_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/d_sharp.wav\")\n elif note == \"e\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/e.wav\")\n elif note == \"f\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/f.wav\")\n elif note == \"f_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/f_sharp.wav\")\n elif note == \"g\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/g.wav\")\n elif note == \"g_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/g_sharp.wav\")\n elif note == \"a\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/a.wav\")\n elif note == \"a_sharp\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/a_sharp.wav\")\n elif note == \"h\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/\" + str(octave) + \"/h.wav\")\n elif note == \"silence\":\n result = AudioSegment.from_file(self.path_to_directory + \"/samples/piano/silence.wav\")\n\n return Piano.calculate_duration(result, duration, self.tempo)", "title": "" }, { "docid": "47a78ad056e110ac2a54e3ca5addd87b", "score": "0.58123404", "text": "def modal_chord_html(request):\n indexes = {\n 'Ab': 0,\n 'A': 1,\n 'Bb':2,\n 'B':3,\n 'Cb':3,\n 'C':4,\n 'C#':5,\n 'Db':5,\n 'D': 6,\n 'D#': 7,\n 'Eb': 7,\n 'E':8,\n 'F':9,\n 'F#':10,\n 'Gb':10,\n 'G':11,\n 'G#':0\n }\n ccli = int(request.GET.get('ccli'))\n transpose_final_key = request.GET.get('transpose_value_str')\n song = Song.objects.get(ccli=int(ccli))\n chord_stream = song.chords\n if not chord_stream:\n return\n lines = chord_stream.split('\\n')\n \n html = ''\n emphasis = False\n for line in lines:\n #handle directives first\n if '{' in line:\n if '{title:' in line:\n html += '<h3>'+line[7:-1]+'</h3>'\n continue\n elif '{st:' in line:\n html += '<h5>'+line[4:-1]+'</h5>'\n continue\n elif '{key:' in 
line:\n original_key = line[5:-1]\n continue\n elif '{start_of_chorus}' in line:\n emphasis = True\n continue\n elif '{end_of_chorus}' in line:\n emphasis = False\n continue\n #no directive in line\n else:\n if '[' in line:\n separations = []\n separations = line.split('[')\n chords = []\n lyrics = []\n # go through each section and get the chord and lyric pairings\n for section in separations:\n if section == '':\n continue\n #if no chord, still append nothing to chords\n elif ']' not in section:\n lyrics.append(section)\n chords.append('')\n continue \n chordlyric = section.split(']')\n chords.append(chordlyric[0])\n lyrics.append(chordlyric[1])\n html += '<table>'\n if emphasis:\n html += '<tr class=\"emphasis\">'\n else:\n html += '<tr>'\n\n transposed_chords = []\n \n if 'm' in original_key:\n # minor = True\n original_root = original_key[:-1]\n final_root = transpose_final_key[:-1]\n else:\n # minor = False\n original_root = original_key\n final_root = transpose_final_key\n original_key_index = indexes[original_root]\n final_key_index = indexes[final_root]\n \n # original_key_index = indexes[original_key]\n # final_key_index = indexes[transpose_final_key]\n transpose_step = final_key_index - original_key_index\n \n for chord in chords:\n if chord == '':\n transposed_chords.append('')\n continue\n transposed_chord = transpose(chord, transpose_step, original_key, transpose_final_key)\n transposed_chords.append(transposed_chord)\n\n for chord in transposed_chords:\n html += '<td>'+chord+'</td>'\n \n if emphasis:\n html += '</tr><tr class=\"emphasis\">'\n else:\n html += '</tr><tr>'\n \n for lyric in lyrics:\n html += '<td>'+lyric+'</td>'\n \n html += '</tr></table>'\n\n else:\n html += '<br>' + line\n #adding another br so last line doesn't get cut off\n html += '<br>'\n return HttpResponse(html)", "title": "" }, { "docid": "061cc36dc706558abe7c498a16e65d54", "score": "0.5729549", "text": "def get_chord(symbol, base_note):\n return [triad_notes[symbol][i] + base_note for i in range(len(triad_notes[symbol]))]", "title": "" }, { "docid": "a46203395044e9a769570a140ba2749d", "score": "0.5715923", "text": "def _chord_callback(data_value) -> List[InputMediaPhoto]:\n caption = data_value.split('-')[1]\n chord = caption.replace('#', 'x').lower()\n print('Chord:', caption)\n return ChordsParser.get_chord(chord, caption)", "title": "" }, { "docid": "7d458fe38a5c776227158fc4841374ca", "score": "0.5696006", "text": "def get_stream_from_chord_prog(chord_prog, key):\n #print(chord_prog)\n stream_chord_prog = stream.Stream()\n quintet = get_voice_led_quintet(chord_prog, key) #gets tuple of eight notes for bassline (except not lowered an octave). its actually a quintet with five parts. 
We only want 2 of the parts\n counter = 0\n for scale_figure in chord_prog:\n comp_chord = roman.RomanNumeral(scale_figure, key) # this creates a music21 object from the scale degree\n chord_pitches_list = list(comp_chord.pitches) #We want to add bass note to chord object, however, chord object is tuple and tuple can't be manipulated (tuples are immutable)\n #bass_note = comp_chord.bass() #gets lowest note from 'chord'\n bass_note = note.Note(quintet[0][counter])\n bass_note.octave = bass_note.octave - 2 #<<<#lowers the octave by TWO, making the note lower\n chord_pitches_list.append(bass_note) #<<made list of tuple and added bass note to it\n\n high_note = note.Note(quintet[3][counter])\n high_note.octave = high_note.octave + 1 #<<<#raises the octave by ONE, making the note higher\n chord_pitches_list.append(high_note) #<<chord pitches list is the list that is about to be added the strea,\n\n chord_pitches_tuple = tuple(chord_pitches_list) #turn edited version of chord into a tuple\n comp_chord = chord.Chord(chord_pitches_tuple) #make chord object out of tuple\n comp_chord.duration.quarterLength = 4.0 #this turns the chord into actual notes with a duration so it can be added onto the music21 stream\n stream_chord_prog.append(comp_chord)\n counter += 1\n\n return stream_chord_prog", "title": "" }, { "docid": "9dcde27d9a2a275056e51427148c7906", "score": "0.562131", "text": "def quarter_chord(self) -> cas.DM:\n return 0.75 * self.xyz_le + 0.25 * self.xyz_te()", "title": "" }, { "docid": "aff8e790f0f6f856298309f26fc3bd49", "score": "0.5526221", "text": "def __init__(self, start, end, direction):\n self.start = start\n self.end = end\n self.direction = direction\n v = 0.5 * (end-start) # half oriented chord\n angle = phase(v) - direction\n while angle > pi:\n angle -= 2*pi\n while angle <= -pi:\n angle += 2*pi\n self.angle = angle # signed angle between arc and chord\n self.center = 0.5*(start+end) + 1j*v / tan(angle) # center of circle\n absv = abs(v)\n self.l = 2 * absv # length of chord\n if angle >= 0:\n self.radius = absv / sin(angle) # radius\n self.alpha = phase(v) - pi/2 - angle # start angle\n self.beta = self.alpha + 2*angle # end angle\n else:\n self.radius = -absv / sin(angle)\n self.alpha = phase(v) + pi/2 - angle\n self.beta = self.alpha + 2*angle\n self.end_direction = direction + 2*angle # direction of arc in end point", "title": "" }, { "docid": "8ae05cd98e134758259a70e4e5ee2060", "score": "0.55210674", "text": "def construct_harmony(curr_loop):\n\n # Constructing the shell chords\n prev_chord = None\n prev_voice = None\n\n for i in range(curr_loop.measures):\n curr_chord = shell_chord(curr_loop, i, prev_chord)\n curr_voice = get_voicing(curr_chord, curr_loop, prev_voice)\n\n prev_chord = curr_chord\n prev_voice = curr_voice\n\n for curr_note in curr_loop.harm['shell'].measures[i].notes:\n curr_note.value = {'chord': curr_chord, 'voicing': curr_voice}\n\n # Decorating with passing chords\n curr_loop.harm['passing'] = curr_loop.harm['shell'].extend().collapse(curr_loop)\n for curr_measure in curr_loop.harm['passing'].measures:\n passing_chords(curr_measure, curr_loop)\n\n # Constructing the rhythmic attack/sustain Frame\n temp_harm = curr_loop.harm['rhythm'].extend()\n for curr_note in temp_harm.measures[0].notes:\n if curr_note.index == 0:\n curr_note.value = 'A'\n elif curr_loop.harm['passing'][curr_note.index].value == curr_loop.harm['passing'][curr_note.index-1].value:\n curr_note.value = 'S'\n else:\n curr_note.value = 'A'\n curr_loop.harm['rhythm'] = 
temp_harm.collapse(curr_loop)\n\n # Constructing the final compressed harmony Frame\n curr_loop.harm['final'] = curr_loop.harm['passing'].extend().collapse(curr_loop)\n curr_loop.harm['final'].compress()", "title": "" }, { "docid": "2a7d3c49a27b321e3cf280951b3215c6", "score": "0.5509813", "text": "def choose_chord(self):\r\n chords = [[3, 7], [4, 7], [3, 6], [5, 7], [5, 12], [4, 9, 12], [3, 7, 10]]\r\n \r\n return random.choice(chords)", "title": "" }, { "docid": "43cd0dc91a1315d2f20bc3d871c6ab4f", "score": "0.5481143", "text": "def getChordScale(self, n):", "title": "" }, { "docid": "5558a2277d2339f694775f65a6add6ec", "score": "0.54629415", "text": "def calculate_chord(radius, arc_degrees):\n\n # Calculate the arc_degrees in radians.\n # We need this because sin() expects it.\n arc_radians = radians(arc_degrees)\n\n # Calculate the chord.\n return radius * (2 * sin(arc_radians / 2)) # km", "title": "" }, { "docid": "48a9d4bc105a52d8a938b5a3cae4e7a2", "score": "0.54548794", "text": "def make_interval_chord_midi(chords, key):\n result = []\n c_transposed = transpose(chords, key, 'C')\n\n for chord in c_transposed:\n if chord[0] == 'H':\n result.append('-1')\n continue\n\n if len(chord) == 1 or (chord[1] != '#' and chord[1] != 'b'):\n name_index = 1\n if chord[0] == 'F':\n mode = 'b'\n else:\n mode = '#'\n else:\n name_index = 2\n mode = chord[1]\n\n interval = calc_interval('C', chord[:name_index])\n\n if interval in vocab_value:\n result_chord = vocab_interval[interval]\n else:\n if mode == '#':\n interval -= 0.5\n else:\n interval += 0.5\n result_chord = vocab_interval[interval] + mode\n\n result.append(result_chord + chord[name_index:])\n\n return result", "title": "" }, { "docid": "6fdb35b740d2c9a54ad87d270553c841", "score": "0.5394407", "text": "def CA_measures(self):\n for i in range(self.length):\n chord = self.chrd_prg[i%len(self.chrd_prg)]\n rmeasure = Measure(chord=chord, note_lengths=self.note_lengths)\n rmeasure.CA_notes()\n rmeasure.CA_bass()\n self.add_measure(rmeasure)", "title": "" }, { "docid": "776bb12e6e05e3812aae727bd4f29c7c", "score": "0.5390063", "text": "def make_chord_from_intervals(intervals, starting_pitch=0):\n chord = [starting_pitch]\n index = 0\n for num_half_steps in intervals:\n chord.append(mod_12(chord[index] + num_half_steps))\n index += 1\n return tuple(chord)", "title": "" }, { "docid": "4efe77093d3599cee0b16ecad7718972", "score": "0.53892785", "text": "def build_chromatic_scale():\n # letter names for note entries\n letter_names = [\"C\", \"D\", \"E\", \"F\", \"G\", \"A\", \"B\"]\n # symbolic accidental names for notes, \"\" is for a diatonic note without accidental\n accidental_names = [\"bb\", \"b\", \"\", \"#\", \"##\"]\n # safe accidental name variants\n accidental_safe_name = {\"bb\": \"ff\", \"b\": \"f\", \"\": \"\", \"#\": \"s\", \"##\": \"ss\"}\n # lowest octave name is 00 instead of -1.\n octave_names = [\"00\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n # semi-tone shifts applied to diatonic notes within the octave\n letter_steps = [0, 2, 4, 5, 7, 9, 11]\n # semi-tone shift applied to diatonic notes for notes with accidentals\n accidental_steps = [-2, -1, 0, 1, 2]\n # init table with midi keys, each holding a note array and frequency.\n # the note array will hold all chromatic note names for the midi key.\n table = {key: [['','','','',''], 440.0 * 2 ** ((key-69.0)/12)] for key in range(128)}\n # iterate all the octave for midi key numbers 0-127.\n for octave_index, octave_name in enumerate(octave_names):\n # 
starting key number for notes in the octave\n octave_midi = octave_index * 12\n # iterate all the diatonic letter names for notes.\n for letter_index, letter_name in enumerate(letter_names):\n # the midi key number of the diatonic note\n letter_midi = octave_midi + letter_steps[letter_index]\n # iterate the accidentals and create all possible note names for the letter\n for accidental_index, accidental_name in enumerate(accidental_names):\n # get the semitone amount to shift the letter note by\n accidental_step = accidental_steps[accidental_index]\n # calculate the midi key number for the note\n note_midi = letter_midi + accidental_step\n # stop notes outside of midi range (there are only a few)\n if 0 <= note_midi <= 127:\n accidental_name = accidental_names[accidental_index]\n # create the official note name\n note_name1 = letter_name + accidental_name + octave_name\n # create the Pitch for the official name.\n note_pitch = Pitch(note_name1)\n # create variants (lower case letter names, safe accidental names)\n note_name2 = letter_name.lower() + accidental_name + octave_name\n note_name3 = letter_name + accidental_safe_name[accidental_name] + octave_name\n note_name4 = letter_name.lower() + accidental_safe_name[accidental_name] + octave_name\n # fetch the midi data from the table\n midi_data = table[note_midi]\n # add the note to the note array\n ##midi_data[0][accidental_index] = note_name1\n midi_data[0][accidental_index] = note_pitch\n # get the frequency from the midi data and add it to the note data.\n note_freq = table[note_midi][1]\n # add the hash entry for the note name\n ##table[note_name1] = [note_midi, note_freq]\n table[note_name1] = [note_midi, note_freq, note_pitch]\n # add the variants (lower case letter, safe accidentals)\n table[note_name2] = table[note_name1]\n table[note_name3] = table[note_name1]\n table[note_name4] = table[note_name1]\n # add entries for musical rests\n r = Pitch()\n table['R'] = [-1, 0.0, r]\n table['r'] = table['R']\n #table[-1] = [[r,r,r,r,r], 0.0]\n return table", "title": "" }, { "docid": "bd37a3c965c17b03739e99eb400ad848", "score": "0.5384938", "text": "def convertChordsToNotes(s):", "title": "" }, { "docid": "58c3ff2226003166ac27f2589249276d", "score": "0.53816175", "text": "def circle(\r\n piece,\r\n pitch_type='tpc',\r\n measures=None, # need documentation\r\n log=False,\r\n vocabulary={0:'C', 1:'Db', 2:'D', 3:'Eb', 4:'E', 5:'F', 6:'Gb', 7:'G', 8:'Ab', 9:'A', 10:'Bb', 11:'B'},\r\n pitch_class_display=False,\r\n colorbar=True,\r\n duration=False,\r\n fifths=True,\r\n figsize=[7, 4],\r\n top=None,\r\n rotation=0,\r\n clockwise=True,\r\n cmap='Blues',\r\n nan_color=None,\r\n show=False,\r\n **kwargs):\r\n #settings\r\n df = get_df_short(piece, vocabulary=vocabulary, pitch_type=pitch_type, measures=measures, duration=duration)\r\n\r\n #color map\r\n cmap = matplotlib.cm.get_cmap(cmap)\r\n color_note = []\r\n\r\n #dataFrame for the plot if tpc\r\n df_tpc_pie = pd.DataFrame(columns=['note', 'part', 'pc'])\r\n\r\n #put top in the right form\r\n if pd.isnull(top) == False:\r\n if is_tpc(top) and pitch_class_display:\r\n top = get_pc(top)\r\n if is_pc(top) and not pitch_class_display:\r\n top = vocabulary[int(top)]\r\n\r\n #remember position of data in Series\r\n s_pos = pd.Series()\r\n count = 0\r\n part = 0\r\n letter = 'nan'\r\n s_fifth = pd.Series()\r\n \r\n fig = plt.figure(figsize=figsize)\r\n if not show:\r\n plt.close(fig)\r\n ax = fig.add_subplot(111, aspect='equal')\r\n \r\n #Set the order in function of fifth\r\n if fifths:\r\n 
s_tpc_format = pd.Series((0, 7, 2, 9, 4, 11, 6, 1, 8, 3, 10, 5))\r\n else:\r\n s_tpc_format = pd.Series((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))\r\n\r\n #for plot if pitch_class_display\r\n s_twelve_ones = pd.Series((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), index=s_tpc_format)\r\n\r\n #if it show the tpc values\r\n if pitch_class_display == False:\r\n #put the right values in 'number'\r\n if duration:\r\n df_data = df.copy()\r\n df_data.rename(columns={'duration': 'number'},inplace=True)\r\n else:\r\n df_data = df.copy()\r\n df_data.rename(columns={'nb': 'number'},inplace=True)\r\n\r\n #Normalize the values for the colors\r\n max_value = df_data['number'].max()\r\n min_value = df_data['number'].min()\r\n if log:\r\n norm = matplotlib.colors.LogNorm(vmin=min_value, vmax=max_value)\r\n else:\r\n norm = matplotlib.colors.Normalize(0, vmax=max_value)\r\n \r\n #for chromatic order\r\n if fifths == False:\r\n\r\n #for each pitch class values\r\n for i in range(12):\r\n\r\n #if a pitch class is represented in the data\r\n if df_data['pc'].isin([s_tpc_format[i]]).any():\r\n count = 0\r\n s_pos.drop(s_pos.index, inplace=True)\r\n \r\n #count how much time there is tpc values for a same pitch class\r\n for j in range(df_data['pc'].isin([s_tpc_format[i]]).shape[0]):\r\n if df_data['pc'].isin([s_tpc_format[i]])[j]:\r\n s_pos.at[count] = j\r\n count = count + 1\r\n\r\n #devide the pie part and set color\r\n for j in range(count):\r\n part = 1/count\r\n letter = df_data.at[s_pos.at[j], 'step']\r\n\r\n #write the notes\r\n letter = put_flat_sharp(letter, df_data.at[s_pos.at[j], 'acc'])\r\n\r\n #register the informations\r\n df_tpc_pie = df_tpc_pie.append({'note':letter, 'part':part},\r\n ignore_index=True)\r\n color_note.append(cmap(norm(df_data.at[s_pos.at[j], 'number'])))\r\n\r\n #if the pitch class do no appear in the piece\r\n else:\r\n letter = vocabulary[s_tpc_format[i]]\r\n\r\n df_tpc_pie = df_tpc_pie.append({'note':letter, 'part':1}, ignore_index=True)\r\n if pd.isnull(nan_color):\r\n color_note.append(cmap(0))\r\n else:\r\n color_note.append(nan_color)\r\n else:\r\n #get the fifth numbers of the notes\r\n for i in range(df_data.shape[0]):\r\n s_fifth.at[i] = get_fifth_nb(df_data.at[i, 'tpc'])\r\n df_data['fifth'] = s_fifth\r\n\r\n #create df_tpc_pie and get the colours\r\n for i in range(df_data['fifth'].max()-df_data['fifth'].min()+1):\r\n #the part are equal for the moment\r\n df_tpc_pie.at[i, 'part'] = 1\r\n df_tpc_pie.at[i, 'note'] = get_fifth_note(i + df_data['fifth'].min())\r\n df_tpc_pie.at[i, 'pc'] = get_pc(df_tpc_pie.at[i, 'note'])\r\n \r\n if df_data['fifth'].isin([i + df_data['fifth'].min()]).any():\r\n #get the colour for the note who has the good fifth number\r\n color_note.append(cmap(norm(df_data['number'][df_data['fifth']==(i + df_data['fifth'].min())].iat[0])))\r\n elif df_data['fifth'].isin([i + df_data['fifth'].min()]).any() == False and pd.isnull(nan_color) == False:\r\n color_note.append(nan_color)\r\n else:\r\n color_note.append(cmap(0))\r\n\r\n #if clockwise invert the order of the data to be displayed clockwise, inverse also the index\r\n if clockwise:\r\n df_tpc_pie = df_tpc_pie.iloc[::-1]\r\n color_note = list(reversed(color_note))\r\n\r\n #calculate the angle for the topPitchClass to be at the top\r\n if pd.isnull(top) == False and fifths == False and df_tpc_pie['note'].isin([top]).any() == True:\r\n if clockwise:\r\n rotation = rotation + 90 + df_tpc_pie.at[0, 'part'] * 15\r\n else:\r\n rotation = rotation + 90 - df_tpc_pie.at[0, 'part'] * 15\r\n for i in 
range(df_tpc_pie.shape[0]):\r\n if top == df_tpc_pie.at[i, 'note']:\r\n if df_tpc_pie.at[i, 'part'] != 1:\r\n if clockwise:\r\n rotation = rotation - 15*df_tpc_pie.at[i, 'part']\r\n else:\r\n rotation = rotation + 15*df_tpc_pie.at[i, 'part']\r\n break\r\n else:\r\n if clockwise:\r\n rotation = rotation + 30*df_tpc_pie.at[i, 'part']\r\n else:\r\n rotation = rotation - 30*df_tpc_pie.at[i, 'part']\r\n\r\n #put the top note at the top\r\n if pd.isnull(top) == False and fifths == True and df_tpc_pie['note'].isin([top]).any() == True:\r\n if clockwise:\r\n rotation = rotation + 90 + 180/df_tpc_pie.shape[0]\r\n else:\r\n rotation = rotation + 90 - 180/df_tpc_pie.shape[0]\r\n for i in range (df_tpc_pie.shape[0]):\r\n if df_tpc_pie.at[i, 'note'] == top:\r\n break\r\n else:\r\n #the sens of reading depend on the orientation\r\n if clockwise:\r\n rotation = rotation + 360/df_tpc_pie.shape[0]\r\n else:\r\n rotation = rotation - 360/df_tpc_pie.shape[0]\r\n \r\n\r\n #put nice sharps and flats\r\n for i in range(df_tpc_pie.shape[0]):\r\n df_tpc_pie.at[i, 'note'] = df_tpc_pie.at[i, 'note'].replace('b', r'$\\flat$')\\\r\n .replace('#', r'$\\sharp$')\r\n \r\n #plot the piechart with index 'tpc'\r\n df_tpc_pie.index = df_tpc_pie['note']\r\n \r\n #do the pie chart\r\n ax.pie(labels=df_tpc_pie.index, x=df_tpc_pie['part'], colors=color_note, startangle=rotation, **kwargs)\r\n\r\n #if asked plot the colorbar left of the piechart\r\n if colorbar:\r\n ax2 = fig.add_subplot(1, 10, 1)\r\n cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='vertical')\r\n \r\n #display with the pc values\r\n else:\r\n #put the right values in 'number'\r\n if duration:\r\n df_data = pd.concat(\r\n [df['pc'], df['duration']],\r\n axis=1,\r\n keys=['pc', 'number'])\r\n else:\r\n df_data = pd.concat(\r\n [df['pc'], df['nb']],\r\n axis=1,\r\n keys=['pc', 'number'])\r\n\r\n #Normalize the values for the colors\r\n max_value = df_data['number'].max()\r\n min_value = df_data['number'].min()\r\n if log:\r\n norm = matplotlib.colors.LogNorm(vmin=min_value, vmax=max_value)\r\n else:\r\n norm = matplotlib.colors.Normalize(0, vmax=max_value)\r\n \r\n #set data df_data\r\n df_data = (df_data.groupby('pc')).sum()\r\n df_data = df_data.reindex(s_tpc_format)\r\n df_data.fillna(0, inplace=True)\r\n\r\n #set colors\r\n for i in range(0, 12):\r\n if df_data.iat[i, 0] != 0:\r\n color_note.append(cmap(norm(df_data.iat[i, 0])))\r\n else:\r\n if pd.isnull(nan_color):\r\n color_note.append(cmap(0))\r\n else:\r\n color_note.append(nan_color)\r\n\r\n #if clockwise invert the order of the data to be displayed clockwise\r\n if clockwise:\r\n s_twelve_ones = s_twelve_ones.iloc[::-1]\r\n color_note = list(reversed(color_note))\r\n\r\n #calculate the angle for the topPitchClass to be at the top\r\n if pd.isnull(top) == False:\r\n for i in range(s_tpc_format.shape[0]):\r\n if top == (s_twelve_ones.index)[i]:\r\n rotation = rotation + 75 - i * 30\r\n break\r\n ax.pie(labels=s_twelve_ones.index, x=s_twelve_ones, colors=color_note, startangle=rotation, **kwargs)\r\n\r\n #if asked plot the colorbar left of the piechart\r\n if colorbar:\r\n ax2 = fig.add_subplot(1, 10, 1)\r\n cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='vertical')\r\n return fig", "title": "" }, { "docid": "96bc891fd8a531831923c45c61e834bb", "score": "0.53810406", "text": "def getChordScaleDegreesAsChord(self, n, degrees):", "title": "" }, { "docid": "d11eeb501fc43303b16778ecb8c6fe5d", "score": "0.5331633", "text": "def 
create_chord_from_random_notes_in_key(self, minNum=2, maxNum=6):\n nl = [np.random.choice(self.get_notes_in_key()) for i in range(np.random.randint(minNum,maxNum))]\n ch = chord.Chord(nl)\n return ch", "title": "" }, { "docid": "a964c7bae35ea170ff9130b53cf248bc", "score": "0.5285635", "text": "def CA_bass(self):\n chords = chord(self.chord)\n bass = Note(pitch = chords, duration = 4)\n self.add_bass(bass)", "title": "" }, { "docid": "d50341a0dffe5291a72c4650ae36f284", "score": "0.5267212", "text": "def __illustrate__(self):\n import abjad\n chord = abjad.Chord(self, abjad.Duration(1))\n voice = abjad.Voice([chord])\n staff = abjad.Staff([voice])\n score = abjad.Score([staff])\n lilypond_file = abjad.LilyPondFile.new(score)\n return lilypond_file", "title": "" }, { "docid": "6079cb601f8d09ad748995c4ec408565", "score": "0.52628016", "text": "def psychoAcousticDistance(chord1, chord2):", "title": "" }, { "docid": "34c00e6afb804011f5175562ae4c91c6", "score": "0.52496254", "text": "def generate_melody(key, progression, progression_repeats, major=True):\n out = []\n for _ in range(progression_repeats):\n time_used = 0.0 # Number of measures that have been generated so far\n for i, chord in enumerate(progression):\n all_tones = generate_scale(key, 2, major)\n chord_tones = get_chord(chord, note_number(key+'2'))\n chord_tones.extend([x+12 for x in chord_tones])\n non_chord_tones = list(set(all_tones[:-1]) - set(chord_tones))\n non_chord_tones.extend([x+12 for x in non_chord_tones])\n last_played = None\n\n # Generate a sequence of notes to fill a measure for this chord\n while time_used < i + 1:\n note_vals = [(0.125, 2), (0.25, 4), (0.375, 2), (0.5, 2), (0.75, 1), (1.0, 1), (1.25, 0.5), (1.5, 0.25)]\n # Only allow note lengths that will fit into the len(progression)\n # measures for this chord progression (i.e. 
don't allow spill\n # of notes into different repetitions of the progression)\n possible_note_vals = [x for x, p in note_vals if time_used + x <= len(progression)]\n note_vals_prob = [p for x, p in note_vals if time_used + x <= len(progression)]\n note_vals_prob = [x*1.0/sum(note_vals_prob) for x in note_vals_prob]\n # Choose a note length\n note_val = numpy.random.choice(possible_note_vals, p=note_vals_prob)\n # Choose the set of note numbers we could pick from (either\n # the chord tones or non-chord tones)\n select_from = non_chord_tones if int(randint(0, 10)/10.0) else chord_tones\n # Incentivize choosing notes that are close to the previously\n # played note so that we aren't just jumping all over the\n # place and sounding terribly random\n NEARBY_INCENTIVE = 2.0\n select_from_probabilities = [ (36 - int(math.fabs(last_played - x)))**NEARBY_INCENTIVE if last_played else 1 for x in select_from ]\n select_from_probabilities = [ x * 1.0 / sum(select_from_probabilities) for x in select_from_probabilities ]\n out.append((numpy.random.choice(select_from, p=select_from_probabilities), 80, note_val))\n last_played = out[-1][0]\n out.extend([None for x in range(int(note_val/0.125)-1)])\n time_used += note_val\n assert len(out) == 8 * len(progression) * progression_repeats\n return out", "title": "" }, { "docid": "c28fe18c4f71af211ccda3a21b8b6d9d", "score": "0.51965153", "text": "def CA_notes(self):\n beat = 0\n while beat < self.length:\n if beat+1 in self.str_beat:\n pitch = [random.choice(chord(self.chord))]\n else:\n pitch = [random.choice(notes)]\n duration = random.choices(list(self.note_lengths.keys()),list(self.note_lengths.values()))\n beat += duration[0]\n if beat > self.length:\n beat -= duration[0]\n duration = self.length - beat\n duration = [duration]\n beat += duration[0]\n rnote = Note(pitch, duration[0])\n self.add_note(rnote)\n assert beat == self.length", "title": "" }, { "docid": "f8b9d3dd93127f378765ea1056cd6190", "score": "0.5183889", "text": "def cpsmidi(cps: float) -> float:\n return 69 + 12 * np.log2(cps / 440.0)", "title": "" }, { "docid": "cd25ace712ad5321dca786806750271e", "score": "0.51812875", "text": "def octave(self, octave = 0):\r\n return Note(self.pitch + octave * 12)", "title": "" }, { "docid": "015a1c60b8980ac36f7c09d7f8e35bbb", "score": "0.5160207", "text": "def __init__(self, start_time: float, end_time: float, all_chords_list: list):\n self.all_chords_list = all_chords_list\n self.start_time = start_time\n self.end_time = end_time\n self.duration = end_time - start_time\n self.notes = []\n self.pitches = set()\n self.pitch_classes = set()\n self.chroma = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n self.most_likely_chord = None\n self.most_likely_chord_score = -10", "title": "" }, { "docid": "b763695c4cd07303bca85b1cf84fb6c8", "score": "0.5141266", "text": "def _parse_chord(self, chord_word):\n if chord_word != '' and chord_word[0].islower():\n res = chord_word[0].upper()\n res += 'm'\n return res\n elif (chord_word == '*'):\n return ''\n else:\n return chord_word", "title": "" }, { "docid": "5cc9fb7de89c0c6c6c46537d169b04df", "score": "0.5115718", "text": "def translate_to_chord(state_prog):\n list = ['C', 'D', 'E', 'F', 'G', 'A', 'B']\n result = []\n for state in state_prog:\n word = \"\"\n for i, p in enumerate(state):\n if i == 0:\n word += list[int(p) - 1]\n else:\n word += p\n result.append(word)\n\n return result", "title": "" }, { "docid": "26ad0028b519146c0d63597fbea1cebd", "score": "0.50975525", "text": "def chord_spaced_parameters(points):\n cds = 
chord_lengths(points)\n d = sum(cds)\n uk = [0.] + cds + [1.]\n return [sum(uk[0:i + 1]) / d for i in range(len(points))]", "title": "" }, { "docid": "dc4d18b36a9a2afb394fc596894976a3", "score": "0.507723", "text": "def composing(chord_pair_dict):\n full_song = []\n full_song.append(choose_theme_a(chord_pair_dict))\n full_song.append(choose_theme_b(chord_pair_dict))\n full_song.append(choose_theme_a(chord_pair_dict))\n stream_full_song = get_stream_from_chord_prog(full_song)\n stream_full_song = get_voice_led_stream(stream_full_song) #stub\n return", "title": "" }, { "docid": "49a15ef0f1320ba06eff446899d5502d", "score": "0.50548774", "text": "def random_note_in_chord(self, index_):\n\n\t\t# select random note from chord\n\t\tif self.chords[index_] == -1:\n\t\t\tsemitones_above_tonic = self.chords[index_]\n\t\t\treturn note.Rest(quarterLength=1)\n\n\t\telse:\n\t\t\tpossible_notes = self.chords[index_].split('/')\n\t\t\tselect_idx = np.random.randint(len(possible_notes))\n\t\t\tsemitones_above_tonic = int(possible_notes[select_idx])\n\t\t\t\n\t\t\t# return note\n\t\t\ttonic = self.key.getTonic()\n\t\t\tpitch_ = tonic.transpose(semitones_above_tonic)\n\n\t\t\t# select random obctave within vocal range\n\t\t\tlow, high = self.vocal_range\n\n\t\t\t#lowest octave\n\t\t\tlowest_oct = pitch_.transposeAboveTarget(low).octave\n\n\t\t\t# highest octave\n\t\t\thighest_oct = pitch_.transposeBelowTarget(high).octave\n\n\t\t\t# bug sometimes arises where low higher than high\n\t\t\tif lowest_oct >= highest_oct:\n\t\t\t\tlowest_oct = low.octave\n\n\t\t\t# highest + 1 as numpy.random.randint is exclusive of high \n\t\t\toct_ = np.random.randint(low=lowest_oct, high=highest_oct + 1)\n\t\t\tpitch_.octave = oct_\n\n\t\t\treturn note.Note(pitch_, quarterLength=1)", "title": "" }, { "docid": "0ab8fa8b931568248078ce7174654edb", "score": "0.5049438", "text": "def __init__(self, midi_pitch):\n self.midi_pitch = midi_pitch\n self.letter = PITCHES[midi_pitch % 12]\n self.octave = midi_pitch // 12", "title": "" }, { "docid": "ffb0ff6cc669fcd46cdf994b9b40334e", "score": "0.5039541", "text": "def choose_random_chord(chord_pair_dict):\n chord_prog = []\n first_chord = statistics\n #getting list of ALL chords and storing in \"chord_options\"\n chord_options = []\n for chord_pair in chord_pair_dict:\n chord_pair = chord_pair.split(\" -> \")\n print(chord_pair, chord_pair[0])\n chord_options.append(chord_pair[0])\n #getting \"corresponding_chord_probability\"\n corresponding_chord_probability = []\n total = 0\n for chord_pair in chord_pair_dict:\n total += chord_pair_dict[chord_pair] #this loop gets total number of chords in song\n\n for chord_num in chord_pair_dict:\n chance = chord_pair_dict[chord_num]/total #for every chord in the dictionary, find chance of occurence and add to \"corresponding_chord_probability\"\n corresponding_chord_probability.append(chance) #for each option that COULD follow, append the chance of it following\n print(\"chord options\", chord_options)\n print(\"corresponging chord probability\", corresponding_chord_probability)\n chosen_chord = numpy.random.choice(chord_options, 1, p=corresponding_chord_probability)[0]\n return(chosen_chord)", "title": "" }, { "docid": "9c144faab4b43d19ba6114750580c9f4", "score": "0.5008513", "text": "def chord_recognition_all(X, ann_matrix, p=0.15, filt_len=None, filt_type='mean'):\n if filt_len is not None:\n if filt_type == 'mean':\n X, Fs_X = libfmp.c3.smooth_downsample_feature_sequence(X, Fs=1, filt_len=filt_len, down_sampling=1)\n if filt_type == 'median':\n X, 
Fs_X = libfmp.c3.median_downsample_feature_sequence(X, Fs=1, filt_len=filt_len, down_sampling=1)\n # Template-based chord recogntion\n chord_sim, chord_Tem = libfmp.c5.chord_recognition_template(X, norm_sim='1')\n result_Tem = libfmp.c5.compute_eval_measures(ann_matrix, chord_Tem)\n # HMM-based chord recogntion\n A = libfmp.c5.uniform_transition_matrix(p=p)\n C = 1 / 24 * np.ones((1, 24))\n B_O = chord_sim\n chord_HMM, _, _, _ = libfmp.c5.viterbi_log_likelihood(A, C, B_O)\n result_HMM = libfmp.c5.compute_eval_measures(ann_matrix, chord_HMM)\n return result_Tem, result_HMM, chord_Tem, chord_HMM, chord_sim", "title": "" }, { "docid": "972e0e04e35f36f7f6b25bdcfe598511", "score": "0.49731722", "text": "def zelda_Song_of_Time():\r\n play_note(NOTE_A4, 0.5)\r\n play_note(NOTE_D4, 1.0)\r\n play_note(NOTE_F4, 0.5)\r\n play_note(NOTE_A4, 0.5)\r\n play_note(NOTE_D4, 1.0)\r\n play_note(NOTE_F4, 0.5)\r\n play_note(NOTE_A4, 0.25)\r\n play_note(NOTE_C5, 0.25)\r\n play_note(NOTE_B4, 0.5)\r\n play_note(NOTE_G4, 0.5)\r\n play_note(NOTE_F4, 0.25)\r\n play_note(NOTE_G4, 0.25)\r\n play_note(NOTE_A4, 0.5)\r\n play_note(NOTE_D4, 0.5)\r\n play_note(NOTE_C4, 0.25)\r\n play_note(NOTE_E4, 0.25)\r\n play_note(NOTE_D4, 1.5)\r\n PWM.stop(piezo_pin)\r\n PWM.cleanup()", "title": "" }, { "docid": "b570cd43bb7be4a2e735e7d3bc480f69", "score": "0.49640536", "text": "def GetCd(alpha, h):\r\n\r\n if not isinstance(h, numbers.Number):\r\n raise ValueError('Wrong arg type passed to GetCd() for h. Must be'\r\n + ' int or float')\r\n\r\n return AC['parasiteDrag'] + (16 * h / AC['span']) ** 2 / (1 + (16 * h / AC['span']) ** 2) * \\\r\n GetCl(alpha) ** 2 / (AC['AR'] * AC['e'] * math.pi)", "title": "" }, { "docid": "98f54016ff21e87547a477e4f003fdd9", "score": "0.4963949", "text": "def test_2q_cphase_circuit():\n prog = Program(CPHASE(0.1, 1, 2))\n\n expected = r\"\"\"\n \\begin{tikzcd}\n \\lstick{\\ket{q_{1}}} & \\gate[wires=2]{CPHASE(0.1)} & \\qw \\\\\n \\lstick{\\ket{q_{2}}} & \\qw & \\qw\n \\end{tikzcd}\n \"\"\".strip().split()\n\n actual = to_latex(prog).split()\n start_idx = actual.index(\"\\\\begin{tikzcd}\")\n assert expected == actual[start_idx : start_idx + len(expected)]", "title": "" }, { "docid": "3ed974bd52d951f3331b9b9d489796c2", "score": "0.49583477", "text": "def gen_cp(duration, h):\n cp_times = []\n cp_time = 0\n while cp_time < duration:\n dwell_time = np.random.exponential(1. 
/ h)\n cp_time += dwell_time\n if cp_time < duration:\n cp_times += [cp_time]\n\n return np.array(cp_times)", "title": "" }, { "docid": "44db732dff5bcfd63247895fd5a077f7", "score": "0.49443159", "text": "def tada():\n note(440, 0.3)\n note(440, 0.1)\n note(440, 0.1)\n note(880, 0.8)", "title": "" }, { "docid": "f229b430a33defcfc903c00d5e065ac8", "score": "0.4930778", "text": "def load_chords(jams_path, leadsheet_version):\n try:\n with open(jams_path, \"r\") as fhandle:\n jam = jams.load(fhandle)\n except FileNotFoundError:\n raise FileNotFoundError(\"jams_path {} does not exist\".format(jams_path))\n\n if leadsheet_version:\n anno = jam.search(namespace=\"chord\")[0]\n else:\n anno = jam.search(namespace=\"chord\")[1]\n intervals, values = anno.to_interval_values()\n return annotations.ChordData(intervals, \"s\", values, \"jams\")", "title": "" }, { "docid": "39be2b4eec200d450cfb9d168e48466a", "score": "0.4928727", "text": "def make_interval_chord(chords, key):\n result = []\n c_transposed = transpose(chords, key, 'C')\n\n for chord in c_transposed:\n if len(chord) == 1 or (chord[1] != '#' and chord[1] != 'b'):\n name_index = 1\n if chord[0] == 'F':\n mode = 'b'\n else:\n mode = '#'\n else:\n name_index = 2\n mode = chord[1]\n\n interval = calc_interval('C', chord[:name_index])\n\n if interval in vocab_value:\n result_chord = vocab_interval[interval]\n else:\n if mode == '#':\n interval -= 0.5\n else:\n interval += 0.5\n result_chord = vocab_interval[interval] + mode\n\n result.append(result_chord + chord[name_index:])\n\n return result", "title": "" }, { "docid": "7e5956a1bd6d2453f993953413ac4f28", "score": "0.49157333", "text": "def make_banju_16bit(chord_prog, qlen, owd):\n part = stream.Part()\n\n #print(chord_prog)\n\n # Setting for the path and sample files\n os.chdir(owd)\n current_dir = os.getcwd()\n sample_dir = current_dir + '/banju/16bit'\n os.chdir(sample_dir)\n file_names = [file for file in os.listdir(sample_dir) if file != \".DS_Store\"]\n file_to_open = random.choice(file_names)\n # file_to_open = '16bit/chord_001.txt'\\\n file = open(file_to_open, 'r', encoding=\"utf-8\")\n\n file2 = file.read().replace(' ', '')\n banju_list = [int(f) for f in file2.split(',')]\n# Done\n on_list = []\n\n #print('banju is ', banju_list)\n\n for i, banju in enumerate(banju_list):\n if (banju == -1):\n on_list.append(-1)\n continue\n isOn = [1 if 8 & banju else 0, 1 if 4 & banju else 0, 1 if 2 & banju else 0, 1 if 1 & banju else 0]\n on_list.append(isOn)\n\n print('banju is ', on_list)\n\n for i, chord in enumerate(chord_prog): # C, Dm, G7, etc....\n offset = i * 4\n if i > qlen / 4:\n break\n c = harmony.ChordSymbol(chord)\n previous = []\n for j, is_on in enumerate(on_list): # [1, 0, 0, 1]\n if is_on == -1:\n for note_temp in previous:\n note_temp.quarterLength += 0.25\n else:\n previous.clear()\n for k, on in enumerate(is_on):\n if on:\n if k < len(c.pitches):\n note_temp = note.Note(c.pitches[k], quarterLength=0.25)\n part.insert(offset + j * 0.25, note_temp)\n previous.append(note_temp)\n elif k == 3 and k == len(c.pitches):\n note_temp = note.Note(c.pitches[0], quarterLength=0.25)\n note_temp.octave += 1\n part.insert(offset + j * 0.25, note_temp)\n previous.append(note_temp)\n\n part.insert(instrument.AcousticGuitar())\n # part.show('text', addEndTimes=True)\n return part", "title": "" }, { "docid": "ce345cf9dcd8bab5100db2bb9d7fd9c5", "score": "0.4902935", "text": "def make_banju_8bit(chord_prog, qlen):\n part = stream.Part()\n\n print(chord_prog)\n\n # Setting for the path and sample 
files\n current_dir = os.getcwd()\n sample_dir = current_dir + '/banju'\n os.chdir(sample_dir)\n file_names = [file for file in os.listdir(sample_dir)]\n # file_to_open = random.choice(file_names)\n file_to_open = '8bit/condense_001.txt'\n file = open(file_to_open, 'r', encoding=\"utf-8\")\n\n file2 = file.readline().replace(' ', '')\n banju_list = [int(f) for f in file2.split(',')]\n\n on_list = []\n\n print(banju_list)\n\n for i, banju in enumerate(banju_list):\n if (banju == -1):\n on_list.append(-1)\n continue\n isOn = [1 if 8 & banju else 0, 1 if 4 & banju else 0, 1 if 2 & banju else 0, 1 if 1 & banju else 0]\n on_list.append(isOn)\n\n print(on_list)\n\n for i, chord in enumerate(chord_prog): # C, Dm, G7, etc....\n offset = i * 4\n if i > qlen / 4:\n break\n c = harmony.ChordSymbol(chord)\n previous = []\n for j, is_on in enumerate(on_list): # [1, 0, 0, 1]\n if is_on == -1:\n for note_temp in previous:\n note_temp.quarterLength += 0.5\n else:\n previous.clear()\n for k, on in enumerate(is_on):\n if on:\n if k < len(c.pitches):\n note_temp = note.Note(c.pitches[k], quarterLength=0.5)\n part.insert(offset + j * 0.5, note_temp)\n previous.append(note_temp)\n elif k == 3 and k == len(c.pitches):\n note_temp = note.Note(c.pitches[0], quarterLength=0.5)\n note_temp.octave += 1\n part.insert(offset + j * 0.5, note_temp)\n previous.append(note_temp)\n\n part.show('text', addEndTimes=True)\n return part", "title": "" }, { "docid": "7ab93364f3deb75a42b8bba4c4e59291", "score": "0.48928526", "text": "def scale(notes=(\"c\", \"d\", \"e\", \"f\", \"g\", \"a\", \"b\", \"c5\")):\r\n def f(res, name, tempo):\r\n \"\"\" \r\n res: list, list of output states of the qc. \r\n return: Song object.\r\n \"\"\"\r\n s = Song(name=name, tempo=tempo)\r\n duration = 1\r\n for x in res:\r\n note = notes[int(x, base=2) % len(notes)]\r\n s.addNote(note, duration)\r\n return s\r\n return f", "title": "" }, { "docid": "8d8d6b0b0a66d6fabb50e9fd77169942", "score": "0.48915246", "text": "def chordify(self, rules: Rules = TriadBaroque()) -> Stream:\n stream = Stream()\n prev_chord: Chord = rules.first_chord(self.key, self.input_notes[0])\n stream.append(prev_chord)\n for note in self.input_notes[1:]:\n prev_chord = rules.next_chord(self.key, prev_chord, note)\n stream.append(prev_chord)\n stream.append(rules.end_cadence(self.key, prev_chord))\n\n return stream.flat", "title": "" }, { "docid": "6de14a221f19f476ba6b705027d11dc5", "score": "0.48873156", "text": "def calc_modes_circ(chi=3., Lmbda=0.5e-6, d_t=0.7):\n chi_rads = (chi/3600.)*np.pi/180.\n M = (np.pi * chi_rads * d_t/(4. 
* Lmbda))**2.\n\n return M", "title": "" }, { "docid": "5f7b5b0e9e85eda088a6d272adc19663", "score": "0.4881925", "text": "def chord_lengths(points):\n return [distance_point_point(a, b) for a, b in zip(points[:-1], points[1:])]", "title": "" }, { "docid": "be0e93569ae41144411db714f6b6f287", "score": "0.48799253", "text": "def csch(x):\n return 1 / sinh(x)", "title": "" }, { "docid": "9b0dedb3d5fa209d8d27be9a975d32e2", "score": "0.48784468", "text": "def circle(x, z, r, t, duration):\n y_circle = r * math.cos(2 * math.pi * (1 / duration) * t)\n z_circle =+ r * math.sin(2 * math.pi * (1 / duration) * t)\n alphas = computeIK(x, y_circle, z_circle + z)\n return alphas", "title": "" }, { "docid": "f75801a816527c88b3ddfef45517fdd1", "score": "0.48645824", "text": "def text_to_sample_midi(input_name):\n assert 'txt' in input_name\n\n input_file = open(input_name, 'r')\n # if input_name[1] == 'b' or input_name[1] == '#':\n # key = input_name[0:2]\n # else:\n # key = input_name[0]\n key = 'C'\n sample = []\n\n for line in input_file:\n sample += line.strip().split(',')\n\n return make_interval_chord(sample, key)", "title": "" }, { "docid": "7fb7b733c54da9310a54f4620a4d4b6c", "score": "0.48508376", "text": "def __init__(self,length=8,chrd_prg=['C','Am','F','G'],note_lengths={1:1,.5:1}):\n self.length = length\n self.measures = []\n self.chrd_prg = chrd_prg\n self.note_lengths=note_lengths", "title": "" }, { "docid": "381c1cb27b7230523a5a730a0fef184b", "score": "0.48507717", "text": "def __init__(self, dir_path, data=[]):\r\n self.measures = [] #measure information\r\n self.pitch = [] # pitch information\r\n #transpose the key of every song to C Major\r\n #key must be unchangable, so use 34 as the never reachable index\r\n #not nessary assign value by such a clumsy way. while the clumy way also has its advantages... 
\r\n self.pitch_dictionary = {globalConstant.note[0] : 0, globalConstant.note[1] : 1, globalConstant.note[2] : 34, globalConstant.note[3] : 0,\r\n globalConstant.note[4] : 2, globalConstant.note[5] : 3, globalConstant.note[6] : 1, globalConstant.note[7] : 2,\r\n globalConstant.note[8] : 4, globalConstant.note[9] : 5, globalConstant.note[10] : 3, globalConstant.note[11] : 4,\r\n globalConstant.note[12] : 5, globalConstant.note[13] : 6, globalConstant.note[14] : 4, globalConstant.note[15] : 5,\r\n globalConstant.note[16] : 7, globalConstant.note[17] : 8, globalConstant.note[18] : 6, globalConstant.note[19] : 7,\r\n globalConstant.note[20] : 9, globalConstant.note[21] : 10, globalConstant.note[22] : 8, globalConstant.note[23] : 9,\r\n globalConstant.note[24] : 10, globalConstant.note[25] : 11, globalConstant.note[26] : 9, globalConstant.note[27] : 10,\r\n globalConstant.note[28] : 12, globalConstant.note[29] : 13, globalConstant.note[30] : 11, globalConstant.note[31] : 12,\r\n globalConstant.note[32] : 14, globalConstant.note[33] : 15, globalConstant.note[34] : 13, globalConstant.note[35] : 14,\r\n globalConstant.note[36] : 16, globalConstant.note[37] : 17, globalConstant.note[38] : 15, globalConstant.note[39] : 16,\r\n globalConstant.note[40] : 17, globalConstant.note[41] : 18, globalConstant.note[42] : 16, globalConstant.note[43] : 17,\r\n globalConstant.note[44] : 19, globalConstant.note[45] : 20, globalConstant.note[46] : 18, globalConstant.note[47] : 19,\r\n globalConstant.note[48] : 21, globalConstant.note[49] : 22, globalConstant.note[50] : 20, globalConstant.note[51] : 21,\r\n globalConstant.note[52] : 22, globalConstant.note[53] : 23, globalConstant.note[54] : 21, globalConstant.note[55] : 22,\r\n globalConstant.note[56] : 24, globalConstant.note[57] : 25, globalConstant.note[58] : 23, globalConstant.note[59] : 24,\r\n globalConstant.note[60] : 26, globalConstant.note[61] : 27, globalConstant.note[62] : 25, globalConstant.note[63] : 26,\r\n globalConstant.note[64] : 28, globalConstant.note[65] : 29, globalConstant.note[66] : 27, globalConstant.note[67] : 28,\r\n globalConstant.note[68] : 29, globalConstant.note[69] : 30, globalConstant.note[70] : 28, globalConstant.note[71] : 29,\r\n globalConstant.note[72] : 31, globalConstant.note[73] : 32, globalConstant.note[74] : 30, globalConstant.note[75] : 31}\r\n\r\n \r\n #list first, then convert it to numpy array. 
see ListToNumpyArrayTest.py.\r\n self.duration = [] # duration information\r\n \r\n self.duration_dictionary = {globalConstant.index0 : 0, \r\n globalConstant.index1 : 1,\r\n globalConstant.index2 : 2,\r\n globalConstant.index3 : 3,\r\n globalConstant.index4 : 4,\r\n globalConstant.index5 : 5, \r\n globalConstant.index6 : 6,\r\n globalConstant.index7 : 7,\r\n globalConstant.index8 : 8,\r\n globalConstant.index9 : 9,\r\n globalConstant.index10 : 10,\r\n globalConstant.index11 : 11,\r\n globalConstant.index12 : 12,\r\n globalConstant.index13 : 13,\r\n globalConstant.index14 : 14,\r\n globalConstant.index15 : 15,\r\n globalConstant.index16 : 16,\r\n globalConstant.index17 : 17,\r\n globalConstant.index18 : 18,\r\n globalConstant.index19 : 19,\r\n globalConstant.index20 : 20,\r\n globalConstant.index21 : 21,\r\n globalConstant.index22 : 22 \r\n }\r\n self.dir_path = dir_path; #path of directionary\r\n self.data = data #ABC files information\r", "title": "" }, { "docid": "34b5a740693ba353d6a41eebd2a0c9fe", "score": "0.4847936", "text": "def choose_theme_b(chord_pair_dict):\n chord_prog = []\n first_chord = choose_random_chord(chord_pair_dict)\n chord_prog.append(first_chord)\n current_chord_num = first_chord\n print(current_chord_num)\n markov_chain(chord_pair_dict, chord_prog, current_chord_num, 7) #adds seven more chords to chord prog..\n return chord_prog", "title": "" }, { "docid": "8c6dfe478812ecd7d5b786b4cb49af63", "score": "0.48426485", "text": "def CA_Lnote(self):\n pitch = [random.choice(chord(self.chord))]\n duration = 4\n lnote = Note(pitch, duration)\n self.add_note(lnote)", "title": "" }, { "docid": "e3055c4af12716cfeef4a1dad68dac89", "score": "0.4837407", "text": "def cyril(prev_play, count=[0]):\n count[0] += 1\n choices = [\"P\", \"S\", \"S\", \"R\", \"P\"]\n return choices[count[0] % len(choices)]", "title": "" }, { "docid": "388116c2a4cd96ac66d21d323593b2da", "score": "0.4821277", "text": "def apply(self, reverse_harmony=True, time_interval=None, transcription=True, results_sample_size=200):\n from tonalmodel.interval import Interval, IntervalType\n\n self.__reverse_harmony = reverse_harmony\n self.__time_interval = time_interval if time_interval is not None else \\\n NumericInterval(Fraction(0), self.score.line.duration.duration)\n\n reduced_line, first_position, duration = self.score.line.sub_line(self.time_interval)\n reduced_reversed_line = reduced_line.clone()\n reduced_reversed_line.reverse()\n\n reduced_hct = self.score.hct.sub_hct(NumericInterval(first_position.position,\n first_position.position + duration.duration))\n\n reduced_reversed_hct = reduced_hct.reverse()\n\n # If reversing harmony OR if reversing harmony and not doing transposition\n # These cases do not require transcription:\n # a) reverse harmony and no transcription - return reversed line + reversed harmony\n # b) no reverse harmony and no transcription - return reversed_line + original hct\n # c) reverse harmony and transcription - return reversed_line and reversed harmony.\n if reverse_harmony or not transcription:\n return reduced_reversed_line, reduced_reversed_hct if reverse_harmony else reduced_hct\n\n # Case: not reversing harmony AND requiring a transposition\n # Must specify reversed line here, but ANALYZE against reversed_hct, which is the original harmony\n # making the correct analysis of the original melody.\n t_ht = THarmonicTranscription(reduced_reversed_line, reduced_reversed_hct)\n\n # Find lowest tone:\n notes = reduced_reversed_line.get_all_notes()\n note_index, _ = 
min(enumerate(notes), key=lambda n: n[1].diatonic_pitch.chromatic_distance)\n lowest_pitch = notes[note_index].diatonic_pitch\n\n # Adapt reversed melody to original harmony.\n tag_map = gen_tag_map(notes[0], reduced_hct.hc_list()[0])\n results = t_ht.apply(reduced_hct,\n lowest_pitch,\n tag_map, t_ht.height + 6, results_sample_size,\n tunnel_half_interval=Interval(4, IntervalType.Perfect))\n\n filtered_results = MinContourFilter(reduced_reversed_line, results.pitch_results)\n scored_filtered_results = filtered_results.scored_results\n\n if len(scored_filtered_results) == 0:\n return None, None\n\n return scored_filtered_results[0][0], reduced_hct", "title": "" }, { "docid": "07a69fc354feb7ccf0633762182acb30", "score": "0.48089764", "text": "def play_progression(*args):\n for x in args:\n for note in get_chord(x, note_number('C5')):\n play(note, 80, 1)\n time.sleep(1)", "title": "" }, { "docid": "e9b08c83eedbf7655d3db62859897a50", "score": "0.48085463", "text": "def randomChord(self,rangeVal=[-12,12],inKey=True,nowStart = True):\n\t\tnewNotes = []\n\t\tfor i in range(len(self.notes)):\n\t\t\tmaxval,minval = rangeVal\n\t\t\tshiftVal = mapValue(random(),0,1,minval,maxval)\n\t\t\tnewNotes.append(self.notes[i]+shiftVal)\n\t\t\n\t\tif inKey == True:\n\t\t\tnewNotes = enforceKey(newNotes,self.key)\n\t\tif self.octaveRange is not None:\n\t\t\tnewNotes = enforceOctave(newNotes,self.octaveRange)\n\n\t\tself.newNotes(newNotes, nowStart = nowStart)", "title": "" }, { "docid": "d2ebd20d1112e6c1c57c64ba85a0d1dc", "score": "0.4772306", "text": "def CA_Lbass(self):\n chords = chord(self.chord)\n bass = Note(pitch = chords, duration = 4)\n self.add_bass(bass)", "title": "" }, { "docid": "f56399ea6fb4b7b6d9a65cbed364c319", "score": "0.4768541", "text": "def chords_of_scale(root_name: str, root_acci: Accidental, chord_type: str) -> list[list[str]]:\n result_list = chord_scale_namer(ChordOrScale.Scale, root_name, root_acci, chord_type, 7)\n chords_list = []\n i = 0\n while i < 7:\n current_chord = [result_list[i], result_list[(i + 2) % 7], result_list[(i + 4) % 7]]\n chords_list.append(current_chord)\n i += 1\n if chord_type == \"nat_minor\":\n if 'bb' in chords_list[4][1]:\n chords_list[4][1] = chords_list[4][1][0] + 'b'\n elif 'b' in chords_list[4][1]:\n chords_list[4][1] = chords_list[4][1][0]\n elif '#' in chords_list[4][1]:\n chords_list[4][1] = chords_list[4][1][0] + 'x'\n else:\n chords_list[4][1] = chords_list[4][1][0] + '#'\n # ^raised 7th for V chord in minor scale adjustment\n return chords_list", "title": "" }, { "docid": "4ff064658ace8b1cc9964e7c8dbd3143", "score": "0.47653127", "text": "def makeCone(dir, ksi, length, resolution, d1, angle, reverse=False):\n print d1, angle\n retval = []\n if reverse:\n rot = makeAxisRotation(dir, math.pi*2./resolution)\n else:\n rot = makeAxisRotation(dir, -math.pi*2./resolution)\n\n for i in range(0, resolution):\n p1 = ksi*d1\n p2 = dir*length + ksi*(d1+length*math.tan(angle))\n\n for i in range(0,len(retval)):\n retval[i] = np.dot(rot, retval[i])\n\n retval.append(p1)\n retval.append(p2)\n\n return retval", "title": "" }, { "docid": "774899192b2b1534655dae5aa31551df", "score": "0.47606248", "text": "def parse_chord_sequence(chord_sequence):\n chord_list = chord_sequence.split(\"|\")\n note_list = []\n note_frqs = []\n for i in range(len(chord_list)):\n chord_list[i] = chord_list[i].strip()\n chord_list[i] = chord_list[i].split('_')\n chord_notes = notes.chord_notes(*chord_list[i])\n note_list.append(chord_notes)\n return note_list", "title": "" }, { "docid": 
"8f69e5ff3cf66b63a51b7e53e9c6b646", "score": "0.47594944", "text": "def create_chord_template(request):\n all_songs = Song.objects.all()\n for song in all_songs:\n if song.chords:\n continue\n ccli = song.ccli\n title = song.title\n f = open('D:/dropbox/django/songs_chordpro/'+str(ccli)+'.cho', 'w')\n title_line = '{title:'+title+'}\\n'\n authors_list = []\n for author in song.authors.all():\n authors_list.append(author.full_name)\n publisher_list = []\n publication_date = song.publication_year\n for publisher in song.publisher.all():\n publisher_list.append(publisher.name)\n authors_line = '{st:Words and Music by '+ ', '.join(authors_list)+'}\\n'\n if publication_date == 1111:\n publishers_line = '{st:Public Domain}\\n'\n else:\n publishers_line = '{st:Copyright '+str(song.publication_year)+' '+', '.join(publisher_list)+'}\\n'\n ccli_line = '{st:CCLI ' + str(ccli) + '}'\n f.write(title_line.encode('utf8'))\n if len(authors_list) != 0:\n f.write(authors_line.encode('utf8'))\n f.write(publishers_line)\n f.write(ccli_line)\n f.close()\n\n return HttpResponseRedirect(reverse('songs.views.success'))", "title": "" } ]
ccdbd001147d5b9402f617c20d9e2566
MC approximation of individual Generalized GaussNewton/Fisher diagonal.
[ { "docid": "e09aba5e7a974d75ea6a635b3bb67e70", "score": "0.0", "text": "def diag_ggn_mc_batch(self, mc_samples: int) -> List[Tensor]:\n return", "title": "" } ]
[ { "docid": "83fc45443edde46c289583e7f26e160b", "score": "0.6200017", "text": "def _mmd_g_(self):\r\n # calculate pairwise distance\r\n dist_gg, dist_gd, dist_dd = get_squared_dist(\r\n self.score_gen, self.score_data, z_score=False, do_summary=self.do_summary)\r\n\r\n # mmd\r\n self.loss_gen = mixture_mmd_g(\r\n dist_gg, dist_gd, dist_dd, self.batch_size, sigma=self.sigma,\r\n name='mmd_g', do_summary=self.do_summary)\r\n self.loss_dis = -self.loss_gen\r\n if self.dis_penalty is not None:\r\n self.loss_dis = self.loss_dis + self.dis_penalty", "title": "" }, { "docid": "639d3d6e37a4ce1aa0ffa25764cd4ec2", "score": "0.6191505", "text": "def gfp_dynamics_1cc_(F,V,Cm1,beta,dtsim):\n G = np.zeros_like(F)\n G[0] = Cm1*V[0]\n for k in range(len(F)-1):\n G[k+1] = G[k]+(V[k]*F[k]-beta*G[k])*dtsim\n return G,G[-1]/V[-1]", "title": "" }, { "docid": "d46119625b9ce37cf39481a6c2753ed5", "score": "0.61492175", "text": "def gfp_dynamics_1cc(F,V,Cm1,beta,dtsim):\n G = np.zeros_like(F)\n G[0] = Cm1/2\n for k in range(len(F)-1):\n G[k+1] = G[k]+(V[k]*F[k]-beta*G[k])*dtsim\n return G,G[-1]", "title": "" }, { "docid": "c86dd1ad31830cb2cc980e6175180280", "score": "0.6106971", "text": "def gmm_mllr_diag_cov(X, gmm, niter=10):\n\n # remove illed gaussians\n logprob=gmm.score_samples(X)\n pcompx=gmm.predict_proba(X)\n ###logprob,pcompx = gmm.eval(X)\n psum = np.sum(pcompx, axis=0)\n ill_g = (psum == 0);\n if any(ill_g):\n valid = psum > 0\n gmm.means_ = gmm.means_[valid,:]\n gmm.weights_ = gmm.weights_[valid]\n gmm.weights_ = gmm.weights_/sum(gmm.weights_)\n gmm.covariances_ = gmm.covariances_[valid]\n ###logprob,pcompx = gmm.eval(X)\n logprob=gmm.score_samples(X)\n pcompx=gmm.predict_proba(X)\n\n # calculate G and Z \n C = len(gmm.weights_)\n T,dim = X.shape\n W = np.empty([dim,dim+1])\n G = np.zeros([dim,dim+1,dim+1])\n # 1. first calculate D[0,...,C) and Z\n D = np.zeros([C,dim+1,dim+1])\n V = np.zeros([C,dim,dim])\n Z = np.zeros([dim,dim+1])\n for c in range(0,C):\n mu = gmm.means_[c]\n sigma = np.diag(gmm.covariances_[c])\n sigma_inv = np.linalg.inv(sigma)\n p = pcompx[:,c]\n\n xi = np.empty_like(mu)\n xi[:] = mu\n xi = np.insert(xi,0,1)\n xi = np.reshape(xi, [len(xi),1])\n D[c] = xi.dot(xi.T)\n V[c] = np.sum(p)*sigma_inv\n for t in range(0,T):\n xt = np.reshape(X[t],[len(X[t]),1])\n Z += p[t]*sigma_inv.dot(xt).dot(xi.T)\n \n # 2. 
now calculate G\n for i in range(0,dim): \n for c in range(0,C): # tie all Gaussians\n G[i] += V[c,i,i]*D[c]\n try: \n G_i_inv = np.linalg.inv(G[i])\n except:\n print('G is nearly singular and pseudo-inverse is used.')\n G_i_inv = np.linalg.pinv(G[i])\n z_i = np.reshape(Z[i],[dim+1,1])\n W[i] = G_i_inv.dot(z_i)[:,0]\n \n # transform means\n for c in range(0,C):\n xi = np.insert(gmm.means_[c],0,1)\n xi = np.reshape(xi,[len(xi),1])\n gmm.means_[c] = W.dot(xi)[:,0]\n \n # remove non positive definite matrices\n ###logprob,pcompx = gmm.eval(X)\n logprob=gmm.score_samples(X)\n pcompx=gmm.predict_proba(X)\n psum = np.sum(pcompx, axis=0)\n ill = (psum == 0);\n if np.any(ill):\n valid = (ill == 0)\n gmm.means_ = gmm.means_[valid]\n gmm.weights_ = gmm.weights_[valid]\n gmm.weights_ = gmm.weights_/sum(gmm.weights_)\n gmm.covariances_ = gmm.covariances_[valid]\n K = gmm.means_.shape[0]\n return gmm", "title": "" }, { "docid": "73e96a26603d715bb00dbb56cf565df3", "score": "0.59942466", "text": "def gmm(X, k, epsilon=0.0001):\n\n n = X.shape[0]\n d = X.shape[1]\n mu = k_means(X, k) #np.zeros((k, X.shape[1]))\n sigma = np.ones((k, d))\n pi = np.ones((k, 1)) / k\n\n\n while 1:\n\n r = np.zeros((n, k))\n\n # E step\n for i in range(n):\n for c in range(k):\n r[i,c] = pi[c] * gauss.pdf(X[i], mean=mu[c], cov=sigma[c]) / np.sum([pi[j] * gauss.pdf(X[i], mean=mu[j], cov=sigma[j]) for j in range(k)])\n # M step\n \n new_mu = np.zeros((k, d))\n new_sigma = np.ones((k, d))\n new_pi = np.ones((k, 1))\n\n for c in range(k):\n new_pi[c] = np.sum(r[i,c] for i in range(n)) / n\n new_mu[c] = np.sum(r[i,c] * X[i] for i in range(n)) / (n * new_pi[c])\n new_sigma[c] = np.sum(r[i,c] * (X[i] - new_mu[c]) ** 2 for i in range(n)) / (n * new_pi[c])\n # print(sigma[c])\n new_sigma[c] = np.clip(new_sigma[c], epsilon, float('inf'))\n # print(new_sigma[c])\n \n\n if la.norm(new_pi - pi) < epsilon:\n break\n\n if la.norm(new_sigma - sigma) < epsilon:\n break\n\n if la.norm(new_mu - mu) < epsilon:\n break\n\n pi = new_pi\n sigma = new_sigma\n mu = new_mu\n\n return mu, sigma, pi", "title": "" }, { "docid": "94487e0e6c94de3c624b972d87681643", "score": "0.5977705", "text": "def grl_equilibration(fish):\n dim = fish.shape[0]\n eqi = np.eye(dim)\n for i in range(dim): \n if i%6==0:\n eqi[i,i] = 1e4\n return eqi.dot(fish.dot(eqi)) #creates a copy. 
", "title": "" }, { "docid": "db963bb8c3c5dff9224c4a962bc446e5", "score": "0.5919506", "text": "def get_hessian_g(x: numpy.array) -> numpy.array:\r\n # 2A\r\n #print(\"Inside Hessian of G: \\n\")\r\n #print(\"x.size \" + str(x.size))\r\n\r\n #print(numpy.eye(x.size))\r\n return numpy.eye(x.size)", "title": "" }, { "docid": "53f8b7ea3d6ec625e8ed9f96083dccab", "score": "0.5821681", "text": "def g(t, x):\t\n M = np.dot(np.dot(R.T, np.diag(P[t, :])), R)\n return np.dot(x.T, np.dot(M, x))", "title": "" }, { "docid": "554b1e9c98d6dd061e314f851f559183", "score": "0.5801351", "text": "def hessian_posteriori_matrices(xm,m,Q,sm2,H_mat):\n ########################################################\n # GRADIENT AND HESSIAN OF B AND b\n ########################################################\n def B00_x(x):\n if x=='e': sd=1\n else: sd=0\n dQ = H_mat['Q_{}'.format(x)][0,0]\n Q0=Q[0,0]; den=Q0+sm2\n return ((sd*Q0+sm2*dQ)*den-sm2*Q0*(dQ+sd))/den**2\n def B00_xy(x,y):\n if x=='e': dxe=1\n else: dxe=0\n if y=='e': dye=1\n else: dye=0\n dQx = H_mat['Q_{}'.format(x)][0,0]\n dQy = H_mat['Q_{}'.format(y)][0,0]\n dQxy = H_mat['Q_{}{}'.format(x,y)][0,0]\n Q00=Q[0,0]\n return (-2*dxe*dye*Q00**2 + (2*dQy*dxe + 2*dQx*dye)*Q00*sm2 +\\\n (-2*dQx*dQy + dQxy*Q00)*sm2**2 + dQxy*sm2**3)/(Q00 + sm2)**3\n def B01_x(x):\n if x=='e': dxe=1\n else: dxe=0\n dQ = H_mat['Q_{}'.format(x)]\n dQ00x = dQ[0,0];dQ01x = dQ[0,1]\n Q00=Q[0,0]; Q01=Q[0,1]\n return (dQ01x*sm2*(Q00 + sm2) + (-(dQ00x*sm2) + Q00*dxe)*Q01)/(Q00 + sm2)**2\n def B01_xy(x,y):\n if x=='e': dxe=1\n else: dxe=0\n if y=='e': dye=1\n else: dye=0\n Q00=Q[0,0];Q01=Q[0,1]\n dQx = H_mat['Q_{}'.format(x)]\n dQy = H_mat['Q_{}'.format(y)]\n dQxy = H_mat['Q_{}{}'.format(x,y)]\n dQ00x = dQx[0,0];dQ00y = dQy[0,0]\n dQ01x = dQx[0,1];dQ01y = dQy[0,1]\n dQ00xy= dQxy[0,0];dQ01xy= dQxy[0,1];dQ11xy= dQxy[1,1]\n\n return (Q01*((-(dQ00y*dxe) - (dQ00x + 2*dxe)*dye)*Q00 + (dQ00y*(2*dQ00x + dxe) + dQ00x*dye - dQ00xy*Q00)*sm2 -\\\n dQ00xy*sm2**2) + (Q00 + sm2)*((dQ01y*dxe + dQ01x*dye)*Q00 - (dQ00y*dQ01x + dQ00x*dQ01y - dQ01xy*Q00)*sm2 +\\\n dQ01xy*sm2**2))/(Q00 + sm2)**3\n def B11_x(x):\n if x=='e': dxe=1\n else: dxe=0\n Q00=Q[0,0];Q01=Q[0,1]\n dQx = H_mat['Q_{}'.format(x)]\n dQ00x=dQx[0,0]; dQ01x=dQx[0,1]; dQ11x=dQx[1,1]\n return dQ11x + ((dQ00x + dxe)*Q01**2)/(Q00 + sm2)**2 - (2*dQ01x*Q01)/(Q00 + sm2)\n def B11_xy(x,y):\n if x=='e': dxe=1\n else: dxe=0\n if y=='e': dye=1\n else: dye=0\n Q00=Q[0,0];Q01=Q[0,1]\n dQx = H_mat['Q_{}'.format(x)]\n dQy = H_mat['Q_{}'.format(y)]\n dQxy = H_mat['Q_{}{}'.format(x,y)]\n dQ00x = dQx[0,0];dQ00y = dQy[0,0]\n dQ01x = dQx[0,1];dQ01y = dQy[0,1]\n dQ00xy= dQxy[0,0];dQ01xy= dQxy[0,1];dQ11xy= dQxy[1,1]\n return dQ11xy - (2*(dQ00x + dxe)*(dQ00y + dye)*Q01**2)/(Q00 + sm2)**3 +\\\n (2*dQ01y*(dQ00x + dxe)*Q01)/(Q00 + sm2)**2 +\\\n (2*dQ01x*(dQ00y + dye)*Q01)/(Q00 + sm2)**2 + (dQ00xy*Q01**2)/(Q00 + sm2)**2 -\\\n (2*dQ01x*dQ01y)/(Q00 + sm2) - (2*dQ01xy*Q01)/(Q00 + sm2)\n def b0_x(x):\n if x=='e': dxe=1\n else: dxe=0\n dQ00x = H_mat['Q_{}'.format(x)][0,0]\n dm00x = H_mat['m_{}'.format(x)][0,0]\n Q00=Q[0,0];m00=m[0,0]\n return (Q00*(dm00x*sm2 + dxe*(m00 - xm)) + sm2*(dm00x*sm2 + dQ00x*(-m00 + xm)))/(Q00 + sm2)**2\n def b0_xy(x,y):\n if x=='e': dxe=1\n else: dxe=0\n if y=='e': dye=1\n else: dye=0\n dQ00x = H_mat['Q_{}'.format(x)][0,0]\n dQ00xy = H_mat['Q_{}{}'.format(x,y)][0,0]\n dQ00y = H_mat['Q_{}'.format(y)][0,0]\n dm00x = H_mat['m_{}'.format(x)][0,0]\n dm00y = H_mat['m_{}'.format(y)][0,0]\n dm00xy = H_mat['m_{}{}'.format(x,y)][0,0]\n Q00=Q[0,0];m00=m[0,0]\n 
return -(((dQ00y + dye)*(Q00 + sm2)*(dxe*m00 + dm00x*sm2 + dQ00x*xm) -\\\n (Q00 + sm2)**2*(dm00y*dxe + dm00x*dye + dm00xy*sm2 + dQ00xy*xm) +\\\n (dQ00x + dxe)*(Q00 + sm2)*(dye*m00 + dm00y*sm2 + dQ00y*xm) -\\\n 2*(dQ00x + dxe)*(dQ00y + dye)*(m00*sm2 + Q00*xm) +\\\n dQ00xy*(Q00 + sm2)*(m00*sm2 + Q00*xm))/(Q00 + sm2)**3)\n def b1_x(x):\n if x=='e': dxe=1\n else: dxe=0\n dQx = H_mat['Q_{}'.format(x)]\n dQ00x = dQx[0,0]\n dQ01x = dQx[0,1]\n dmx = H_mat['m_{}'.format(x)]\n dm10x = dmx[1,0];dm00x = dmx[0,0]\n Q00=Q[0,0];m00=m[0,0]\n Q01=Q[0,1];m10=m[1,0]\n return dm10x - (dm00x*Q01)/(Q00 + sm2) -\\\n ((dQ00x + dxe)*Q01*(-m00 + xm))/(Q00 + sm2)**2 + (dQ01x*(-m00 + xm))/(Q00 + sm2)\n def b1_xy(x,y):\n if x=='e': dxe=1\n else: dxe=0\n if y=='e': dye=1\n else: dye=0\n Q00=Q[0,0];Q01=Q[0,1];m00=m[0,0]\n dQx = H_mat['Q_{}'.format(x)]\n dQy = H_mat['Q_{}'.format(y)]\n dQxy = H_mat['Q_{}{}'.format(x,y)]\n dQ00x = dQx[0,0];dQ00y = dQy[0,0]\n dQ01x = dQx[0,1];dQ01y = dQy[0,1]\n dQ00xy= dQxy[0,0];dQ01xy= dQxy[0,1];dQ11xy= dQxy[1,1]\n dmx = H_mat['m_{}'.format(x)]\n dmy = H_mat['m_{}'.format(y)]\n dmxy = H_mat['m_{}{}'.format(x,y)]\n dm10x = dmx[1,0];dm00x = dmx[0,0]\n dm10y = dmy[1,0];dm00y = dmy[0,0]\n dm10xy = dmxy[1,0];dm00xy = dmxy[0,0]\n return -((-(dm00y*(dQ00x + dxe)*Q01*(Q00 + sm2)) -\\\n dm00x*(dQ00y + dye)*Q01*(Q00 + sm2) + dm00y*dQ01x*(Q00 + sm2)**2 +\\\n dm00x*dQ01y*(Q00 + sm2)**2 + dm00xy*Q01*(Q00 + sm2)**2 -\\\n dm10xy*(Q00 + sm2)**3 - 2*(dQ00x + dxe)*(dQ00y + dye)*Q01*(-m00 + xm) +\\\n dQ01y*(dQ00x + dxe)*(Q00 + sm2)*(-m00 + xm) +\\\n dQ01x*(dQ00y + dye)*(Q00 + sm2)*(-m00 + xm) +\\\n dQ00xy*Q01*(Q00 + sm2)*(-m00 + xm) - dQ01xy*(Q00 + sm2)**2*(-m00 + xm))/\\\n (Q00 + sm2)**3)\n ########################################################\n # FILL THE MATRIX\n ########################################################\n ret = {}\n build_vec = lambda x,y: np.array([[x],[y]])\n build_mat= lambda x,y,z: np.array([[x,y],[y,z]])\n for k in ['m','g','s','e']:\n ret['b_{}'.format(k)] = build_vec(b0_x(k),b1_x(k))\n ret['B_{}'.format(k)] = build_mat(B00_x(k),B01_x(k),B11_x(k))\n for j in ['m','g','s','e']:\n ret['b_{}{}'.format(k,j)] = build_vec(b0_xy(k,j),b1_xy(k,j))\n ret['B_{}{}'.format(k,j)] =\\\n build_mat(B00_xy(k,j),B01_xy(k,j),B11_xy(k,j))\n return ret", "title": "" }, { "docid": "1cceb30e5b3ae0d78a0beb9ef32b0a2a", "score": "0.57995254", "text": "def apply_G(self):\n\n mach_f = self.vec['u']('M_f')\n mach = self.vec['p']('M')\n\n mach_f[0] = mach[0]", "title": "" }, { "docid": "8a5c7fe3c13bc7629d5f225cf7f657ba", "score": "0.56645346", "text": "def ME(X, Yd, m = 4, gating = 'linear', add_bias = False):\n # Dimensions:\n # wexp (ns x ne x m)\n # wgat (m x ne)\n # yexp (N, ns, m)\n # ygat (N x m)\n # g = ygat = (N x m)\n # p (N x m)\n # h (N x m) ?\n\n # Add column with 1s for the bias\n if add_bias: # add column of 1s in X matrix for bias\n X = np.insert(X, 0, 1, axis=1)\n if Yd.ndim == 1: # convert row vector to column vector\n Yd = np.array([Yd]).T\n N, ne = X.shape # number of instances and features\n eps = np.finfo(float).eps # small number to avoid nan and division by zero\n\n ns = Yd.shape[1] # number of ME outputs\n var = np.ones(m) # adaptative variance\n wexp = np.random.rand(ns, ne, m) # expert weight\n wgat = np.random.rand(m, ne) # linear gating weight\n ygat = softmax(X @ wgat.T) # gating output (a priori probability)\n yexp = calc_saida_esp(X, wexp) # expert output\n\n j = random.sample(list(np.arange(N)), N)\n alpha = 1/m * np.ones((m,1)) # gaussian gating parameter\n gamma = X[j[:m],:] # 
gaussian gating parameter\n sigma = np.zeros((ne, ne, m)) # gaussian gating parameter\n for i in range(m):\n sigma[:,:,i] = np.eye(ne, ne) # gaussian gating parameter\n\n\n p = calc_prob_exp(yexp, Yd, var) # P(y|X,theta)\n p_xv = np.zeros((N, m)) # P(X|v)\n lik_old = 0\n lik_new = likelihood(ygat, p)\n it = 0\n maxiter = 100\n while abs(lik_new - lik_old) > 1e-3 and it < maxiter:\n #print('lik = ', lik_new, \"diff: \", lik_new - lik_old)\n # Linear gating\n if gating == 'linear':\n # Step E - Estimate H\n p = calc_prob_exp(yexp, Yd, var) # P(y|X,theta)\n h = prob_h(ygat, p)\n # Step M: optimize gating and expert outputs\n for i in range(m):\n wexp[:,:,i], var[i] = atualiza_exp(X, Yd, h[:, i], var[i], wexp[:,:,i], ns)\n wgat = atualiza_gat_linear(X, h, wgat) # gating weights\n ygat = softmax(X @ wgat.T) # gating output\n yexp = calc_saida_esp(X, wexp) # expert output\n\n lik_old = lik_new\n lik_new = likelihood(ygat, p)\n # Gaussian gating\n else:\n # Step E:\n ygat = prob(X, alpha, gamma, sigma)\n p = calc_prob_exp(yexp, Yd, var)\n h = alpha.T * ygat * p / np.sum(eps + alpha.T * ygat * p, axis = 1, keepdims=True)\n # Step M: optimize gating and expert outputs\n for i in range(m):\n wexp[:,:,i], var[i] = atualiza_exp(X, Yd, h[:, i], var[i], wexp[:,:,i], ns)\n alpha = np.mean(h, axis =0, keepdims = True).T\n gamma = h.T @ X / np.sum(h + eps, axis=0, keepdims=True).T\n for i in range(m):\n dif = X - gamma[i]\n sigma[:,:,i] = (dif / np.sum(h[:,i]) * h[:,i:i+1]).T @ dif\n\n #ygat = gating_output(X, p_xv, alpha) # gating output\n yexp = calc_saida_esp(X, wexp) # expert output\n\n lik_old = lik_new\n lik_new = likelihood(ygat, p)\n\n if it > 15 and gating == 'gaussian':\n if lik_new < lik_old:\n print(\"Doesn't converge.\")\n elif lik_new < lik_old + 5:\n print(\"Converge.\")\n break\n\n #print('lik = ', lik_new, \"diff: \", abs(lik_new - lik_old))\n it += 1\n\n return wexp, wgat, var, alpha, gamma, sigma", "title": "" }, { "docid": "be8b005853c3b8c29bfb455785dd8a69", "score": "0.56465757", "text": "def hamiltonian(self):", "title": "" }, { "docid": "8f0e0a35b50bd2f3e97f0e513370247a", "score": "0.56284153", "text": "def metric_matrix(dmat):\n dmat = numpy.array(dmat)\n\n natms = len(dmat)\n\n dcvec = distances_from_center(dmat)\n\n gmat = numpy.eye(natms)\n\n for i, j in itertools.product(range(natms), range(natms)):\n gmat[i, j] = (dcvec[i]**2 + dcvec[j]**2 - dmat[i, j]**2)/2.\n\n return gmat", "title": "" }, { "docid": "88545d3d9f21157bf68a8876c3c19bf4", "score": "0.56258357", "text": "def mvhermgauss(H, D):\n gh_x, gh_w = hermgauss(H)\n x = np.array(list(itertools.product(*(gh_x,) * D))) # H**DxD\n w = np.prod(np.array(list(itertools.product(*(gh_w,) * D))), 1) # H**D\n return x, w", "title": "" }, { "docid": "08f4d863cd671cf58fcc5d4a69fba2ae", "score": "0.56042755", "text": "def g_multivariate_normal(x,M):\n return .5*np.dot(x,M+M.T)", "title": "" }, { "docid": "e26d7a88f63aa5f497434c78e3d27a38", "score": "0.5598046", "text": "def apply_G(self):\n\n mach_i = self.vec['u']('M_i')\n mach = self.vec['p']('M')\n\n mach_i[0] = mach[0]", "title": "" }, { "docid": "495799d796cd50b6cd63888bbd3d31a9", "score": "0.5594839", "text": "def calc_M_1(m_s,x_1): \r\n \r\n M_1 = np.zeros((m_s.shape[0], m_s.shape[1], m_s.shape[2]))\r\n \r\n for l in range(m_s.shape[0]):\r\n M_1[l] = m_s[l] @ np.diagflat(x_1[0])\r\n\r\n return(M_1)", "title": "" }, { "docid": "517e0cd15d3df58cdba05955b49ea97b", "score": "0.5592457", "text": "def gfp_dynamics(F,V,Cm1,beta,dtsim):\n G = []\n n=0\n for f,v in zip(F,V):\n if n==0: 
cm1 = Cm1\n g,cm1 = gfp_dynamics_1cc(f,v,cm1,beta,dtsim)\n G.append(g)\n n+=1\n return G", "title": "" }, { "docid": "8ab0882572fcbb0246a08fe0fec490ea", "score": "0.5567783", "text": "def gauss_dimensions_cov(dimensions=range(2,100,20), total_points=200,\n num_experiments=100, d=10):\n k = 2\n if not d:\n d = dimensions[0]\n table = np.zeros((num_experiments*len(dimensions), 6))\n count = 0\n\n for D in dimensions:\n for l in range(num_experiments):\n\n # generate data\n m1 = np.zeros(D)\n m2 = np.concatenate((np.ones(d), np.zeros(D-d)))\n s1 = np.eye(D)\n # from uniform 1, 5\n s2_1 = np.array([1.367, 3.175, 3.247, 4.403, 1.249, 1.969, 4.035, 4.237, 2.813, 3.637])\n s2 = np.diag(np.concatenate((s2_1, np.ones(D-d))))\n n1, n2 = np.random.multinomial(total_points, [0.5,0.5])\n X, z = data.multivariate_normal([m1, m2], [s1, s2], [n1, n2])\n\n rho = lambda x, y: np.linalg.norm(x-y)\n G = eclust.kernel_matrix(X, rho)\n \n # can change the number of times we execute each experiment\n # and initialization method as well\n table[count, 0] = D\n table[count, 1] = run_clustering.energy_hartigan(k, X, G, z, \n init=\"k-means++\", run_times=5)\n table[count, 2] = run_clustering.energy_lloyd(k, X, G, z, \n init=\"k-means++\", run_times=5)\n table[count, 3] = run_clustering.spectral(k, X, G, z, \n run_times=5)\n table[count, 4] = run_clustering.kmeans(k, X, z, \n init=\"k-means++\", run_times=5)\n table[count, 5] = run_clustering.gmm(k, X, z, \n init=\"kmeans\", run_times=5)\n count += 1\n\n return table", "title": "" }, { "docid": "0166b5aae8cbec6fc919803d44db863e", "score": "0.55659634", "text": "def newson(m,e_g):\r\n mf = f(m,e_g)\r\n mdf = df(m,e_g)\r\n a = e_g - (mf/mdf)\r\n return a", "title": "" }, { "docid": "09be79630f3ccb94f0c345fea17749e4", "score": "0.55326855", "text": "def calc_g2():", "title": "" }, { "docid": "af61f4ce204e3ce6dfa2b9e220df9e80", "score": "0.55179346", "text": "def gmm(X, k):\n gauss = sklearn.mixture.GaussianMixture(n_components=k).fit(X)\n pi = gauss.weights_\n m = gauss.means_\n S = gauss.covariances_\n clss = gauss.predict(X)\n bic = gauss.bic(X)\n return pi, m, S, clss, bic", "title": "" }, { "docid": "c706dfe3e1d3b1e1680155eba5e60899", "score": "0.550719", "text": "def gowers_matrix(D):\n assert_square(D)\n\n n = float(D.shape[0])\n o = np.ones((int(n), 1))\n I = np.identity(int(n)) - (1/n)*o.dot(o.T)\n A = -0.5*(np.square(D))\n G = I.dot(A).dot(I)\n\n return(G)", "title": "" }, { "docid": "929c187924254c36095672f32dfe4846", "score": "0.5504482", "text": "def gmm(X, k):\n\n Gaussian = sklearn.mixture.GaussianMixture(n_components=k)\n params = Gaussian.fit(X)\n clss = Gaussian.predict(X)\n pi = params.weights_\n m = params.means_\n S = params.covariances_\n bic = Gaussian.bic(X)\n\n return pi, m, S, clss, bic", "title": "" }, { "docid": "eea487ca95e5fd139c2ec23e7f1e2ef6", "score": "0.54867804", "text": "def gmm_map_qb(X, gmm, rho=.1, epsilon=1, niter=10):\n\n # init\n ###logprob,pcompx = gmm.eval(X)\n logprob=gmm.score_samples(X)\n pcompx=gmm.predict_proba(X)\n psum = np.sum(pcompx, axis=0) #(18)\n # remove illed gaussians\n ill_g = (psum == 0);\n if any(ill_g):\n valid = psum > 0\n gmm.means_ = gmm.means_[valid,:]\n gmm.weights_ = gmm.weights_[valid]\n gmm.weights_ = gmm.weights_/sum(gmm.weights_)\n gmm.covariances_ = gmm.covariances_[valid]\n\n ###logprob,pcompx = gmm.eval(X)\n logprob=gmm.score_samples(X)\n pcompx=gmm.predict_proba(X)\n\n psum = np.sum(pcompx, axis=0) #(18)\n K,nDim = gmm.means_.shape\n tau = psum*epsilon #(22)\n tau_update = tau\n nu = 1 + tau 
#(23)\n nu_update = nu\n alpha = nDim + tau #(24)\n alpha_update = alpha\n mu = np.empty([K,nDim])\n mu_update = np.empty([K,nDim])\n yu = np.empty([K,nDim,nDim])\n yu_update = np.empty([K,nDim,nDim])\n for k in range(0,K):\n mu[k] = gmm.means_[k] #(25)\n yu[k] = tau[k] * gmm.covariances_[k] #(26)\n\n # EM iterations \n s = np.empty([K,nDim,nDim])\n N = X.shape[0]\n for iter in range(0, niter):\n print('iter=',iter)\n print(np.sum(gmm.weights_))\n #plot_gmm(gmm,X)\n # E-step: posterior probabilities\n ###logprob, pcompx = gmm.eval(X)\n logprob=gmm.score_samples(X)\n pcompx=gmm.predict_proba(X)\n \n # remove illed gaussians\n psum = np.sum(pcompx, axis=0) # (18)\n #print psum\n #raw_input()\n ill_g = (psum == 0);\n #print ill_g\n if any(ill_g):\n valid = psum > 0\n gmm.means_ = gmm.means_[valid,:]\n gmm.weights_ = gmm.weights_[valid]\n gmm.weights_ = gmm.weights_/sum(gmm.weights_)\n gmm.covariances_ = gmm.covariances_[valid]\n mu = mu[valid]\n mu_update = mu_update[valid]\n yu = yu[valid]\n yu_update = yu_update[valid]\n tau = tau[valid]\n tau_update = tau_update[valid]\n alpha = alpha[valid]\n alpha_update = alpha_update[valid]\n nu = nu[valid]\n nu_update = nu_update[valid]\n K = gmm.means_.shape[0]\n continue\n\n # M-step, eqs. from KimLoizou'10\n # Hyper-parameters\n psum = np.sum(pcompx, axis=0) # (18)\n #print np.sum(pcompx,axis=1)\n #print 'psum',psum\n #print gmm.weights_\n #raw_input()\n x_expected = np.dot(pcompx.T, X)/np.tile(psum[np.newaxis].T,(1,nDim)) #(19)\n #print 'x_expected',x_expected\n for k in range(0,K):\n #raw_input()\n # (20)\n s[k] = np.dot((X-np.tile(x_expected[k],(N,1))).T, (X-np.tile(x_expected[k],(N,1)))*np.tile(pcompx[:,k][np.newaxis].T,(1,nDim)))\n # (15)\n #print 'yu[k]',yu[k] \n #print 'x_expected - mu', (x_expected-mu).T \n #yu_update[k] = rho*yu[k] + s[k] + rho*tau[k]*psum[k]/(rho*tau[k]+psum[k])*np.dot((x_expected-mu).T, x_expected-mu) \n yu[k] = rho*yu[k] + s[k] + rho*tau[k]*psum[k]/(rho*tau[k]+psum[k])*np.dot((x_expected[k,:]-mu[k,:]).T, x_expected[k,:]-mu[k,:]) \n #print 'yu[k],after',yu[k] \n \n # (21)\n beta = psum/(rho*tau+psum)\n # mu, eq (14)\n #print beta.shape\n #print mu.shape\n #print x_expected.shape\n #mu_update = np.tile(beta[np.newaxis].T,(1,nDim))*x_expected + np.tile((1-beta)[np.newaxis].T,(1,nDim))*mu\n mu = np.tile(beta[np.newaxis].T,(1,nDim))*x_expected + np.tile((1-beta)[np.newaxis].T,(1,nDim))*mu\n # tau, eq (11)\n #tau_update = rho*tau + psum\n tau = rho*tau + psum\n # alpha, eq (12)\n #alpha_update = rho*(alpha-nDim) + nDim + psum \n alpha = rho*(alpha-nDim) + nDim + psum \n #print 'alpha',alpha\n # nu, eq (13)\n #nu_update = rho*(nu-1) + 1 + psum\n nu = rho*(nu-1) + 1 + psum\n \n # GMM parameters\n # weight, (27)\n #gmm.weights_ = (nu_update-1)/np.sum(nu_update-1)\n gmm.weights_ = (nu-1)/np.sum(nu-1)\n ill = (gmm.weights_ == 0)\n print(np.sum(gmm.weights_))\n # mean, (28)\n #gmm.means_ = mu_update \n gmm.means_ = mu \n # sigma, (29)\n for k in range(0,K):\n #if alpha_update[k] != nDim:\n if alpha[k] != nDim:\n #gmm.covars_[k] = yu_update[k]/(alpha_update[k]-nDim)\n gmm.covariances_[k] = yu[k]/(alpha[k]-nDim)\n else:\n #gmm.covars_[k] = yu_update[k]/tau_update[k]\n gmm.covariances_[k] = yu[k]/tau[k]\n try:\n np.linalg.cholesky(gmm.covariances_[k])\n except:\n ill[k] = 1\n print('cov_%d not positive definite' % k)\n \n # remove non positive definite matrices\n if np.any(ill):\n valid = (ill == 0)\n gmm.means_ = gmm.means_[valid]\n gmm.weights_ = gmm.weights_[valid]\n gmm.weights_ = gmm.weights_/sum(gmm.weights_)\n gmm.covariances_ 
= gmm.covariances_[valid]\n mu = mu[valid]\n mu_update = mu_update[valid]\n yu = yu[valid]\n yu_update = yu_update[valid]\n tau = tau[valid]\n tau_update = tau_update[valid]\n alpha = alpha[valid]\n alpha_update = alpha_update[valid]\n nu = nu[valid]\n nu_update = nu_update[valid]\n K = gmm.means_.shape[0]\n return gmm", "title": "" }, { "docid": "735d0d019bd2cc33e9df61617c1f321f", "score": "0.546018", "text": "def apply_G(self):\n\n tmin = self.vec['u']('Tmin')\n tau = self.vec['p']('tau')\n\n fmax = numpy.max(self.min - tau)\n tmin[0] = fmax + 1/self.rho * \\\n numpy.log(numpy.sum(numpy.exp(self.rho*(self.min - tau - fmax))))", "title": "" }, { "docid": "eba553876d226a66113689776e46a30f", "score": "0.54464203", "text": "def c_m(self):\n c_m = (1.5 * np.matmul(\n np.swapaxes(from_6x6_to_21x1(self.d_sq), -1, -2),\n from_6x6_to_21x1(E_shear)) / np.matmul(\n np.swapaxes(from_6x6_to_21x1(self.d_sq), -1, -2),\n from_6x6_to_21x1(E_iso)))[..., 0, 0]\n return c_m", "title": "" }, { "docid": "9c304d79bfef84afa830ef8277572c50", "score": "0.54338884", "text": "def diag_ggn_mc(self, mc_samples: int) -> List[Tensor]:\n return", "title": "" }, { "docid": "f3a3e77730d932ce9e0ae5b7cc720a8f", "score": "0.542774", "text": "def hessian_log_likelihood(xm,m,Q,sm2,H_mat):\n def G(x):\n if x=='e': dxe=1\n else: dxe=0\n dQ00x = H_mat['Q_{}'.format(x)][0,0]\n dm00x = H_mat['m_{}'.format(x)][0,0]\n Q00=Q[0,0];m00=m[0,0]\n return (-((dQ00x + dxe)*(Q00 + sm2)) + 2*dm00x*(Q00 + sm2)*(-m00 + xm)\\\n + (dQ00x + dxe)*(-m00 + xm)**2)/(2.*(Q00 + sm2)**2)\n def H(x,y):\n if x=='e': dxe=1\n else: dxe=0\n if y=='e': dye=1\n else: dye=0\n dQ00x = H_mat['Q_{}'.format(x)][0,0]\n dQ00xy = H_mat['Q_{}{}'.format(x,y)][0,0]\n dQ00y = H_mat['Q_{}'.format(y)][0,0]\n dm00x = H_mat['m_{}'.format(x)][0,0]\n dm00y = H_mat['m_{}'.format(y)][0,0]\n dm00xy = H_mat['m_{}{}'.format(x,y)][0,0]\n Q00=Q[0,0];m00=m[0,0]\n return ((dQ00x + dxe)*(dQ00y + dye)*(Q00 + sm2) - 2*dm00x*dm00y*(Q00 + sm2)**2 -\\\n dQ00xy*(Q00 + sm2)**2 - 2*dm00y*(dQ00x + dxe)*(Q00 + sm2)*(-m00 + xm) -\\\n 2*dm00x*(dQ00y + dye)*(Q00 + sm2)*(-m00 + xm) + 2*dm00xy*(Q00 + sm2)**2*(-m00 + xm) -\\\n 2*(dQ00x + dxe)*(dQ00y + dye)*(-m00 + xm)**2 + dQ00xy*(Q00 + sm2)*(-m00 + xm)**2)/\\\n (2.*(Q00 + sm2)**3)\n grad = np.array([G(x) for x in ['m','g','s','e']])[:,None]\n hess = np.array([H(x,y) for y in ['m','g','s','e'] for x in\\\n ['m','g','s','e']]).reshape(4,4)\n return grad, hess", "title": "" }, { "docid": "9facf115a2c7ba815a36137cfe4175d0", "score": "0.5423068", "text": "def create_bigG(G, A, M):\n GA = np.dot(G, A)\n n_c , n_t = M.shape\n _ , n_s = G.shape\n m_p = A.shape[1]//n_s\n row = []\n col = []\n data = []\n for i in range(n_t):\n for j in range(n_c):\n row.append([j+n_c*i]*n_s*m_p)\n col.append([n_s*i+k for k in range(n_s*m_p)])\n data.append(GA[j, :])\n data = np.array(data).reshape(-1)\n row = np.array(row).reshape(-1)\n col = np.array(col).reshape(-1)\n return coo_matrix((data, (row, col)), shape=(n_t*n_c, n_s*(n_t + m_p - 1)))", "title": "" }, { "docid": "891744071be3c09ef6f85a5cefa4e603", "score": "0.5418729", "text": "def __init__(self, Vh, gamma, delta, locations, m_true, Theta = None, pen = 1e1, order=2, rel_tol=1e-12, max_iter=1000):\n assert delta != 0. 
or pen != 0, \"Intrinsic Gaussian Prior are not supported\"\n self.Vh = Vh\n \n trial = dl.TrialFunction(Vh)\n test = dl.TestFunction(Vh)\n \n if Theta == None:\n varfL = dl.inner(dl.nabla_grad(trial), dl.nabla_grad(test))*dl.dx\n else:\n varfL = dl.inner(Theta*dl.grad(trial), dl.grad(test))*dl.dx\n varfM = dl.inner(trial,test)*dl.dx\n \n self.M = dl.assemble(varfM)\n if dlversion() <= (1,6,0):\n self.Msolver = dl.PETScKrylovSolver(\"cg\", \"jacobi\")\n else:\n self.Msolver = dl.PETScKrylovSolver(self.Vh.mesh().mpi_comm(), \"cg\", \"jacobi\")\n self.Msolver.set_operator(self.M)\n self.Msolver.parameters[\"maximum_iterations\"] = max_iter\n self.Msolver.parameters[\"relative_tolerance\"] = rel_tol\n self.Msolver.parameters[\"error_on_nonconvergence\"] = True\n self.Msolver.parameters[\"nonzero_initial_guess\"] = False\n \n #mfun = Mollifier(gamma/delta, dl.inv(Theta), order, locations)\n mfun = dl.Expression(code_Mollifier, degree = Vh.ufl_element().degree()+2)\n mfun.l = gamma/delta\n mfun.o = order\n mfun.theta0 = 1./Theta.theta0\n mfun.theta1 = 1./Theta.theta1\n mfun.alpha = Theta.alpha\n for ii in range(locations.shape[0]):\n mfun.addLocation(locations[ii,0], locations[ii,1])\n \n varfmo = mfun*dl.inner(trial,test)*dl.dx\n MO = dl.assemble(pen*varfmo)\n \n self.A = dl.assemble(gamma*varfL+delta*varfM + pen*varfmo)\n \n if dlversion() <= (1,6,0):\n self.Asolver = dl.PETScKrylovSolver(\"cg\", amg_method())\n else:\n self.Asolver = dl.PETScKrylovSolver(self.Vh.mesh().mpi_comm(), \"cg\", amg_method())\n self.Asolver.set_operator(self.A)\n self.Asolver.parameters[\"maximum_iterations\"] = max_iter\n self.Asolver.parameters[\"relative_tolerance\"] = rel_tol\n self.Asolver.parameters[\"error_on_nonconvergence\"] = True\n self.Asolver.parameters[\"nonzero_initial_guess\"] = False\n \n old_qr = dl.parameters[\"form_compiler\"][\"quadrature_degree\"]\n dl.parameters[\"form_compiler\"][\"quadrature_degree\"] = -1\n qdegree = 2*Vh._ufl_element.degree()\n metadata = {\"quadrature_degree\" : qdegree}\n \n if dlversion() >= (2017,1,0):\n representation_old = dl.parameters[\"form_compiler\"][\"representation\"]\n dl.parameters[\"form_compiler\"][\"representation\"] = \"quadrature\"\n \n if dlversion() <= (1,6,0):\n Qh = dl.FunctionSpace(Vh.mesh(), 'Quadrature', qdegree)\n else:\n element = dl.FiniteElement(\"Quadrature\", Vh.mesh().ufl_cell(), qdegree, quad_scheme=\"default\")\n Qh = dl.FunctionSpace(Vh.mesh(), element)\n \n ph = dl.TrialFunction(Qh)\n qh = dl.TestFunction(Qh)\n Mqh = dl.assemble(ph*qh*dl.dx(metadata=metadata))\n ones = dl.interpolate(dl.Constant(1.), Qh).vector()\n dMqh = Mqh*ones\n Mqh.zero()\n dMqh.set_local( ones.get_local() / np.sqrt(dMqh.get_local() ) )\n Mqh.set_diagonal(dMqh)\n MixedM = dl.assemble(ph*test*dl.dx(metadata=metadata))\n self.sqrtM = MatMatMult(MixedM, Mqh)\n \n dl.parameters[\"form_compiler\"][\"quadrature_degree\"] = old_qr\n \n if dlversion() >= (2017,1,0):\n dl.parameters[\"form_compiler\"][\"representation\"] = representation_old\n \n self.R = _BilaplacianR(self.A, self.Msolver) \n self.Rsolver = _BilaplacianRsolver(self.Asolver, self.M)\n \n rhs = dl.Vector()\n self.mean = dl.Vector()\n self.init_vector(rhs, 0)\n self.init_vector(self.mean, 0)\n \n MO.mult(m_true, rhs)\n self.Asolver.solve(self.mean, rhs)", "title": "" }, { "docid": "2447e339ec4250b8692a4b9a6c0dd0e2", "score": "0.54172355", "text": "def test_newtons_method():\n\n\tH = zeros((2,2))\n\tg = zeros((2,2)) # first column, gradient of first direct. derv., second second direct. 
derv, should be the same\n\n\tfor n in range(2):\n\t\t# directional derivatives to evaluate columns of the hessian\n\t\tq1 = array([[13.]])\n\t\tq2 = array([[17.]])\n\t\tze = array([[0.]])\n\n\t\tq1dot = array([[1.*(n==0)]])\n\t\tq2dot = array([[1.*(n==1)]])\n\n\t\tzedot = array([[0.]])\n\n\t\tcg = CGraph()\n\t\tFq1 = Function(Mtc(q1, q1dot))\n\t\tFq2 = Function(Mtc(q2, q2dot))\n\t\tFze = Function(Mtc(ze, zedot))\n\t\tFC = Function([[Fq1*Fq1,Fze],[Fze,Fq2*Fq2]])\n\t\tFPhi = FC.trace()\n\n\t\tPhibar = array([[1.]])\n\t\tPhibardot = array([[0.]])\n\t\t\n\t\tcg.independentFunctionList = [Fq1,Fq2]\n\t\tcg.dependentFunctionList = [FPhi]\n\t\tcg.reverse([Mtc(Phibar, Phibardot)])\n\t\t\n\t\t\n\t\tg[0,n] = Fq1.xbar.X[0,0]\n\t\tg[1,n] = Fq2.xbar.X[0,0]\n\t\tH[0,n] = Fq1.xbar.Xdot[0,0]\n\t\tH[1,n] = Fq2.xbar.Xdot[0,0]\n\n\n\n\t# compute new search direction\n\tdelta_q = numpy.linalg.solve(H,-g[:,0])\n\tq_plus = [13.,17.] + delta_q\n\tassert numpy.prod(q_plus == [0.,0.])", "title": "" }, { "docid": "98e52c056e4b86de92fbff0b7b08ca45", "score": "0.53999287", "text": "def g_func( self, lmnAB, lmnCD, rstAB, rstCD, ijk, ang_A, ang_B, PA, PB, gammaAB, ang_C, ang_D, QC, QD, gammaCD, PQ):\n delta = 1/(4.0*gammaAB) + 1/(4.0*gammaCD)\n \n return ( ((-1) ** lmnAB)\n * self.theta_func(lmnAB,ang_A,ang_B,PA,PB,rstAB,gammaAB)\n * self.theta_func(lmnCD,ang_C,ang_D,QC,QD,rstCD,gammaCD)\n * ((-1) ** ijk)\n * ((2*delta) ** (2*(rstAB + rstCD)))\n * math.factorial(lmnAB + lmnCD - 2*rstAB - 2*rstCD)\n * (delta ** ijk)\n * (PQ ** (lmnAB + lmnCD - 2*(rstAB + rstCD + ijk)))\n / ((4*delta) ** (lmnAB + lmnCD))\n / math.factorial(ijk)\n / math.factorial(lmnAB + lmnCD - 2*(rstAB + rstCD + ijk)) )", "title": "" }, { "docid": "48ad6e5314a8360a90b0ed8e00b2716a", "score": "0.53907347", "text": "def efficient_full_exact_ACDM(self, X, n_iter):\n calc_KL_time = 0\n e = np.e \n B_size = len(self.B_)\n Z = len(self.S_)\n total_step = n_iter*B_size\n # Initialization of Theta\n theta_prime_tilde = dict()\n theta_prime_hat = dict()\n for phi in self.B_:\n theta_prime_tilde[phi] = 0.0\n theta_prime_hat[phi] = 0.0\n\n # Initialization of U\n U_hat = dict() \n U_prime_hat = dict() \n U_prime_tilde = dict()\n for x in self.S_:\n U_hat[x] = 0\n U_prime_hat[x] = 0.0\n U_prime_tilde[x] = 0.0\n # Initialization of Coefficients\n alpha = [0] * (total_step+1)\n gamma = [0] * (total_step+2)\n c_hat = [0] * (total_step+2)\n gamma[-1] =1/(2.0) \n c_hat[-1] = 3.0 + 2*math.sqrt(2.000) \n for k in range(total_step+1):\n gamma_k_before = gamma[k-1]\n gamma_k = math.sqrt(1.0/(4.0)+gamma_k_before**2)+1.0/(2.0)\n gamma[k] = gamma_k\n alpha[k] =1/(gamma_k)\n c_hat[k] = (1-alpha[k]) * c_hat[k-1]\n # Caching \n e_U_hat_dict = dict() \n for x in self.S_:\n e_U_hat_dict[x] = 1.0\n # Beginning of Iteration\n start = time.time()\n for iter_ in range(n_iter):\n for index in range(B_size):\n k = iter_*B_size + index\n phi = random.choice(self.B_)\n if self.etahat_[phi] >= 1.0 or self.etahat_[phi]== 0.0:\n continue\n # Caching\n c_hat_k = c_hat[k]\n c_hat_k_after = c_hat[k+1]\n alpha_k_after = alpha[k+1]\n gamma_k = gamma[k]\n\n # calculate G, Z\n G = 0 \n for x in self.Ssub_[phi]:\n G += e_U_hat_dict[x]\n eta_ = G/Z\n # update tilde_theta_prime, hat_theta_prime\n nabla = 4.0*(self.etahat_[phi] - eta_)\n nabla_exact = np.log((1.0 -eta_)* self.etahat_[phi]/(eta_*(1-self.etahat_[phi])))\n\n delta_tilde = nabla_exact/len(X)\n delta_hat = nabla_exact/len(X)\n step1 = gamma_k/B_size * delta_tilde\n step2 = 
1.0/c_hat_k_after*((alpha_k_after-(1-c_hat_k_after))*gamma_k/B_size*delta_tilde+(1-alpha_k_after)*delta_hat)\n\n\n theta_prime_tilde[phi] += step1\n theta_prime_hat[phi] += step2 \n for x in self.Ssub_[phi]:\n U_prime_tilde[x] += step1\n U_prime_hat[x] += step2\n U_hat_temp = (1-c_hat_k_after)*U_prime_tilde[x]+c_hat_k_after*U_prime_hat[x]\n e_U_hat_temp = e ** U_hat_temp\n Z += e_U_hat_temp - e_U_hat_dict[x]\n e_U_hat_dict[x] = e_U_hat_temp\n\n update_start_time = time.time()\n for x in self.B_:\n self.theta_[x] = (1-c_hat_k_after)*theta_prime_tilde[x]+c_hat_k_after*theta_prime_hat[x]\n self.theta_[phi] += 4*(self.etahat_[phi]-eta_)\n self.compute_theta_perp()\n self.compute_P()\n kl=self.compute_KL()\n calc_KL_time += time.time() - update_start_time \n print(iter_ ,\":\", \"KL divergence: \", f'{kl:.16f}' ,\" time : %4.2f\"% (time.time()-calc_KL_time-start),flush=True)", "title": "" }, { "docid": "2b71317599315baa15abd0d8007dc05c", "score": "0.5383931", "text": "def __init__(self, Vh, gamma, delta, Theta = None, mean=None, rel_tol=1e-12, max_iter=1000):\n assert delta != 0., \"Intrinsic Gaussian Prior are not supported\"\n self.Vh = Vh\n\n trial = dl.TrialFunction(Vh)\n test = dl.TestFunction(Vh)\n \n if Theta == None:\n varfL = dl.inner(dl.nabla_grad(trial), dl.nabla_grad(test))*dl.dx\n else:\n varfL = dl.inner( Theta*dl.grad(trial), dl.grad(test))*dl.dx\n \n varfM = dl.inner(trial,test)*dl.dx\n \n self.M = dl.assemble(varfM)\n if dlversion() <= (1,6,0):\n self.Msolver = dl.PETScKrylovSolver(\"cg\", \"jacobi\")\n else:\n self.Msolver = dl.PETScKrylovSolver(self.Vh.mesh().mpi_comm(), \"cg\", \"jacobi\")\n self.Msolver.set_operator(self.M)\n self.Msolver.parameters[\"maximum_iterations\"] = max_iter\n self.Msolver.parameters[\"relative_tolerance\"] = rel_tol\n self.Msolver.parameters[\"error_on_nonconvergence\"] = True\n self.Msolver.parameters[\"nonzero_initial_guess\"] = False\n \n self.A = dl.assemble(gamma*varfL + delta*varfM) \n if dlversion() <= (1,6,0):\n self.Asolver = dl.PETScKrylovSolver(\"cg\", amg_method())\n else:\n self.Asolver = dl.PETScKrylovSolver(self.Vh.mesh().mpi_comm(), \"cg\", amg_method())\n self.Asolver.set_operator(self.A)\n self.Asolver.parameters[\"maximum_iterations\"] = max_iter\n self.Asolver.parameters[\"relative_tolerance\"] = rel_tol\n self.Asolver.parameters[\"error_on_nonconvergence\"] = True\n self.Asolver.parameters[\"nonzero_initial_guess\"] = False\n \n old_qr = dl.parameters[\"form_compiler\"][\"quadrature_degree\"]\n dl.parameters[\"form_compiler\"][\"quadrature_degree\"] = -1\n qdegree = 2*Vh._ufl_element.degree()\n metadata = {\"quadrature_degree\" : qdegree}\n\n if dlversion() >= (2017,1,0):\n representation_old = dl.parameters[\"form_compiler\"][\"representation\"]\n dl.parameters[\"form_compiler\"][\"representation\"] = \"quadrature\"\n \n if dlversion() <= (1,6,0):\n Qh = dl.FunctionSpace(Vh.mesh(), 'Quadrature', qdegree)\n else:\n element = dl.FiniteElement(\"Quadrature\", Vh.mesh().ufl_cell(), qdegree, quad_scheme=\"default\")\n Qh = dl.FunctionSpace(Vh.mesh(), element)\n \n ph = dl.TrialFunction(Qh)\n qh = dl.TestFunction(Qh)\n Mqh = dl.assemble(ph*qh*dl.dx(metadata=metadata))\n ones = dl.interpolate(dl.Constant(1.), Qh).vector()\n dMqh = Mqh*ones\n Mqh.zero()\n dMqh.set_local( ones.get_local() / np.sqrt(dMqh.get_local() ) )\n Mqh.set_diagonal(dMqh)\n MixedM = dl.assemble(ph*test*dl.dx(metadata=metadata))\n self.sqrtM = MatMatMult(MixedM, Mqh)\n\n dl.parameters[\"form_compiler\"][\"quadrature_degree\"] = old_qr\n \n if dlversion() >= 
(2017,1,0):\n dl.parameters[\"form_compiler\"][\"representation\"] = representation_old\n \n self.R = _BilaplacianR(self.A, self.Msolver) \n self.Rsolver = _BilaplacianRsolver(self.Asolver, self.M)\n \n self.mean = mean\n \n if self.mean is None:\n self.mean = dl.Vector()\n self.init_vector(self.mean, 0)", "title": "" }, { "docid": "a3365b18e9f481a59257890844aae335", "score": "0.53832597", "text": "def _g(self, w):\n p_w = self._p_w(w)\n return (self.p_data_x[None, :, None] * p_w[None, :, :] * self.feature\n ).sum(axis=(1, 2)) - self.E_feature", "title": "" }, { "docid": "7ab7fc8bf2f63fe190d89a5e3852222d", "score": "0.5356697", "text": "def optimise_gaussian(self, filename = \"test\"):\n\n # first do minimisation with MMFF94 and dihedrals contrainted\n # this will hopefully fix clashes\n mp = AllChem.MMFFGetMoleculeProperties(self.molecule, mmffVariant=\"MMFF94\")\n ff = AllChem.MMFFGetMoleculeForceField(self.molecule, mp)\n\n current_angles = self.get_dihedrals()\n N = 0\n for angle in current_angles:\n ff.MMFFAddTorsionConstraint(angle[0], angle[1], angle[2], angle[3], False, angle[4], angle[4], 500000.0)\n N += 1\n\n ff.Minimize(maxIts=500,forceTol=0.0001,energyTol=1e-06)\n\n # do gaussian opt\n self.molecule.SetProp(\"_Name\", filename)\n Gmethod = \"pm6\"\n output = qmgaussian.gaussian([self.molecule], method=\"opt \" + Gmethod, charge=0, mult=1, charged_fragments=True, cpus=1, keepFiles=True)\n\n if output[0] is not None:\n atoms = output[0]['atomicPos']\n\n energykey = 'g16:(' + Gmethod + ')_energy'\n self.energy = float(output[0][energykey])\n self.energy = self.energy * 627.15 # converts from hartree to kcal/mol\n conf = self.molecule.GetConformer()\n N = 0\n # This assumes that the atom ordering is the same!!!!\n for xyz in atoms: \n conf.SetAtomPosition(N, xyz)\n N += 1\n elif output[0] is None:\n # gaussian failed, set energy to a high number, this is kind of a hack\n self.energy = 9999.0", "title": "" }, { "docid": "13132ac0f9ad0d8857ca54eed5036ac8", "score": "0.5354696", "text": "def hessian_1cc(W,mlam,gamma,sl2,sm2,dt,s,S,grad_matS,rescale,sd2):\n ##### likelihood and gradient at initial conditions\n gll,hll = hessian_log_likelihood(W[0,0],s,S,sm2,grad_matS)\n #### Initialize parameters for recurrence\n F, A, a = parameters(gamma,dt,mlam,sl2)\n grad_param = hessian_parameters(gamma,dt,mlam,sl2)\n ##### P(z_0|x_0^m)\n b,B = posteriori_matrices(W[0,0],s,S,sm2)\n grad_mat_b = hessian_posteriori_matrices(W[0,0],s,S,sm2,grad_matS)\n for j in range(1,W.shape[1]):\n ###### P(z_{t+dt}|D_t) = N(m,Q))\n m,Q = new_mean_cov(b,B,F,A,a)\n grad_mat_Q = hessian_new_mean_cov(b,B,F,A,a,grad_param,grad_mat_b)\n ##### P(z_{t+dt}|D_{t+dt}) = N(b',B')\n b,B = posteriori_matrices(W[0,j],m,Q,sm2)\n grad_mat_b = hessian_posteriori_matrices(W[0,j],m,Q,sm2,grad_mat_Q)\n ##### Likelihood\n G,H = hessian_log_likelihood(W[0,j],m,Q,sm2,grad_mat_Q)\n gll+=G; hll=+H\n # Predict for daughter cell\n m,Q = new_mean_cov(b,B,F,A,a)\n grad_mat_Q = hessian_new_mean_cov(b,B,F,A,a,grad_param,grad_mat_b)\n # Find next cell initial conditions (9% asym div)\n s, S, grad_matS = hess_cell_division_likelihood(m,Q,grad_mat_Q,sd2,rescale)\n return hll, gll, s, S, grad_matS", "title": "" }, { "docid": "7469fac99123f049f1b2e136e5eedca0", "score": "0.53455836", "text": "def rand_mmd_g(dist_all, batch_size, omega=0.5, max_iter=0, name='mmd', do_summary=False, scope_prefix=''):\r\n with tf.name_scope(name):\r\n m = tf.constant(batch_size, tf.float32)\r\n\r\n def kernel(b):\r\n return tf.exp(-dist_all * b)\r\n\r\n def f(b):\r\n k 
= kernel(b)\r\n e_k = matrix_mean_wo_diagonal(k, 2 * m)\r\n return e_k - omega, k\r\n\r\n def df(k):\r\n kd = -k * dist_all # gradient of exp(-d*w)\r\n e_kd = matrix_mean_wo_diagonal(kd, 2 * m)\r\n return e_kd\r\n\r\n # initialize sigma as the geometric mean of all pairwise distances\r\n dist_mean = matrix_mean_wo_diagonal(dist_all, 2 * m)\r\n beta = -tf.log(omega) / (dist_mean + FLAGS.EPSI) # beta = 1/2/sigma\r\n # if max_iter is larger than one, do newton's update\r\n if max_iter > 0:\r\n beta, _ = tf.while_loop(\r\n cond=lambda _1, i: i < max_iter,\r\n body=lambda b, i: newton_root(b, f, df, step=i),\r\n loop_vars=(beta, tf.constant(0, dtype=tf.int32)))\r\n\r\n k_all = kernel(beta)\r\n k_xx = k_all[0:batch_size, 0:batch_size]\r\n k_xy_0 = k_all[0:batch_size, batch_size:]\r\n k_xy_1 = k_all[batch_size:, 0:batch_size]\r\n k_yy = k_all[batch_size:, batch_size:]\r\n\r\n e_kxx = matrix_mean_wo_diagonal(k_xx, m)\r\n e_kxy_0 = matrix_mean_wo_diagonal(k_xy_0, m)\r\n e_kxy_1 = matrix_mean_wo_diagonal(k_xy_1, m)\r\n e_kyy = matrix_mean_wo_diagonal(k_yy, m)\r\n\r\n if do_summary:\r\n with tf.name_scope(None): # return to root scope to avoid scope overlap \r\n tf.summary.scalar(scope_prefix + name + '/kxx', e_kxx)\r\n tf.summary.scalar(scope_prefix + name + '/kyy', e_kyy)\r\n tf.summary.scalar(scope_prefix + name + '/kxy_0', e_kxy_0)\r\n tf.summary.scalar(scope_prefix + name + '/kxy_1', e_kxy_1)\r\n # tf.summary.scalar(scope_prefix + name + 'omega', omega)\r\n\r\n return e_kxx + e_kyy - e_kxy_0 - e_kxy_1", "title": "" }, { "docid": "d4a475014918f6668221cd055be938a4", "score": "0.5344345", "text": "def _compute_gauss_pdf_gradients(x, mu, sigma):\n sigma_matrix = torch.diag(sigma)\n distr = torch.distributions.MultivariateNormal(mu, sigma_matrix)\n return distr.log_prob(x).exp()", "title": "" }, { "docid": "fca994135a04f0b3e09ec041daf5d4d5", "score": "0.53440166", "text": "def _mixed_norm_solver_bcd(\n M,\n G,\n alpha,\n lipschitz_constant,\n maxit=200,\n tol=1e-8,\n verbose=None,\n init=None,\n n_orient=1,\n dgap_freq=10,\n use_accel=True,\n K=5,\n):\n _, n_times = M.shape\n _, n_sources = G.shape\n n_positions = n_sources // n_orient\n\n if init is None:\n X = np.zeros((n_sources, n_times))\n R = M.copy()\n else:\n X = init\n R = M - np.dot(G, X)\n\n E = [] # track primal objective function\n highest_d_obj = -np.inf\n active_set = np.zeros(n_sources, dtype=bool) # start with full AS\n\n alpha_lc = alpha / lipschitz_constant\n\n if use_accel:\n last_K_X = np.empty((K + 1, n_sources, n_times))\n U = np.zeros((K, n_sources * n_times))\n\n # First make G fortran for faster access to blocks of columns\n G = np.asfortranarray(G)\n # Ensure these are correct for dgemm\n assert R.dtype == np.float64\n assert G.dtype == np.float64\n one_ovr_lc = 1.0 / lipschitz_constant\n\n # assert that all the multiplied matrices are fortran contiguous\n assert X.T.flags.f_contiguous\n assert R.T.flags.f_contiguous\n assert G.flags.f_contiguous\n # storing list of contiguous arrays\n list_G_j_c = []\n for j in range(n_positions):\n idx = slice(j * n_orient, (j + 1) * n_orient)\n list_G_j_c.append(np.ascontiguousarray(G[:, idx]))\n\n for i in range(maxit):\n _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c)\n\n if (i + 1) % dgap_freq == 0:\n _, p_obj, d_obj, _ = dgap_l21(\n M, G, X[active_set], active_set, alpha, n_orient\n )\n highest_d_obj = max(d_obj, highest_d_obj)\n gap = p_obj - highest_d_obj\n E.append(p_obj)\n logger.debug(\n \"Iteration %d :: p_obj %f :: dgap %f :: n_active %d\"\n % (i + 1, 
p_obj, gap, np.sum(active_set) / n_orient)\n )\n\n if gap < tol:\n logger.debug(\"Convergence reached ! (gap: %s < %s)\" % (gap, tol))\n break\n\n # using Anderson acceleration of the primal variable for faster\n # convergence\n if use_accel:\n last_K_X[i % (K + 1)] = X\n\n if i % (K + 1) == K:\n for k in range(K):\n U[k] = last_K_X[k + 1].ravel() - last_K_X[k].ravel()\n C = U @ U.T\n # at least on ARM64 we can't rely on np.linalg.solve to\n # reliably raise LinAlgError here, so use SVD instead\n # equivalent to:\n # z = np.linalg.solve(C, np.ones(K))\n u, s, _ = np.linalg.svd(C, hermitian=True)\n if s[-1] <= 1e-6 * s[0] or not np.isfinite(s).all():\n logger.debug(\"Iteration %d: LinAlg Error\" % (i + 1))\n continue\n z = ((u * 1 / s) @ u.T).sum(0)\n c = z / z.sum()\n X_acc = np.sum(last_K_X[:-1] * c[:, None, None], axis=0)\n _grp_norm2_acc = groups_norm2(X_acc, n_orient)\n active_set_acc = _grp_norm2_acc != 0\n if n_orient > 1:\n active_set_acc = np.kron(\n active_set_acc, np.ones(n_orient, dtype=bool)\n )\n p_obj = _primal_l21(M, G, X[active_set], active_set, alpha, n_orient)[0]\n p_obj_acc = _primal_l21(\n M, G, X_acc[active_set_acc], active_set_acc, alpha, n_orient\n )[0]\n if p_obj_acc < p_obj:\n X = X_acc\n active_set = active_set_acc\n R = M - G[:, active_set] @ X[active_set]\n\n X = X[active_set]\n\n return X, active_set, E", "title": "" }, { "docid": "178de04b2af5641740265bd3c7a8bc3c", "score": "0.5335637", "text": "def g(q, xi):\n return flx.eigenvalue(q)-xi", "title": "" }, { "docid": "3c034f9bd66b94fa7d3e4ceebf409aeb", "score": "0.5334269", "text": "def hamiltonian_diag_1d(mu,V,deltax):\n\n\n #constructs kinetic energy operator\n T=kinetic_op_1d(mu,deltax,V.size)\n #constructs hamiltonian\n H=hamiltonian_1d(T,V)\n #performs diagonalization\n eigen_val,eigen_vec=solve_eigenstates(H)\n\n return eigen_val,eigen_vec", "title": "" }, { "docid": "3efdf2a40d5b6d307da83456e17bdd6e", "score": "0.53294677", "text": "def diag_ggn(self) -> List[Tensor]:\n return", "title": "" }, { "docid": "4a742599d07d57c1603fa2403b352556", "score": "0.5328856", "text": "def Gxyz(lA,mA,nA,lB,mB,nB,lC,mC,nC,lD,mD,nD,a,b,c,d,RA,RB,RC,RD):\n with loops.Scope() as s:\n s.lA,s.mA,s.nA,s.lB,s.mB,s.nB,s.lC,s.mC,s.nC,s.lD,s.mD,s.nD,s.a,s.b,s.c,s.d,s.RA,s.RB,s.RC,s.RD = lA,mA,nA,lB,mB,nB,lC,mC,nC,lD,mD,nD,a,b,c,d,RA,RB,RC,RD\n s.gP = s.a + s.b\n s.gQ = s.c + s.d\n s.delta = 1/(4*s.gP) + 1/(4*s.gQ)\n s.RP = gaussian_product(s.a,s.b,s.RA,s.RB)\n s.RQ = gaussian_product(s.c,s.d,s.RC,s.RD)\n s.ABsq = np.dot(s.RA-s.RB,s.RA-s.RB)\n s.CDsq = np.dot(s.RC-s.RD,s.RC-s.RD)\n s.PQsq = np.dot(s.RP-s.RQ,s.RP-s.RQ)\n s.boysarg = s.PQsq / (4 * s.delta)\n\n s.Gxyz = 0.0\n s.l = lA + lB \n s.r = np.floor(s.l/2) \n s.lp = lC + lD \n s.rp = np.floor(s.lp/2) \n s.i = np.floor((s.l + s.lp - 2 * s.r - 2 * s.rp) / 2) \n\n for _ in s.while_range(lambda: s.l > -1):\n s.r = np.floor(s.l/2)\n for _ in s.while_range(lambda: s.r > -1):\n s.lp = lC + lD \n for _ in s.while_range(lambda: s.lp > -1):\n s.rp = np.floor(s.lp/2)\n for _ in s.while_range(lambda: s.rp > -1):\n #s.i = np.floor((s.l + s.lp - 2 * s.r - 2 * s.rp) / 2) # Compiles forever until OOM\n s.i = np.floor((s.l - 2 * s.r + s.lp - 2 * s.rp) / 2) # This works, compiles fine since loop variables are added in order\n for _ in s.while_range(lambda: s.i > -1):\n gx = gi(s.l,s.lp,s.r,s.rp,s.i, s.lA,s.lB,s.RA[0],s.RB[0],s.RP[0],s.gP, s.lC,s.lD,s.RC[0],s.RD[0],s.RQ[0],s.gQ)\n nu = s.l - 2 * s.r + s.lp - 2 * s.rp - s.i #THIS ORDER MATTERS (UGH)\n F = boys(nu, s.boysarg)\n s.Gxyz += F * 
gx\n\n\n #s.nu = s.l + s.lp - 2 * (s.r+s.rp)-s.i\n #nu = s.l - 2 * s.r + s.lp - 2 * s.rp - s.i\n #F = boys(s.nu, s.boysarg)\n #F = 0.5 * (s.boysarg + 1e-11)**(-(s.nu + 0.5)) * jax.lax.igamma(s.nu + 0.5, s.boysarg + 1e-11) * np.exp(jax.lax.lgamma(s.nu + 0.5))\n\n # WORKS: fake boys function acucmulation\n #nu = s.l - 2 * s.r + s.lp - 2 * s.rp - s.i #THIS ORDER MATTERS (UGH)\n #F = 0.5 * (s.boysarg + 1e-11)**(-(nu + 0.5)) * jax.lax.igamma(nu + 0.5, s.boysarg + 1e-11) * np.exp(jax.lax.lgamma(nu + 0.5))\n #s.Gxyz += F\n\n\n # This works ( thou shalt not add s.l + s.lp\n #nu = s.l - 2 * s.r + s.lp - 2 * s.rp - s.i #THIS ORDER MATTERS (UGH)\n #F = boys(nu, s.boysarg)\n #s.Gxyz += F\n\n s.i -= 1\n s.rp -= 1\n s.lp -= 1\n s.r -= 1\n s.l -= 1\n\n return s.Gxyz\n\n\n\n #Na = N(a,lA,mA,nA)\n #Nb = N(b,lB,mB,nB)\n #Nc = N(c,lC,mC,nC)\n #Nd = N(d,lD,mD,nD)\n\n #Gxyz *= Na * Nb * Nc * Nd", "title": "" }, { "docid": "89987b761534239f9d8c38765aa0e849", "score": "0.53207606", "text": "def _calc_m(x0, xt, tau=1):\n\n # These represent the C(tau) and C(0) covariance matrices\n # Note: x is an anomaly vector, no division by N-1 because it's undone\n # in the inversion anyways\n\n # Division by number of samples ignored due to inverse\n x0x0 = np.dot(x0.T, x0)\n x0xt = np.dot(xt.T, x0)\n\n # Calculate the mapping term G_tau\n G = np.dot(x0xt, pinv(x0x0))\n\n # Calculate the forcing matrix to check that all modes are damped\n Geigs = eigvals(G)\n Leigs = (1. / tau) * np.log(Geigs)\n\n if np.any(Leigs.real >= 0):\n logger.debug('L eigenvalues: \\n' + str(Leigs))\n raise ValueError(\n 'Positive eigenvalues detected in forecast matrix L.')\n\n return G", "title": "" }, { "docid": "7d6513d5e0985d8f627c0a32a832a6a6", "score": "0.5317741", "text": "def calcgaussian( self, frequency=None ):\n if frequency :\n diff = frequency - self.B\n else :\n diff = self.frequencies - self.B\n return exp( -0.5 * diff * diff * self.C2 )", "title": "" }, { "docid": "f6392db40c17f5f1e25f1e0aba7ad643", "score": "0.5317538", "text": "def test_diagonalizing_gates(self):\n base = qml.Hadamard(0)\n diag_gate = Adjoint(base).diagonalizing_gates()[0]\n\n assert isinstance(diag_gate, qml.RY)\n assert qml.math.allclose(diag_gate.data[0], -np.pi / 4)", "title": "" }, { "docid": "21697a506e1ca5131df2e9f1930bfd38", "score": "0.5314103", "text": "def _calc_MC(self):\r\n _ = self; T, K, rf_r, net_r, sCP = _.T, _.K, _.rf_r, _.net_r, _.signCP\r\n _ = self.ref; S, vol, q = _.S0, _.vol, _.q\r\n _ = self.px_spec; n, m, keep_hist, rng_seed, rho = _.nsteps, _.npaths, _.keep_hist, _.rng_seed, _.rho\r\n _ = self.px_spec.ref2; S2, vol2, q2 = _.S0, _.vol, _.q\r\n _ = self._LT_specs(); u, d, p, df, dt = _['u'], _['d'], _['p'], _['df_dt'], _['dt']\r\n\r\n px = list()\r\n numpy.random.seed(rng_seed)\r\n\r\n for path in range(0,m):\r\n\r\n ## Generate correlated Wiener Processes\r\n u = numpy.random.normal(size=n)\r\n v = numpy.random.normal(size=n)\r\n v = rho * u + math.sqrt(1 - rho**2) * v\r\n u = u * math.sqrt(dt)\r\n v = v * math.sqrt(dt)\r\n\r\n ## Simulate the paths\r\n s1, s2, mu1, mu2 = [S], [S2], (rf_r - q)*dt, (rf_r - q2)*dt\r\n\r\n for t in range(0, len(u)):\r\n s1.append(s1[-1] * (mu1 + vol * u[t]) + s1[-1])\r\n s2.append(s2[-1] * (mu2 + vol2 * v[t]) + s2[-1])\r\n\r\n val = np.maximum(sCP * (s2[-1] - s1[-1] - K), 0) * math.exp(-rf_r * T) # Calculate the Payoff\r\n px.append((val))\r\n\r\n self.px_spec.add(px=float(np.mean(px)))\r\n\r\n return self", "title": "" }, { "docid": "03b47414233ac7b3883179ec28ad283b", "score": "0.5308294", "text": "def 
make_G(V, q):\n # Shape to 1x1 matrix (scalar)\n V = Matrix([V])\n\n # Return jacobian as a column vector\n return V.jacobian(q).T", "title": "" }, { "docid": "c382a5bb452ac01e1c8a73b504bda258", "score": "0.5307989", "text": "def gmm_cluster(data_set, cls_num):\n iter_count = 0\n MAX_ITER_COUNT = 25\n sample_num = data_set.shape[0]\n sample_dim = data_set.shape[1]\n data_set_T = numpy.transpose(data_set)\n\n alphas = numpy.asarray([1.0 / cls_num] * cls_num).T\n means = numpy.asarray(select_means(data_set, cls_num))\n covariances = numpy.asarray([numpy.cov(data_set_T) for i in range(0, cls_num)])\n\n gammas = numpy.asarray(\n [[alphas[j] for j in range(0, cls_num)] for i in range(0, sample_num)]\n )\n\n while iter_count < MAX_ITER_COUNT:\n # E step: calculate the probability matrix \"gammas\" based on current means, covariances and alphas.\n p = numpy.asarray(\n [\n [\n gauss(\n data_set_T[:, i].reshape(sample_dim, 1),\n means[j].reshape(sample_dim, 1),\n covariances[j],\n )[0, 0]\n for j in range(0, cls_num)\n ]\n for i in range(0, sample_num)\n ]\n )\n \n gammas = numpy.asarray(\n [\n numpy.multiply(p[i], alphas) / numpy.dot(p[i].T, alphas)\n for i in range(0, sample_num)\n ]\n )\n\n # M step: update means, covariances and alphas based on new \"gammas\".\n means = numpy.asarray(\n [\n (numpy.dot(data_set_T, gammas[:, i]) / numpy.sum(gammas[:, i])).reshape(sample_dim, 1) \n for i in range(0, cls_num)\n ]\n )\n\n covariances = numpy.asarray(\n [\n numpy.dot(\n numpy.multiply(data_set_T - means[i], gammas[:, i]),\n (data_set_T - means[i]).T,\n )\n / numpy.sum(gammas[:, i])\n for i in range(0, cls_num)\n ]\n )\n alphas = numpy.asarray([numpy.sum(gammas[:, i]) / sample_num for i in range(0, cls_num)])\n iter_count += 1\n\n return means, gammas", "title": "" }, { "docid": "316575b1ced6a4e975d7572f1d6eb2ce", "score": "0.53045654", "text": "def method(m,points,data):\n n = len(data)-1\n u,s,g,h = [],[],[],[]\n if m>n:\n m = n\n print \"The highest power is adjusted to: \" + str(n)\n for i in range(m+1):\n u.append([None]*(n+2))\n u[i][n+1] = 0\n s.append(0) \n g.append(0) \n h.append(0) \n for i in range(n+1):\n u[0][i] = 1\n stmp = u[0][i]**2\n s[0] += stmp\n g[0] += points[i]*stmp\n u[0][n+1] += u[0][i]*data[i]\n g[0] = g[0]/s[0]\n u[0][n+1] = u[0][n+1]/s[0]\n for i in range(n+1): \n u[1][i] = points[i]*u[0][i]-g[0]*u[0][i]\n s[1] += u[1][i]**2\n g[1] += points[i]*u[1][i]**2\n h[1] += points[i]*u[1][i]*u[0][i]\n u[1][n+1] += u[1][i]*data[i]\n g[1] = g[1]/s[1]\n h[1] = h[1]/s[0]\n u[1][n+1] = u[1][n+1]/s[1] \n if m>=2:\n for i in range(1,m):\n for j in range(n+1):\n u[i+1][j] = points[j]*u[i][j]-g[i]*u[i][j]-h[i]*u[i-1][j]\n s[i+1] += u[i+1][j]**2\n g[i+1] += points[j]*u[i+1][j]**2\n h[i+1] += points[j]*u[i+1][j]*u[i][j]\n u[i+1][n+1] += u[i+1][j]*data[j]\n g[i+1] = g[i+1]/s[i+1]\n h[i+1] = h[i+1]/s[i]\n u[i+1][n+1] = u[i+1][n+1]/s[i+1] \n return u", "title": "" }, { "docid": "a6a5542365503202a32777702b724d32", "score": "0.5300515", "text": "def compute_gating_distances(self):\n\t\t# For Gibbs\n\t\tgating_distances_evals = pdist(self.x, metric=self.gating_kern)\n\t\tself.gating_distances = squareform(gating_distances_evals)\n\t\tself.gating_distances_to_rest = np.sum(self.gating_distances, axis=1)\n\n\t\t# For HMH\n\t\tgating_log_distances_1st_incomplete = pdist(self.x, metric=self.gating_log_kern_1st_incomplete)\n\t\tgating_log_distances_2nd_incomplete = pdist(self.x, metric=self.gating_log_kern_2nd_incomplete)\n\t\t\n\t\tgating_log_distances_1st_evals = gating_log_distances_1st_incomplete * 
gating_distances_evals\n\t\tgating_log_distances_2nd_evals = gating_log_distances_2nd_incomplete * gating_distances_evals\n\t\t\n\t\tself.gating_log_distances_1st = squareform(gating_log_distances_1st_evals)\n\t\tself.gating_log_distances_2nd = squareform(gating_log_distances_2nd_evals)\n\t\tself.gating_log_distances_1st_to_rest = np.sum(self.gating_log_distances_1st, axis=1)\n\t\tself.gating_log_distances_2nd_to_rest = np.sum(self.gating_log_distances_2nd, axis=1)", "title": "" }, { "docid": "09f2212a9ff3ed2438a87dbbc5aaf4e5", "score": "0.5298525", "text": "def gauss():\n from Disk import Disk\n from Mersenne import Mersenne\n\n # inputs\n N = 10**5\n box = [(0,1), (0,1)]\n # the point cloud generator\n cloud = Mersenne()\n # the region of integration\n disk = Disk(center=(0,0), radius=1)\n\n # the integration algorithm\n interior = 0\n for i in range(N):\n point = cloud.point(box)\n if disk.interior(point):\n interior += 1\n # print the estimate of π\n print(\"π: {:.8f}\".format(4*interior/N))\n return", "title": "" }, { "docid": "5635d8eaff8d1cbb10b60a522719cb95", "score": "0.529164", "text": "def calcGenCost(self):\n df = self.df_0.copy(deep=True)\n\n #pre-processing:\n #adjust coal fuel prices by the \"coal_dol_per_mmbtu\" input\n df.loc[df.fuel_type=='coal', 'fuel_price' + str(self.time)] = scipy.maximum(0, df.loc[df.fuel_type=='coal', 'fuel_price' + str(self.time)] + self.coal_dol_per_mmbtu)\n #adjust coal capacity by the \"coal_capacity_derate\" input\n df.loc[df.fuel_type=='coal', 'mw' + str(self.time)] = df.loc[df.fuel_type=='coal', 'mw' + str(self.time)] * (1.0 - self.coal_capacity_derate)\n #calculate the generation cost:\n df['fuel_cost'] = df['heat_rate' + str(self.time)] * df['fuel_price' + str(self.time)]\n df['co2_cost'] = df['co2' + str(self.time)] * self.co2_dol_per_kg\n df['so2_cost'] = df['so2' + str(self.time)] * self.so2_dol_per_kg\n df['nox_cost'] = df['nox' + str(self.time)] * self.nox_dol_per_kg\n df['gen_cost'] = scipy.maximum(0.01, df.fuel_cost + df.co2_cost + df.so2_cost + df.nox_cost + df.vom)\n #add a zero generator so that the bid stack goes all the way down to zero. 
This is important for calculating information for the marginal generator when the marginal generator is the first one in the bid stack.\n df['dmg_easiur'] = df['dmg' + str(self.time)]\n #if self.initialization:\n df = df.append(df.loc[0]*0)\n df = df.append(df.iloc[-1])\n #self.initialization = False\n df.sort_values('gen_cost', inplace=True)\n #move coal_0 and ngcc_0 to the front of the merit order regardless of their gen cost\n coal_0_ind = df[df.orispl_unit=='coal_0'].index[0]\n ngcc_0_ind = df[df.orispl_unit=='ngcc_0'].index[0]\n df = pandas.concat([df.iloc[[0],:], df[df.orispl_unit=='coal_0'], df[df.orispl_unit=='ngcc_0'], df.drop([0, coal_0_ind, ngcc_0_ind], axis=0)], axis=0)\n df.reset_index(drop=True, inplace=True)\n df['demand'] = df['mw' + str(self.time)].cumsum()\n df.loc[len(df)-1, 'demand'] = df.loc[len(df)-1, 'demand'] + 1000000 #creates a very large generator at the end of the merit order so that demand cannot be greater than supply\n df['f'] = df['demand']\n df['s'] = scipy.append(0, scipy.array(df.f[0:-1]))\n df['a'] = scipy.maximum(df.s - df.min_out*(1/0.10), 1.0)\n #add a very large demand for the last row\n self.df = df", "title": "" }, { "docid": "608b239b715ae75924ded17a7364d352", "score": "0.52880335", "text": "def gibbs_step(self, C, Gamma, Sigma_det, log_pc, idx = None):\n if idx is None:\n idx = np.random.randint(self.d) \n u = self.X_td[:, idx][:, None]\n idx_val = C[idx]\n if idx_val:\n Gamma_1 = Gamma\n Sigma_det_1 = Sigma_det\n log_p1 = log_pc\n C[idx] = 0\n key = tuple(C.tolist())\n if key in self._cache:\n self._cache_hits += 1\n Gamma_0, Sigma_det_0, log_p0 = self._cache[key]\n else:\n Gamma_0, Sigma_det_0 = sherman_morrison_update(\n Gamma, Sigma_det, -u, u)\n log_p0 = self.log_p(C, Gamma_0, Sigma_det_0)\n self._cache_insert(key, (Gamma_0, Sigma_det_0, log_p0))\n else:\n Gamma_0 = Gamma\n Sigma_det_0 = Sigma_det\n log_p0 = log_pc\n C[idx] = 1\n key = tuple(C.tolist())\n if key in self._cache:\n self._cache_hits += 1\n Gamma_1, Sigma_det_1, log_p1 = self._cache[key]\n else:\n Gamma_1, Sigma_det_1 = sherman_morrison_update(\n Gamma, Sigma_det, u, u)\n log_p1 = self.log_p(C, Gamma_1, Sigma_det_1)\n self._cache_insert(key, (Gamma_1, Sigma_det_1, log_p1))\n\n Z = logsumexp(log_p0, log_p1)\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n p0 = np.exp(log_p0 - Z)\n\n if np.random.rand() > p0:\n # we set it to 1\n C[idx] = 1\n Gamma = Gamma_1\n Sigma_det = Sigma_det_1\n log_pc = log_p1\n\n else:\n # we set it to 0\n C[idx] = 0\n Gamma = Gamma_0\n Sigma_det = Sigma_det_0\n log_pc = log_p0\n self.likelihoods.append(log_pc)\n return C, Gamma, Sigma_det, log_pc", "title": "" }, { "docid": "456990deb35189fdd95089e95703ed43", "score": "0.52843225", "text": "def get_gmm(self,gr_charge, ex_charge, FG_elstat, struc, index, E01, \r\n int2cart, freq, red_mass, order=2, approx=1.1, CoarseGrain='C'):\r\n \r\n dR_Hmm,dR_env_Hmm = self.get_SingleDefect_derivation(gr_charge, ex_charge, FG_elstat, struc, index, E01, order=2, approx=1.1)\r\n \r\n # freq is actualy wavenumber (= frequency/speed_of_light).\r\n # therefore angluar frequency omega in atomic units = 100/(2*Rydberg_inf) wavenumber in [cm-1]\r\n omega_au = freq/conversion_facs_energy[\"1/cm\"]\r\n RedMass_au = red_mass/conversion_facs_mass\r\n # in atomic units hbar = 1.0, m_e = 1.0, elementary_charge = 1.0, 1/(4*pi*eps_0) = 1.0, speed_of_light = 137 ( fine-structure constant)\r\n \r\n # pick only carbon atoms from eigenvectors of normal modes (assume that fluorine atoms doesn't influent the result) - 
in needed\r\n if CoarseGrain in [\"C\",\"plane\"] :\r\n indxC = np.where(np.array(struc.at_type) == 'C')\r\n index = np.zeros((len(indxC),3),dtype='i8')\r\n for ii in range(3):\r\n index[:,ii] = index*3+ii\r\n index.reshape(3*len(indxC))\r\n int2cart_loc = int2cart[index,:]\r\n else:\r\n int2cart_loc = int2cart.copy()\r\n \r\n g_mm = np.dot(int2cart_loc.T,dR_Hmm) + np.dot(int2cart.T,dR_env_Hmm)\r\n g_mm = g_mm/(np.sqrt(omega_au*omega_au*omega_au))\r\n g_mm = g_mm/(2*np.sqrt(RedMass_au))\r\n \r\n return g_mm", "title": "" }, { "docid": "2c440c76b9936635298b4bb1cc941f6e", "score": "0.5281879", "text": "def compute_G(self):\n \"\"\" YOUR CODE HERE\"\"\"\n G = 0\n raise NotImplementedError\n\n return G", "title": "" }, { "docid": "5d0418f4c70674079dcac873d1f89d15", "score": "0.528127", "text": "def MMD2H_Gaussian(kernel,gm,samples):\n N = len(samples)\n \n EE = EE_Gaussian(gm,kernel)\n \n crossed_term = 0\n for sample in samples:\n crossed_term += E_Gaussian(np.array([sample]), gm, kernel)\n\n gram_term = 0\n for sample_n in samples:\n for sample_m in samples:\n gram_term += kernel.pdf(sample_n,sample_m)\n\n return EE - 2*crossed_term/N + gram_term/N**2", "title": "" }, { "docid": "df345ab6b0854133a35e504f993f2047", "score": "0.5279673", "text": "def _rand_g_(self):\r\n # calculate pairwise distance\r\n dist_gg, dist_gd, dist_dd = get_squared_dist(\r\n self.score_gen, self.score_data, z_score=False, do_summary=self.do_summary)\r\n\r\n # mmd\r\n with tf.name_scope('rand_g'):\r\n omega = tf.random_uniform([], self.omega_range[0], self.omega_range[1], dtype=tf.float32) \\\r\n if isinstance(self.omega_range, (list, tuple)) else self.omega_range\r\n loss_gr = rand_mmd_g_xy(\r\n dist_gg, dist_gd, dist_dd, self.batch_size, omega=omega,\r\n max_iter=3, name='mmd_gr', do_summary=self.do_summary, scope_prefix='rand_g/')\r\n loss_gn = rand_mmd_g_xn(\r\n self.score_gen, self.ref_normal, self.batch_size, self.num_scores, dist_xx=dist_gg, omega=omega,\r\n max_iter=3, name='mmd_gn', do_summary=self.do_summary, scope_prefix='rand_g/')\r\n loss_rn = rand_mmd_g_xn(\r\n self.score_data, self.ref_normal, self.batch_size, self.num_scores, dist_xx=dist_dd, omega=omega,\r\n max_iter=3, name='mmd_rn', do_summary=self.do_summary, scope_prefix='rand_g/')\r\n # final loss\r\n self.loss_gen = loss_gr\r\n self.loss_dis = loss_rn - loss_gr\r\n\r\n # self.debug_register = [omega, loss_gr, loss_gn, loss_rn]\r\n if self.do_summary:\r\n with tf.name_scope(None): # return to root scope to avoid scope overlap \r\n tf.summary.scalar('rand_g/omega', omega)\r\n tf.summary.scalar('GANLoss/gr', loss_gr)\r\n tf.summary.scalar('GANLoss/gn', loss_gn)\r\n tf.summary.scalar('GANLoss/rn', loss_rn)", "title": "" }, { "docid": "7c8d34f3de667ebed5b0cd6412c5bd80", "score": "0.5276958", "text": "def g_reuss(self):\n return 15. / (8. * self.inv[:3, :3].trace() -\n 4. * np.triu(self.inv[:3, :3]).sum() +\n 3. * self.inv[3:, 3:].trace())", "title": "" }, { "docid": "139e5a880bf42ab96f1e97c12d0b786d", "score": "0.5272041", "text": "def gauss(self, mu, sigma):\n pass", "title": "" }, { "docid": "e952e878765b02e7efa8a78b9d3a3ee1", "score": "0.5262486", "text": "def _KLD_diag_gaussians(mu, log_sigma, prior_mu, prior_log_sigma):\n return prior_log_sigma - log_sigma + 0.5 * (tf.exp(2 * log_sigma)\n + tf.square(mu - prior_mu)) * tf.exp(-2. 
* prior_log_sigma) - 0.5", "title": "" }, { "docid": "39784702b7ef84286fe174b59114d798", "score": "0.5260531", "text": "def _g(self, RD):\n return 1.0 / math.sqrt(1.0 + 3.0 * math.pow(RD, 2.0) / math.pow(math.pi, 2.0))", "title": "" }, { "docid": "91b5829f4ec2d8af3d847ca0afb9c897", "score": "0.5248102", "text": "def initG_uniform_old(C, commlangdistros, innov = 0):\n\n g = commlangdistros[0,:].size\n c = commlangdistros[:,0].size\n n = C[:,0].size\n Gassertions(C, commlangdistros, g)\n \n G = matlib.zeros((n,g))\n for i in range(0,c):\n for h in range(0,C[:,0].size):\n if C[h,i] == 1:\n G[h] = commlangdistros[i,:]\n return G", "title": "" }, { "docid": "579e396569e9e92d66a631ad60cc4537", "score": "0.5235529", "text": "def gaussian_product_diag(d1, d2):\n inv_cov1 = 1 / np.diag(d1.cov)\n inv_cov2 = 1 / np.diag(d2.cov)\n sigma = 1 / (inv_cov1 + inv_cov2)\n mu = sigma * (inv_cov1*d1.mean + inv_cov2*d2.mean) \n return Gaussian(*mu, *sigma)", "title": "" }, { "docid": "9834df3259e407b7b7bba15b13995dc3", "score": "0.5230722", "text": "def calcgaussianp( self, frequency=None ):\n if frequency :\n diff = frequency - self.Bp\n else :\n diff = self.frequencies - self.Bp\n return exp( -0.5 * diff * diff * self.C2p )", "title": "" }, { "docid": "5201a4f88595c5589a9e007ba199474c", "score": "0.52282745", "text": "def shell_Green_grid_Arnoldi_Mmn_step(n,k, invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=False):\n #first, begin by orthogonalizing and normalizing unitMvecs[-1]\n #use relation U = V^{-1} - G\n \"\"\"\n see comment for analogous method for N waves, shell_Green_grid_Arnoldi_Nmn_step\n coef1 = Gmat[-1,-1]\n unitMvecs[-1] -= coef1*unitMvecs[-2]\n \n if Gmat.shape[0]>1: #since G has symmetric Arnoldi representation (so tridiagonal), G*M_j has non-zero overlap with M_j and M_{j-1}\n coef2 = Gmat[-2,-1]\n unitMvecs[-1] -= coef2*unitMvecs[-3]\n \n unitMvecs[-1][:] = np.real(unitMvecs[-1][:])\n \"\"\"\n vecnum = Gmat.shape[0]\n for i in range(vecnum):\n coef = rgrid_Mmn_vdot(unitMvecs[i], unitMvecs[-1], rsqrgrid,rdiffgrid)\n unitMvecs[-1] -= coef*unitMvecs[i]\n unitMvecs[-1][:] = np.real(unitMvecs[-1][:])\n \n norm = np.sqrt(rgrid_Mmn_normsqr(unitMvecs[-1], rsqrgrid,rdiffgrid))\n unitMvecs[-1] /= norm\n \n if plotVectors:\n rgrid_Mmn_plot(unitMvecs[-1], rgrid)\n \n #get new vector\n newvecM = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, unitMvecs[-1])\n \n newvecM[:] = np.real(newvecM)\n \n newGmat = np.zeros((Gmat.shape[0]+1,Gmat.shape[1]+1), dtype=np.complex)\n newGmat[:-1,:-1] = Gmat[:,:]\n \n newGmat[-1,-1] = rgrid_Mmn_vdot(unitMvecs[-1], newvecM, rsqrgrid,rdiffgrid)\n newGmat[-2,-1] = rgrid_Mmn_vdot(unitMvecs[-2], newvecM, rsqrgrid,rdiffgrid)\n newGmat[-1,-2] = newGmat[-2,-1]\n \n unitMvecs.append(newvecM) #append to end of unitMvecs for next round of iteration\n return newGmat", "title": "" }, { "docid": "821ba4e50a8fbfaea368b31eb7d0b02c", "score": "0.52278423", "text": "def pm(v,g):\r\n p = np.zeros_like(v)\r\n p[:,0] = v[:,0]*g; p[:,1] = v[:,1]*g; p[:,2] = v[:,2]*g\r\n return p", "title": "" }, { "docid": "2f7309efc8ce4006ec33e0e04b05c45f", "score": "0.5226854", "text": "def initG_indicator_old(C, commlangdistros, innov = 0):\n\n g = commlangdistros[0,:].size\n c = commlangdistros[:,0].size\n n = C[:,0].size\n Gassertions(C, commlangdistros, g)\n \n G = matlib.zeros((n,g))\n\n for comm in range(0,c):\n lowerbound = 0\n rangedict = {}\n for i in range(0,(commlangdistros[comm,:].size)):\n prob = commlangdistros[comm,:].item(i)\n if prob == 
0:\n continue\n rangedict[(lowerbound, lowerbound + prob)] = i\n lowerbound += prob\n for h in range(0,C[:,0].size):\n if C[h,comm] == 1:\n rando = random.random()\n for grange, grammar in rangedict.iteritems():\n if grange[0] <= rando and rando < grange[1]:\n if random.random() < innov:\n G[h,random.randint(0,g-1)] = 1\n else:\n G[h,grammar] = 1\n return G", "title": "" }, { "docid": "cd8416dc1d2cb6a79dfc7673b1add71c", "score": "0.52182436", "text": "def NonDiagonalElements(self,n=0,m=1):\n Hnm = sp.zeros(self.size,self.size)\n for i in range(0,self.size):\n for j in range(0,self.size):\n Hnm[i,j] = 1/self.T*sp.integrate(self.Vt[i,j]*sp.exp(-sp.I*(m-n)*self.G*t),(t,0,self.T),conds='none')\n return(Hnm)", "title": "" }, { "docid": "07b025ce908e487bd58063a1260f8a9e", "score": "0.521736", "text": "def apply_G(self):\n\n pvec = self.vec['p']\n uvec = self.vec['u']\n\n speed = pvec('v') * 1e2\n dist = pvec('x') * 1e6\n gamma = pvec('gamma') * 1e-1\n time = uvec('time')\n\n time_temp = ((dist[1:] - dist[0:-1]) /\n (((speed[1:] + speed[0:-1])/2) *\n numpy.cos((gamma[1:] + gamma[0:-1])/2)))\n time[0] = numpy.sum(time_temp)/1e4", "title": "" }, { "docid": "8800c004fa23a5a5fdee4e422efb623f", "score": "0.5215661", "text": "def gaussian(x,mu,fwhm):\n \n c = fwhm/(2* np.sqrt(2*np.log(2)))\n return np.exp(-1*((x-mu)**2/(2*c**2)))", "title": "" }, { "docid": "94d8016e105380d54b280b1a52e67a3b", "score": "0.52119476", "text": "def gauss_upper_diag_tridiag(M):\n M[0]/=M[0][0]\n for i in range(1,len(M[:,0])):\n M[i]-=M[i-1]*M[i,i-1]\n M[i]/=M[i][i]\n return M", "title": "" }, { "docid": "85c74a6d2fce1d009f1411e600eb8fc1", "score": "0.5210143", "text": "def generate():\n\n m = np.array([2., 5., 5., 2.])\n temp = 2.0 * (np.random.rand(4, 4)) # to random-generate S\n S = np.dot(temp, temp.T) # make sure S is positive semidefinite, symmetric\n d, U = np.linalg.eig(S)\n L = np.diagflat(d)\n A = np.dot(U, np.sqrt(L))\n X = np.random.randn(4, 1000)\n Y = np.dot(A,X) + np.tile(m.reshape(4,1), 1000) \n return S, m, Y", "title": "" }, { "docid": "7273e003b5fb28fd61827833dbe8f464", "score": "0.5208473", "text": "def _calc_m(x0, xt, tau=1):\n \n # These represent the C(tau) and C(0) covariance matrices\n # Note: x is an anomaly vector, no division by N-1 because it's undone\n # in the inversion anyways\n \n x0x0 = np.dot(x0.T, x0)\n x0xt = np.dot(xt.T, x0)\n\n # Calculate the mapping term G_tau\n G = np.dot(x0xt, pinv(x0x0))\n\n # Calculate the forcing matrix to check that all modes are damped\n Geigs = eigvals(G)\n Leigs = (1./tau) * np.log(Geigs)\n\n if np.any(Leigs.real >= 0):\n logger.debug('L eigenvalues: \\n' + str(Leigs))\n raise ValueError('Positive eigenvalues detected in forecast matrix L.')\n\n return G", "title": "" }, { "docid": "6102fccc3a1df6d52a7b25e0f71aea4f", "score": "0.52029395", "text": "def gsl_eigen_genherm_alloc(*args, **kwargs):\n return _gslwrap.gsl_eigen_genherm_alloc(*args, **kwargs)", "title": "" }, { "docid": "1abb2b137688ce80ceaffc51a5d512f5", "score": "0.5202235", "text": "def efficient_ACDM(self, X, n_iter):\n calc_KL_time = 0\n e = np.e \n B_size = len(self.B_)\n Z = len(self.S_)\n total_step = n_iter*B_size\n # Initialization of Theta\n theta_prime_tilde = dict()\n theta_prime_hat = dict()\n for phi in self.B_:\n theta_prime_tilde[phi] = 0.0\n theta_prime_hat[phi] = 0.0\n\n # Initialization of U\n U_hat = dict() \n U_prime_hat = dict() \n U_prime_tilde = dict()\n for x in self.S_:\n U_hat[x] = 0\n U_prime_hat[x] = 0.0\n U_prime_tilde[x] = 0.0\n # Initialization of Coefficients\n alpha 
= [0] * (total_step+1)\n gamma = [0] * (total_step+2)\n c_hat = [0] * (total_step+2)\n gamma[-1] =1/(2.0) \n c_hat[-1] = 3.0 + 2*math.sqrt(2.000) \n for k in range(total_step+1):\n gamma_k_before = gamma[k-1]\n gamma_k = math.sqrt(1.0/(4.0)+gamma_k_before**2)+1.0/(2.0)\n gamma[k] = gamma_k\n alpha[k] =1/(gamma_k )\n c_hat[k] = (1-alpha[k]) * c_hat[k-1]\n # Caching \n e_U_hat_dict = dict() \n for x in self.S_:\n e_U_hat_dict[x] = 1.0\n # Beginning of Iteration\n start = time.time()\n for iter_ in range(n_iter):\n for index in range(B_size):\n k = iter_*B_size + index\n phi = random.choice(self.B_)\n # Caching\n c_hat_k = c_hat[k]\n c_hat_k_after = c_hat[k+1]\n alpha_k_after = alpha[k+1]\n gamma_k = gamma[k]\n\n # calculate G, Z\n G = 0 \n for x in self.Ssub_[phi]:\n G += e_U_hat_dict[x]\n eta_ = G/Z\n # update tilde_theta_prime, hat_theta_prime\n nabla = (self.etahat_[phi] - eta_)\n step1 = 4.0 * gamma_k/B_size\n step2 = 4.0/c_hat_k_after*(1-alpha_k_after-(alpha_k_after+1-c_hat_k_after)*gamma_k/B_size)\n\n theta_prime_tilde[phi] += step1 * nabla\n theta_prime_hat[phi] += step2 * nabla\n for x in self.Ssub_[phi]:\n U_prime_tilde[x] += step1 * nabla\n U_prime_hat[x] += step2 * nabla\n U_hat_temp = (1-c_hat_k_after)*U_prime_tilde[x]+c_hat_k_after*U_prime_hat[x]\n e_U_hat_temp = e ** U_hat_temp\n Z += e_U_hat_temp - e_U_hat_dict[x]\n e_U_hat_dict[x] = e_U_hat_temp\n\n update_start_time = time.time()\n for x in self.B_:\n self.theta_[x] = (1-c_hat_k_after)*theta_prime_tilde[x]+c_hat_k_after*theta_prime_hat[x]\n self.theta_[phi] += 4*(self.etahat_[phi]-eta_)\n self.compute_theta_perp()\n self.compute_P()\n kl=self.compute_KL()\n calc_KL_time += time.time() - update_start_time \n print(iter_ ,\":\", \"KL divergence: \", f'{kl:.16f}' ,\" time : %4.2f\"% (time.time()-calc_KL_time-start),flush=True)", "title": "" }, { "docid": "f63554c2bb826513c5dea1ca73b00c83", "score": "0.5196821", "text": "def GS_classical(A):\n\n m, n = A.shape\n\n Q = np.zeros((m, n), dtype=A.dtype)\n R = np.zeros((n, n), dtype=A.dtype)\n\n for j in range(n):\n R[:j-1,j] = np.dot(Q[:,:j-1].T.conj(), A[:,j])\n Q[:,j] = A[:, j] - np.dot(Q[:,:j-1], R[:j-1,j])\n R[j,j] = np.linalg.norm(Q[:,j])\n Q[:,j] /= R[j,j]\n\n return Q, R", "title": "" }, { "docid": "15d3fc1df384b92cb613df958ec1fa5a", "score": "0.51892686", "text": "def gmm_pdf(X, centroids, ccov, mc, individual=False):\n if individual:\n pdf = np.zeros((len(X), len(centroids)))\n for i in range(len(centroids)):\n pdf[:,i] = mulnormpdf(X, centroids[i], ccov[i]) * mc[i]\n return pdf\n else:\n pdf = None\n for i in range(len(centroids)):\n pdfadd = mulnormpdf(X, centroids[i], ccov[i]) * mc[i]\n if pdf==None:\n pdf = pdfadd\n else:\n pdf = pdf + pdfadd\n return pdf", "title": "" }, { "docid": "d31c54b12c1baf7755080f3f45d71dc6", "score": "0.51869303", "text": "def gauss(self, mu, sigma):\n pass", "title": "" }, { "docid": "d31c54b12c1baf7755080f3f45d71dc6", "score": "0.51869303", "text": "def gauss(self, mu, sigma):\n pass", "title": "" }, { "docid": "d31c54b12c1baf7755080f3f45d71dc6", "score": "0.51869303", "text": "def gauss(self, mu, sigma):\n pass", "title": "" }, { "docid": "fb29cc8a52060aec3c186eedb3104457", "score": "0.51843107", "text": "def gauss_pdf_mat(x):\n sub = x - self.mean\n r0 = inv_cov * sub\n exponent = -0.5 * numpy.sum(sub.A * r0.A, axis=0)\n if (numpy.shape(exponent) != (x.shape[1],)):\n raise AssertionError(\"exponent has the wrong shape, should be (%d,), but is (%d,)\" % x.shape[1], exponent.shape[0])\n g = const * (numpy.e ** exponent)\n return g", "title": 
"" }, { "docid": "445110a070b4c7d159d0c08e5b1db617", "score": "0.5181291", "text": "def marginal_covar_matrix(self, g, beta, gamma, logphi, alpha, eval_gradient=False):\n # reshape g\n g = g.reshape(self.dim.R, self.dim.N).T # reshape \n g = np.column_stack((np.ones(self.dim.N), g)) # and append a col. of 1s\n\n # get the struct matrices\n struct_mats = np.array([sum(brd*Ld for brd, Ld in zip(br, self.basis_mats))\n for br in beta])\n # Add an axis for n=0,...,N-1\n At = struct_mats[None, :, :, :] * g[..., None, None]\n At = At.sum(1)\n\n # Get the additive noise error term\n #Gamma = np.diag(np.concatenate([gk*np.ones(self.dim.N) for gk in gamma]))\n Gamma = np.kron(np.eye(self.dim.N), np.diag(gamma))\n\n # Get the initial state covariance matrix\n C = np.kron(np.ones((self.dim.N, self.dim.N)),\n np.diag(alpha))\n \n ####\n # Construct the transformation matrix K\n W = self._weight_matrix\n K = sum(sparse.kron(sparse.kron(W[:, i][:, None],\n sparse.eye(1, self.dim.N, i)),\n At[i, ...])\n for i in range(self.dim.N))\n I = sparse.eye(self.dim.K)\n\n eifix = sparse.eye(1, self.dim.N, self.ifix)\n K += sum(sparse.kron(sparse.kron(sparse.eye(1, self.dim.N, i).transpose(),\n eifix),\n I)\n for i in range(self.dim.N))\n #_K = np.array(K.todense())\n if eval_gradient:\n dCdg = [np.zeros((self.dim.N*self.dim.K, self.dim.N*self.dim.K))]*self.dim.N*self.dim.R \n\n #_dCdg = np.zeros((self.dim.N*self.dim.R,\n # self.dim.N*self.dim.K,\n # self.dim.N*self.dim.K))\n\n # Gradient of the operator K wrt g_rn\n dK = [sparse.kron(sparse.kron(W[:, n][:, None],\n sparse.eye(1, self.dim.N, n)),\n struct_mats[r+1])\n for r in range(self.dim.R)\n for n in range(self.dim.N)]\n #_dK = np.array([np.kron(W[:, n][:, None], struct_mats[r+1])\n # for r in range(self.dim.R)\n # for n in range(self.dim.N)])\n\n dCdgamma = np.zeros((self.dim.N*self.dim.K,\n self.dim.N*self.dim.K,\n self.dim.K))\n dCdalpha = [np.kron(np.ones((self.dim.N, self.dim.N)),\n np.diag(np.eye(N=1, M=self.dim.K, k=k).ravel()))\n for k in range(self.dim.K)]\n\n for m in range(self.order):\n\n dCdg = np.array([K.dot(K.dot(dC_grn).T) for dC_grn in dCdg]) \n for i, dK_grn in enumerate(dK):\n kcdk = K.dot(dK_grn.dot(C).T)\n dCdg[i] += kcdk + kcdk.T\n\n # update gradient of C wrt to g_{rn}\n #CKt = K.dot(C).T\n #print(K.shape, _dCdg.shape)\n #expr = _K.dot(_dCdg)\n #print(expr.shape)\n #assert(False)\n #expr = dK.dot(CKt.reshape(self.dim.N, self.dim.K, self.dim.N*self.dim.K)).sum(2)\n #expr += expr.transpose(0, 2, 1)\n #expr2 = K.dot(K.dot(dCdg).T)\n #dCdg = expr + expr2\n #dCdg = [K.dot(dK_grn.dot(C).T) + \\\n # dK_grn.dot(K.dot(C).T) + \\\n # K.dot(K.dot(dC_grn).T)\n # for dK_grn, dC_grn in zip(dK, dCdg)]\n\n # update gradient of C wrt to Gamma\n dCdgamma = np.dstack((K.dot(K.dot(dCdgamma[..., i]).T)\n for i in range(dCdgamma.shape[-1])))\n\n diag_ek = np.zeros((self.dim.K, self.dim.K))\n for k in range(self.dim.K):\n diag_ek[k, k] = 1.\n dCdgamma[..., k] += np.kron(np.eye(self.dim.N), diag_ek)\n diag_ek[k, k] = 0.\n\n # update gradient of C wrt to alpha\n dCdalpha = [K.dot(K.dot(dCdak).T) for dCdak in dCdalpha]\n \n C = K.dot(K.dot(C).T) + Gamma\n\n #print(\"...done.\")\n dCdg = np.dstack(dCdg)\n return (C,\n dCdg,\n dCdgamma,\n np.dstack(dCdalpha))\n\n else:\n for m in range(self.order):\n C = K.dot(K.dot(C).T) + Gamma \n return C", "title": "" }, { "docid": "8f1f8a13fd6db519eedebdda1733c583", "score": "0.51747596", "text": "def build_generalized_fock_matrix(Num_MO,\n h_MO,\n g_MO,\n one_rdm,\n two_rdm ):\n F = np.zeros((Num_MO, Num_MO))\n for m in 
range(Num_MO):\n for n in range(Num_MO):\n for q in range(Num_MO):\n F[m,n] += one_rdm[m, q] * h_MO[n,q]\n for r in range(Num_MO):\n for s in range(Num_MO):\n F[m,n] += two_rdm[m,q,r,s] * g_MO[n,q,r,s]\n return F", "title": "" }, { "docid": "11a0ad90c0475b7c3d80acbe06eca254", "score": "0.5172434", "text": "def testQg(self):\n msg = self.case_name\n self.case.sort_generators() # ext2int\n _, _, gn = self.opf._remove_isolated(self.case)\n Qg = self.opf._get_qgen_var(gn, self.case.base_mva)\n\n mpQg0 = mmread(join(DATA_DIR, self.case_name, \"opf\", \"Qg0.mtx\"))\n mpQmin = mmread(join(DATA_DIR, self.case_name, \"opf\", \"Qmin.mtx\"))\n mpQmax = mmread(join(DATA_DIR, self.case_name, \"opf\", \"Qmax.mtx\"))\n\n self.assertTrue(mfeq1(Qg.v0, mpQg0.flatten()), msg)\n self.assertTrue(mfeq1(Qg.vl, mpQmin.flatten()), msg)\n# self.assertTrue(mfeq1(Qg.vu, mpQmax.flatten()), msg)\n self.assertTrue(mfeq1(Qg.vu, mpQmax.flatten()), msg)", "title": "" }, { "docid": "8e9ccae31f4dc6e4dfa70f81f1598ea1", "score": "0.5170576", "text": "def apply_G(self):\n\n alt_f = self.vec['u']('h_f')\n alt = self.vec['p']('h')\n\n alt_f[0] = alt[0]", "title": "" }, { "docid": "5d4405672a59c838bc935b1da26d2f99", "score": "0.5170253", "text": "def mh_gaussian(init,ys,ts,iters,fi=None):\n print(\"Running Metropolis Algorithm with Gaussian proposals.\")\n D = len(init)\n samples = np.zeros((iters,D))\n my_dist = dist(init)\n # initialize state and log-likelihood\n state = init.copy()\n Lp_state = my_dist.log_likelihood(ys,ts)\n accepts = 0.\n\n cov = (0.1)**2 * np.eye(D)*1./D\n for i in np.arange(0, iters):\n if fi is not None:\n write_samp(fi,state)\n # propose a new state\n prop = (np.random.multivariate_normal(state.ravel(), cov))\n move_p = np.log(scipy.stats.multivariate_normal(state,cov).pdf(prop))\n rev_p = np.log(scipy.stats.multivariate_normal(prop,cov).pdf(state))\n my_dist.set_params(prop)\n Lp_prop = my_dist.log_likelihood(ys,ts)\n rand = np.random.rand()\n\n if np.log(rand) < min(1,((Lp_prop+rev_p) - (Lp_state+move_p))):\n print (\"acct bc %s < %s (iter %s)\"%(np.log(rand),(Lp_prop-Lp_state),i))\n accepts += 1\n state = prop.copy()\n print state\n Lp_state = Lp_prop\n else:\n my_dist.set_params(state)\n samples[i] = state.copy()\n\n print 'Acceptance ratio', accepts/iters\n return samples", "title": "" }, { "docid": "f500066ec71462760a1d0c385aad5461", "score": "0.5167478", "text": "def gsl_eigen_genhermv_alloc(*args, **kwargs):\n return _gslwrap.gsl_eigen_genhermv_alloc(*args, **kwargs)", "title": "" }, { "docid": "0d6a3c40227d11bf75b9113f674b3f6d", "score": "0.51667595", "text": "def G ( x , y , z , u , v , w ) :\n result = 0.0 \n result += x * x * y + x * y * y \n result += z * z * u + z * u * u \n result += v * v * w + v * w * w \n result += x * z * w + x * u * v \n ## result += y * z * w + y * u * w ## <-- typo in E.Byckling & K.Kajantie\n result += y * z * v + y * u * w ## <-- correct line \n result -= x * y * ( z + u + v + w ) \n result -= z * u * ( x + y + v + w ) \n result -= v * w * ( x + y + z + u )\n ##\n return result", "title": "" }, { "docid": "2f13202e74890870c8cace5e32b59e68", "score": "0.51664", "text": "def gauss_f(d, fwhm=0.529177):\n sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))\n d2 = d * d\n return np.exp(-1 / (2 * sigma**2) * d2)", "title": "" }, { "docid": "f6218ad79a806807a819ee2c6b26d12f", "score": "0.5164684", "text": "def calc_F(H, G, m=None):\n assert_square(H)\n assert_square(G)\n\n n = H.shape[0]\n I = np.identity(n)\n IG = I-G\n\n if m:\n F = (np.trace(H.dot(G).dot(H)) / (m-1)) / 
(np.trace(IG.dot(G).dot(IG)) / (n-m))\n else:\n F = (np.trace(H.dot(G).dot(H))) / np.trace(IG.dot(G).dot(IG))\n\n return F", "title": "" }, { "docid": "47f3dcdeb2f2505d5b8da196d6868f5a", "score": "0.5158682", "text": "def generate_global_matrix_1D(request):\n\n gmsh_buffer, cell_type = request.param\n\n diff = 0.1\n vel = -1.0\n r = vel/diff\n\n with tempfile.NamedTemporaryFile(suffix='.msh') as temp:\n temp.write(gmsh_buffer.encode('utf-8'))\n temp.flush()\n points, cells, point_data, cell_data, field_data = meshio.read(temp.name)\n\n _points = np.array(points[:, 0], dtype='double', order='F')\n _cells = np.array(cells[cell_type] + 1, dtype='int32', order='F')\n # NOTE: Always change 0-based to 1-based indexing\n\n num_cells, num_pts_per_cell, num_pts = (\n _cells.shape[0],\n _cells.shape[1],\n _points.shape[0]\n )\n\n # NOTE: Can't do this anymore because fixture is parametrized\n # # Sanity checks on matrix sizes\n # assert num_cells == 52\n # assert num_pts_per_cell == 2\n # assert num_pts == 53\n\n # Zero Ie array\n A = np.zeros(\n (num_pts, num_pts),\n dtype='double',\n order='F'\n )\n\n # Initialize function\n f = helpers.set_assemble1D_c_args(\n num_cells,\n num_pts_per_cell,\n num_pts\n )\n\n print('\\nCalling: ', f.__name__, '\\n With N = ', num_pts)\n # Calculate Global stiffness matrix `A`\n f(\n num_cells,\n num_pts_per_cell,\n num_pts,\n _points,\n _cells,\n np.float64(diff),\n np.float(vel),\n A\n )\n\n # A 1D line in Gmsh notation will always have end points as pt 0 and 1\n left_list = [0]\n right_list = [1]\n\n # Set boundary condtions in Ie matrix\n for ii in left_list + right_list:\n A[ii,:] = 0.; A[ii,ii] = 1.\n\n # Print condition number of `A`\n print(' Cond(A) = ', np.linalg.cond(A), '\\n')\n\n # Set boundary condtions in RHS vector\n b = np.zeros((num_pts,))\n for ii in right_list:\n b[ii] = 1.\n\n x = np.linalg.solve(A, b)\n\n return x, r, _points", "title": "" }, { "docid": "982a3700e8d93dda58ba51fadea2b726", "score": "0.5156181", "text": "def _TFmdm_set_cosm(self):\n omega_lambda = self.q0\n omega_matter = self.m0\n omega_lambda_z = self.qz\n omega_matter_z = self.mz\n theta_cmb = self.theta_cmb\n redshift = self.z\n h2 = self.h*self.h\n omhh = self.m0*h2\n obhh = self.b0*h2\n onhh = self.n0*h2\n f_baryon = self.b0/self.m0\n f_hdm = self.n0/self.m0\n f_cdm = 1.0 - f_baryon - f_hdm\n f_cb = f_cdm + f_baryon\n f_bnu = f_baryon + f_hdm\n num_degen_hdm = self.nnu\n# equality scale (Eqn 1 in EH99)\n z_equality = 25000.0*omhh/theta_cmb**4 # Actually 1+z_eq \n k_equality = 0.0746*omhh/theta_cmb**2\n# drag epoch and sound horizon (Eqn 2)\n z_drag_b1 = 0.313*pow(omhh,-0.419)*(1+0.607*pow(omhh,0.674))\n z_drag_b2 = 0.238*pow(omhh,0.223)\n z_drag = 1291*pow(omhh,0.251)/(1.0+0.659*pow(omhh,0.828))*\\\n (1.0+z_drag_b1*pow(obhh,z_drag_b2))\n# (Eqn 3)\n y_drag = z_equality/(1.0+z_drag)\n# (Eqn 4)\n sound_horizon_fit = 44.5*np.log(9.83/omhh)/np.sqrt(1.0+10.0*np.power(obhh,0.75))\n# Set up for the free-streaming & infall growth function */\n# (Eqn 11)\n p_c = 0.25*(5.0-np.sqrt(1+24.0*f_cdm))\n p_cb = 0.25*(5.0-np.sqrt(1+24.0*f_cb))\n# (Eqn 10)\n# growth_k0 is used for calculating the free streaming scale\n growth_k0 = (z_equality/(1.0+redshift)*2.5*\n omega_matter_z/(pow(omega_matter_z,4.0/7.0)-\n omega_lambda_z+(1.0+omega_matter_z/2.0)*\n (1.0+omega_lambda_z/70.0)))\n# for scale-independent growth of power spectrum \n# (same as Density.growth_factor)\n# growth_to_z0 = (z_equality*2.5*omega_matter/\n# (pow(omega_matter,4.0/7.0)-\n# omega_lambda +\n# 
(1.0+omega_matter/2.0)*(1.0+omega_lambda/70.0)))\n# growth_to_z0 = growth_k0/growth_to_z0\n\n# Compute small-scale suppression \n# (Eqn 15)\n alpha_nu = (f_cdm/f_cb*(5.0-2.*(p_c+p_cb))/(5.-4.*p_cb)*\n pow(1+y_drag,p_cb-p_c)*\n (1+f_bnu*(-0.553+0.126*f_bnu*f_bnu))/\n (1-0.193*np.sqrt(f_hdm*num_degen_hdm)+\n 0.169*f_hdm*pow(num_degen_hdm,0.2))*\n (1+(p_c-p_cb)/2*(1+1/(3.-4.*p_c)/(7.-4.*p_cb))/(1+y_drag))\n )\n alpha_gamma = np.sqrt(alpha_nu)\n# (Eqn 21)\n beta_c = 1/(1-0.949*f_bnu)\n# Done setting scalar variables\n return(omhh, f_hdm, f_cb, growth_k0, p_cb, \n alpha_gamma, sound_horizon_fit, beta_c)", "title": "" }, { "docid": "abbe4f6aecc94299d8d616b1db8be9bd", "score": "0.5147316", "text": "def calculateGMag(x):\n g_vector = np.array([x['g_x'], x['g_y'], x['g_z']])\n return np.sqrt(g_vector.dot(g_vector))", "title": "" }, { "docid": "374289d702f40c9b3e42bf5dd38c2470", "score": "0.51468635", "text": "def initG_indicator(hindices, commlangdistros, innov = 0):\n\n g = commlangdistros[0,:].size\n c = commlangdistros[:,0].size\n n = hindices[0].shape[1]\n# print \"n\", hindices[0].shape[1]\n# Gassertions(C, commlangdistros, g)\n \n G = matlib.zeros((n,g))\n\n\n randmat = nprandom.rand(G.shape[0],1)\n\n for b in range(0,n):\n if randmat[b] < innov:\n G[b,random.randint(0,g-1)] = 1\n else:\n comm = np.asarray(hindices[1])[0][b]\n distr = np.asarray(commlangdistros[comm,:])[0]\n G[b,:] = nprandom.multinomial(1, distr)\n\n return G", "title": "" }, { "docid": "57531a2ef18270590abf01d4616a5f79", "score": "0.51447994", "text": "def test_MonteCarlo():\n\tfrom diffusion_model import energy\t#use diffusion model energy function as test for functionality of MC minimisation\n\timport nose.tools\n\tfrom nose.tools import assert_equal\n\tfrom nose.tools import assert_true\n\tfrom random import randint\n\timport random\n\timport matplotlib.pyplot as plt\n\t\n\t#minimum energy state doesn't change at absolute zero\n\tfor n in range (3): #try three times\n\t\tdensity=[1.0,1.0,1.0,1.0]\n\t\tdensity=MonteCarlo (\"diffusion_model\",\"energy\",density,0)\n\t\tassert_equal(density,[1.0,1.0,1.0,1.0])\n\t\n\t#energy state lowers until minimum energy state is reached at absolute zero or iterated 100 times\n\tdensity=range(10)\n\tfor i in range (10):\n\t\tdensity[i]=random.randint(1,10) #fill density with random integers\n\tif max(density)-min(density)<=1:\n\t\tdensity_backup=density[:]\n\t\tMonteCarlo(\"diffusion_model\",\"energy\",density,0)\n\t\tassert_true(density=density_backup)\n\telse:\n\t\tE0=energy(density)\n\t\tx=100\n\t\tMCEnergy=[]\n\t\twhile (max(density)-min(density)>=2) and x>0: #if distribution is non-uniform energy minimum is not reached\n\t\t\tMCEnergy.append(energy(density))\n\t\t\tdensity=MonteCarlo(\"diffusion_model\",\"energy\",density,0)\n\t\t\tx-=1\n\t\tprint \"energy changed by \", MCEnergy\n\t\tplt.plot(MCEnergy)\n\t\tplt.show()\n\tassert_true(E0>energy(density))", "title": "" }, { "docid": "2e650b8a358b8258ff4bbeef4cd13aeb", "score": "0.51432973", "text": "def evaluate_mog(self, x, y):\n\t\tf = np.zeros(x.shape)\n\t\tparams = self.pos.copy()\n\t\tparams[:,2] = np.exp(params[:,2])\n\t\tparams[:,3] = np.exp(params[:,3])\n\t\tparams[:,4] = np.exp(0.5*params[:,4])\n\n\t\tfor i in range(self.num):\n\t\t\tgaussian = Gaussian(params=params[i, :])\n\t\t\tf += gaussian.evaluate(x, y)\n\t\treturn f", "title": "" }, { "docid": "147355d6af861a805ab2b384ef15cdaf", "score": "0.5142504", "text": "def locmat(nodes,de): #local stiffness (jacobian) and force (residual) over the reference element\r\n# 
print(nodes.shape)\r\n# print(de.shape)\r\n Xi=np.tile(GP.xi,OrdGauss)\r\n Eta=np.repeat(GP.xi,OrdGauss)\r\n dof=de.reshape(de.size,-1,1).repeat(len(Xi),axis=-1) #arranging dof for (dot) product with B (len(Xi) and not len(GP.xi)) !!!\r\n Wg=np.outer(GP.wght,GP.wght).flatten() \r\n Nshp=np.kron(np.eye(geom.nDim).reshape(geom.nDim,geom.nDim,-1),B.Ns(Xi,Eta)) #kron has to be taken on nDim (and not OrdGauss)\r\n# print(np.array(B.Ns(Xi,Eta)).shape)\r\n gDshpL=np.array(B.dN(Xi,Eta)).reshape(geom.nDim,int(Eltype[1]),-1) #local derivatives\r\n Je=np.einsum('ilk,lj->ijk',gDshpL,nodes.reshape(geom.nDim,-1).T) #computing the jacobian\r\n detJ=(Je[0,0,:]*Je[1,1,:]-Je[0,1,:]*Je[1,0,:])\r\n# print(detJ)\r\n# Jeinv=1/detJ*np.array([[Je[1,1,:],-Je[0,1,:]],[-Je[1,0,:],Je[0,0,:]]]) #avoid computing inverse on a loop (--check ?)\r\n Jeinv=1/detJ*np.einsum('ijk->jik',Je)\r\n gDshpG=np.einsum('ilk,ljk->ijk',Jeinv,gDshpL) #global derivatives \r\n Bmat=np.kron(np.eye(geom.nDim).reshape(geom.nDim,geom.nDim,-1),gDshpG) \r\n# B1=Bmat.copy()\r\n gradU=np.einsum('ilk,ljk->ijk',Bmat,dof) #remember that gradU is never symmetric !!!\r\n# print(gradU)\r\n \"\"\"\r\n Computing the deformation gradient (F12,F11,F22).T = B*de, and first piola (S) --> (S12,S11,S22), \r\n Multiplying by the Gauss-weights, and calculating the element residual\r\n \"\"\"\r\n F=gradU+np.array([1,0,0,1.]).reshape(-1,1,1).repeat(len(Xi),axis=-1) #computing F as (F11,F11,F22) shape: 3x1xNGP\r\n detF=F[0]*F[3]-F[1]*F[2] \r\n WpI1=dWdIi.DWDI1(*F)\r\n WpJ=dWdIi.DWDJ(*F)\r\n WppI1=dWdIi.D2WDI12(*F)\r\n WppJ=dWdIi.D2WDJ2(*F)\r\n Finv=np.array([F[3],-F[1],-F[1],F[0]])/detF #avoid computing inverse on the loop for the deformation gradient \r\n# Helpful variables: \r\n S=WpI1*2*F+(WpJ*detF).reshape(1,1,-1)*Finv[np.array([0,2,1,3],int)] #notice the swap of axes for transpose\r\n fac=Wg*detJ*geom.thck\r\n S*=fac #multiplying S by the determinant of the jacobian, thickness, and gauss-weights\r\n res=np.einsum('lik,ljk->ij',Bmat,S) #double contraction along axis 1 and 2 (of B)\r\n \"\"\"\r\n Computing the Consistent Tangent:D= B^T *C *B <-- Cijkl, check notes\r\n Cijkl = 4*W''_(I1) Fij Fkl + 2 W'_(I1) delik deljl + J**2*W''_(J) F-1ji F-1lk +J*W'_(J) F-1ji F-1lk - J W'_(J) F-1jk F-1li \r\n F = (F11,F12,f21,F22).T\r\n \"\"\"\r\n F11=F[0];F12=F[1];F21=F[2];F22=F[3]\r\n #This C does not have minor symmetry (relates S to F) , only major symmetry \r\n \r\n C1111=4*WppI1*F11*F11+2*WpI1+detF**2*WppJ*Finv[0]**2 #scalar addition to multi-dimensional array (--check??) 
\r\n C1112=4*WppI1*F11*F12+detF**2*WppJ*Finv[0]*Finv[2]\r\n C1121=4*WppI1*F11*F21+detF**2*WppJ*Finv[0]*Finv[1]\r\n C1122=4*WppI1*F11*F22+detF**2*WppJ*Finv[0]*Finv[3]+detF*WpJ*Finv[0]*Finv[3]-detF*WpJ*Finv[1]*Finv[2]\r\n C1212=4*WppI1*F12*F12+2*WpI1+detF**2*WppJ*Finv[2]**2\r\n C1221=4*WppI1*F12*F21+detF**2*WppJ*Finv[2]*Finv[1]+detF*WpJ*(Finv[2]*Finv[1] -Finv[3]*Finv[0])\r\n C1222=4*WppI1*F12*F22+detF**2*WppJ*Finv[2]*Finv[3]\r\n C2121=4*WppI1*F21*F21+2*WpI1+detF**2*WppJ*Finv[1]**2\r\n C2122=4*WppI1*F21*F22+detF**2*WppJ*Finv[2]*Finv[3]+detF*WpJ*(Finv[2]*Finv[3] -Finv[3]*Finv[1])\r\n C2222=4*WppI1*F22*F22+2*WpI1+detF**2*WppJ*Finv[3]**2\r\n \r\n C1111=C1111.flatten()\r\n C1112=C1112.flatten()\r\n C1121=C1121.flatten()\r\n C1122=C1122.flatten()\r\n C1212=C1212.flatten()\r\n C1221=C1221.flatten()\r\n C1222=C1222.flatten()\r\n C2121=C2121.flatten()\r\n C2122=C2122.flatten()\r\n C2222=C2222.flatten()\r\n \r\n C=np.array([[C1111,C1112,C1121,C1122],\r\n [C1112,C1212,C1221,C1222],\r\n [C1121,C1221,C2121,C2122],\r\n [C1122,C1222,C2122,C2222]])\r\n \r\n D=np.einsum('lik,lpk,pjk->ij',Bmat,C,Bmat) #Check the multiplication once for a simple case!\r\n# print(res)\r\n return {'K':D,\r\n 'F':res.flatten(),\r\n 'Stress':S,\r\n 'DefGrad':F,\r\n 'InptGlobal':np.einsum('ilj,l->ij',Nshp,nodes),\r\n 'NGP':len(Xi)}", "title": "" }, { "docid": "42e165342059e3872fbe57292d7dca3b", "score": "0.5142341", "text": "def Me(self) -> np.ndarray:\n ip, iw = self.gauss\n psi = self.psi(ip)\n dpsi = self.dpsi(ip)\n J = self.J(dpsi)\n dJ = self.dJ(J)\n\n Me = np.zeros((self._NODES * self._DOFS, self._NODES * self._DOFS), dtype=_FLOAT)\n\n for i in range(ip.shape[1]):\n N = self.N(psi, i)\n Me += self.material.rho * (N.T @ N) * dJ[i] * iw[i]\n\n return Me", "title": "" } ]
7f5f715f96636816652301188d4a7d08
Return the linear transformation of `y` by `x` or `x` by `y` when one or both of `x` and `y` is a LinearTransform instance
[ { "docid": "8e445e1df5116820f63b2719a02595de", "score": "0.5793339", "text": "def dot_shape(x, y):\n if isinstance(x, LinearTransform):\n return dot_shape_from_shape(x, tuple(y.shape))\n elif isinstance(y, LinearTransform):\n return dot_shape_from_shape(tuple(x.shape), y)\n else:\n raise TypeError('One of x or y should be a LinearTransform')", "title": "" } ]
[ { "docid": "345bd03e2d81a8f108ff75220e9a3080", "score": "0.66478336", "text": "def transform(self, x, y):\n \n return numpy.array((x, y, 1)).dot(self._matrix.T)[0:2]", "title": "" }, { "docid": "fa8bde99f2c683e4d0c5f040c3610a54", "score": "0.61888194", "text": "def _transform(self, X, y=None):\n Xt = X\n if y is None:\n for _, name, transform in self._iter():\n Xt = transform.transform(Xt)\n return Xt\n else:\n yt = y\n for _, name, transform in self._iter():\n if self._skip_transform(transform):\n continue\n if isinstance(transform, YTransformer):\n yt = transform.fit_transform(yt, Xt)\n elif isinstance(transform, XAndYTransformer) \\\n or isinstance(transform, XOrYTransformer):\n Xt, yt = transform.fit_transform(Xt, yt)\n else:\n Xt = transform.transform(Xt)\n return Xt, yt", "title": "" }, { "docid": "604a70a828d7f86e7da3af8a59043cc7", "score": "0.6155586", "text": "def dot(x, y):\n if isinstance(x, LinearTransform):\n return x.rmul(y)\n elif isinstance(y, LinearTransform):\n return y.lmul(x)\n else:\n return theano.dot(x,y)", "title": "" }, { "docid": "d1e2e76ac248b15f3fe52ba49d9c2e2d", "score": "0.61270404", "text": "def transl2(x, y=None):\n\n if np.isscalar(x):\n T = np.identity(3)\n T[:2, 2] = [x, y]\n return T\n elif argcheck.isvector(x, 2):\n T = np.identity(3)\n T[:2, 2] = argcheck.getvector(x, 2)\n return T\n elif argcheck.ismatrix(x, (3, 3)):\n return x[:2, 2]\n else:\n ValueError('bad argument')", "title": "" }, { "docid": "17af9d93b6e6ebaca1b4ce1fc3f65f1f", "score": "0.6072614", "text": "def map_linear(self, x, x1, x2, y1, y2):\n m = (y2 - y1) / (x2 - x1)\n return y1 + m * (x - x1)", "title": "" }, { "docid": "036bcff5e071f20c9213b74f2f95e346", "score": "0.60661876", "text": "def transl2(x=None, y=None):\n if type(x) is np.matrix:\n if common.ishomog(x, [3, 3]):\n return x[:2, 2]\n elif type(x) is list:\n if len(x) == 2:\n temp = np.matrix([[x[0]], [x[1]]])\n temp = np.concatenate((np.eye(2), temp), axis=1)\n return np.concatenate((temp, np.matrix([[0, 0, 1]])), axis=0)\n elif x is not None and y is not None:\n t = np.matrix([[x], [y]])\n return rt2tr(np.eye(2), t)", "title": "" }, { "docid": "6519c308d44c63d4e0ade99223bafac4", "score": "0.59372556", "text": "def fit_transform(self, X, y):\n return self.fit(X, y).transform(X)", "title": "" }, { "docid": "6519c308d44c63d4e0ade99223bafac4", "score": "0.59372556", "text": "def fit_transform(self, X, y):\n return self.fit(X, y).transform(X)", "title": "" }, { "docid": "0b416f951b48cd1db0012a9df8ab197e", "score": "0.5922566", "text": "def fit_transform(self, X, y):\n return self.fit(X, y).transform(X, y)", "title": "" }, { "docid": "79661e5a747e4bd148302284edfd07c3", "score": "0.58838165", "text": "def _linear(x):\n return x", "title": "" }, { "docid": "1dc8bd313822acf238fdcb062104679f", "score": "0.58141077", "text": "def fit_transform(self, X, y=None, **trans_kwargs):\n return self.fit(X, y).transform(X, **trans_kwargs)", "title": "" }, { "docid": "21a11d91aefdcc52a0cbd3d1c5b3e051", "score": "0.57625264", "text": "def translate(self, x, y):\n raise NotImplementedError", "title": "" }, { "docid": "fafba04d80dffdced53979a3bdfc98f7", "score": "0.57254046", "text": "def coefToTransformation(xcoef, ycoef):\n return np.linalg.inv(np.array([[xcoef[1], xcoef[2], xcoef[0]],\n [ycoef[1], ycoef[2], ycoef[0]],\n [0, 0, 1]]))", "title": "" }, { "docid": "65d80324de6d127517d0343b86f84baf", "score": "0.56842816", "text": "def fit_transform(self, X, y=None, **fit_params):\n return self.fit(X, y).transform(X)", "title": "" }, { "docid": 
"545129a28728ae0b31e0bdc87be5b323", "score": "0.56673855", "text": "def fit_transform(self, X, y=None):\n self.fit(X, y)\n return self.transform(X)", "title": "" }, { "docid": "1bf532328d6bb21597494a37e774d3de", "score": "0.56484675", "text": "def _spatial_transform(transform, x, y):\n x = x - transform.origin[0]\n y = y - transform.origin[1]\n\n theta = -transform.rotation * np.pi/180\n x, y = (x * np.cos(theta) - y * np.sin(theta),\n y * np.cos(theta) + x * np.sin(theta))\n\n if transform.magnification is not None:\n x = x / transform.magnification\n y = y / transform.magnification\n\n if transform.x_reflection:\n y = -y\n\n return x, y", "title": "" }, { "docid": "74540150c323740a2011e48dd61d4a52", "score": "0.56308955", "text": "def fit_transform(self, X, y=None, **inputs):\n return self.fit(X, y=y, **inputs).transform(X, y)", "title": "" }, { "docid": "f5fba4bb25605c43d9cc0788ccfb4f82", "score": "0.5621015", "text": "def comp4983_lin_reg_fit(x: np.ndarray, y: np.ndarray):\n return np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)", "title": "" }, { "docid": "80ed02b562471a2c5adb13f5e5c378ab", "score": "0.5608641", "text": "def linear(x):\n return x", "title": "" }, { "docid": "80ed02b562471a2c5adb13f5e5c378ab", "score": "0.5608641", "text": "def linear(x):\n return x", "title": "" }, { "docid": "92a290b37fdc25d18dfcfd39d993300c", "score": "0.56079733", "text": "def translate(self, x, y, transformFrame=True):\n return self.transform(Transform(1, 0, 0, 1, x, y), transformFrame=transformFrame)", "title": "" }, { "docid": "7216cf900cd91b7ba23e44b662e4d12c", "score": "0.56032175", "text": "def fit_transform(self, x, y, p=[]):\n\n self.fit(x, y, p)\n\n return self.transform(x)", "title": "" }, { "docid": "ea6a01624c35ec9e687596895f204028", "score": "0.5596607", "text": "def transform(self, X, y=...):\n ...", "title": "" }, { "docid": "f1c4872bc15c9b6606a681bfedbe0f92", "score": "0.55920976", "text": "def transform(self,\r\n x: np.ndarray,\r\n y: Optional[np.ndarray] = None) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\r\n\r\n if self.input_vector:\r\n x = x.reshape(-1, 1)\r\n\r\n x_new = []\r\n for i in x[:, 0]:\r\n values_list = list(map(self.label_dict.get, i.split(self.sep)[-self.order:]))\r\n while len(values_list) < self.order:\r\n values_list = [self.label_dict[self.r_just_string]] + values_list\r\n x_new.append(values_list)\r\n\r\n if y is None:\r\n if self.return_vector:\r\n return np.array(x_new).ravel()\r\n else:\r\n return np.array(x_new)\r\n else:\r\n return np.array(x_new), np.vectorize(self.label_dict.get)(y)", "title": "" }, { "docid": "e4647c3dc32232fc4b2581c2b4b16862", "score": "0.5582409", "text": "def conv( x, y, trans_matrix=None ):\n\n\tif trans_matrix:\n\t\txt = trans_matrix[0][0] * x + trans_matrix[0][1] * y + trans_matrix[0][2]\n\t\tyt = trans_matrix[1][0] * x + trans_matrix[1][1] * y + trans_matrix[1][2]\n\t\treturn xt, yt\n\telse:\n\t\treturn x, y", "title": "" }, { "docid": "cfac40ece33cfbf69f870fe945be1a82", "score": "0.5582098", "text": "def _fit_transform_one(transformer,\n X,\n y,\n weight,\n message_clsname='',\n message=None,\n **fit_params):\n with _print_elapsed_time(message_clsname, message):\n if hasattr(transformer, 'fit_transform'):\n res = transformer.fit_transform(X, y, **fit_params)\n else:\n res = transformer.fit(X, y, **fit_params).transform(X)\n\n if (isinstance(transformer, XAndYTransformer) or isinstance(transformer, XOrYTransformer)) and weight is None:\n return *res, transformer\n elif (isinstance(transformer, 
XAndYTransformer) or isinstance(transformer, XOrYTransformer)) and weight is not None:\n res = res * weight\n return *res, transformer\n elif weight is None:\n return res, transformer\n else:\n return res * weight, transformer", "title": "" }, { "docid": "4fe97b03cab4543dbe8b4affee73223f", "score": "0.55807245", "text": "def linear_func(xq: float, x0: float, x1: float, y0: float, y1: float):\n slope = (y1 - y0) / (x1 - x0)\n return (slope * (xq - x0)) + y0", "title": "" }, { "docid": "2a2bf193d825d471d0ef415e7e27bea9", "score": "0.5578561", "text": "def transform(self, x, y, wcsID):\n\n self.commit()\n if wcsID == 0: return (x, y, wcsID)\n\n # Since transformation is defined by a direct linear (or log) mapping\n # between two rectangular windows, apply the usual linear\n # interpolation.\n\n # log scale does not affect the w numbers at all, a plot\n # ranging from 10 to 10,000 will have wx1,wx2 = (10,10000),\n # not (1,4)\n\n return (self.transform1d(coord=x,dimension='x',wcsID=wcsID),\n self.transform1d(coord=y,dimension='y',wcsID=wcsID),\n wcsID)", "title": "" }, { "docid": "6efff56618ab65237a4402c413928f0d", "score": "0.5553774", "text": "def fit_transform(self, X, y=None, **fit_params):\n # fit method of arity 2 (supervised transformation)\n return self.fit(X, y, **fit_params).transform(X)", "title": "" }, { "docid": "8d77c1600b995b4c67e2bb81116ad5bd", "score": "0.55082804", "text": "def fit_transform(self, X, y=None, **fit_params):\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n if y is None:\n # fit method of arity 1 (unsupervised transformation)\n return self.fit(X, **fit_params).transform(X)\n else:\n # fit method of arity 2 (supervised transformation)\n return self.fit(X, y, **fit_params).transform(X)", "title": "" }, { "docid": "3e2a01f266c56af4eeaba1d718578fd6", "score": "0.54977244", "text": "def fit_transform(self, X, y=None, **fit_params):\r\n # non-optimized default implementation; override when a better\r\n # method is possible for a given clustering algorithm\r\n if y is None:\r\n # fit method of arity 1 (unsupervised transformation)\r\n return self.fit(X, **fit_params).transform(X)\r\n else:\r\n # fit method of arity 2 (supervised transformation)\r\n return self.fit(X, y, **fit_params).transform(X)", "title": "" }, { "docid": "f6c1910b2a7b382ee630ae23e9a134b4", "score": "0.54848564", "text": "def translate(x):\n\n x = T.as_tensor_variable(x)\n\n m = T.eye(4, 4)\n m = T.set_subtensor(m[0,3], x[0])\n m = T.set_subtensor(m[1,3], x[1])\n m = T.set_subtensor(m[2,3], x[2])\n\n mInv = T.eye(4, 4)\n mInv = T.set_subtensor(mInv[0,3], -x[0])\n mInv = T.set_subtensor(mInv[1,3], -x[1])\n mInv = T.set_subtensor(mInv[2,3], -x[2])\n\n return Transform(m, mInv)", "title": "" }, { "docid": "6346997d9b8902c970b07fd5be330f36", "score": "0.5482218", "text": "def linear(x, p0, p1):\r\n return p1*x + p0", "title": "" }, { "docid": "16c2cd29ac51f63fbf99a763173e5596", "score": "0.5462316", "text": "def fit_transform(self, X, y=None, **fit_params):\n self.fit(X, y, **fit_params)\n return self.transform(X)", "title": "" }, { "docid": "16c2cd29ac51f63fbf99a763173e5596", "score": "0.5462316", "text": "def fit_transform(self, X, y=None, **fit_params):\n self.fit(X, y, **fit_params)\n return self.transform(X)", "title": "" }, { "docid": "9422ce42921eb3fc8c3738d5d69412f6", "score": "0.54554415", "text": "def get_x_to_y(x,y):\n return ROOT.TVector3(y.X()-x.X(),y.Y()-x.Y(),y.Z()-x.Z())", "title": "" }, { "docid": 
"0ea8b310040ea2a81cc13606ed6d6b2a", "score": "0.54348564", "text": "def linear_transformation(arg0, arg1=None, arg2=None, side='left'):\n from sage.matrix.constructor import matrix\n from sage.modules.module import is_VectorSpace\n from sage.modules.free_module import VectorSpace\n from sage.categories.homset import Hom\n from sage.symbolic.ring import SymbolicRing\n from sage.modules.vector_callable_symbolic_dense import Vector_callable_symbolic_dense\n from inspect import isfunction\n\n if not side in ['left', 'right']:\n raise ValueError(\"side must be 'left' or 'right', not {0}\".format(side))\n if not (is_Matrix(arg0) or is_VectorSpace(arg0)):\n raise TypeError('first argument must be a matrix or a vector space, not {0}'.format(arg0))\n if is_Matrix(arg0):\n R = arg0.base_ring()\n if not R.is_field():\n try:\n R = R.fraction_field()\n except (NotImplementedError, TypeError):\n msg = 'matrix must have entries from a field, or a ring with a fraction field, not {0}'\n raise TypeError(msg.format(R))\n if side == 'right':\n arg0 = arg0.transpose()\n side = 'left'\n arg2 = arg0\n arg0 = VectorSpace(R, arg2.nrows())\n arg1 = VectorSpace(R, arg2.ncols())\n elif is_VectorSpace(arg0):\n if not is_VectorSpace(arg1):\n msg = 'if first argument is a vector space, then second argument must be a vector space, not {0}'\n raise TypeError(msg.format(arg1))\n if arg0.base_ring() != arg1.base_ring():\n msg = 'vector spaces must have the same field of scalars, not {0} and {1}'\n raise TypeError(msg.format(arg0.base_ring(), arg1.base_ring()))\n\n # Now arg0 = domain D, arg1 = codomain C, and\n # both are vector spaces with common field of scalars\n # use these to make a VectorSpaceHomSpace\n # arg2 might be a matrix that began in arg0\n D = arg0\n C = arg1\n H = Hom(D, C, category=None)\n\n # Examine arg2 as the \"rule\" for the linear transformation\n # Pass on matrices, Python functions and lists to homspace call\n # Convert symbolic function here, to a matrix\n if is_Matrix(arg2):\n if side == 'right':\n arg2 = arg2.transpose()\n elif isinstance(arg2, (list, tuple)):\n pass\n elif isfunction(arg2):\n pass\n elif isinstance(arg2, Vector_callable_symbolic_dense):\n args = arg2.parent().base_ring()._arguments\n exprs = arg2.change_ring(SymbolicRing())\n m = len(args)\n n = len(exprs)\n if m != D.degree():\n raise ValueError('symbolic function has the wrong number of inputs for domain')\n if n != C.degree():\n raise ValueError('symbolic function has the wrong number of outputs for codomain')\n arg2 = [[e.coefficient(a) for e in exprs] for a in args]\n try:\n arg2 = matrix(D.base_ring(), m, n, arg2)\n except TypeError as e:\n msg = 'symbolic function must be linear in all the inputs:\\n' + e.args[0]\n raise ValueError(msg)\n # have matrix with respect to standard bases, now consider user bases\n images = [v*arg2 for v in D.basis()]\n try:\n arg2 = matrix([C.coordinates(C(a)) for a in images])\n except (ArithmeticError, TypeError) as e:\n msg = 'some image of the function is not in the codomain, because\\n' + e.args[0]\n raise ArithmeticError(msg)\n else:\n msg = 'third argument must be a matrix, function, or list of images, not {0}'\n raise TypeError(msg.format(arg2))\n\n # arg2 now compatible with homspace H call method\n # __init__ will check matrix sizes versus domain/codomain dimensions\n return H(arg2)", "title": "" }, { "docid": "b6f43f7d78d90d037f01998225679f50", "score": "0.5431147", "text": "def htranslation2(x=0, y=0):\n return sp.Matrix([[1, 0, x],\n [0, 1, y],\n [0, 0, 1]])", "title": "" }, { 
"docid": "cb0ea76e8b38d8df6d6311237c47164f", "score": "0.54303443", "text": "def asLinearOperator(X, parallel=False):\n if not parallel: \n def matvec(v): \n return X.dot(v)\n \n def rmatvec(v): \n return X.T.dot(v)\n \n def matmat(V): \n return X.dot(V)\n \n def rmatmat(V): \n return X.T.dot(V)\n else:\n def matvec(v): \n return X.pdot(v)\n \n def rmatvec(v): \n return X.T.pdot(v)\n \n def matmat(V): \n return X.pdot(V)\n \n def rmatmat(V): \n return X.T.pdot(V)\n \n return GeneralLinearOperator(X.shape, matvec, rmatvec, matmat, rmatmat, X.dtype)", "title": "" }, { "docid": "e1947e5e2a9321b4092295a0df3b53fe", "score": "0.5429983", "text": "def mmd_linear(X, Y):\r\n delta = X.mean(0) - Y.mean(0)\r\n return delta.dot(delta.T)", "title": "" }, { "docid": "698e86b357b2602de9f485298490b1c9", "score": "0.5415955", "text": "def transform(self, x):\n return self._transform(x, inverse=False)", "title": "" }, { "docid": "99a7b47409b93c7660d9f9a6ce9246a4", "score": "0.53800523", "text": "def linear_regression(x, y):\r\n model = LinearRegression()\r\n return get_theta_vector(model.fit(x, y))", "title": "" }, { "docid": "b2e9316c55d257b005dbdb14e6c7d566", "score": "0.5355075", "text": "def warp_xy(x, y, old_crs, new_crs):\n if old_crs == new_crs:\n return x,y\n\n old_crs_proj = pyproj.Proj(old_crs)\n new_crs_proj = pyproj.Proj(new_crs)\n return pyproj.transform(old_crs_proj, new_crs_proj, x,y)", "title": "" }, { "docid": "3c2842af3a7cb509a3768f515091206e", "score": "0.5347562", "text": "def linear_regression(self, dataset):\n\n x,y = dataset\n # solution (explain how to achieve this analitical expression)\n w = np.dot(np.linalg.inv(np.dot(x.T,x)), np.dot(x.T,y))\n return w", "title": "" }, { "docid": "c2adc562449d6978109120f28938e605", "score": "0.53445894", "text": "def dot_shape_from_shape(x, y):\n if isinstance(x, LinearTransform):\n if type(y) != tuple:\n raise TypeError('y should be tuple', y)\n return x.col_shape() + x.split_right_shape(y, False)[1]\n elif isinstance(y, LinearTransform):\n if type(x) != tuple:\n raise TypeError('x should be tuple', x)\n return y.split_left_shape(x, False)[0] + y.row_shape()\n else:\n raise TypeError('One of x or y should be a LinearTransform')", "title": "" }, { "docid": "08bafeda826a360b52decb448a38d781", "score": "0.5338421", "text": "def translate(self, x=0, y=0):\n fx, fy = self.functions\n return self.func((fx + x, fy + y), self.limits)", "title": "" }, { "docid": "01ced9b525c98325de3dcd6ec6c91c72", "score": "0.5335345", "text": "def transl(x=None, y=None, z=None):\n if type(x) is np.matrix:\n if common.ishomog(x, [4, 4]):\n return x[:3, 2]\n elif type(x) is list:\n if len(x) == 3:\n temp = np.matrix([[x[0]], [x[1]], [x[2]]])\n temp = np.concatenate((np.eye(3), temp), axis=1)\n return np.concatenate((temp, np.matrix([[0, 0, 0, 1]])), axis=0)\n # todo trajectory case\n elif x is not None and y is not None and z is not None:\n t = np.matrix([[x], [y], [z]])\n return rt2tr(np.eye(3), t)\n else:\n raise AttributeError(\"Invalid arguments\")", "title": "" }, { "docid": "d5179424098e0ecb03a92769a1ee4603", "score": "0.5329425", "text": "def transform(self, y, X=None, **kwargs):\n check_is_fitted(self, \"lam1_\")\n\n lam1 = self.lam1_\n lam2 = self.lam2_\n\n y, exog = self._check_y_X(y, X)\n y += lam2\n\n neg_mask = y <= 0.\n if neg_mask.any():\n action = self.neg_action\n msg = \"Negative or zero values present in y\"\n if action == \"raise\":\n raise ValueError(msg)\n elif action == \"warn\":\n warnings.warn(msg, UserWarning)\n y[neg_mask] = self.floor\n\n if 
lam1 == 0:\n return np.log(y), exog\n return (y ** lam1 - 1) / lam1, exog", "title": "" }, { "docid": "b1075bd44587e2002f2faefff4e7a92f", "score": "0.5313699", "text": "def fit_transform(self, X: Any, y: Any = None) -> np.ndarray:\n self.fit(X, y)\n\n return self.transform(X, y)", "title": "" }, { "docid": "2cc63fc6a334092265052c1a16c153c5", "score": "0.5301931", "text": "def linear_interpolation(y1, x1, y2, x2, v11, v12, v21, v22, p_y, p_x):\n # x interpolation\n if x1 == x2:\n p1 = v11\n p2 = v21\n else:\n diffx = (x2 - x1)\n rat1 = (x2 - p_x) / diffx\n rat2 = (p_x - x1) / diffx\n\n p1 = v11 * rat1 + v12 * rat2\n p2 = v21 * rat1 + v22 * rat2\n\n # y interpolation\n if y1 == y2:\n p = p1\n else:\n diffy = (y2 - y1)\n rat1 = (y2 - p_y) / diffy\n rat2 = (p_y - y1) / diffy\n\n p = p1 * rat1 + p2 * rat2\n\n return p", "title": "" }, { "docid": "a8c422513293361f99988172e3a50ed5", "score": "0.5298343", "text": "def r_linear_regression(x, y):\n x_dim = x.shape[1]\n for i in range(x_dim):\n xr_i = FloatVector(x[:, i])\n robjects.globalenv[\"x%d\" % i] = xr_i\n yr = FloatVector(y.view(-1))\n robjects.globalenv[\"y\"] = yr\n formula = \"y~\" + \"+\".join(\"x%d\" % i_ for i_ in range(x_dim))\n robjects.r(\"fit <- lm(%s)\" % formula)\n robjects.r(\"lin_reg_out <- coef(fit)\")\n lin_reg_out = robjects.r[\"lin_reg_out\"]\n intercept = float(lin_reg_out[0])\n weights = torch.DoubleTensor(lin_reg_out[1:])\n weights[torch.isnan(weights)] = 0.0\n return LinearFunction(intercept, weights)", "title": "" }, { "docid": "eec4fcef1023921d928f77ea9c345a54", "score": "0.52963626", "text": "def invert_point(x, y, transform):\r\n return transform.inverted().transform_point((x, y))", "title": "" }, { "docid": "a09d400451f0063cecb358b31cedf907", "score": "0.5287271", "text": "def transform(self, X, y=None):\n return X.todense()", "title": "" }, { "docid": "86a01aaef5e853c0351baef90de71199", "score": "0.52816355", "text": "def fromTranslate(self, p_float, p_float_1): # real signature unknown; restored from __doc__\r\n return QTransform", "title": "" }, { "docid": "39ca3e0cf14af46ab1e7cb650dd3efd2", "score": "0.52799237", "text": "def regressionLine(x, y):\n x2 = [float(i**2) for i in x]\n y2 = [float(i**2) for i in y]\n xy = [x[i]*y[i] for i in xrange(0,len(x))]\n a = (sum(x2)*sum(y) - sum(x)*sum(xy))/(len(x)*sum(x2) - sum(x)**2)\n b = (len(x)*sum(xy) - sum(x)*sum(y))/(len(x)*sum(x2) - sum(x)**2)\n return (round(a,4),round(b,4))", "title": "" }, { "docid": "125c1e93354f846e9bbea0a0c2d624bb", "score": "0.52734256", "text": "def linterp(x1, x2, y1, y2, x):\n return tf.div_no_nan((x * (y2 - y1) + y1 * x2 - x1 * y2), (x2 - x1))", "title": "" }, { "docid": "06ed52f661f42d7a629443f3e0e5b0f2", "score": "0.5272565", "text": "def lin_interp(X, x1, y1, x2, y2):\n\n m = (y2 - y1) / (x2 - x1)\n Y = m * (X - x1) + y1\n return Y", "title": "" }, { "docid": "8f8611dc2081d7c076624c5a9d048ed9", "score": "0.52725315", "text": "def translate(self, x, y):\n p = self.points.copy()\n p[0::2] = p[0::2] + x\n p[1::2] = p[1::2] + y\n\n return self.copy(p)", "title": "" }, { "docid": "b017400b3741aafd12495a76c706f6d6", "score": "0.52689767", "text": "def linear_regression_0(x, y):\n n = len(x)\n sum_x = sum(x)\n sum_y = sum(y)\n sum_x2 = sum(x**2)\n sum_xy = sum(x*y)\n delta = n*sum_x2-sum_x**2\n m = (n*sum_xy-sum_x*sum_y)/delta\n c = (sum_x2*sum_y-sum_x*sum_xy)/delta\n return m, c", "title": "" }, { "docid": "83a7d325af13e1e85b55f13c3a7fe475", "score": "0.5267445", "text": "def rotate_to_source(linear: Dict[Tuple[str, int], ITeg],\n 
target_vars: List[TegVar],\n source_vars: List[TegVar]) -> List[ITeg]:\n rotation = []\n num_vars = len(target_vars)\n exprs = [linear[(s_var.name, s_var.uid)] for s_var in source_vars]\n for source_index in range(num_vars):\n if source_index == 0:\n rotation.append(sum((Const(1) if i == 0 else Const(-1)) * exprs[i] * target_vars[i] for i in range(num_vars)))\n elif source_index < len(linear):\n i = source_index\n inverse_rotation = sum(((Const(1) if i == j else Const(0))\n - (exprs[i] * exprs[j]) / (1 + exprs[0])) * target_vars[j]\n for j in range(1, num_vars))\n rotation.append(inverse_rotation + exprs[i] * target_vars[0])\n else:\n raise ValueError(f'Requested source coordinate index: {source_index} is invalid.')\n\n return rotation", "title": "" }, { "docid": "ea6aa49b3001a80ad302ba7a4f612122", "score": "0.5250189", "text": "def fit_linear_regression(X: np.array, y: np.array) -> (np.array, np.array):\r\n w = np.linalg.pinv(X).T @ y\r\n s = svd(X, compute_uv=False)\r\n return w,s", "title": "" }, { "docid": "740ee0c68e59906e81db190c4acaf793", "score": "0.5248054", "text": "def linear(params, x, data=None):\n\ta = params['slope'].value\n\tb = params['offs'].value\n\tmodel = a*x + b\n\tif data is None:\n\t\treturn model\n\telse:\n\t\treturn model-data", "title": "" }, { "docid": "179f1d49f1096918f085b67f5aebd4dc", "score": "0.52468514", "text": "def invert_transform(self, Y, X=None):", "title": "" }, { "docid": "29277c7db8d402cbf45f543542f92fe2", "score": "0.52465487", "text": "def to_linear_transformation(self, reset=False):\n tf = np.dot(self.B * self.D, self.B.T)\n if reset:\n self.reset()\n return tf", "title": "" }, { "docid": "a6a2f6543f0ca5b521f5e3b474fc898d", "score": "0.5240029", "text": "def transform(x,y,theta):\n s = np.sin(theta)\n c = np.cos(theta)\n return np.matrix([[c,-s,x],[s,c,y],[0,0,1]])", "title": "" }, { "docid": "4c84ec28b33b33a292a996a1cb857821", "score": "0.52322036", "text": "def linear_interpolation (x1: float,\n x2: float,\n y1: float,\n y2: float,\n x3: float) -> float:\n\n f_linear = interp1d([x1, x2], [y1, y2])\n y3 = float(f_linear(x3))\n\n return y3", "title": "" }, { "docid": "b69a4c9ac7eecf77bd74790c76afda85", "score": "0.522647", "text": "def _make_transform(rxy, gxy, bxy, wxy):\n xy = np.array([rxy, gxy, bxy]).transpose()\n xyz = np.append(xy, (1 - xy[0] - xy[1])[np.newaxis, :], axis = 0)\n wxyz = np.append(wxy, 1 - wxy[0] - wxy[1])\n wXYZ = wxyz / wxyz[1]\n I = np.linalg.inv(xyz)\n scale = np.inner(I, wXYZ)\n M = xyz * scale[np.newaxis, :]\n I *= (1 / scale)[:, np.newaxis]\n return M, I", "title": "" }, { "docid": "9ea196ef6e48af2af09fe39c1a20b478", "score": "0.52213454", "text": "def forward(x: Any, y: Any) -> Any:\n if isinstance(x, torch.Tensor) or isinstance(y, torch.Tensor):\n out = torch.mul(x, y)\n else:\n out = x * y\n return out", "title": "" }, { "docid": "ba23f58b15766521c74a4fb8e1ca8f83", "score": "0.52193224", "text": "def transformPoint(x, y, from_srs, to_srs):\n coordXform = osr.CoordinateTransformation(from_srs, to_srs)\n yRound = round(y, 8)\n xRound = round(x, 8)\n\n result = coordXform.TransformPoint(xRound, yRound)\n\n gx = result[0]\n gy = result[1]\n\n return gx, gy", "title": "" }, { "docid": "9de4b9d0af4f1ba17fd554d549867fbe", "score": "0.52089655", "text": "def lin_interp(x, x0, x1, y0, y1):\n y = y0 + (y1-y0)*(x-x0)/(x1-x0)\n \n return y", "title": "" }, { "docid": "e9c6fb973a55d4f73c88d913af5a508e", "score": "0.5193385", "text": "def translate_matrix((a,b,c,d,e,f), (x,y)):\n return (a,b,c,d,x*a+y*c+e,x*b+y*d+f)", "title": 
"" }, { "docid": "c8ff3dfc15a06975def89dd6a9b70d98", "score": "0.5190883", "text": "def to_linear_transformation(self, reset=False):\n tf = self.C**0.5\n if reset:\n self.reset()\n return tf", "title": "" }, { "docid": "3111e1e27da9a1e1fec617edc752cee7", "score": "0.5185958", "text": "def Mxform(x1,y1,x2,y2):\n return Jones.toMueller([[dot(x2,x1), dot(x2, y1)], [dot(y2,x1), dot(y2,y1)]])", "title": "" }, { "docid": "070323b9c290f430a524142f5e3f3c59", "score": "0.51817906", "text": "def fit_transform(self, X, y, verbose=False, fit_kwargs=None):\n self._fit(X, y, verbose=verbose, fit_kwargs=fit_kwargs)\n return self._transform(verbose=verbose)", "title": "" }, { "docid": "964cb557be4d061a161ec47a3a145d45", "score": "0.517799", "text": "def line_equation(x1, y1, x2, y2):\n if x2 == x1:\n return (1, 0, -x1) # special case for vertical line\n m = (y2 - y1) / (x2 - x1)\n a = m\n b = -1\n c = -m * x1 + y1\n return (a, b, c)", "title": "" }, { "docid": "ae1fef3a1872e39f54f55b16e8d47235", "score": "0.5173923", "text": "def linear_transformation(self, A):\n assert A.shape[0] == self.N \n return MultivariateNormal(A.T @ self.u, A.T @ self.C @ A)", "title": "" }, { "docid": "3da223ba2015997747c6a3746f0c1a3b", "score": "0.5141163", "text": "def fit_transform(self, X, y=None, sample_weight=None, sample_cost=None):\n\n\t\treturn self.fit(X, y=y, sample_weight=sample_weight, \n\t\t\tsample_cost=sample_cost).transform(X, y=y, \n\t\t\tsample_weight=sample_weight)", "title": "" }, { "docid": "88e623c49a4e0960bcbbd8965aa05f88", "score": "0.5136432", "text": "def linear(self):\n return self._linear", "title": "" }, { "docid": "671f5c82a80f429ac3943baf9fd52347", "score": "0.5136224", "text": "def inverse_transform(self, x):\n return self._transform(x, inverse=True)", "title": "" }, { "docid": "f833a7fe581a9b9864a1fb3d1857a59e", "score": "0.5126777", "text": "def linear_interpolator(xs, ys):\n\n def return_function(xp):\n \"\"\" xp is the x value to find the interpolation \"\"\"\n for i in range(len(xs)-1):\n # Define interpolating range for this iteration\n x0 = xs[i]\n x1 = xs[i+1]\n if not (x0 <= xp <= x1):\n continue\n y0 = ys[i]\n y1 = ys[i+1]\n xd = (xp - x0)/float(x1 - x0)\n yp = y0*(1-xd) + y1*xd\n return yp\n return None\n return return_function", "title": "" }, { "docid": "46253b212b9c308d8b70a28749c3d07f", "score": "0.5123316", "text": "def least_squares(y, tx):\r\n a = tx.T.dot(tx)\r\n b = tx.T.dot(y)\r\n return np.linalg.solve(a, b)", "title": "" }, { "docid": "96be74a8ac7fbefb74414b20b0dedcc5", "score": "0.51219106", "text": "def lmodel(x, a, b):\n y = a + b*x\n return y", "title": "" }, { "docid": "49d0422787a01e260b9e2bd7a859f466", "score": "0.51181537", "text": "def inverse_transform(self, y, X=None):\n check_is_fitted(self, \"lam1_\")\n\n lam1 = self.lam1_\n lam2 = self.lam2_\n\n y, exog = self._check_y_X(y, X)\n if lam1 == 0:\n return np.exp(y) - lam2, exog\n\n numer = y * lam1 # remove denominator\n numer += 1. # add 1 back to it\n de_exp = numer ** (1. 
/ lam1) # de-exponentiate\n return de_exp - lam2, exog", "title": "" }, { "docid": "3ceb65633c739ba2cb2c7f2aae090821", "score": "0.511662", "text": "def __solve_linear_syste(self, A, b):\n dx = np.linalg.solve(A, b)\n return dx", "title": "" }, { "docid": "bbe72778f5cc04b44b4e015afda61e21", "score": "0.51158327", "text": "def fit_line(x, y):\n slope, intercept, r, p, stderr = sps.linregress(x, y)\n return slope, intercept, r**2", "title": "" }, { "docid": "486f6e0f7ded4508cbca25577e86ee94", "score": "0.511329", "text": "def linear_fit(y, x, m=None, method='robust'):\n Model = RLM if method == 'robust' else OLS\n if m is None:\n X = np.empty((len(y), 2))\n X[:, 0] = x\n X[:, 1] = 1\n res = Model(y, X).fit()\n return res.params\n else:\n X = np.ones(len(y))\n res = Model(np.array(y) - m * np.array(x), X).fit()\n return m, res.params[0]", "title": "" }, { "docid": "098ed8e16c52479f34c4336cb344fc9f", "score": "0.51080406", "text": "def transform_y(self, y):\n self.logging('transforming y...')\n y_new = y.copy()\n for t in self.transformers:\n self.logging('-> {}'.format(t.name))\n y_new = t.transform_y(y_new)\n return y_new", "title": "" }, { "docid": "3258c1239ecea3bbee91751c83173069", "score": "0.5103071", "text": "def fit_transform(self, X, y=None, **params):\n self.fit(X, **params)\n\n X_transformed= self.X_transformed\n \n return X_transformed", "title": "" }, { "docid": "6f025c51b493eed3eed5d41868615391", "score": "0.5101539", "text": "def translation(img, x, y):\n\n rows, cols = _get_rows_cols(img)\n M = np.float32([[1 ,0 , int(x)], [0, 1, int(y)]])\n return cv2.warpAffine(img, M, (cols, rows))", "title": "" }, { "docid": "266d6591390d0be464af05bf43df5b73", "score": "0.50921667", "text": "def linear_regression(x, y, uncert_x=None, uncert_y=None):\n # if only one uncertainty is given, calculate analytically\n if uncert_x is None:\n return linear_regression_1(x, y, uncert_y)\n elif uncert_y is None:\n # as linear_regression_1 assumes uncertainties in y, switch the axes\n m, um, c, uc, corr, chisq = linear_regression_1(y,x,uncert_x)\n sigma_c = sqrt(uc**2/m**2+c**2*um**2/m**4-c/m**3*corr*um*uc)\n return 1/m, um/m**2, -c/m, sigma_c, corr, chisq\n \n # For a first guess, assume a slope around 1 and weight both uncertainties\n # equal. 
Calculate initial values analytically.\n uncert_sum = uncert_x + uncert_y\n m0, um0, c0, uc0, corr0, chisq0 = linear_regression_1(x, y, uncert_sum)\n\n def f(B, x):\n return B[0]*x + B[1]\n \n model = scipy.odr.Model(f)\n data = scipy.odr.RealData(x, y, sx=uncert_x, sy=uncert_y)\n odr = scipy.odr.ODR(data, model, beta0=[m0, c0])\n output = odr.run()\n ndof = len(x)-2\n chiq = output.res_var*ndof\n sigma_m = sqrt(output.cov_beta[0,0])\n sigma_c = sqrt(output.cov_beta[1,1])\n corr = output.cov_beta[0,1] /sigma_m /sigma_c\n \n return (output.beta[0], sigma_m, output.beta[1], sigma_c, corr, chiq)", "title": "" }, { "docid": "8b4f9cfaafae4561d55ff075f493cdd0", "score": "0.5088912", "text": "def piecewise_linear(x: float)->float:\n if x < -1:\n return -1\n if x > 1:\n return 1\n return x", "title": "" }, { "docid": "398a2b6e104450ea556165a3edbd6972", "score": "0.50806266", "text": "def linefit(x1, y1, x2, y2):\n if x2 == x1:\n m = Inf\n b = NaN\n else:\n m = old_div((y2 - y1), (x2 - x1))\n b = y1 - m * x1\n return m, b", "title": "" }, { "docid": "67fc09d123264b9b24fcc268b9496f0c", "score": "0.50646925", "text": "def linear(target, X, A1='', A2=''):\n A = _parse_args(target=target, key=A1, default=0.0)\n B = _parse_args(target=target, key=A2, default=0.0)\n X = target[X]\n\n r = A * X + B\n S1 = A\n S2 = B\n values = {'S1': S1, 'S2': S2, 'rate': r}\n return values", "title": "" }, { "docid": "3ae36658d69a2948e9b75bb716e6c99f", "score": "0.5064198", "text": "def translate(self, p_float, p_float_1): # real signature unknown; restored from __doc__\r\n return QTransform", "title": "" }, { "docid": "b0a645104402136fb7da09f3f8dc7a5c", "score": "0.5058161", "text": "def compute_linear_regression_line(a0, a1):\n return f'{a0} + {a1}x'", "title": "" }, { "docid": "8f128f9f226d8717014d847a65f6cb0b", "score": "0.50547147", "text": "def fit_transform(self, X, y=...):\n ...", "title": "" }, { "docid": "7c14700936f5936326d580b38a7a4b8d", "score": "0.5054449", "text": "def fit_transform(self, X, y=None, **fit_params):\n\n\n results = self._parallel_func(X, y, fit_params, _fit_transform_one)\n if not results:\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n\n Xs, transformers = zip(*results)\n self._update_transformer_list(transformers)\n\n Xs = self._filter_skips(Xs)\n return self._custom_hstack(Xs)", "title": "" }, { "docid": "695fabf3004a581ea06dae99e9b42625", "score": "0.50521016", "text": "def L(x, y, x0, y0):\n return P3.F(x0,y0) + P3.F_x(x0,y0)*(x-x0) + P3.F_y(x0,y0)*(y-y0)", "title": "" }, { "docid": "c9806f2d3426c4bdb5fe2970f534bba6", "score": "0.5050443", "text": "def _linear_predictor(self, X):\n check_is_fitted(self)\n X = self._validate_data(\n X,\n accept_sparse=[\"csr\", \"csc\", \"coo\"],\n dtype=[np.float64, np.float32],\n ensure_2d=True,\n allow_nd=False,\n reset=False,\n )\n return X @ self.coef_ + self.intercept_", "title": "" }, { "docid": "8a2542682a4aea3e3b5c763e9cc151f9", "score": "0.5047879", "text": "def lerp(x, x0, x1, y0, y1):\n t = (x-x0)/(x1 - x0)\n return (1 - t) * y0 + t * y1", "title": "" } ]
78b9154d02d7e2e82a6670697ecad797
Called when app is resized.
[ { "docid": "54a5013ed2c7440077ce98fbcf991d1d", "score": "0.65565294", "text": "def on_resize_parent(self,event):\n #print(\"parent event size=\"+str(event.width)+\" X \"+str(event.height))\n self.canvas_width = event.width\n self.canvas_height = event.height\n self.canvas.config(width=self.canvas_width, height=self.canvas_height)\n self.show_image()", "title": "" } ]
[ { "docid": "cf95f21cd7739a672db6e5c661a5ca90", "score": "0.8178788", "text": "def onResize(self, window, width, height):\n pass", "title": "" }, { "docid": "e1743c098b05d52132a2b8d8e82d29e0", "score": "0.80237764", "text": "def _resize(self, event):\n pass", "title": "" }, { "docid": "545f61a7170492b00c7bae3c814618dd", "score": "0.78258556", "text": "def resizeEvent(self, event):\n\n # -- Call the function which handles the tab mode switching\n self.setTabMode(self.tabMode)\n\n # -- Store the current window size in our scribble settings\n self.storeWidgetGeometry()", "title": "" }, { "docid": "bd179e6627dd9fd92de561925238420b", "score": "0.77364415", "text": "def resizeEvent(self, event):\n QtGui.QMainWindow.resizeEvent(self, event)\n QtGui.QApplication.processEvents()", "title": "" }, { "docid": "ac0dea63a46529b934e56916f8c9309a", "score": "0.7684432", "text": "def resizeEvent(self,event):\n self.grab_default_dimensions()", "title": "" }, { "docid": "17f72fd5c27658d73eb945ad02c4cdd2", "score": "0.7643923", "text": "def on_resize( self, width, height ):\r\n # we don't do anything by default\r\n pass", "title": "" }, { "docid": "395879c1edefadb20a47477d3ebc5fcf", "score": "0.7602799", "text": "def resizeEvent(self, new_size):\n\n self.app.settings['mainwindow_w'] = new_size.size().width()\n self.app.settings['mainwindow_h'] = new_size.size().height()", "title": "" }, { "docid": "d3ab0786d88fe417a524eba3b9f10b5f", "score": "0.75728744", "text": "def on_resize(self, event):\n self.context.set_viewport(0, 0, event.size[0], event.size[1])\n for visual in self.visuals:\n visual.on_resize(event.size)\n self.update()", "title": "" }, { "docid": "04f643a929684c613ea1b8e7c1a789c7", "score": "0.7476696", "text": "def resizeEvent(self, event):\n \n # perform the default resizing\n super(PlotCanvas, self).resizeEvent(event)\n # fix the layout\n self.fixLayout()", "title": "" }, { "docid": "2b9f6f33fcb706563753028a1405e87c", "score": "0.7400655", "text": "def on_resize(self, width, height):\n # label\n self.label.y = height - 10\n G.eventhandler.call(\"user:window:resize\", width, height)", "title": "" }, { "docid": "86663dd22bc723587e2991dcc7a363e9", "score": "0.7394615", "text": "def resizeEvent(self, event):\n super(QContainer, self).resizeEvent(event)\n self.resized.emit()", "title": "" }, { "docid": "77da8abdda864503551d76db56f268ec", "score": "0.73922807", "text": "def resize(self, event):\n print(\"Screen.resize()\")\n if event:\n self.width = event.width\n self.height = event.height\n self.draw_grid()", "title": "" }, { "docid": "fd1aac8efc588442d8eae6a14af92cd0", "score": "0.7337735", "text": "def _on_parent_resized(self):\n # resize overlay\n self.resize(self.parentWidget().size())", "title": "" }, { "docid": "5aee5937ac70940fc2a79f9b5e0fa13e", "score": "0.73191726", "text": "def _auto_resize(self, window):\n if self.get_size() != self.winsize:\n self.winsize = self.get_size()\n self._app.emit(\"widget-layout-changed\", self)", "title": "" }, { "docid": "4235b3631ae0d04bf3a84d0e3cfe6ddb", "score": "0.7298679", "text": "def _onResize(self, event):\n if 'gtk2' in wx.PlatformInfo or 'gtk3' in wx.PlatformInfo:\n self._doResize()\n else:\n wx.CallAfter(self._doResize)\n event.Skip()", "title": "" }, { "docid": "ca8e0ae1cb9207a3f3477b323a2e6200", "score": "0.72599375", "text": "def on_resize_event(self, event):\n self.width = event.width\n self.height = event.height\n log.debug(\"New size 'ManageMyDico' : {}x{}\".format(self.width, self.height))", "title": "" }, { "docid": 
"e73a4b3bbb1509305aa68f94765cfd0d", "score": "0.7212062", "text": "def resizeEvent(self, event):\n super(XImageSlider, self).resizeEvent(event)\n \n self.recalculate()", "title": "" }, { "docid": "a98d5a2f3464f593aeb9e2bf44a645ea", "score": "0.71496797", "text": "def on_resize_parent(self, event):\n\n self.statusbar.config(width = event.width, height = 2)", "title": "" }, { "docid": "636e8c6f36b13ac8feccd1f284874bf3", "score": "0.71427923", "text": "def resizeEvent(self, event):\n # update the image scale\n self._update_pixmap_size()\n super(ShotWidget, self).resizeEvent(event)", "title": "" }, { "docid": "aa6fa9b49d0a61d993c93d3ce6bf5201", "score": "0.7137341", "text": "def handle_resize(event):\n # grabbing the newely resized windows size\n scale_size = event.dict['size']\n\n # save these scaling factors globally as they affect the global screen\n # rendering\n global scale_x\n global scale_y\n\n # grabbing new scaling factor values\n (x_o, y_o) = size_o\n (x_1, y_1) = scale_size\n # calculating new scaling factor values\n scale_x = (x_1 / x_o)\n scale_y = (y_1 / y_o)", "title": "" }, { "docid": "098b2f0cfede484173fe38e39e73931a", "score": "0.7097699", "text": "def resizeEvent(self, event: QtGui.QResizeEvent) -> None:\n\n self.views[0].resize()\n\n super(ViewMain, self).resizeEvent(event)", "title": "" }, { "docid": "74ebbd543242d6d43e9b3dcf535a9cd5", "score": "0.7042027", "text": "def resize(self, event):\r\n padding = 4 # not sure why this is required, but if I don't include it it growths continually\r\n self.size = max(min(event.width - padding, event.height - padding), 100)\r\n self.update_indicator()", "title": "" }, { "docid": "d3c12195a44d7ea84a07a701c3e48b3a", "score": "0.6989664", "text": "def on_resize_parentx(self, event):\n self.statusbar.config(width = event.width, height = 2)", "title": "" }, { "docid": "00e63848501c41f29fd637bfc5ee3351", "score": "0.69684976", "text": "def OnSize( self, evt ):\n if not self.IsMaximized():\n self.preMaxSize = self.GetSize()", "title": "" }, { "docid": "7af3e4f793d485e1fd1caf0c52c66287", "score": "0.6942225", "text": "def _updateSizes(self) -> None:\n print(f'QmlOffscreenRenderer._updateSizes: {self.size}')\n width, height = self.size.toTuple()\n self._window.setGeometry(0, 0, width, height)\n self.rootItem.setWidth(width)\n self.rootItem.setHeight(height)", "title": "" }, { "docid": "aff5fc4c09588e1a7408797b2018d98c", "score": "0.69375074", "text": "def resizeEvent(self, event):\n super(Editor, self).resizeEvent(event)\n self.resize_signal.emit(event)", "title": "" }, { "docid": "1b90d7c48229884795b49280a3a47ced", "score": "0.69124967", "text": "def resizeEvent(self, event):\r\n QtGui.QSplitter.resizeEvent(self, event)\r\n\r\n parent = self.parent()\r\n if (not self._initialized and parent and\r\n (self.isVisible() or isinstance(parent, QtGui.QMainWindow))):\r\n self._initialized = True\r\n self._resize_items()", "title": "" }, { "docid": "4660be854a6f5d1cc04138bf2f8d2078", "score": "0.68822205", "text": "def resizeEvent(self, event):\r\n QDialog.resizeEvent(self, event)\r\n self.size_change.emit(self.size())", "title": "" }, { "docid": "f15e715bcac281544615f13e4abc649d", "score": "0.68478733", "text": "def handle_key_resize_event(self):\n self.read_screen_size()\n if self.prompt_for_screen_size():\n self.redraw()\n else:\n self.keep_running = False", "title": "" }, { "docid": "d530ec8c5bd73b76e222741a9a4e8b03", "score": "0.6823641", "text": "def componentResized(self, e):\n gCanvas.initOffscreenImage()\n if gCanvas.isShowing():\n 
gCanvas.repaint()", "title": "" }, { "docid": "8cf8c9857378022ef38587717e3777e6", "score": "0.68174404", "text": "def resizeEvent(self, event):\n super(QResizingGroupBox, self).resizeEvent(event)\n self.resized.emit()", "title": "" }, { "docid": "39f12a918c809ca410f776b0a36c6a91", "score": "0.68159217", "text": "def on_resize(self, event):\n width, height = self.GetClientSize()\n self.buffer = wx.Bitmap(width, height)\n self.update_bitmap()", "title": "" }, { "docid": "c09bdf83637867959da01096cd97b590", "score": "0.67809826", "text": "def OnSize(self,event):\n\t\tself.Refresh()", "title": "" }, { "docid": "c19bb33feebe718e73108c0b8960463f", "score": "0.677057", "text": "def resizeEvent(self, event):\n if not self._page:\n return\n\n self.resizePolygonsToPixmap()", "title": "" }, { "docid": "aeba497f1fb2b5a7b5d04667ecd2a172", "score": "0.6724207", "text": "def canvasSizeChanged(self, app):\n\n self.x = int(self.xFrac * app.width) - self.width // 2\n self.y = int(self.yFrac * app.height) - self.height // 2", "title": "" }, { "docid": "ad92c8ca2670bf39ae2f78d573eebbec", "score": "0.6718328", "text": "def on_resize(self, width, height):\n # label\n self.label.y = height - 10\n # reticle\n if self.reticle:\n self.reticle.delete()\n x, y = self.width // 2, self.height // 2\n n = 10\n self.reticle = pyglet.graphics.vertex_list(4, ('v2i', (x - n, y, x + n, y, x, y - n, x, y + n)))\n\n if G.inventoryhandler.should_game_freeze():\n G.inventoryhandler.send_event(\"on_resize\", width, height)", "title": "" }, { "docid": "16d79c51fa64494c89aa8ba635cdafe9", "score": "0.6717966", "text": "def _tk_resize(self,event):\n w = self._window.winfo_width()-self._canvas._dw\n h = self._window.winfo_height()-self._canvas._dh\n self._canvas._lastw = self._canvas._currw\n self._canvas._lasth = self._canvas._currh\n self._canvas._currw = w\n self._canvas._currh = h\n dw = self._canvas._currw-self._canvas._lastw\n dh = self._canvas._currh-self._canvas._lasth\n self._canvas.config(width=w,height=h)\n if dw or dh:\n self._canvas.move('all',dw/2.0,dh/2.0)\n _Context.Instance().refresh()", "title": "" }, { "docid": "08ee0081ae2e0bf7f48c7945a48f2469", "score": "0.668153", "text": "def resizeEvent(self, event):\n self.rubberBand.resize(self.size())", "title": "" }, { "docid": "eeeac7b25cdcfed60e28199d5017eff8", "score": "0.6678866", "text": "def resizeEvent(self, event):\n\n self.settings.setValue(\"geometry\", self.saveGeometry())\n\n super().resizeEvent(event)", "title": "" }, { "docid": "4bd6d87d7ac96ca7e0c292a5062731b4", "score": "0.66657233", "text": "def resize_if_necessary(self):\n self.toplinesizer.Layout() # misschien is dit bij wx variant nodig terwijl het bij de qt\n # variant vanzelf gebeurt?", "title": "" }, { "docid": "98fdfcb7a638d72fc060ce5922fdafd3", "score": "0.66656166", "text": "def _resize(self, event):\n old_width, old_height = self.mne.fig_size_px\n new_width, new_height = self._get_size_px()\n new_margins = _calc_new_margins(\n self, old_width, old_height, new_width, new_height\n )\n self.subplots_adjust(**new_margins)\n # zen mode bookkeeping\n self.mne.zen_w *= old_width / new_width\n self.mne.zen_h *= old_height / new_height\n self.mne.fig_size_px = (new_width, new_height)\n self.canvas.draw_idle()", "title": "" }, { "docid": "516dc86dd08c1e76c44aa5bdb407c9d9", "score": "0.6665127", "text": "def OnSize(self, event):\n\n self.Invalidate()", "title": "" }, { "docid": "8308f12be2971d528cc016adc2d3f0e5", "score": "0.6586651", "text": "def on_resize(self, width, height):\n\n glViewport(0, 0, width, 
height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(60., float(width)/height, 0.05, 1000)\n glMatrixMode(GL_MODELVIEW)\n return True", "title": "" }, { "docid": "767c0ccd83762b649a2abf0867c1c0ea", "score": "0.65726393", "text": "def onFrameSizeUpdate(self, frameWidth, frameHeight):\n\n pass", "title": "" }, { "docid": "60087bfb733c4f0c559c49d433812d22", "score": "0.65127563", "text": "def _resize_and_draw(self, event=None):\n self.draw(self.canvas.winfo_width(), self.canvas.winfo_height())", "title": "" }, { "docid": "2ef79f855c9cf3b6702343152576f51e", "score": "0.6495703", "text": "def OnResize(self, *unused):\n g = self.parent.geometry()\n if g != self._geometry and self.player:\n u, v = self.player.video_get_size() # often (0, 0)\n if v > 0 and u > 0:\n # get window size and position\n g, x, y = g.split('+')\n w, h = g.split('x')\n if u > v: # ... for landscape\n # adjust the window height\n h = round(float(w) * v / u)\n else: # ... for portrait\n # adjust the window width\n w = round(float(h) * u / v)\n self.parent.geometry(\"%sx%s+%s+%s\" % (w, h, x, y))\n self._geometry = self.parent.geometry() # actual", "title": "" }, { "docid": "4536929c84f174970f4b31c8fba5f4e9", "score": "0.64822936", "text": "def x_resize(self, event):\n self._translator.resize(self.width, self.height)\n self.width = self.winfo_width()\n self.height = self.winfo_height()\n self._translator = CoordinateTranslator(\n self.width, self.height, len(self.data.get_temperature()))\n self._controller.update_plot()", "title": "" }, { "docid": "b8517a32cd49493e53876a3de6cce31a", "score": "0.6458348", "text": "def on_resize_parentx(self,event):\n ##print(\"parent event size=\"+str(event.width)+\" X \"+str(event.height))\n self.canvas_width = event.width\n self.canvas.config(width=self.canvas_width)\n self.show_image()", "title": "" }, { "docid": "b2ff4d0dc4d8619c11b2c60574bc7a6e", "score": "0.6424668", "text": "def on_resize(self, width, height):\n super(GUI, self).on_resize(width, height)\n self.slack_slider.width = width - self.GUI_PADDING * 2\n self.beta_slider.width = width - self.GUI_PADDING * 2\n\n height -= self.GUI_HEIGHT\n if height <= 0:\n return\n\n drawing_width, drawing_height = 400, 400\n if drawing_height == 0 or drawing_height == 0:\n return\n display_aspect = width / float(height)\n drawing_aspect = drawing_width / float(drawing_height)\n if drawing_aspect > display_aspect:\n self.drawing_width = width\n self.drawing_height = width / drawing_aspect\n else:\n self.drawing_height = height\n self.drawing_width = height * drawing_aspect\n self.drawing_x = (width - self.drawing_width) / 2\n self.drawing_y = (height - self.drawing_height) / 2 + self.GUI_HEIGHT", "title": "" }, { "docid": "3e7cb0fe569ed3d32a6b0fc2d77fda15", "score": "0.641843", "text": "def resizeEvent(self, event):\n width = event.size().width()\n self.tb_width = (0.5 * width) - 50\n self.table.setColumnWidth(0, 0.30 * self.tb_width)\n self.table.setColumnWidth(1, 0.40 * self.tb_width)\n self.table.setColumnWidth(2, 0.15 * self.tb_width)\n self.table.setColumnWidth(3, 0.15 * self.tb_width)\n\n # Important don't delete it\n QMdiSubWindow.resizeEvent(self, event)", "title": "" }, { "docid": "79348bf47a0d3b8d5228dc1ae55a6d79", "score": "0.6415301", "text": "def _gVideoResizeEvent(self, event):\n if self.imgdisplayed is not None:\n self._displayimage(self.imgdisplayed)\n QtGui.QGraphicsView.resizeEvent(self.ui.gVideo, event)", "title": "" }, { "docid": "740b878ca5f067022bc8d1cfba8d3edd", "score": "0.64054227", "text": "def 
_resize(self, event):\n h,w = Graph.reset(self)\n w_spacing = w/len(self._tf)\n Graph.draw_axes(h, w, 1)\n self._draw_connections(h, w_spacing) \n self._draw_points(h, w_spacing)\n print(\"Graph Displayed\") # Confirmation", "title": "" }, { "docid": "7381a0437c1d6e9fa2ba36e43780d253", "score": "0.640094", "text": "def _size ( self, event ):\r\n super( ThemedControl, self )._size( event )\r\n self.updated = True", "title": "" }, { "docid": "319a1a9d95fd7186cfe24bd94ba0752d", "score": "0.6357787", "text": "def _gVideoResizeEvent(self, event):\n if self.imgdisplayed is not None:\n self._displayimage(self.imgdisplayed, -1.0, -1.0)\n QtGui.QGraphicsView.resizeEvent(self.ui.gVideo, event)", "title": "" }, { "docid": "81339b4eaa40284baa751d36b35cda75", "score": "0.63368255", "text": "def resize(self, size: QSize) -> None:\n print(f'QmlOffscreenRenderer.resize: {self.size} → {size}, {self.initialized}')\n if not self.initialized or self.size == size:\n return\n\n self.size = size\n\n if self.rootItem and self.ctx.makeCurrent():\n self._destroyFrameBuffer()\n self._createFrameBuffer()\n self._updateSizes()", "title": "" }, { "docid": "10c13849592b0ef2fc55b5fa3c6f8058", "score": "0.63337874", "text": "def resize(self, event):\n new_width = event.width - 30\n self.set_notebook_width(new_width)\n self.set_scroll_region(event.height - 30)", "title": "" }, { "docid": "c7ac4bf604238ace97a58964a8f8ccf9", "score": "0.6289703", "text": "def resizeEvent(self, event):\n dim = event.size()\n self.height1 = dim.height()\n self.width1 = dim.width()\n wscale = self.width1 / 800\n hscale = self.height1 / 600\n tmpy = 80 * hscale\n tmpx = 170 * wscale\n self.genderBox.setGeometry(tmpx, tmpy, 470, 35)\n tmpy = 10\n tmpx = self.width1 - 160\n self.helpBttn.setGeometry(tmpx, tmpy, 150, 50)\n tmpy = 20\n tmpwidth = self.width1 - 20\n self.titleLbl.setGeometry(10, 30, tmpwidth, 40)\n tmpx = 110 * wscale\n tmpy = 160 * hscale\n tmpwidth = 600 * wscale\n tmpheight = 300 * hscale\n self.declList.setGeometry(tmpx, tmpy, tmpwidth, tmpheight)\n tmpy = self.height1 - 70\n tmpx = 10\n self.cancelBttn.setGeometry(tmpx, tmpy, 150, 50)\n tmpx = self.width1 - 170\n self.height1 - 70\n self.acceptBttn.setGeometry(tmpx, tmpy, 150, 50)\n tmpos = self.pos()\n self.winx = tmpos.x()\n self.winy = tmpos.y()\n self.geometry = list([self.winx, self.winy, self.width1, self.height1])", "title": "" }, { "docid": "eeb02c61fed2e23771a400b03222b812", "score": "0.6271389", "text": "def size_changed(self, size_x, size_y):\n size_x = max(1, size_x)\n size_y = max(1, size_y)\n self.__size_x = size_x\n self.__size_y = size_y\n self.display.SetSize(size_x, size_y)\n self.render()", "title": "" }, { "docid": "fc3b393ea6e0d1df9caf41ec780d9a44", "score": "0.6241969", "text": "def resize(self):\n self.msg += \"Resizing figure, deleting keys from layout\\n\"\n for key in [\"width\", \"height\", \"autosize\", \"margin\"]:\n try:\n del self.plotly_fig[\"layout\"][key]\n except (KeyError, AttributeError):\n pass", "title": "" }, { "docid": "6e236d9341a5c1f2e7623b3439e95367", "score": "0.6215719", "text": "def update_sizes(self) -> None:\n if setup.screen_size.changed():\n self.re_setup_menu()\n self.background_x_mult = setup.screen_size.get_width() * c.BACKGROUND_X_SCALER\n self.background_y_mult = setup.screen_size.get_height() * c.BACKGROUND_Y_SCALER\n self.menu_y = int(setup.screen_size.get_height() / 3.5)\n\n self.view = pygame.Rect((0, 0), (setup.screen_size.get_width(), setup.screen_size.get_height()))\n\n size_delta = 
(int(self.background_rect.width*self.background_x_mult), int(self.background_rect.height*self.background_y_mult))\n self.background = pygame.transform.scale(self.background, size_delta)", "title": "" }, { "docid": "bc5ab447eed83314bd2a1f28b38351f2", "score": "0.6205133", "text": "def resizedGfx(self):\n self.w = int(self.canvas.clientWidth)\n self.h = int(self.canvas.clientHeight)", "title": "" }, { "docid": "e90d9ef5892444d26549747e86a1306c", "score": "0.6190203", "text": "def resizeEvent(self, event):\r\n QtGui.QTableView.resizeEvent(self, event)\r\n\r\n parent = self.parent()\r\n if (not self._initial_size and parent and\r\n (self.isVisible() or isinstance(parent, QtGui.QMainWindow))):\r\n self._initial_size = True\r\n self.resizeColumnsToContents()", "title": "" }, { "docid": "146c985e1a08e8f8bec8dd591483fc63", "score": "0.61643076", "text": "def on_layout_requested(self, event):\n self.size_hint_updated()", "title": "" }, { "docid": "6ebff4e313aad81d4f722cd26f90b81f", "score": "0.61550546", "text": "def on_size_change(self,event):\n self.panel_width,self.panel_height = event.GetSize()\n print (\"Width =\",self.panel_width,\"Height =\",self.panel_height)\n w = self.panel_width\n h = self.panel_height\n try:\n self.fields['graph0'].im_size_show = (int((w-384)/2),int(h/5))\n self.fields['graph1'].im_size_show = (int((w-384)/2),int(h/5))\n self.fields['graph2'].im_size_show = (int((w-384)/2),int(h/5))\n self.fields['graph3'].im_size_show = (int((w-384)/2),int(h))\n except:\n pass\n\n #self.panel.SetSizer(self.sizer_main)\n #self.sizer_main.Fit(self)\n self.Layout()\n self.panel.Layout()\n #self.panel.Fit()", "title": "" }, { "docid": "b8499e141555fb4866c20c6646573b4b", "score": "0.61459696", "text": "def resizeEvent(self, event):\r\n\r\n QtGui.QTableView.resizeEvent(self, event)\r\n\r\n if self._editor.auto_size:\r\n self.resizeColumnsToContents()\r\n self.resizeRowsToContents()\r\n\r\n else:\r\n parent = self.parent()\r\n if (not self._initial_size and parent and\r\n (self.isVisible() or isinstance(parent, QtGui.QMainWindow))):\r\n self._initial_size = True\r\n if self._editor.auto_size:\r\n self.resizeColumnsToContents()\r\n self.resizeRowsToContents()", "title": "" }, { "docid": "ef846209dbfd3d9fc5a9324419e1385e", "score": "0.61013967", "text": "def resize(self, new_rows, new_cols):\n\t\tnew_x_res = self.resolution[0] * new_rows / self.rows\n\t\tnew_y_res = self.resolution[1] * new_cols / self.cols\n\n\t\tself.resolution = [new_x_res, new_y_res]\n\t\tself.window.resize(*self.resolution)", "title": "" }, { "docid": "b3c1d66ccc21a2c314042f141d9a18a2", "score": "0.6077875", "text": "def resize(self, width, height):\n pass", "title": "" }, { "docid": "045f094872f47dd66c8bbe6863c9d1b2", "score": "0.6064716", "text": "def on_resizeButton_clicked(self):\n for column in range(self.callTrace.columnCount()):\n self.callTrace.resizeColumnToContents(column)", "title": "" }, { "docid": "0ad3a84f114e68b65e97b694f3ea8d3f", "score": "0.60419774", "text": "def on_drag_end(self, widget, context):\n self.pane_resize()", "title": "" }, { "docid": "fe81904f286db52a086c18e29e441e1f", "score": "0.6028545", "text": "def display_size_update(self, rect_):\r\n self.screen_size = rect_", "title": "" }, { "docid": "d34e6c579919a174d447f27bde2081c3", "score": "0.60209614", "text": "def _onCanvasFrameDimension(self, event):\n canvas = self.getCanvas()\n frame = self.getCanvasFrame()\n\n canvasHeight = event.height\n canvasWidth = event.width\n\n # Get the new canvas dimensions\n if canvasWidth <= 
frame.winfo_reqwidth():\n canvasWidth = frame.winfo_reqwidth()\n if canvasHeight <= frame.winfo_reqheight():\n canvasHeight = frame.winfo_reqheight()\n\n if self.__canvasFrameDimension == \"both\":\n canvas.itemconfig(self.canvasWindow, height=canvasHeight,\n width=canvasWidth)\n elif self.__canvasFrameDimension == \"x\":\n canvas.itemconfig(self.canvasWindow, width=canvasWidth)\n elif self.__canvasFrameDimension == \"y\":\n canvas.itemconfig(self.canvasWindow, height=canvasHeight)", "title": "" }, { "docid": "906ff98be964101bb8aa857191ea7757", "score": "0.60031354", "text": "def menuBarResize(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "20391e7e2e6acd0158cee5dfd73ba9b8", "score": "0.5998202", "text": "def add_resize_listener_func(self, func):\n self._resize_listeners.append(func)", "title": "" }, { "docid": "2be01e634f952434a009ec775473f7ea", "score": "0.59910166", "text": "def window_configure(self, *args):\n\n self.window_size = args[0].get_size()\n self.apply_dynamic_scaling_all(self.window_size[0], self.window_size[1]) # todo: add std. win. width and height\n self.apply_dynamic_font_all(self.default_font_factor, self.window_size[1])", "title": "" }, { "docid": "936e2e4a94e861497dcd9000a29bd1b4", "score": "0.59852576", "text": "def resize_empty_screen(self, wid, hig):\n self.SetSize(wid, hig)", "title": "" }, { "docid": "8f391f9884e7abedf1493413596def64", "score": "0.5976079", "text": "def showEvent(self, event):\r\n QtGui.QSplitter.showEvent(self, event)\r\n if not self._initialized:\r\n self._initialized = True\r\n self._resize_items()", "title": "" }, { "docid": "7486d222cf4bbf74876fa53899794e7b", "score": "0.5961208", "text": "def check_resize(game, event):\n # get new screen width and height\n w, h = event.dict['size']\n # place aspect ratios in settings\n w_ratio = 16\n h_ratio = 9\n #find how many pixels in new screen size\n w_pxls = w / w_ratio\n h_pxls = h / h_ratio\n\n # ensure aspect ratio is maintained\n if w_pxls < h_pxls:\n w = int(h_pxls * w_ratio)\n elif h_pxls < w_pxls:\n h = int(w_pxls * h_ratio)\n if w <= 400 and h <= 225:\n print('screen lower than minimum')\n w = 400\n h = 225\n\n print('w: ' + str(w))\n print('h: ' + str(h))\n game.resize_percentage = round(w / game.DISPLAY_W, 2)\n print('rp: ' + str(game.resize_percentage))\n game.DISPLAY_H = h\n game.DISPLAY_W = w\n game.resize_displays()\n game.mid_h = h / 2\n game.mid_w = w / 2\n game.resize_fonts()\n\n\n game.ball.resize()\n game.bat.resize()\n game.obstacle.resize()\n game.scoreboard.resize()\n\n game.menu.resize_main()\n game.main_menu.resize()\n game.options.resize()\n game.credits.resize()\n game.create_user.resize()\n game.select_user.resize()\n game.game_over_screen.resize()", "title": "" }, { "docid": "0d242c0e99915717ea4f76fb5b7b4e15", "score": "0.59186184", "text": "def reset_original_size(self):\n self.set_view()\n self.emit_signal_if_scene_rect_changed() # sends signal", "title": "" }, { "docid": "0c8630dda03334cb6916818e98635056", "score": "0.5907763", "text": "def check_resize(event):\n global size, size_orig\n global width, w_orig, w_offset\n global height, h_orig, h_offset\n global RESIZE\n size_orig = event.size\n w_orig = event.w\n h_orig = event.h\n width = min(event.w, int(event.h * 1920 / 1080))\n height = int(width * 1080 / 1920)\n size = (width, height)\n h_offset = int((h_orig - height) / 2)\n w_offset = int((w_orig - width) / 2)\n RESIZE = True", "title": "" }, { "docid": "4d2bb11adfe92cce282971765ac05a60", "score": "0.5893002", "text": "def window_size(self, 
window_size):\n self.render_window.SetSize(window_size[0], window_size[1])\n self._window_size_unset = False\n self.render()", "title": "" }, { "docid": "6494aaadc5f569c3e9736fc7623f3f2e", "score": "0.58919287", "text": "def invalidate(self):\n super(QDockFrameLayout, self).invalidate()\n self._size_hint = QSize()\n self._min_size = QSize()\n self._max_size = QSize()", "title": "" }, { "docid": "9f52d62edab650da2834913db11606ab", "score": "0.5888416", "text": "def resize(self, widget):\r\n width = widget.width\r\n height = widget.height\r\n \r\n if self.dimension is BOTH or self.dimension is HORIZONTAL:\r\n width = self.getWidth(widget)\r\n if self.dimension is BOTH or self.dimension is VERTICAL:\r\n height = self.getHeight(widget)\r\n \r\n widget.resizeTo(width, height)", "title": "" }, { "docid": "78c29e227ed00cd37234280ebe2c7ac7", "score": "0.58856606", "text": "def hideEvent(self, event):\n\n # -- Store the current window size in our scribble settings\n self.storeWidgetGeometry()", "title": "" }, { "docid": "093526c047552ec8af2d110045808bbe", "score": "0.58665204", "text": "def _on_size_allocate(self, widget, event, data=None):\n\n x, y, width, height = self.get_allocation()\n if width > self.edit_region_width:\n margin = (width - self.edit_region_width) / 2\n self.set_left_margin(margin)\n self.set_right_margin(margin)", "title": "" }, { "docid": "36013738a4943a38612fd19c3cb76be4", "score": "0.5863162", "text": "def setFrameDimensions(self):\n if self.__canvasFrameDimension is not None:\n self.getCanvas().bind('<Configure>', self._onCanvasFrameDimension)", "title": "" }, { "docid": "6964f69afe6d40cf51480088047c1831", "score": "0.58570856", "text": "def resizeEvent(self, resize_event):\n\n if self.image is not None:\n if self._oversize:\n scaled_image_size = resize_event.size()\n else:\n scaled_image_size = resize_event.size().boundedTo(self.image.size())\n\n self.scaled_image = self.image.scaled(scaled_image_size, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)", "title": "" }, { "docid": "6144815fbe4f933a0cfc9a95064007f0", "score": "0.5837666", "text": "def configureWindow(self):\r\n self.master.title(self.title)\r\n self.master.geometry(\"{0}x{1}+0+0\".format(self.master.winfo_screenwidth()\r\n ,self.master.winfo_screenheight()))\r\n self.master.resizable(width=False, height=False)", "title": "" }, { "docid": "216ba54cd53ccb9a634cd6f26f522054", "score": "0.583537", "text": "def scaleFitWindow(self):\r\n e = 2.0 # So that no scrollbars are generated.\r\n w1 = self.dock.width() - e\r\n h1 = self.dock.height() - e\r\n a1 = w1/ h1\r\n # Calculate a new scale value based on the pixmap's aspect ratio.\r\n w2 = self.canvas.pixmap.width() - 0.0\r\n h2 = self.canvas.pixmap.height() - 0.0\r\n a2 = w2 / h2\r\n #logging.info('dock_w:%d h:%d \\n canvas_w:%d h:%d'%(w1,h1,w2,h2))\r\n return w1 / w2 if a2 >= a1 else h1 / h2", "title": "" }, { "docid": "62c465f439e45456f9ec470d28561e28", "score": "0.5824469", "text": "def _doResize(self):\n\n if not self: # avoid a PyDeadObject error\n return\n\n if self.GetSize().height < 32:\n return # avoid an endless update bug when the height is small.\n\n numCols = self.GetColumnCount()\n if numCols == 0:\n return # Nothing to resize.\n\n col_widths = [(self._GetMinColWidth(col)\n if col in self._autosize_columns\n else self.GetColumnWidth(col))\n for col in range(numCols)]\n\n # We're showing the vertical scrollbar -> allow for scrollbar width\n # NOTE: on GTK, the scrollbar is included in the client size, but on\n # Windows it is not included\n 
listWidth = self.GetClientSize().width\n\n if sum(col_widths) < listWidth:\n col_widths[self._resizeCol] += listWidth - sum(col_widths)\n\n for col, width in enumerate(col_widths):\n self.SetColumnWidth(col, width)", "title": "" }, { "docid": "af7f28a236c5d321a09ca6faa0012492", "score": "0.5818246", "text": "def _size ( self, event ):\r\n control = self.control\r\n if control is not None:\r\n control.Layout()\r\n control.Refresh()", "title": "" }, { "docid": "d9bdf44eaf481aa6685448cd86b742ba", "score": "0.5809648", "text": "def resize(width, height):\n wnd().resizeTo(width, height)", "title": "" }, { "docid": "0125be69f72d9d9da29e7e6eff6b42cb", "score": "0.5807572", "text": "def getAutoResize(self, **kwargs):\n \n pass", "title": "" }, { "docid": "c3984a6b1f6a93902ba951db08806507", "score": "0.57903713", "text": "def shell_icon_size_changed(self, icon_size):\n pass", "title": "" }, { "docid": "8f7774ce3f335cbfed4d675e2565b698", "score": "0.5752382", "text": "def resizeEvent(self, event: QResizeEvent):\n text = self.normal_text()\n\n if self.get_text_width(text, 0.7) > self.label.frameGeometry().width():\n text = self.medium_text()\n\n if self.get_text_width(text, 0.5) > self.label.frameGeometry().width():\n text = self.small_text()\n\n self.label.setText(text)\n\n self.detailed_view.resizeEvent(event)", "title": "" }, { "docid": "5e327a221a51c66eb1801ae08f2f0522", "score": "0.574986", "text": "def ammolist_resize(self, event):\n\n if not self.AmmoList.GetColumnCount():\n return\n\n column_width = self.AmmoList.GetClientSize()[0] - 4\n self.AmmoList.SetColumnWidth(0, column_width / 3)\n self.AmmoList.SetColumnWidth(1, column_width / 3)\n self.AmmoList.SetColumnWidth(2, column_width / 3)", "title": "" }, { "docid": "d4a9b701667845babad538989c65ff66", "score": "0.5746227", "text": "def set_window_size(self, width, height):\n print(width, height)\n gl.glViewport(0, 0, width, height)\n self.bind_size(width, height)", "title": "" }, { "docid": "9df84fc420cd08c543ff35dbcde025e9", "score": "0.5743378", "text": "def sizeHint(self):\n return QtCore.QSize(800, 600)", "title": "" }, { "docid": "be1b27a1596479a0f7f3018a30000d30", "score": "0.5731725", "text": "def iconSizeChanged(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "c49d481da4e0a8bbabeed1bf1864c85e", "score": "0.5697398", "text": "def resize_ui_components(self, PICK):\n self.set_column_widths_log_entry_tab()\n self.set_column_widths_event_tab()\n self.set_column_widths_vector_view_tab()", "title": "" }, { "docid": "88281e9f972587e010976ecb5f654e27", "score": "0.56946164", "text": "def resizeEvent(self, event):\r\n if self._editor.factory.word_wrap:\r\n for i in range(self.topLevelItemCount()):\r\n mi = self.indexFromItem(self.topLevelItem(i))\r\n id = self.itemDelegate(mi)\r\n id.sizeHintChanged.emit(mi)\r\n super(self.__class__, self).resizeEvent(event)", "title": "" } ]
a926b398d55b0d419a47a274dabe7003
Return distbelief conditional_gradient values. Return values been generated from the distbelief conditional_gradient unittest, running with a learning rate of 0.1 and a lambda_ of 0.1. These values record how a parameter vector of size 10, initialized with 0.0, gets updated with 10 consecutive conditional_gradient steps. It uses random gradients.
[ { "docid": "28f73ae6b769b9399f6857f05c9b9ce7", "score": "0.0", "text": "def _dbParamsCG01(self):\n db_grad = [[]] * 10\n db_out = [[]] * 10\n # pylint: disable=line-too-long\n db_grad[0] = [\n 0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018,\n 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615\n ]\n db_out[0] = [\n -4.1555551e-05, -7.7334875e-03, -4.0554531e-02, -1.7870162e-02,\n -2.2895107e-02, -4.0231861e-02, -3.3951234e-02, -2.1599628e-02,\n -2.3891762e-02, -4.1753378e-02\n ]\n db_grad[1] = [\n 0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111,\n 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521\n ]\n db_out[1] = [\n -0.00961733, -0.0507779, -0.01580694, -0.01599489, -0.03470477,\n -0.01264373, -0.03443632, -0.05546713, -0.01140388, -0.01665068\n ]\n db_grad[2] = [\n 0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533,\n 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311\n ]\n db_out[2] = [\n -0.02462724, -0.03699233, -0.03154434, -0.03153357, -0.00876844,\n -0.05606323, -0.02447166, -0.03469437, -0.0124694, -0.01829169\n ]\n db_grad[3] = [\n 0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181,\n 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648\n ]\n db_out[3] = [\n -0.04124615, -0.03371741, -0.0144246, -0.03668303, -0.02240246,\n -0.02052062, -0.03503307, -0.00500922, -0.03715545, -0.0393002\n ]\n db_grad[4] = [\n 0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469,\n 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287\n ]\n db_out[4] = [\n -0.01979208, -0.0380417, -0.03747472, -0.0305847, -0.00779536,\n -0.04024222, -0.03156913, -0.0337613, -0.02578116, -0.03148952\n ]\n db_grad[5] = [\n 0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245,\n 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229\n ]\n db_out[5] = [\n -0.01555188, -0.04084422, -0.01573331, -0.04265549, -0.01000746,\n -0.02740575, -0.04412147, -0.02341569, -0.0431026, -0.02502293\n ]\n db_grad[6] = [\n 0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976,\n 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251\n ]\n db_out[6] = [\n -0.01890448, -0.00767214, -0.03367592, -0.01962219, -0.02374279,\n -0.05110247, -0.05128598, -0.01254396, -0.04094185, -0.00703416\n ]\n db_grad[7] = [\n 0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942,\n 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922\n ]\n db_out[7] = [\n -0.03772914, -0.01599993, -0.00831695, -0.02635719, -0.01207801,\n -0.01285448, -0.05034328, -0.01104364, -0.04477356, -0.04558991\n ]\n db_grad[8] = [\n 0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718,\n 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227\n ]\n db_out[8] = [\n -0.03919835, -0.01970845, -0.04187151, -0.03195836, -0.03546333,\n -0.01999326, -0.02899324, -0.01083582, -0.04472339, -0.01725317\n ]\n db_grad[9] = [\n 0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063,\n 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711\n ]\n db_out[9] = [\n -0.04510314, -0.04282944, -0.0147322, -0.0111956, -0.04617687,\n -0.00535998, -0.0442614, -0.03158399, -0.01207165, -0.00736567\n ]\n # pylint: enable=line-too-long\n return db_grad, db_out", "title": "" } ]
[ { "docid": "36c653aeb3b687e085e9534cdbd33142", "score": "0.608587", "text": "def value_gradient():\n # sess.run(calculated) to calculate value of state\n state = tf.placeholder(\"float\",[None,4])\n w1 = tf.get_variable(\"w1\",[4,10])\n b1 = tf.get_variable(\"b1\",[10])\n h1 = tf.nn.relu(tf.matmul(state,w1) + b1)\n w2 = tf.get_variable(\"w2\",[10,1])\n b2 = tf.get_variable(\"b2\",[1])\n calculated = tf.matmul(h1,w2) + b2\n\n # sess.run(optimizer) to update the value of a state\n newvals = tf.placeholder(\"float\",[None,1])\n diffs = calculated - newvals\n loss = tf.nn.l2_loss(diffs)\n optimizer = tf.train.AdamOptimizer(0.1).minimize(loss)", "title": "" }, { "docid": "7031d5b8e1d0033983741d80f26d07ae", "score": "0.5833809", "text": "def gradient(self, y_true, y_pred):\n predictions = np.clip(y_pred, self.epsilon, 1. - self.epsilon)\n return - y_true / predictions", "title": "" }, { "docid": "fd4d2d98de481b90a7100fc253e4a2b2", "score": "0.5808814", "text": "def test_kernel_gradient():\n gamma = 0.1\n x, x_ref = X[0], X[1]\n my = rbf_kernel_gradient(gamma, x[np.newaxis, :], x_ref[np.newaxis, :])[:, 0]\n numerical = []\n for i in range(D):\n h = np.zeros_like(x)\n h[i] += 1e-3\n x_f, x_b = x+h, x-h\n partial_gd = (rbf_kernel(gamma, x_f[np.newaxis, :], x_ref[np.newaxis, :]) - \\\n rbf_kernel(gamma, x_b[np.newaxis, :], x_ref[np.newaxis, :]))[0, 0] / 2e-3\n numerical.append(partial_gd)\n assert_array_almost_equal(np.array(numerical), my, 6)", "title": "" }, { "docid": "67a580bb89366d8230a68aaf7a1a3f10", "score": "0.57066965", "text": "def _betainc_custom_gradient(a, b, x):\n return _betainc_naive(a, b, x)", "title": "" }, { "docid": "f3a420da11bbc458ec19d7a5832d36e5", "score": "0.5667132", "text": "def get_gradient(lambdas, data, index_info):\n\n model = get_train_model(lambdas, index_info)\n transition_grad = get_transition_gradient(model, data)\n feature_grad = get_feature_gradient(model, data)\n gradient = np.concatenate((transition_grad, feature_grad))\n\n return gradient * -1", "title": "" }, { "docid": "ced6a91e2de5e96ab5686cf1e17a9f48", "score": "0.56448185", "text": "def compute_value_gradient(self, real_value):\r\n\r\n return (real_value - self.value) * RatesNeuralNetwork.sigmoid(self.value)", "title": "" }, { "docid": "22d5dbec41e21f1e125bbc72de08df30", "score": "0.5629934", "text": "def value_gradient(self):\n\n # lower and upper bound brightnesss\n lower_blue = np.array([0, 0, 170])\n upper_blue = np.array([255, 255, 255])\n light_hsv = cv2.inRange(self.hsv, lower_blue, upper_blue)\n\n # lower and upper bound darkness\n lower_blue = np.array([0, 0, 0])\n upper_blue = np.array([255, 255, 80])\n dark_hsv = cv2.inRange(self.hsv, lower_blue, upper_blue)\n\n light = cv2.countNonZero(light_hsv)\n dark = cv2.countNonZero(dark_hsv)\n height, width, _ = self.img.shape\n size = height*width\n return light/size, dark/size", "title": "" }, { "docid": "07b1030573e2d14f0b6489fc7c95cd05", "score": "0.56227076", "text": "def test_gradient_param(self, setup):\n sess, tri, trinp, test_points = setup\n\n # Disable project\n trinp.project = True\n tri.project = True\n\n x = tf.placeholder(tf.float64, [1, 2])\n\n true_gradient = trinp.parameter_derivative(test_points)\n true_gradient = np.array(true_gradient.todense())\n\n y = tri(x)\n grad_tf = tf.gradients(y, tri.parameters)[0]\n dense_gradient = np.zeros(true_gradient[0].shape, dtype=np.float)\n\n for i, test in enumerate(test_points):\n gradient = sess.run(grad_tf, feed_dict={x: test[None, :]})\n dense_gradient[:] = 0.\n 
dense_gradient[gradient.indices] = gradient.values[:, 0]\n assert_allclose(dense_gradient, true_gradient[i])", "title": "" }, { "docid": "3fe49c4cb4f3f79bad0731182a1867f5", "score": "0.5619189", "text": "def fairness_parameter_gradient(self, **optimization_target_args):\n pass", "title": "" }, { "docid": "fa53087e1e82789100184e0617bff0db", "score": "0.5583061", "text": "def model_parameter_gradient(self, **optimization_target_args):\n gradient = -self.utility_function.gradient(**optimization_target_args)\n fairness_gradient = self.fairness_function.gradient(**optimization_target_args)\n\n if self.error_delta == 0.0:\n grad_fairness = self.fairness_rate * fairness_gradient\n else:\n lambda1, lambda2 = self.fairness_rate\n grad_fairness = lambda1 * -fairness_gradient + lambda2 * fairness_gradient\n\n gradient += grad_fairness\n return gradient", "title": "" }, { "docid": "c1afe73e4a20aa221683b000e737ee7d", "score": "0.55646396", "text": "def computeGradients(X,Y,Ws,bs,lambdaValue):\n sizeD = X.shape[0]\n netSize = len(Ws)\n assert netSize>=2, \"network size must be greater than 2\"\n\n #forward pass\n scores,hiddenlayers,P = evaluateClassifier(X,Ws,bs)\n\n #backward pass\n dWs = []\n dbs = []\n\n #gradient on final scores\n dscores = P#N*K\n dscores -=Y#N*K\n dscores /= sizeD\n\n #from last score to\n for i in range(netSize-1):\n dWi = np.dot(hiddenlayers[netSize-i-2].T,dscores)#netSize must be greater than 2\n dbi = np.sum(dscores,axis=0,keepdims=False)\n dh = np.dot(dscores,Ws[netSize-i-1].T)\n dh[hiddenlayers[netSize-i-2]<=0] = 0\n dscores = dh\n dWs.append(dWi)\n dbs.append(dbi)\n\n #backpropagate to W1 and b1\n dW1 = np.dot(X.T,dscores)\n db1 = np.sum(dscores,axis=0,keepdims=False)\n dWs.append(dW1)\n dbs.append(db1)\n\n # reverse dWs and dbs\n dWs.reverse()\n dbs.reverse()\n\n #reg grad for Ws\n for i in range(netSize):\n dWs[i] += lambdaValue*Ws[i]\n return dWs,dbs", "title": "" }, { "docid": "5afac6e58dd09cb6076be6bcf6052b41", "score": "0.554397", "text": "def policy_gradient():\n # Placeholders for state and prefered action\n state = tf.placeholder(\"state\",[None,4])\n actions = tf.placeholder(\"float\",[None,2])\n # Variable for parameters 4 inputs, 2 output(one-hot)\n params = tf.get_variable(\"policy_parameters\",[4,2])\n # Layer\n y = tf.matmul(state,params)\n # Output\n probabilities = tf.nn.softmax(y)\n # Multiply probabilitie for each action by the prefered action.\n good_probabilities = tf.reduce_sum(tf.mul(probabilities, actions), axis=[1])\n # maximize the log probability\n log_probabilities = tf.log(good_probabilities)\n loss = -tf.reduce_sum(log_probabilities)\n optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)", "title": "" }, { "docid": "8012067aa8c5f69a256803137263d0a0", "score": "0.55414635", "text": "def gradient(self, y_true, y_pred):\n pass", "title": "" }, { "docid": "f266d75e7aad7f0883cf8b62b51c3b05", "score": "0.5537623", "text": "def helper_gradient(function,gradient,sample,\n eps=np.sqrt(np.finfo(float).eps)):\n value = function(sample)\n num_dims = sample.shape[0]\n num_qoi = value.shape[0]\n grad = np.empty((num_dims,num_qoi),float)\n for i in range(num_dims):\n perturbed_sample = sample.copy()\n perturbed_sample[i] += eps\n perturbed_value = function(perturbed_sample)\n grad[i,:] = (perturbed_value-value)/eps\n assert np.allclose(gradient(sample).squeeze(),grad.squeeze(),atol=eps*10)", "title": "" }, { "docid": "0c60b4a00911bc4807ef1222965caa6a", "score": "0.5501193", "text": "def _define_gradients(self):\n dWax = np.zeros_like(self.Wax)\n dWaa = 
np.zeros_like(self.Waa)\n dWya = np.zeros_like(self.Wya)\n\n db = np.zeros_like(self.b)\n dby = np.zeros_like(self.by)\n\n da_next = np.zeros_like(self.hidden_list[0])\n\n return dWax, dWaa, dWya, db, dby, da_next", "title": "" }, { "docid": "6156756e6cff93fe5010aa5e1f279b18", "score": "0.5468707", "text": "def model_parameter_gradient(self, **optimization_target_args):\n gradient = -self.utility_function.gradient(**optimization_target_args)\n\n if self.fairness_rate > 0:\n fairness = self.fairness_function(**optimization_target_args)\n fairness_gradient = self.fairness_function.gradient(**optimization_target_args)\n\n grad_fairness = self.fairness_rate * fairness * fairness_gradient\n gradient += grad_fairness\n\n return gradient", "title": "" }, { "docid": "bf8ecef147b19e5ac6123fd2df6f4593", "score": "0.54677916", "text": "def build_loss_and_gradients(self, var_list):\n # Form dictionary in order to replace conditioning on prior or\n # observed variable with conditioning on a specific value.\n scope = tf.get_default_graph().unique_name(\"inference\")\n dict_swap = {z: qz.value()\n for z, qz in six.iteritems(self.latent_vars)}\n for x, qx in six.iteritems(self.data):\n if isinstance(x, ed.RandomVariable):\n if isinstance(qx, ed.RandomVariable):\n dict_swap[x] = qx.value()\n else:\n dict_swap[x] = qx\n\n p_log_prob = 0.0\n for z in six.iterkeys(self.latent_vars):\n z_copy = copy(z, dict_swap, scope=scope)\n p_log_prob += tf.reduce_sum(\n self.scale.get(z, 1.0) * z_copy.log_prob(dict_swap[z]))\n\n for x in six.iterkeys(self.data):\n if isinstance(x, ed.RandomVariable):\n if dict_swap:\n x_copy = copy(x, dict_swap, scope=scope)\n else:\n x_copy = x\n p_log_prob += tf.reduce_sum(\n self.scale.get(x, 1.0) * x_copy.log_prob(dict_swap[x]))\n\n reg_penalty = tf.reduce_sum(tf.losses.get_regularization_losses(scope=self.scope))\n loss = -p_log_prob + reg_penalty\n\n grads = tf.gradients(loss, var_list)\n grads_and_vars = list(zip(grads, var_list))\n return loss, grads_and_vars", "title": "" }, { "docid": "35beed0bd8bd7829dd314d8ea1eb0e8d", "score": "0.5445362", "text": "def feature_learning_gradient_based(self, v, eta, fps, tau_s, numSamples, f_regularize=lambda C: 0.):\n # Data contribution\n if isinstance(self.sig_obs, (float, np.floating)):\n sig_obs = self.sig_obs * np.ones(self.D * self.K)\n else:\n sig_obs = self.sig_obs\n dCdt_data = (sig_obs**(-2))[:,None] * ( np.outer(v, self.mu) - self.C @ ( np.outer(self.mu, self.mu) + self.Sig ) )\n dCdt_regularizer = (sig_obs**(-2))[:,None] * 1/numSamples * f_regularize(self.C)\n dC = eta / (fps * numSamples * tau_s) * ( dCdt_data + dCdt_regularizer )\n return self.C + dC", "title": "" }, { "docid": "efc3fdb00750d5c83aa83bfb5c41f217", "score": "0.54450005", "text": "def pgradient(self):\n configs = self._configscurrent\n nconf, nelec = configs.configs.shape[:2]\n na, nb = len(self.a_basis), len(self.b_basis)\n nup, ndown = self._mol.nelec\n\n # order of spin channel: upup,updown,downdown\n d_all, ij = configs.dist.dist_matrix(configs.configs)\n r_all = np.linalg.norm(d_all, axis=-1)\n bvalues = np.stack([b.value(d_all, r_all) for b in self.b_basis], axis=-1)\n inds = tuple(zip(*ij))\n b_2d_values = np.zeros((nelec, nelec, nconf, nb))\n for s, shape in enumerate([(nup, nup), (nup, ndown), (ndown, ndown)]):\n b_2d_values[inds] = bvalues.swapaxes(0, 1)\n\n a = self.a_values\n up, down = slice(0, nup), slice(nup, None)\n c_ders = np.zeros((nconf, self._mol.natm, na, na, nb, 3))\n einstr = \"inIk,jnIl,ijnm->nIklm\"\n c_ders[..., 0] = np.einsum(einstr, 
a[up], a[up], b_2d_values[up, up])\n c_ders[..., 1] = np.einsum(einstr, a[up], a[down], b_2d_values[up, down])\n c_ders[..., 2] = np.einsum(einstr, a[down], a[down], b_2d_values[down, down])\n c_ders += c_ders.swapaxes(2, 3)\n\n return {\"ccoeff\": 0.5 * c_ders}", "title": "" }, { "docid": "9e6b747e7ef5976647562955abcba7f6", "score": "0.5442318", "text": "def infer(self, potential, iterations, cf=True):\n energy = tf.zeros(shape=(10,), dtype=self.dtype)\n for i in tf.range(iterations):\n if cf:\n grd, energy = self.gradient_cf(potential)\n else:\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(self.trainable_variables) # only watch mu, sigma and rou\n energy = self.bethe_free_energy(potential)\n grd = tape.gradient(energy, self.trainable_variables)\n\n # umax = tf.reduce_max(tf.abs(grd[0]), axis=[1, 2, 3, 4, 5], keepdims=True)\n # omax = tf.reduce_max(tf.abs(grd[1]), axis=[1, 2, 3, 4, 5], keepdims=True)\n # pmax = tf.reduce_max(tf.abs(grd[2]), axis=[1, 2, 3, 4, 5], keepdims=True)\n # self.mu.assign(tf.minimum(tf.maximum(self.mu - grd[0] * (self.lr / umax), -1.), 1.))\n # self.sigma.assign(tf.minimum(tf.maximum(self.sigma - grd[1] * (self.lr / omax), 0.01), 10.))\n # self.rou.assign(tf.minimum(tf.maximum(self.rou - grd[2] * (self.lr / pmax), -0.99), 0.99))\n self.optimizer.apply_gradients(zip(grd, self.trainable_variables))\n\n if tf.equal(tf.math.mod(i, 50), 0) or tf.equal(i + 1, iterations):\n tf.print(tf.strings.format('iter: {} dmu = {}, dsigma = {}, drou = {}, dw={}, Energy = {}', (\n i, tf.reduce_mean(tf.abs(grd[0])), tf.reduce_mean(tf.abs(grd[1])), tf.reduce_mean(tf.abs(grd[2])),\n tf.reduce_mean(tf.abs(grd[3])), tf.reduce_mean(energy))))\n return energy", "title": "" }, { "docid": "67398bc9cb64005c9b5a40cfdbc7bd67", "score": "0.54416275", "text": "def _estimate_gradient(self, points, values, prediction_points):\n if self.surrogate is not None:\n self.surrogate.fit(prediction_points, values)\n self.surrogate.optimizations_number = 1\n prediction_function = self.surrogate.predict\n else:\n from scipy.interpolate import LinearNDInterpolator\n\n prediction_function = LinearNDInterpolator(\n prediction_points, values, fill_value=0\n ).__call__\n\n gradient_values = gradient(\n point=prediction_points,\n runmodel_object=prediction_function,\n order=\"first\",\n df_step=self.step_size,\n )\n return gradient_values", "title": "" }, { "docid": "c132d4bff77dd4a495f9cb09f8ac34ae", "score": "0.543683", "text": "def fairness_parameter_gradient(self, **optimization_target_args):\n if self.error_delta == 0.0:\n return self.fairness_function(**optimization_target_args)\n else:\n return np.array([-self.fairness_function(**optimization_target_args) - self._error_delta,\n self.fairness_function(**optimization_target_args) - self._error_delta]).squeeze()", "title": "" }, { "docid": "9e277bc497446bf29085892181028e77", "score": "0.5363549", "text": "def f_gradient(x, y, lbd, km):\r\n\r\n temp_vector = km.dot(x)\r\n n = len(y)\r\n\r\n grad_value = np.diag(-y).dot(km.dot(exp(-y * temp_vector)/(1 + exp(-y * temp_vector))))/n + lbd * temp_vector\r\n\r\n return grad_value", "title": "" }, { "docid": "580e5dfccc2eb296337da3f5f6efcb90", "score": "0.5363145", "text": "def get_gradient(self, label, sample):\n self.predict(sample)\n self.calc_delta(label)\n self.calc_gradient()", "title": "" }, { "docid": "cb0fd286ba82d3765bc2abc48a773452", "score": "0.53611887", "text": "def gradient(self):\n gradient = np.zeros(len(self.variables))\n \n if self.state1 < self.nStates-1:\n 
gradient[self.state1] = self.transitCount / self.pi1\n else:\n for state in range((self.nStates-1)):\n gradient[state] = -self.transitCount/self.pi1\n \n gradient[self.bivariateGradInd] = self.transitCount/self.theta\n return gradient", "title": "" }, { "docid": "a3da846e3a548fd4dc973da5835bfa5d", "score": "0.5356187", "text": "def process_gradient(self):\n return", "title": "" }, { "docid": "f4e3f5cd8f010ccfa9e57486a2596872", "score": "0.53547686", "text": "def critic_z_gradient_penalty(self, batch_size, y_true, y_pred):\n alpha = tf.keras.backend.random_uniform((batch_size, 1, 1))\n interpolated = (alpha * y_true) + ((1 - alpha) * y_pred)\n\n with tf.GradientTape() as gp_tape:\n gp_tape.watch(interpolated)\n # 1. Get the discriminator output for this interpolated image.\n pred = self.critic_z(interpolated)\n\n # 2. Calculate the gradients w.r.t to this interpolated image.\n grads = gp_tape.gradient(pred, [interpolated])[0]\n norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2]))\n gp = tf.reduce_mean((1.0 - norm) ** 2)\n\n return gp", "title": "" }, { "docid": "00f9b0658a2fcd08f59a893ea6573524", "score": "0.5348646", "text": "def gradient(self, params):\n return 10 * (params - 3)", "title": "" }, { "docid": "3b7a96aee3fd64bf99edcd1ae8eb693c", "score": "0.5343812", "text": "def test_gradient(self):\n discretization = GridWorld([[0, 1], [0, 1]], [2, 2])\n delaunay = _Triangulation(discretization)\n\n points = np.array([[0, 0],\n [1, 0],\n [0, 1],\n [1, 1]], dtype=np.int)\n nodes = delaunay.discretization.state_to_index(points)\n\n # Simplex with node values:\n # 3 - 1\n # | \\ |\n # 1 - 2\n # --> x\n\n values = np.zeros(delaunay.nindex)\n values[nodes] = [1, 2, 3, 1]\n\n test_points = np.array([[0.01, 0.01],\n [0.99, 0.99]])\n\n true_grad = np.array([[1, 2], [-2, -1]])\n\n # Construct true H (gradient as function of values)\n true_H = np.zeros((2 * delaunay.input_dim, delaunay.nindex))\n\n true_H[0, nodes[[0, 1]]] = [-1, 1]\n true_H[1, nodes[[0, 2]]] = [-1, 1]\n true_H[2, nodes[[2, 3]]] = [-1, 1]\n true_H[3, nodes[[1, 3]]] = [-1, 1]\n\n # Evaluate gradient with and without values\n H = delaunay.gradient_parameter_derivative(test_points).toarray()\n delaunay.parameters = values\n grad = delaunay.gradient(test_points)\n\n # Compare\n assert_allclose(grad, true_grad)\n assert_allclose(H, true_H)\n assert_allclose(true_grad,\n H.dot(values).reshape(-1, delaunay.input_dim))", "title": "" }, { "docid": "22bf60ac4a91286535593dc3b2dce3fc", "score": "0.5337247", "text": "def grad_descent (x, y, learning): \n\n w = np.zeros(22)\n converage=1\n\n for runs in range(30):\n gradient = grad(w, x, y, 0)\n w = w - (learning * gradient)\n normalg= np.linalg.norm(gradient)\n print(\"normalg: \", normalg)\n if np.isinf(normalg):\n print(\"normalg goes to inf, goning to break the loop.\")\n break\n if normalg <= converage:\n print(\"normalg <= converage!!!\")\n break\n iteration_list.append(runs)\n print(\"w: \",w )\n print(\"i:\", runs)\n return w", "title": "" }, { "docid": "5df40f294c48e7d39fce90ba15a62722", "score": "0.528242", "text": "def policy_gradient(state, weight):\n # probabilities of actions\n probs = policy(state, weight)\n\n # random action\n action = np.random.choice(len(probs[0]), p=probs[0])\n\n # softmax gradient\n # probs shape (1, 2)\n # s shape (2, 1)\n s = probs.reshape(-1, 1)\n\n # softmax matrix\n softmax = np.diagflat(s) - s @ s.T\n\n # Take the obs for the action taken\n dsoftmax = softmax[action, :]\n\n # Derivative of natural logarithm\n dlog = dsoftmax / probs[0, action]\n\n 
gradient = state.T @ dlog[None, :]\n\n return action, gradient", "title": "" }, { "docid": "3eda0ad4deca20d60958f3d62c082e99", "score": "0.52776885", "text": "def bias_gradient_statistics(model, layer_name=None):\n\n return parameter_statistics(model, 'b', 'grad', layer_name)", "title": "" }, { "docid": "409e5c93318ab5cd95ee10d1445fd0e0", "score": "0.5273136", "text": "def variables_gradient(self, loss: tf.Tensor, state: np.ndarray, label: tf.Tensor) -> List:\n variables = self.get_variables()\n losses = []\n for i, var in enumerate(variables):\n new_vars = copy.copy(variables)\n new_vars[i] = tf.add(var, self.g_epsilon)\n self.set_variables(new_vars)\n new_loss = self.state_to_loss(state, label)\n losses.append(new_loss)\n\n self.set_variables(variables)\n dy = [tf.subtract(x, loss) for x in losses]\n grads = [tf.divide([y], self.g_epsilon) for y in dy]\n return grads", "title": "" }, { "docid": "0ca1475f01f1768b94cb23be1bd875a9", "score": "0.5272091", "text": "def _conditional_bias(forecast, reference, dim='svd', **metric_kwargs):\n acc = _pearson_r(forecast, reference, dim=dim)\n conditional_bias = acc - _std_ratio(forecast, reference, dim=dim) ** -1\n return conditional_bias", "title": "" }, { "docid": "4ea2a2bd926445a55193d5d1f2fcb84e", "score": "0.52589333", "text": "def rand_free_conf(self):\n uniform_val = np.random.uniform(0, 1, self.state_dim)\n node = self.threshold[:, 0]*(1-uniform_val) + self.threshold[:, 1]*uniform_val\n return node", "title": "" }, { "docid": "e456a997c8b1cfcf541bec152faa4691", "score": "0.52534866", "text": "def _gradient_penalty(self, real_data, generated_data):\n discriminator = self\n batch_size = real_data.size()[0]\n\n # Calculate interpolation\n alpha = torch.rand(batch_size, 1)\n alpha = alpha.expand_as(real_data)\n if self.use_cuda:\n alpha = alpha.cuda()\n interpolated = alpha * real_data.data + (1 - alpha) * generated_data.data\n interpolated = Variable(interpolated, requires_grad=True)\n if self.use_cuda:\n interpolated = interpolated.cuda()\n\n # Calculate probability of interpolated examples\n prob_interpolated = discriminator(interpolated)\n\n # Calculate gradients of probabilities with respect to examples\n gradients = torch_grad(outputs=prob_interpolated, inputs=interpolated,\n grad_outputs=torch.ones(\n prob_interpolated.size()).cuda() if self.use_cuda else torch.ones(\n prob_interpolated.size()),\n create_graph=True, retain_graph=True)[0]\n\n # Gradients have shape (batch_size, num_channels, img_width, img_height),\n # so flatten to easily take norm per example in batch\n gradients = gradients.view(batch_size, -1)\n\n # Derivatives of the gradient close to 0 can cause problems because of\n # the square root, so manually calculate norm and add epsilon\n\n # gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)\n\n # Return gradient penalty\n return self.LAMBDA * ((gradients.norm(2, dim=1) - 1) ** 2).mean()", "title": "" }, { "docid": "2fa793f6c99f14ebefee4834dcfe421f", "score": "0.5252034", "text": "def test_kernel_gradient_on_hyper_params():\n gamma = 5\n K, my = rbf_kernel(gamma, X, X, gradient_on_gamma=True)\n gamma_ = gamma + 1e-3\n K_ = rbf_kernel(gamma_, X, X)\n numeric = (K_ - K) / 1e-3\n assert_array_almost_equal(numeric, my, 3)", "title": "" }, { "docid": "50e0b85cbcdfdfc945fdb92344c28f88", "score": "0.5250229", "text": "def compute_gradient(theta, X, y, lambda_):\n h = sigmoid(X @ theta)\n l2 = np.append([0], ((lambda_ * theta[1:]) / len(y)))\n return (X.T @ (h - y)) / len(y) + l2", "title": "" }, { "docid": 
"77d5f45933aaebe24ee64fda6ce0b4f2", "score": "0.5237509", "text": "def gradient_duels_gen(self, num_feat, num_datapoints):\n #n = 14 # number of datapoints\n #x1 = np.linspace(self.Gridmin1, self.Gridmax1, n)[:, None] # Operating temp.\n x1 = np.arange(self.Gridmin1, self.Gridmax1, self.temp_diff)[:,None]\n \n # sampling from indexes\n indexes = np.arange(x1.shape[0])\n \n ind_samp1 = np.random.choice(indexes, size = 1)\n x_samp1_01 = x1[ind_samp1]\n \n ind_samp2_all1 = np.random.choice(indexes, size = num_datapoints)\n x_samp21 = x1[ind_samp2_all1]\n x_samp1_n1 = x_samp21[:-1]\n x_samp11 = np.append(x_samp1_01, x_samp1_n1)[:,None]\n \n u1, u1_der = objfunc.beta_utility_gen(x_samp11, self.l,\n self.a, self.b)\n u2, u2_der = objfunc.beta_utility_gen(x_samp21, self.l,\n self.a, self.b) \n \n u2_der = u2_der[:,0] \n y_der = np.zeros(u2_der.shape[0])\n y_der[u2_der > 0] = 1.\n \n y_der[u2_der < 0] = -1.\n\n return (x_samp11, x_samp21, y_der, u1, u2)", "title": "" }, { "docid": "988f0d739c253b24475a9d8fc0657b62", "score": "0.5225748", "text": "def compute_gradient_penalty(D, real_samples, fake_samples,index):\n # Random weight term for interpolation between real and fake samples\n # sample shape:(time_length, batch_size*scene_traj_num, dim) = (81, 128, 2)\n real_samples = real_samples.permute(1,2,0)\n fake_samples = fake_samples.permute(1,2,0)\n # (batch_size*scene_traj_num,2,time_length)\n alpha = Tensor(np.random.random((real_samples.size(0), 1, 1)))\n # Get random interpolation between real and fake samples\n interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\n\n # scene_vel = diff(interpolates, 1)\n # scene_acc = diff(interpolates, 2)\n # interpolates = torch.cat((interpolates, scene_vel, scene_acc), 1)\n interpolates = interpolates.permute(2, 0, 1)\n\n d_interpolates = D(interpolates,index)\n out_shape = real_samples.size(0)\n fake = Variable(Tensor(out_shape,1).fill_(1.0), requires_grad=False)\n # Get gradient w.r.t. 
interpolates\n # with torch.autograd.detect_anomaly():\n gradients = autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.contiguous().view(gradients.size(0), -1)\n # b = torch.nn.utils.clip_grad_norm_(gradients, 10)\n # GP\n # gp = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n # LP\n # gp = (torch.clamp(gradients.norm(2, dim=1) - 1, 0, float(\"inf\")) ** 2).mean()\n # wgan-div\n gp = (gradients.norm(2, dim=1) ** 6).mean()\n return gp", "title": "" }, { "docid": "3d34d0cb30407fa060d532a72478ce85", "score": "0.5211051", "text": "def variables_gradient_exact(self, state: np.ndarray, label: tf.Tensor) -> List:\n variables = self.get_variables()\n grads = []\n for i, var in enumerate(variables):\n new_vars_plus = copy.copy(variables)\n new_vars_minus = copy.copy(variables)\n\n new_vars_plus[i] = tf.add(var, np.pi/2)\n new_vars_minus[i] = tf.subtract(var, np.pi/2)\n\n self.set_variables(new_vars_plus)\n loss_plus = self.state_to_loss(state, label)\n\n self.set_variables(new_vars_minus)\n loss_minus = self.state_to_loss(state, label)\n grad = tf.cast(tf.subtract(loss_plus, loss_minus), tf.float32)\n grads.append(grad)\n\n self.set_variables(variables)\n return grads", "title": "" }, { "docid": "45b69162d2419fb6a54b9823f2be7448", "score": "0.5210655", "text": "def compute_gradient_penalty(D, real_samples, fake_samples):\n batch_size = real_samples.size(0)\n lamda = 10 \n alpha = torch.rand(batch_size,1)\n \n #Slicing only a size same as real_samples to avoid conflict with different train batch sizes \n #fake_samples = fake_samples[:batch_size, :]\n \n #print('alpha {} real {} fake {}'.format(alpha.size(), real_samples.size(), fake_samples.size()))\n if cuda:\n alpha=alpha.cuda()\n\n interpolates = (alpha * real_samples.data) + ((1 - alpha) * fake_samples.data) #.requires_grad_(True)\n interpolates = autograd.Variable(interpolates, requires_grad=True)\n if cuda:\n interpolates=interpolates.cuda()\n \n d_interpolates = D(interpolates)\n d_interpolates = d_interpolates.view(-1,1) ### newly added line\n \n fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad = True)\n \n # Get gradient w.r.t. 
interpolates\n gradients = autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True, \n retain_graph=True,\n only_inputs=True,\n )[0]\n \n gradient_penalty = (((gradients.norm(2, dim=1) - 1) ** 2)* lamda).mean()\n return gradient_penalty", "title": "" }, { "docid": "1c65cb52ea2592836b56d16ed0b3b758", "score": "0.5209864", "text": "def test_fully_observed_gradient(self):\n weights = np.zeros(8 + 32)\n learner = Learner(MatrixBeliefPropagator)\n self.set_up_learner(learner, latent=False)\n learner.start_time = time.time()\n learner.set_regularization(0.0, 1.0)\n gradient_error = check_grad(learner.subgrad_obj, learner.subgrad_grad, weights)\n\n # numerical_grad = approx_fprime(weights, learner.subgrad_obj, 1e-4)\n # analytical_grad = learner.subgrad_grad(weights)\n # plt.plot(numerical_grad, 'r')\n # plt.plot(analytical_grad, 'b')\n # plt.show()\n\n print(\"Gradient error: %f\" % gradient_error)\n assert gradient_error < 1e-1, \"Gradient is wrong\"", "title": "" }, { "docid": "1a08f918a59011eae30f07b4f69cb3a9", "score": "0.5208682", "text": "def get_gradients(model, epsilon, delta=0.01, max_norm=1.0):\n scale = np.sqrt(2 * np.log(1.25 / delta)) / epsilon\n\n # Compute the gradient norm\n total_norm = 0.0\n for p in model.parameters():\n if p.requires_grad:\n total_norm += p.grad.data.norm(2).item() ** 2\n total_norm = total_norm ** (1. / 2)\n\n # Clip and add noise\n gradients = []\n for p in model.parameters():\n if p.requires_grad:\n grad = p.grad.data.clone()\n grad /= max(1.0, total_norm / max_norm)\n grad += torch.normal(\n torch.zeros(p.grad.data.size()), \n torch.zeros(p.grad.data.size()) + scale * max_norm\n )\n gradients.append(grad)\n return gradients", "title": "" }, { "docid": "12caef42fe697a2e7c051fb69f59993b", "score": "0.52036786", "text": "def return_weights(X, b, d, mincols):\n max_iter = 100\n threshold= .00001\n w= np.empty((1,X.shape[1]))\n w.fill(1)\n p_1 = 1/(1+b*d)\n n = X.shape[0]\n E_old = 1\n for i in np.arange(0, max_iter):\n d = pairwise_distances(X,X, metric = weighted_euclidean, **{'weights':w})\n grad_w = np.empty((1,X.shape[1]))\n part_pq = -b/((1+b*d)**2)\n p = 1/(1+b*d)\n E = (2/(n*(n-1))) * np.sum(np.triu(.5*((p*(1-p_1) + p_1*(1-p))), 1))\n if E_old - E < threshold:\n break\n E_old = E\n part_eq = (1-2*p_1)\n w_valid = np.where(w > 0)[1]\n\n if w_valid.shape[0] == mincols:\n break\n\n for j in w_valid:\n d_w = pairwise_distances(X, X, metric = single_delta, **{'F':j})\n part_w = w[0, j]*(d_w)**2 / d\n part_w = np.triu(part_w, 1)\n grad_w_j = 1/(n*(n-1)) * part_eq * part_pq * part_w\n grad_w_j = np.triu(grad_w_j, 1)\n grad_w[ 0, j] = np.nansum(grad_w_j)\n grad_w = grad_w * 50\n w = w-grad_w\n w = w.clip(min=0)\n #if i %10 == 0 and i > 0:\n #print(\"Iteration {} Finished\".format(i))\n #print(\"Weights : {} \".format(w))\n #print(\"Function Improvement : {}\".format(E_old - E))\n\n wmax = np.max(w)\n w = w / wmax\n print(\"{} Iterations Required\".format(i))\n return w", "title": "" }, { "docid": "e72ad7b7d0ac7f4666cf382e59af9690", "score": "0.51941454", "text": "def evaluate_gradient(self, locations: np.ndarray) -> np.ndarray:\n v1 = self.geological_feature_a.evaluate_gradient(locations)\n # v1 /= np.linalg.norm(v1,axis=1)[:,None]\n v2 = self.geological_feature_b.evaluate_gradient(locations)\n # v2 /= np.linalg.norm(v2,axis=1)[:,None]\n return np.cross(v1, v2, axisa=1, axisb=1)", "title": "" }, { "docid": "ce5b823ebb84c377b232eb5b7e699c5a", "score": "0.5193387", "text": "def compute_gradient(outputs, 
perturbed_outputs, perturbation_values):\n\n if len(perturbed_outputs.shape) == 1:\n perturbed_outputs.shape = (len(perturbed_outputs), 1)\n (srom_size, dim) = perturbed_outputs.shape\n\n if len(outputs) != srom_size:\n raise ValueError(\"# output samples must match perturbed outputs\")\n\n if len(perturbation_values) != dim:\n raise ValueError(\"length of perturbation_values must match \"\n \"dimension\")\n\n gradients = np.zeros((srom_size, dim))\n\n for i in range(dim):\n grad = ((perturbed_outputs[:, i] - outputs.flatten())\n / perturbation_values[i])\n gradients[:, i] = grad\n\n return gradients", "title": "" }, { "docid": "1449140da04e71a188a0876ebc27713a", "score": "0.519283", "text": "def test_magnetic_pressure_gradient(DummyStellarator):\n eq = Equilibrium.load(\n load_from=str(DummyStellarator[\"output_path\"]), file_format=\"hdf5\"\n )\n\n # partial derivatives wrt rho\n num_rho = 110\n grid = LinearGrid(NFP=eq.NFP, rho=num_rho)\n drho = grid.nodes[1, 0]\n data = eq.compute([\"|B|\", \"grad(|B|^2)_rho\"], grid=grid)\n B2_r = np.convolve(data[\"|B|\"] ** 2, FD_COEF_1_4, \"same\") / drho\n np.testing.assert_allclose(\n data[\"grad(|B|^2)_rho\"][3:-2],\n B2_r[3:-2],\n rtol=1e-3,\n atol=1e-3 * np.nanmean(np.abs(data[\"grad(|B|^2)_rho\"])),\n )\n\n # partial derivative wrt theta\n num_theta = 90\n grid = LinearGrid(NFP=eq.NFP, theta=num_theta)\n dtheta = grid.nodes[1, 1]\n data = eq.compute([\"|B|\", \"grad(|B|^2)_theta\"], grid=grid)\n B2_t = np.convolve(data[\"|B|\"] ** 2, FD_COEF_1_4, \"same\") / dtheta\n np.testing.assert_allclose(\n data[\"grad(|B|^2)_theta\"][2:-2],\n B2_t[2:-2],\n rtol=1e-2,\n atol=1e-2 * np.nanmean(np.abs(data[\"grad(|B|^2)_theta\"])),\n )\n\n # partial derivative wrt zeta\n num_zeta = 90\n grid = LinearGrid(NFP=eq.NFP, zeta=num_zeta)\n dzeta = grid.nodes[1, 2]\n data = eq.compute([\"|B|\", \"grad(|B|^2)_zeta\"], grid=grid)\n B2_z = np.convolve(data[\"|B|\"] ** 2, FD_COEF_1_4, \"same\") / dzeta\n np.testing.assert_allclose(\n data[\"grad(|B|^2)_zeta\"][2:-2],\n B2_z[2:-2],\n rtol=1e-2,\n atol=1e-2 * np.mean(np.abs(data[\"grad(|B|^2)_zeta\"])),\n )", "title": "" }, { "docid": "5a3d4847f2b628169a86a692ba0ce8fb", "score": "0.5191244", "text": "def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n n_features,*other = _x.shape\n v = np.matmul(self.W, _x)\n q = self.softmax(v)\n sfmx = np.reshape(q-_y,(self.k,1))\n _g = np.matmul(sfmx, np.reshape(_x,(n_features,1)).T)\n return _g\n ### END YOUR CODE", "title": "" }, { "docid": "0bfd6fa164ac8dd7f861558a5348f7ef", "score": "0.51905155", "text": "def test_gradient(self):\n weights = np.zeros(8 + 32)\n learner = Learner(MatrixBeliefPropagator)\n self.set_up_learner(learner, latent=True)\n learner.start_time = time.time()\n learner.set_regularization(0.0, 1.0)\n gradient_error = check_grad(learner.subgrad_obj, learner.subgrad_grad, weights)\n\n # numerical_grad = approx_fprime(weights, learner.subgrad_obj, 1e-4)\n # analytical_grad = learner.subgrad_grad(weights)\n # plt.plot(numerical_grad, 'r')\n # plt.plot(analytical_grad, 'b')\n # plt.show()\n\n print(\"Gradient error: %e\" % gradient_error)\n assert gradient_error < 1e-1, \"Gradient is wrong\"", "title": "" }, { "docid": "a4aa9b2483cb517c0034ebd127de8257", "score": "0.5185588", "text": "def rosenbrock_criterion_and_gradient(params):\n return rosenbrock_scalar_criterion(params), rosenbrock_gradient(params)", "title": "" }, { "docid": "e05fa8987a1fb67e9d06882bafe9dd49", "score": "0.5179254", "text": "def perceptron_gradient(i):\n inputs = data[i]\n input_len = 
len(inputs['tokens'])\n gold_labels = inputs['gold_tags']\n features = Features(inputs, feature_names)\n\n def score(cur_tag, pre_tag, i):\n return parameters.dot_product(features.compute_features(cur_tag, pre_tag, i))\n\n tags = decode(input_len, tagset, score)\n fvector = compute_features(tags, input_len, features) # Add the predicted features\n #print('Input:', inputs) # helpful for debugging\n #print(\"Predicted Feature Vector:\", fvector.fdict)\n #print(\"Predicted Score:\", parameters.dot_product(fvector))\n fvector.times_plus_equal(-1, compute_features(gold_labels, input_len, features)) # Subtract the features for the gold labels\n #print(\"Gold Labels Feature Vector: \", compute_features(gold_labels, input_len, features).fdict)\n #print(\"Gold Labels Score:\", parameters.dot_product(compute_features(gold_labels, input_len, features)))\n return fvector", "title": "" }, { "docid": "457ddb880d430d707902245cb167d2fa", "score": "0.5176523", "text": "def gradient_descent(features, values, theta, alpha, num_iterations):\n\n # Write code here that performs num_iterations updates to the elements of theta.\n # times. Every time you compute the cost for a given list of thetas, append it \n # to cost_history.\n # See the Instructor notes for hints. \n \n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n theta = theta - alpha / m * np.dot((predicted_values - values), features)\n \n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n\n return theta, pd.Series(cost_history) # leave this line for the grader", "title": "" }, { "docid": "7223d2c960671fd35aa3482f74ffce63", "score": "0.51736283", "text": "def gradient(self, predicted: Tensor, actual: Tensor) -> Tensor:\n raise NotImplementedError", "title": "" }, { "docid": "ce6f4331a827f4983971f8c4707036bc", "score": "0.516938", "text": "def compute_gradient_penalty(self,D, real_samples, fake_samples):\n # Random weight term for interpolation between real and fake samples\n # Get random interpolation between real and fake samples\n interpolates = self.interpolate(real_samples,fake_samples).requires_grad_(True)\n d_interpolates = D(interpolates)\n# fake = nn.Parameter(nn.Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)\n # Get gradient w.r.t. 
interpolates\n gradients = grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=torch.ones_like(d_interpolates).to(self.device),\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.size(0), -1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n return gradient_penalty", "title": "" }, { "docid": "f4bd3107a8f569724bdda5e24c90d7d3", "score": "0.51625127", "text": "def get_gradient(f, r, h=1e-4, normalize=True):\n N = r.size//3\n grad = np.zeros(r.shape)\n for i in range(N):\n r_copy = r.copy()\n x, y, z = r[i]\n variations = np.array([[x+h, y, z],\n [x-h, y, z],\n [x, y+h, z],\n [x, y-h, z],\n [x, y, z+h],\n [x, y, z-h]])\n varied_potentials = []\n for j in variations:\n r_copy[i] = j\n varied_potentials.append(f(r_copy))\n dx = (varied_potentials[0] - varied_potentials[1])/(2*h)\n dy = (varied_potentials[2] - varied_potentials[3])/(2*h)\n dz = (varied_potentials[4] - varied_potentials[5])/(2*h)\n grad[i] = [dx, dy, dz]\n if normalize:\n try:\n grad = grad/(abs(np.max(grad)))\n except FloatingPointError as e:\n print(np.max(grad))\n print(grad)\n raise e\n return grad", "title": "" }, { "docid": "8878e0f5fd1e720a383922db0b5bd042", "score": "0.51243705", "text": "def compute_gradient_for_all(self):\r\n\r\n # Compute theta * x (for clarification, * is DOT PRODUCT)\r\n sigma = self.theta.dot(self.x.T)\r\n\r\n # Sigmoid the result of theta * x\r\n sigmoid_v = np.vectorize(self.sigmoid) # So we can use our sigmoid()-method on vectors\r\n sigma = sigmoid_v(sigma) # Sigmoid all values\r\n\r\n # Subtract all values by corresponding label in the labels vector (self.y)\r\n sigma -= self.y\r\n\r\n # Now we must multiply by Xk^i\r\n sigma = self.x.T.dot(sigma)\r\n\r\n for k in range(self.FEATURES):\r\n self.gradient[k] = sigma[k] / self.TRAINING_DATAPOINTS", "title": "" }, { "docid": "057a2981152acc851edc93a94c793277", "score": "0.5124171", "text": "def compute_gradient_penalty(D, real_samples, fake_samples):\n # Random weight term for interpolation between real fake samples\n alpha = torch.cuda.FloatTensor(np.random.random((real_samples.size(0), 1, 1, 1)))\n # Get random interpolation between real and fake samples\n interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).reauires_grad_(True)\n validity = D(interpolates)\n fake = autograd.Variable(torch.cuda.FloatTensor(np.ones(validity.shape)), requires_grad=False)\n # Get gradient w.r.t interpolates\n gradients = autograd.grad(outputs=validity, inputs=interpolates,\n grad_outputs=fake, create_graph=True, retain_graph=True,\n only_inputs=True)[0]\n gradients = gradients.view(gradients.size(0), -1)\n # gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()\n gradients_penalty = (torch.clamp(gradients.norm(2, dim=1) - 1., min=0.)).mean()\n return gradients_penalty", "title": "" }, { "docid": "fff856b440e782122aa6ab2e56f95726", "score": "0.51237553", "text": "def infer(self, potential, iterations, cf=True):\n energy = tf.zeros(shape=(10,), dtype=self.dtype)\n for i in tf.range(iterations):\n if cf:\n grd, energy = self.gradient_cf(potential)\n else:\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(self.trainable_variables)\n energy = self.bethe_free_energy(potential)\n grd = tape.gradient(energy, self.trainable_variables)\n\n self.optimizer.apply_gradients(zip(grd, self.trainable_variables))\n\n if tf.equal(tf.math.mod(i, 50), 0) or tf.equal(i + 1, iterations):\n tf.print(tf.strings.format('iter: {} dmu = {}, dsigma = 
{}, dw={}, Energy = {}', (\n i, tf.reduce_mean(tf.abs(grd[0])), tf.reduce_mean(tf.abs(grd[1])), tf.reduce_mean(tf.abs(grd[2])),\n tf.reduce_mean(energy))))\n return energy", "title": "" }, { "docid": "fc8669f470a231bcfd3c41a84ae3899f", "score": "0.51141465", "text": "def __gradient_descend(self, batch, labels):\n\n # current activations vectors, weights matrices and biases vectors\n a_1, a_2, a_3, a_4, w_lj, w_ji, w_ik, b_2, b_3, b_4 = self.nn\n\n # temporary biases vectors and weights matrices\n b_2t, b_3t, b_4t = numpy.zeros(self.l_2), numpy.zeros(self.l_3), numpy.zeros(L_4)\n w_lj_t, w_ji_t, w_ik_t = numpy.zeros([L_1, self.l_2]), numpy.zeros([self.l_2, self.l_3]), \\\n numpy.zeros([self.l_3, L_4]),\n\n # for every image in mini-batch we calculate gradient descent step approximation, and then average them\n for sample in range(len(batch)):\n\n # compute desired output based on label\n count += 1\n tgt = numpy.zeros(10)\n tgt[labels[sample]] = 1.0\n self.evaluate_img(batch[sample])\n\n # partial derivatives for biases (in vector form)\n a = 2 * (a_4 - tgt) * a_4 * (1 - a_4)\n b = (2 * (a_4 - tgt) * a_4 * (1 - a_4)).dot(w_ik.T) * (a_3 * (1 - a_3))\n c = (2 * (a_4 - tgt) * a_4 * (1 - a_4)).dot(w_ik.T) * (a_3 * (1 - a_3)).dot(w_ji.T) * (a_2 * (1 - a_2))\n\n b_2t += c\n b_3t += b\n b_4t += a\n\n # partial derivative for weight is just bias vector dot activation vector because of the Chain Rule\n w_lj_t += c.reshape((-1, 1)).dot([a_1]).T\n w_ji_t += b.reshape((-1, 1)).dot([a_2]).T\n w_ik_t += a.reshape((-1, 1)).dot([a_3]).T\n\n # update weights in NN internal state\n for x in zip(range(4, 10), [w_lj_t, w_ji_t, w_ik_t, b_2t, b_3t, b_4t]):\n self.nn[x[0]] -= x[1] / len(batch) * self.eta", "title": "" }, { "docid": "72a3b094c4177afd136c739f28a73a71", "score": "0.51052195", "text": "def critic_gradient(self):\n cinput = self.critic_Q.input\n coutput = self.critic_Q.output\n\n # compute the gradient of the action with q value, dq/da.\n action_grads = K.gradients(coutput, cinput[1])\n\n return K.function([cinput[0], cinput[1]], action_grads)", "title": "" }, { "docid": "fecbfd37f5231cf150330d3d27230c31", "score": "0.51039535", "text": "def getChargeGradients(self):\n grad_en, grad_orbs = self.getMOgradients() \n # occupation numbers, 2 - doubly occupied, 0 - virtual orbital\n f = self.dftb.getOccupation()\n # density matrix depends only on occupied orbitals\n orbs = self.dftb.getKSCoefficients()\n occ_indx = np.where(f > 0.0)[0]\n CdCdp = np.tensordot(grad_orbs[:,:,occ_indx], orbs[:,occ_indx], axes=(2,1))\n # gradients of density matrix\n gradP = 2 * (CdCdp + np.swapaxes(CdCdp, 1,2))\n # overlap matrix \n S = self.dftb.S\n # gradients of overlap matrix\n gradS = self.gradS\n \n # number of atoms\n atomlist = self.dftb.getGeometry()\n valorbs = self.dftb.valorbs\n Nat = len(atomlist)\n # charge gradients\n dQdp = np.zeros((3*Nat,Nat))\n\n # difference density matrix relative to reference density matrix\n PmP0 = self.dftb.P-self.dftb.P0\n # iterate over atoms\n mu = 0\n for i,(Zi,posi) in enumerate(atomlist):\n # iterate over orbitals on center i\n for (ni,li,mi) in valorbs[Zi]:\n # iterate over atoms\n nu = 0\n for j,(Zj,posj) in enumerate(atomlist):\n # iterate over orbitals on center j\n for (nj,lj,mj) in valorbs[Zj]:\n dQdp[:,i] += gradP[:,mu,nu] * S[mu,nu] + PmP0[mu,nu] * gradS[:,mu,nu]\n nu += 1\n mu += 1\n\n return dQdp", "title": "" }, { "docid": "c0bb567bc45fafc45796d754a1aa57ba", "score": "0.5099989", "text": "def _erfcx_custom_gradient(x):\n return _erfcx_naive(x)", "title": "" }, { 
"docid": "b603ba87dcb9a489b65272a10241c643", "score": "0.50907755", "text": "def pc_conditional_sampler(rng, score_state, labels):\n # Initial sample\n rng, step_rng = random.split(rng)\n x = sde.prior_sampling(step_rng, shape)\n\n timesteps = jnp.linspace(sde.T, eps, sde.N)\n\n def loop_body(i, val):\n rng, x, x_mean = val\n t = timesteps[i]\n vec_t = jnp.ones(shape[0]) * t\n rng, step_rng = random.split(rng)\n x, x_mean = conditional_corrector_update_fn(step_rng, score_state, x, vec_t, labels)\n rng, step_rng = random.split(rng)\n x, x_mean = conditional_predictor_update_fn(step_rng, score_state, x, vec_t, labels)\n return rng, x, x_mean\n\n _, x, x_mean = jax.lax.fori_loop(0, sde.N, loop_body, (rng, x, x))\n return inverse_scaler(x_mean if denoise else x)", "title": "" }, { "docid": "fcf1793f515421aee8d64d02107897dc", "score": "0.5087558", "text": "def compute_gradient_penalty(self, D, real_samples, fake_samples):\r\n # Random weight term for interpolation between real and fake samples\r\n alpha = self.Tensor(np.random.random((real_samples.size(0), 1, 1)))\r\n # Get random interpolation between real and fake samples\r\n interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\r\n d_interpolates = D(interpolates)\r\n fake = Variable(self.Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)\r\n # Get gradient w.r.t. interpolates\r\n gradients = torch.autograd.grad(\r\n outputs=d_interpolates,\r\n inputs=interpolates,\r\n grad_outputs=fake,\r\n create_graph=True,\r\n retain_graph=True,\r\n only_inputs=True,\r\n )[0]\r\n gradients = gradients.view(gradients.size(0), -1)\r\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\r\n return gradient_penalty", "title": "" }, { "docid": "d8e718e02490ca062c045e12062ea40e", "score": "0.5060968", "text": "def test_conditional_backward():\n width = 5\n height = 7\n z_size = 2\n batch_size = 3\n\n model = ConditionalCPPN(\n ConditionalModelConfig(width=width,\n height=height,\n n_units_xyr=3,\n n_hidden_units=[\n 10,\n 10,\n ],\n z_size=z_size,\n in_width=64,\n in_height=64,\n in_channel=1,\n use_batch_norm=False))\n model.zerograds()\n\n # create inputs: inputs is dict whose key is batch index, and value is tuple of (x, z) for each index\n x, z, inputs = gen_input_batch(batch_size, width, height, z_size)\n c = chainer.Variable(get_dammy_input(batch_size, 64, 64, 1)) # init dammy conditional input\n\n # forward prop\n y = model.forward(x, z, c)\n\n # taking loss at only first image\n t = get_dammy_output(batch_size, width, height)\n loss = F.mean_squared_error(y[0], t[0])\n\n g_x, g_z = chainer.grad((loss, ), inputs[0])\n g_c = chainer.grad((loss, ), (c, ))[0].data\n\n assert g_c[0].sum() != 0.0, f\"gradient of c is zero\"\n assert g_x.data.sum() != 0.0, f\"gradient of x is zero\"\n assert g_z.data.sum() != 0.0, f\"gradient of z is zero\"\n\n g_x, g_z = chainer.grad((loss, ), inputs[1])\n assert g_c[1].sum() == 0.0, f\"gradient of c is zero\"\n assert g_x.data.sum() == 0.0, f\"gradient of x is zero\"\n assert g_z.data.sum() == 0.0, f\"gradient of z is zero\"\n\n g_x, g_z = chainer.grad((loss, ), inputs[2])\n assert g_c[2].sum() == 0.0, f\"gradient of c is zero\"\n assert g_x.data.sum() == 0.0, f\"gradient of x is zero\"\n assert g_z.data.sum() == 0.0, f\"gradient of z is zero\"", "title": "" }, { "docid": "54de286220e9ff41169c25d92ff536f9", "score": "0.5057102", "text": "def _CreateGradientProcess(self):\n # We compute the scalar value gradient\n if self.domain_size == 2:\n self.local_gradient = 
KratosMultiphysics.ComputeNodalGradientProcess2D(self.main_model_part, self.scalar_variable, self.gradient_variable, KratosMultiphysics.NODAL_AREA)\n else:\n self.local_gradient = KratosMultiphysics.ComputeNodalGradientProcess3D(self.main_model_part, self.scalar_variable, self.gradient_variable, KratosMultiphysics.NODAL_AREA)", "title": "" }, { "docid": "846640f7c1bf03bc54edd095d18c52d5", "score": "0.5051862", "text": "def process_gradient(engine, learning_rate, parameters, grad_clip):\n with tf.GradientTape() as tape:\n engine.optical_system.update()\n engine.clear_ray_history()\n engine.ray_trace(3)\n #output = tf.stack(\n # [engine.finished_rays[\"y_end\"], engine.finished_rays[\"z_end\"]],\n # axis=1\n #)\n #goal = engine.finished_rays[\"rank\"]\n #goal *= - magnification\n #error = tf.squared_difference(output - goal)\n error = engine.finished_rays[\"y_end\"]**2 + engine.finished_rays[\"z_end\"]**2\n \n grad = tape.gradient(error, parameters)\n \n grad = tf.where(tf.math.is_finite(grad), grad, tf.zeros_like(grad))\n grad = tf.clip_by_value(grad, -grad_clip, grad_clip)\n grad *= learning_rate\n \n return grad, tf.reduce_sum(error)", "title": "" }, { "docid": "dd62e32fee294b780a644352244c2d78", "score": "0.50498974", "text": "def compute_gradient_penalty(D, real_samples, fake_samples):\n # Random weight term for interpolation between real and fake samples\n cuda = True if torch.cuda.is_available() else False\n Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))\n # Get random interpolation between real and fake samples\n interpolates = (alpha * real_samples + ((1 - alpha)\n * fake_samples)).requires_grad_(True)\n d_interpolates = D(interpolates)\n fake = Variable(Tensor(real_samples.shape[0], 1, 62, 62).fill_(\n 1.0), requires_grad=False)\n# torch.Size([8, 6, 224, 224])\n# torch.Size([8, 1, 26, 26])\n # print(real_samples.shape)\n # print(real_samples.size(0))\n # print(alpha.shape)\n # print(interpolates.shape)\n # print(d_interpolates.shape)\n # Get gradient w.r.t. 
interpolates\n gradients = autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.size(0), -1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n return gradient_penalty", "title": "" }, { "docid": "602855e85cf779a0938208dbb92d9a37", "score": "0.5044244", "text": "def make_gradient(bcs: Boundaries) -> Callable:\n assert isinstance(bcs.grid, PolarGrid)\n bcs.check_value_rank(0)\n\n # calculate preliminary quantities\n dim_r = bcs.grid.shape[0]\n r_min, _ = bcs.grid.axes_bounds[0]\n dr = bcs.grid.discretization[0]\n scale_r = 1 / (2 * dr)\n \n # prepare boundary values\n boundary = bcs[0]\n value_lower_bc = boundary.low.get_virtual_point_evaluator()\n value_upper_bc = boundary.high.get_virtual_point_evaluator()\n \n @jit_allocate_out(out_shape=(2, dim_r))\n def gradient(arr, out=None):\n \"\"\" apply gradient operator to array `arr` \"\"\"\n # no-flux at the origin \n i = 0\n if r_min == 0:\n out[0, i] = (arr[1] - arr[0]) * scale_r\n else:\n arr_r_l = value_lower_bc(arr, (i,))\n out[0, i] = (arr[1] - arr_r_l) * scale_r \n out[1, i] = 0 # no angular dependence by definition\n \n for i in range(1, dim_r - 1): # iterate inner radial points\n out[0, i] = (arr[i + 1] - arr[i - 1]) * scale_r\n out[1, i] = 0 # no angular dependence by definition\n\n i = dim_r - 1\n arr_r_h = value_upper_bc(arr, (i,))\n out[0, i] = (arr_r_h - arr[i - 1]) * scale_r\n out[1, i] = 0 # no angular dependence by definition\n \n return out\n \n return gradient # type: ignore", "title": "" }, { "docid": "58a378a2efe7033c47901d9629542330", "score": "0.50441283", "text": "def calculate_gradient(self):\n\n self.Foliations['G_x'] = np.sin(np.deg2rad(self.Foliations[\"dip\"])) * \\\n np.sin(np.deg2rad(self.Foliations[\"azimuth\"])) * self.Foliations[\"polarity\"]\n self.Foliations['G_y'] = np.sin(np.deg2rad(self.Foliations[\"dip\"])) * \\\n np.cos(np.deg2rad(self.Foliations[\"azimuth\"])) * self.Foliations[\"polarity\"]\n self.Foliations['G_z'] = np.cos(np.deg2rad(self.Foliations[\"dip\"])) * self.Foliations[\"polarity\"]", "title": "" }, { "docid": "fcd074383a3528488950dad3bf0eaf37", "score": "0.50408494", "text": "def model_parameter_gradient(self, **optimization_target_args):\n pass", "title": "" }, { "docid": "2d4494f9feea3ceabee0fcf820960aa4", "score": "0.5034822", "text": "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta) \n residuals = values - predicted_values\n\n # record cost\n sum_of_square_residuals = np.square(residuals).sum()\n cost = sum_of_square_residuals / (2*m)\n cost_history.append(cost)\n\n # update theta\n theta = theta + (alpha/m) * np.dot(residuals, features)\n\n return theta, pandas.Series(cost_history)", "title": "" }, { "docid": "81bd5fed29e2e72239ea253dd9935f52", "score": "0.502759", "text": "def body_fn(w, should_continue, seed):\n beta_seed, unif_seed, next_seed = samplers.split_seed(seed, n=3)\n z = beta.sample(sample_shape=sample_batch_shape, seed=beta_seed)\n # set_shape needed here because of b/139013403\n tensorshape_util.set_shape(z, w.shape)\n w = tf.where(should_continue,\n (1. - (1. + b) * z) / (1. - (1. 
- b) * z),\n w)\n if not self.allow_nan_stats:\n w = tf.debugging.check_numerics(w, 'w')\n unif = samplers.uniform(\n sample_batch_shape, seed=unif_seed, dtype=self.dtype)\n # set_shape needed here because of b/139013403\n tensorshape_util.set_shape(unif, w.shape)\n should_continue = should_continue & (\n concentration * w + dim * tf.math.log1p(-x * w) - c <\n # Use log1p(-unif) to prevent log(0) and ensure that log(1) is\n # possible.\n tf.math.log1p(-unif))\n return w, should_continue, next_seed", "title": "" }, { "docid": "b1d765ec711865c886d7c3435cf5d785", "score": "0.50275344", "text": "def _apply_critic_gradients(self, gradient, trainable_variables):\n self._critic_optimizer.apply_gradients(zip(gradient, trainable_variables))", "title": "" }, { "docid": "fabd42e07a79dc815d895028d488c682", "score": "0.5025568", "text": "def gradients(self, states, actions):\n\n return self.sess.run(self.action_grads, feed_dict={\n self.state: states,\n self.action: actions\n })[0]", "title": "" }, { "docid": "fd29def8876c8ee3aa7afa3b3f0f9155", "score": "0.50216705", "text": "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n \r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range(num_iterations):\r\n # your code here\r\n predicted_values = np.dot(features, theta)\r\n theta = theta + (alpha/m)*np.dot((values - predicted_values),features)\r\n cost_history.append(compute_cost(features, values, theta))\r\n \r\n return theta, pandas.Series(cost_history)", "title": "" }, { "docid": "1351bde491c56bcfc90f4ddf3d9e3b4a", "score": "0.50214595", "text": "def gradient_descent(features, values, theta, alpha, num_iterations):\n\n\t# Write code here that performs num_iterations updates to the elements of theta.\n\t# times. Every time you compute the cost for a given list of thetas, append it \n\t# to cost_history.\n\t# See the Instructor notes for hints. 
\n\t\n\tcost_history = [] \n\t# print features.shape\n\t# print features.shape[0] #1157\n\t# print features.shape[1] #3\n\tm = features.shape[0]\n\t# print \"features=\", features\n\t# print \"values=\", values\n\t# print \"theta=\", theta\n\t# print \"alpha=\", alpha\n\t# print \"num_iterations=\", num_iterations\n\n\t###########################\n\t### YOUR CODE GOES HERE ###\n\t###########################\n\t# print features.shape\n\t# print len(theta)\n\t# print range(1, features.shape[1])\n\tfor k in range(num_iterations): \n\t\tcost_history.append(compute_cost(features, values, theta))\n\t\t# print \"latest_cost=\", latest_cost\n\t\t# print \"np.unique(np.dot(features, theta))=\", np.unique(np.dot(features, theta)) \n\t\t# print \"np.dot(features, theta)=\", np.dot(features, theta)\n\t\t# print \"values - np.dot(features, theta)=\", values - np.dot(features, theta)\n\t\t# print \"theta=\", theta\n\t\terrors = np.dot(features, theta)\n\t\ttheta = theta + alpha/m * np.dot((values - errors), features)\n\n\treturn theta, pandas.Series(cost_history) # leave this line for the grader", "title": "" }, { "docid": "1b33a5e227bbc454b6924f7b8528327b", "score": "0.50211924", "text": "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted = np.dot(features,theta)\n pred = np.dot((values-predicted),features)\n theta = theta + (alpha/m)*pred\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "title": "" }, { "docid": "cf350017e9e8e8fe8e5814d573caf556", "score": "0.50205314", "text": "def get_gradient(x, x_start, y, theta, bias, fair_direction, regularizer):\n prob = 1/(1+np.exp(-np.sum(x * theta)-bias))\n # scalar = 2 * regularizer * np.sum(fair_direction * (x - x_start))\n # grad = (prob - y) * theta + scalar * fair_direction\n # return grad / np.linalg.norm(grad)\n\n # prob = 1/(1+np.exp(-np.dot(x, theta)-bias))\n # return (prob - y) * theta + 2 * regularizer * np.array([(x[0] - x_start[0]), 0], dtype = 'float64')\n\n return (prob - y) * np.array([0, theta[1]], dtype='float64')", "title": "" }, { "docid": "de85929aafdc1de2ed6eb5b251cf4d14", "score": "0.5020356", "text": "def _rbf_func(x):\n N, D = x.shape\n diff = x.unsqueeze(1) - x.unsqueeze(0) # [N, N, D]\n dist_sq = torch.sum(diff**2, -1) # [N, N]\n h, _ = torch.median(dist_sq.view(-1), dim=0)\n if h == 0.:\n h = torch.ones_like(h)\n else:\n h = h / max(np.log(N), 1.)\n\n kappa = torch.exp(-dist_sq / h) # [N, N]\n kappa_grad = -2 * kappa.unsqueeze(-1) * diff / h # [N, N, D]\n return kappa, kappa_grad", "title": "" }, { "docid": "5608ef800e443e4a702b23512a8141d3", "score": "0.501444", "text": "def reparameterized_target_gradient(self):\n x = self.get_macrocycle_parameters()\n reparameterized_f_g = self.target_gradient()\n repar = self.reparameterize()\n\n if len(repar) != 0:\n for i in range(self.nmp):\n if repar[i].reparamed:\n reparameterized_f_g[1][i] *= (x[i]+repar[i].offset)\n\n return reparameterized_f_g # dont need to do anything to f", "title": "" }, { "docid": "a9cc7766eb443f43a3cfa4130b669bdb", "score": "0.50086755", "text": "def gradient_descent(self, training_data, epochs, eta, test_data):\n\n num_test = len(test_data)\n for _ in xrange(epochs):\n random.shuffle(training_data)\n self.update_params(training_data, eta)\n temp_test_accu = 1.0 * self.evaluate(test_data)/num_test\n return temp_test_accu, self.lamb_init, self.bias_init, self.weight_init", "title": "" }, { 
"docid": "a2a229127c2af5d0ba98c283eb49abf9", "score": "0.50034535", "text": "def critic_x_gradient_penalty(self, batch_size, y_true, y_pred):\n alpha = tf.keras.backend.random_uniform((batch_size, 1, 1))\n interpolated = (alpha * y_true) + ((1 - alpha) * y_pred)\n\n with tf.GradientTape() as gp_tape:\n gp_tape.watch(interpolated)\n # 1. Get the discriminator output for this interpolated image.\n pred = self.critic_x(interpolated)\n\n grads = gp_tape.gradient(pred, [interpolated])[0]\n norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2]))\n gp = tf.reduce_mean((norm - 1.0) ** 2)\n\n return gp", "title": "" }, { "docid": "379a4cf4b3a55c785eebd6b7e9b4c11a", "score": "0.49968395", "text": "def gradients(self):\n return self.optimiser.compute_gradients(self.loss)", "title": "" }, { "docid": "8dce001a3688eed678fbab2dbcebc269", "score": "0.49932095", "text": "def gradientDescent(params,features,learning_rate,expectedValues):\n error = 0\n newParams = list(params)\n\n # PREVIOUS Implementation\n # for param in range(len(params)):\n # sumErrors = 0 # Keeps an accumulate of the errors\n # acc = 0 # coefficient value\n # for instance in range(len(features)):\n # yhat = calculateHyp(params,features.iloc[instance])\n # error = yhat - expectedValues.iloc[instance]\n # acc = acc + (error * features.iloc[instance,param]) # Calculate sumatory of gradient descent formula\n # # acc = acc + (learning_rate * (expectedValues.iloc[instance] - yhat) * yhat * (1 - yhat) * )\n # newParams[param] = params[param] - learning_rate * (1/len(features) * acc) # Here is the formula taught for gradient descent, acc is the value obtained from the sumatory\n \n # Optimized Version\n acc = 0\n yHat = calculateHyp(params,features)\n error = yHat - expectedValues\n acc = numpy.dot(error,features) # numpy takes care of all of this by calculating the dot product, thus getting the five parameters\n\n newParams = params - learning_rate * (1 / len(features) * acc)\n return newParams", "title": "" }, { "docid": "8261c2b33fd88366f9eb22dff8c5f7cb", "score": "0.4992119", "text": "def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n # print('X = ', X)\n # print('Y = ', Y)\n # print('theta = ', theta)\n # print('alpha = ', alpha)\n # print('lambda_factor = ', lambda_factor)\n # print('temp_parameter = ', temp_parameter)\n\n p = np.array([]).astype(float)\n p = compute_probabilities( X, theta, temp_parameter )\n\n # print(\"p = \", p)\n\n row_ind = Y.copy() #tendrá ? 
filas\n col_ind = np.array(range(len(Y))) \n data = np.array( [ 1 for i in range( len(Y) ) ] ) \n y_equal_label = sparse.coo_matrix((data, (row_ind, col_ind)),shape=(len(theta),len(Y)))\n\n # print(\"y_equal_label = \",y_equal_label.toarray() )\n\n term1 = np.array([]).astype(float)\n term1 = np.dot (\n y_equal_label.toarray().astype(float) - p , X\n )\n\n # print(\"term1 = \", term1)\n\n term2 = np.array([]).astype(float)\n term2 = lambda_factor * theta\n\n # print(\"term2 = \", term2)\n\n derivative = np.array([]).astype(float)\n derivative = term1 * (-1) / (temp_parameter *len(X)) + lambda_factor * term2\n \n # print(\"derivative = \", derivative)\n\n updated_theta = np.array([]).astype(float)\n updated_theta = theta - alpha * derivative \n\n\n return updated_theta#, \"Este es mi output\"\n # raise NotImplementedError", "title": "" }, { "docid": "c5aa21a30400d72c5953e83b607bc41d", "score": "0.49900797", "text": "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n threshold = group['threshold']\n lr = group['lr']\n \n device = group['params'][0].grad.device\n grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach()).to(device) \\\n for p in group['params'] if p.grad is not None]))\n if grad_norm.isnan() or grad_norm.isinf():\n raise RuntimeError(f'The total norm for gradients from is non-finite, so it cannot be clipped.')\n\n if grad_norm > threshold:\n actual_lr = lr * threshold / (grad_norm + 1e-8)\n else:\n actual_lr = lr\n \n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(p.data, alpha=weight_decay)\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(d_p, alpha=actual_lr)\n p.data.add_(-buf)\n\n return loss", "title": "" }, { "docid": "fe0eed9a27ce50d493baabc187d16361", "score": "0.4989592", "text": "def compute_gradient_penalty(D, real_samples, fake_samples):\n\n # Random weight term for interpolation between real and fake samples\n alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))\n # Get random interpolation between real and fake samples\n interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\n d_interpolates = D(interpolates).reshape(-1, 1)\n fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)\n # Get gradient w.r.t. 
interpolates\n gradients = autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.size(0), -1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n return gradient_penalty", "title": "" }, { "docid": "656f848354a5f71361fb277ae72896fb", "score": "0.49882525", "text": "def gradient(C0,x,y):\r\n #concentration = C0/(4*np.pi*d*D*duT)*np.exp(-(x-dis2targ)**2/(400*D*duT*50)) #depends on diffusion conditions along x-axis\r\n concentration = C0 * np.exp(-(x-dis2targ)**2/(4*Dt)) #/(4*np.pi*Dt)**0.5\r\n return concentration", "title": "" }, { "docid": "64bda9d814679b4fd9497f6ad66b3088", "score": "0.49871475", "text": "def cell_gradient_BC(self):\n if getattr(self, \"_cell_gradient_BC\", None) is None:\n BC = self.set_cell_gradient_BC(self._cell_gradient_BC_list)\n n = self.vnC\n if self.dim == 1:\n G = _ddxCellGradBC(n[0], BC[0])\n elif self.dim == 2:\n G1 = sp.kron(speye(n[1]), _ddxCellGradBC(n[0], BC[0]))\n G2 = sp.kron(_ddxCellGradBC(n[1], BC[1]), speye(n[0]))\n G = sp.block_diag((G1, G2), format=\"csr\")\n elif self.dim == 3:\n G1 = kron3(speye(n[2]), speye(n[1]), _ddxCellGradBC(n[0], BC[0]))\n G2 = kron3(speye(n[2]), _ddxCellGradBC(n[1], BC[1]), speye(n[0]))\n G3 = kron3(_ddxCellGradBC(n[2], BC[2]), speye(n[1]), speye(n[0]))\n G = sp.block_diag((G1, G2, G3), format=\"csr\")\n # Compute areas of cell faces & volumes\n S = self.face_areas\n V = (\n self.aveCC2F * self.cell_volumes\n ) # Average volume between adjacent cells\n self._cell_gradient_BC = sdiag(S / V) * G\n return self._cell_gradient_BC", "title": "" }, { "docid": "6024f484385b52ef55c712ddf276848c", "score": "0.498429", "text": "def CG(b, x0, TOLERANCE=1.0e-10, MAX_ITERATIONS=100):\r\n\r\n # Initializations\r\n x = x0\r\n d = -tf.gradient(Loss, x)\r\n r0 = b - finite_differences(x,d)\r\n\r\n\r\n # Start iterations\r\n for i in range(MAX_ITERATIONS):\r\n a = float(np.dot(d.T, r0) / np.dot(d.T, finite_differences(x, d)))\r\n x = x - a * gradient(x)\r\n\r\n ri = r0 - np.dot(finite_differences(x, d), d)\r\n\r\n # print i, np.linalg.norm(ri)\r\n # Checks stopping condition\r\n if np.linalg.norm(ri) < TOLERANCE:\r\n return x\r\n\r\n # Otherwise go on to find new direction\r\n b = float(np.dot(gradient(x).T, finite_differences(x, d)))\r\n d = - gradient(x) + b * d\r\n r0 = ri\r\n return x", "title": "" }, { "docid": "13869890bc96c80cf348ea4b1d2e17e6", "score": "0.49820924", "text": "def conditional_predictor_update_fn(rng, state, x, t, labels):\n score_fn = mutils.get_score_fn(sde, score_model, state.params_ema, state.model_state, train=False,\n continuous=continuous)\n\n def total_grad_fn(x, t):\n ve_noise_scale = sde.marginal_prob(x, t)[1]\n return score_fn(x, t) + classifier_grad_fn(x, ve_noise_scale, labels)\n\n if predictor is None:\n predictor_obj = NonePredictor(sde, total_grad_fn, probability_flow)\n else:\n predictor_obj = predictor(sde, total_grad_fn, probability_flow)\n return predictor_obj.update_fn(rng, x, t)", "title": "" }, { "docid": "9df0f4f63c89d6f519ac3c7557073791", "score": "0.49804133", "text": "def gradient(X, Y, Y_predicted):\n Error = Y_predicted - Y # Calculate error\n dW = np.dot(X.T, Error) # Compute derivative of error w.r.t weight, i.e., (target - output) * x\n db = np.sum(Error) # Compute derivative of error w.r.t bias\n return dW, db # return derivative of weight and bias", "title": "" }, { "docid": "a11a27d91183dc76912739a7982ded3d", "score": "0.49787378", 
"text": "def evaluate_gradient(self, evaluation_points, property_array):\n if property_array.shape[0] != self.n_nodes:\n logger.error(\"Property array does not match grid\")\n raise ValueError(\"cannot assign {} vlaues to array of shape {}\".format(\n property_array.shape[0], self.n_nodes))\n \n idc, inside = self.position_to_cell_corners(evaluation_points)\n T = np.zeros((idc.shape[0], 3, 8))\n T[inside, :, :] = self.get_element_gradient_for_location(evaluation_points[inside, :])[1]\n # indices = np.array([self.position_to_cell_index(evaluation_points)])\n # idc = self.global_indicies(indices.swapaxes(0,1))\n # print(idc)\n T[inside, 0, :] *= property_array[idc[inside, :]]\n T[inside, 1, :] *= property_array[idc[inside, :]]\n T[inside, 2, :] *= property_array[idc[inside, :]]\n return np.array(\n [np.sum(T[:, 0, :], axis=1), np.sum(T[:, 1, :], axis=1) ,\n np.sum(T[:, 2, :], axis=1) ]).T", "title": "" }, { "docid": "66298dbd5963d8e321f1f2c765037516", "score": "0.497799", "text": "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n m = len(values)\n cost_history = []\n\n X = np.matrix(features)\n Y = np.matrix(values).transpose()\n for i in range(num_iterations):\n TH = np.matrix(theta).transpose()\n XTY = X*TH-Y\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n TH = TH - alpha/m * (X.transpose()*XTY)\n theta = np.array(TH.transpose())[0]\n return theta, pd.Series(cost_history)", "title": "" }, { "docid": "6c690b72f44c533cdd6a01526716c14f", "score": "0.49740028", "text": "def gradient_check_internal(f, x, h=1e-4, threshold=1e-5):\n rndstate = random.getstate()\n random.setstate(rndstate)\n np.random.seed(0)\n fx, grad = f(x) # Evaluate function value at original point\n\n # Iterate over all indices in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n result = []\n while not it.finished:\n ix = it.multi_index\n\n x_step = x.copy()\n x_step[ix] += h / 2\n random.setstate(rndstate)\n np.random.seed(0)\n fx_pos, _ = f(x_step)\n\n x_step = x.copy()\n x_step[ix] -= h / 2\n random.setstate(rndstate)\n np.random.seed(0)\n fx_neg, _ = f(x_step)\n\n numgrad = (fx_pos - fx_neg) / h\n\n reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))\n\n if reldiff > threshold:\n result.append((ix, grad[ix], numgrad))\n\n it.iternext() # Step to next dimension\n\n return result", "title": "" }, { "docid": "c50502964387b0c3def84180d9c11b65", "score": "0.49720913", "text": "def hyperparam_gradients(self, batch):\n X, y, side_info = batch\n batch_factor = self.n / np.shape(X)[-1]\n\n m_u, S_u = [self.dist_params['qu'][key] for key in ['mean', 'var']]\n m_w = self.get_expected_w()\n\n def get_derivations(h):\n \"\"\"\n This debug function numerically derivates the ELBO w.r.t. the hyperparameters\n with (ELBO(x + h) - ELBO(x))/h, where x is the hyperparameter\n :param h: change for the derivation\n :return: gradients w.r.t. 
the h.params\n \"\"\"\n old_kp = self.kernel_params\n old_kernel = self.kernel\n res = {}\n for key in sorted(self.kernel_params.keys()) + [None]:\n if key is not None and self.kernel_params[key] is None:\n # This means that this Kernel is not used\n continue\n new_kps = old_kp.copy()\n\n if key is not None:\n new_kps[key] += h\n else:\n key = 'old'\n self.kernel_params = new_kps\n self.kernel = kernels.Kernel_Handler(**new_kps)\n self.update_kernel()\n c = {}\n if self.omega_gradients:\n self.local_params = self.update_local_params(X, y, side_info=side_info)\n c['dc'] = np.array(self.local_params)\n Theta = np.diag(self.theta(c['dc']))\n elbo = self.calc_ELBO(X, y, side_info)\n c['L'] = sum(elbo)\n c['L1'] = elbo[0]\n c['KL_u'] = elbo[1]\n c['KL_w'] = elbo[2]\n c['Kmm'] = self.Kmm\n c['Knm'] = self.get_Kmn(X, side_info).T\n if side_info is None:\n c['Knn'] = self.kernel(X.T)\n else:\n c['Knn'] = self.kernel(X.T, side_X=side_info.T)\n c['Kmminv'] = self.Kmm_inv\n c['KnmKmminv'] = np.dot(c['Knm'], self.Kmm_inv)\n c['KnmKmminvKmn'] = multi_dot([c['Knm'], self.Kmm_inv, c['Knm'].T])\n c['ykmu'] = multi_dot([y.T, c['Knm'], self.Kmm_inv, m_u])\n c['trthk'] = np.trace(multi_dot([Theta, c['Knn'] - multi_dot([c['Knm'], self.Kmm_inv, c['Knm'].T])]))\n c['trkmntknmsu'] = np.trace(multi_dot([self.Kmm_inv, c['Knm'].T, Theta, c['Knm'], self.Kmm_inv, S_u]))\n c['mk+xwth'] = self.sh.multi_dot([(multi_dot([c['Knm'], self.Kmm_inv, m_u]) + self.sh.dot(X.T, m_w)).T,\n Theta,\n self.sh.multi_dot([c['Knm'], self.Kmm_inv, m_u]) + self.sh.dot(X.T,\n m_w)])\n c['trks'] = np.trace(np.dot(self.Kmm_inv, S_u))\n c['mkm'] = multi_dot([m_u.T, self.Kmm_inv, m_u])\n c['logk'] = self.sh.logdet(self.Kmm)\n c['theta_diag'] = np.array(self.theta(c['dc']))\n c['KL_omega'] = np.array((sum(\n [ci / 4 * np.tanh(ci / 2) for i, ci in enumerate(c['dc'])]))\n - sum([core.math.logcosh(ci / 2) for ci in c['dc']]))\n\n res[key] = c\n self.kernel = old_kernel\n return {var_key: {key: (res[var_key][key] - res['old'][key]) / h for key in res['old']} for var_key in res}\n\n cs = self.local_params\n Theta = self.sh.diag(self.theta(cs))\n\n #Getting the kernel gradients\n if side_info is not None:\n dKmms = self.kernel.gradients(self.Z.T, side_X=self.Z_side.T)\n dKnms = self.kernel.gradients(X.T, self.Z.T, side_X=side_info.T, side_Y=self.Z_side.T)\n dKnns = self.kernel.gradients(X.T, side_X=side_info.T)\n Kmn_prime, Kmn = self.get_Kmn_prime_Kmn(X, side_info=side_info)\n else:\n dKmms = self.kernel.gradients(self.Z.T)\n dKnms = self.kernel.gradients(X.T, self.Z.T)\n dKnns = self.kernel.gradients(X.T)\n Kmn_prime, Kmn = self.get_Kmn_prime_Kmn(X)\n dkmminvs = {key: - multi_dot([self.Kmm_inv, dKmms[key], self.Kmm_inv]) for key in dKnns}\n dknmkmminvs = {\n key: np.dot(dKnms[key], self.Kmm_inv) - multi_dot([Kmn.T, self.Kmm_inv, dKmms[key], self.Kmm_inv])\n for key in dKnns}\n dknmkmminvkmns = {key: 2 * multi_dot([dKnms[key], self.Kmm_inv, Kmn])\n - multi_dot([Kmn.T, self.Kmm_inv, dKmms[key], self.Kmm_inv, Kmn])\n for key in dKnns}\n tanhs = [np.tanh(c / 2) for c in cs]\n if self.omega_gradients:\n dcs = self.local_parameter_gradients(dKnms, dkmminvs,\n {key: np.diag(dKnns[key]) - np.diag(dknmkmminvkmns[key]) for key in\n dKnns},\n X, Kmn, Kmn_prime=Kmn_prime)\n\n dtheta_diag = self.theta_gradients(cs)\n dTheta = {key: np.asarray([dcs[key][i] * dtheta_diag[i] for i in range(len(cs))]) for key in dKnns}\n K_tilde = self.get_K_tilde(X, Kmn, side_info=side_info)\n\n derivations = {}\n c_diffs = {}\n m_eps = multi_dot([Kmn.T, self.Kmm_inv, m_u])\n Xw = 
self.sh.dot(X.T, m_w)\n xwkmu = m_eps + Xw\n\n # Calculating the gradients, w.r.t. the different hyperparameters\n for key in dKnns:\n if self.epoch_count < self.ls_head_start and key in ['rbf', 'lin']:\n continue\n\n if core.math.debug():\n c = {}\n c['ykmu'] = multi_dot([y.T, dknmkmminvs[key], m_u])\n c['trthk'] = self.sh.trace(self.sh.dot(Theta, dKnns[key])) \\\n - self.sh.trace(self.sh.dot(Theta, dknmkmminvkmns[key]))\n c['trkmntknmsu'] = self.sh.trace(2 * self.sh.multi_dot([Kmn_prime, Theta, dknmkmminvs[key], S_u]))\n c['mk+xwth'] = 2 * self.sh.to_dense(self.sh.multi_dot([xwkmu.T,\n Theta, dknmkmminvs[key], m_u]))\n c['KL_omega'] = 0\n if self.omega_gradients:\n c['trthk'] += self.sh.trace(self.sh.multiply(dTheta[key], K_tilde))\n c['trkmntknmsu'] += self.sh.trace(\n self.sh.multi_dot([self.sh.multiply(Kmn_prime, dTheta[key]), Kmn_prime.T, S_u]))\n c['mk+xwth'] += self.sh.to_dense(self.sh.dot(self.sh.multiply(xwkmu.T, dTheta[key]), xwkmu))\n c['KL_omega'] = -sum([dcs[key][i] * (.25 * tanhs[i] * (1 + ci / 2 * tanhs[i]) - ci / 8)\n for i, ci in enumerate(cs)])\n\n c['trks'] = np.trace(multi_dot([dkmminvs[key], S_u]))\n c['mkm'] = multi_dot([m_u.T, dkmminvs[key], m_u])\n c['logk'] = np.trace(np.dot(self.Kmm_inv, dKmms[key]))\n\n derivations[key] = multi_dot([y.T, dknmkmminvs[key], m_u])\n derivations[key] -= self.sh.trace_dot(Theta, dKnns[key])\n derivations[key] += self.sh.trace_dot(Theta, dknmkmminvkmns[key])\n\n derivations[key] -= self.sh.trace(2 * self.sh.multi_dot([Kmn_prime, Theta, dknmkmminvs[key], S_u]))\n derivations[key] -= (2 * self.sh.to_dense(self.sh.multi_dot([xwkmu.T,\n Theta, dknmkmminvs[key], m_u])))\n if self.omega_gradients:\n derivations[key] -= self.sh.trace(\n self.sh.multi_dot([np.multiply(Kmn_prime, dTheta[key]), Kmn_prime.T, S_u]))\n derivations[key] -= np.sum(np.multiply(dTheta[key], np.diag(K_tilde)))\n derivations[key] -= self.sh.to_dense(self.sh.dot(self.sh.multiply(m_eps.T, dTheta[key]), m_eps))\n derivations[key] -= 2 * self.sh.to_dense(self.sh.dot(self.sh.multiply(m_eps.T, dTheta[key]), Xw))\n derivations[key] -= self.get_expected_wXThetaXw(Xw, X, dTheta[key])\n derivations[key] *= 1 / 2\n if self.omega_gradients:\n derivations[key] -= sum([dcs[key][i] * (.25 * tanhs[i] * (1 + ci / 2 * tanhs[i]) - ci / 8)\n for i, ci in enumerate(cs)])\n\n derivations[key] *= batch_factor\n if core.math.debug(): c['L1'] = derivations[key].copy()\n derivations[key] -= 1 / 2 * self.sh.trace_dot(dkmminvs[key], S_u)\n derivations[key] -= 1 / 2 * multi_dot([m_u.T, dkmminvs[key], m_u])\n derivations[key] -= 1 / 2 * self.sh.trace_dot(self.Kmm_inv, dKmms[key])\n derivations[key] = self.sh.to_dense(derivations[key])\n if core.math.debug():\n c['KL_u'] = derivations[key] - c['L1']\n c_diffs[key] = c\n\n if core.math.debug():\n # Checking if the calculated and numerical gradients are similar\n h = 1E-3\n real_devt = get_derivations(h)\n print()\n for key in c_diffs:\n print(key, real_devt[key]['L'], derivations[key],\n '--------------------------------', derivations[key] / real_devt[key]['L'], 'diff:',\n abs(derivations[key] - real_devt[key]['L']))\n if not 0.97 < derivations[key] / real_devt[key]['L'] < 1.03 \\\n and abs(derivations[key] - real_devt[key]['L']) > 1E-4:\n print('AYAYAY calc -real - calc/real')\n for key2 in c_diffs[key]:\n if not (abs(real_devt[key][key2] - c_diffs[key][key2]) < 1E-10\n or 0.95 < real_devt[key][key2] / c_diffs[key][key2] < 1.05):\n print(key2, real_devt[key][key2], c_diffs[key][key2], c_diffs[key][key2] /\n self.sh.to_dense(real_devt[key][key2]))\n\n 
return derivations", "title": "" }, { "docid": "972e69d8dd2bab3cecf7147b30feaec4", "score": "0.49636534", "text": "def _gradient_clipping(self, grad, var, non_zero, eps=1e-3, threshold=1e-2):\n\n norm_grad = array_ops.ones_like(var) * non_zero(self._axis_aware_euclidian_norm(grad))\n norm_var = math_ops.maximum(self._axis_aware_euclidian_norm(var), eps)\n dynamic_threshold = threshold * (norm_var / norm_grad)\n return array_ops.where(dynamic_threshold < 1., grad * dynamic_threshold, grad)", "title": "" }, { "docid": "671f29a0aa721b80af35f52b1629b216", "score": "0.4958247", "text": "def get_activations_and_gradient(self, z):\n a = self.activate(z)\n return a, np.multiply(a, 1 - a)", "title": "" }, { "docid": "a1e1dea42461e12a2d26f1ccf9d469cd", "score": "0.495605", "text": "def do_reinforce_modified_policy_gradient_bias_subtracted(self, iterations=200, batch_size=1500, gamma=0.9,\n learning_rate=3e-4, init_variance=0.01):\n average_returns = [0] * iterations\n optimizer = torch.optim.Adam(self.policy_network.parameters(), lr=learning_rate)\n\n variance = torch.nn.parameter.Parameter(torch.ones(2) * init_variance) # for two dimensions\n\n b = 0 # Subtraction co-efficient\n std_dev_b = 1.0 # Division co-efficient\n\n for iteration in range(iterations): # Updated once per loop\n net_reward = 0.0\n episodes = 0\n loss = 0.0\n step = 0\n prev_state = self.env.reset()\n\n G_tau = []\n current_trajectory = {\"log_probabilities\": [], \"rewards\": []}\n\n while step < batch_size: # collection of data for batch size (say 500 steps)\n\n mean = self.policy_network(prev_state) # Forward pass\n\n assert mean.shape == (2,), f\"{mean.shape}\"\n assert -2.00 <= sum(mean) <= 2.00, f\"{sum(mean)}\"\n\n covariance = torch.diag(abs(variance) + 1e-3 * torch.ones(2))\n # to avoid covariance going to zero! 
or negative\n probable_actions = MultivariateNormal(loc=mean, covariance_matrix=covariance)\n action = probable_actions.sample() # Choose a 2-D vector for action\n\n current_state, reward, done, _ = self.env.step(action)\n\n current_trajectory[\"log_probabilities\"].append(probable_actions.log_prob(action))\n current_trajectory[\"rewards\"].append(reward)\n\n prev_state = current_state\n\n if done or step == batch_size - 1: # episode terminated\n T = len(current_trajectory[\"rewards\"]) # Total steps\n # For calculating b\n for t in range(T):\n discounted_reward = gamma ** (T - t) * current_trajectory[\"rewards\"][t]\n G_tau.append(discounted_reward)\n\n discounted_reward = sum(current_trajectory[\"rewards\"][t] * (gamma ** t) for t in\n range(len(current_trajectory[\"rewards\"])))\n\n objective_over_all_episodes = 0.0\n for t in range(T):\n summed_future_reward = 0.0\n for t_bar in range(t, T):\n summed_future_reward += (gamma ** (t_bar - t)) * (current_trajectory[\"rewards\"][t_bar])\n objective_over_all_episodes += current_trajectory[\"log_probabilities\"][t] * ((summed_future_reward - b)/std_dev_b)\n\n loss += objective_over_all_episodes\n net_reward += discounted_reward\n current_trajectory = {\"log_probabilities\": [],\n \"rewards\": []} # Resetting for collecting next episode data\n prev_state = self.env.reset()\n episodes += 1\n\n step += 1\n\n # End of say, 500 steps, update policy\n optimizer.zero_grad() # Clear gradients, to prevent accumulation\n loss = -1 * loss * (1 / episodes)\n # Objective function, -1 => Gradient Ascent (Torch does descent by default)\n loss.backward() # Calculate gradient\n optimizer.step() # Update neural net (aka policy)\n average_returns[iteration] = net_reward / episodes # For plotting later\n\n assert len(G_tau) == batch_size, f\"{batch_size, len(G_tau)}\"\n b = sum(G_tau) / batch_size\n std_dev_b = np.std(G_tau)\n\n # # DEBUG\n print(f\"Iteration: {iteration}, \"\n f\"Average Return : {average_returns[iteration]}, \"\n f\"Episodes: {episodes}\")\n print(f\"Avg loss : {loss}\")\n\n self.plot_average_returns(average_returns, title=f\"Average Returns | Batch Size: {batch_size}\")", "title": "" } ]
32f78e2a0364d8bb29944dbe2da16fc5
Tracker for next neuron id in the given genome
[ { "docid": "dedcb910b62f8b3963721ac1eaea7b52", "score": "0.8468444", "text": "def next_nid(genome):\n nid = genome['last_neuron'] + 1\n genome['last_neuron'] = nid\n return nid", "title": "" } ]
[ { "docid": "fcf8bdb7177722960dff82a8755c738f", "score": "0.59789985", "text": "def next_id(self):\n nid = self._next_id\n self._next_id = self._next_id + 1\n return nid", "title": "" }, { "docid": "a3d6cfa9e8896e9291b73b372ea92d75", "score": "0.59274477", "text": "def nextId(self):\r\n \r\n pass", "title": "" }, { "docid": "5432564591795c1acdb3b5fadad12f5f", "score": "0.58401716", "text": "def get_next_unique_id(self) -> str:", "title": "" }, { "docid": "5a201d394e803f8d5555fefda93fcf8e", "score": "0.5815166", "text": "def get_next_id(self) -> int:\r\n pass", "title": "" }, { "docid": "711a98274259de153db0e69fbce70661", "score": "0.58012456", "text": "def next_mcid(self):\n\tresult = self.next()\n\n\tif self.count == 0:\n\t self.save()\n\t self.count = self.iterations\n\telse:\n\t self.count -= 1\n\n\treturn result", "title": "" }, { "docid": "94646c06b3aa9b19d8b7d24cd3e875c4", "score": "0.5765719", "text": "def _get_next_id(self):\n if self.sensors:\n next_id = max(self.sensors.keys()) + 1\n else:\n next_id = 1\n if next_id <= 254:\n return next_id", "title": "" }, { "docid": "2647d544e0f3efa7aa62c2668a674069", "score": "0.5743264", "text": "def next_generation(self):\n\t\tchild_pop = [copy.deepcopy(self.elite_ind)]\n\t\t\n\t\t# Perform tournament selection.\n\t\tfor i in range(len(self.genomes)-1):\n\t\t\ttourn = random.sample(self.genomes,2)\n\t\t\tif tourn[0]['fitness'] < tourn[1]['fitness']:\n\t\t\t\tchild_pop.append(copy.deepcopy(tourn[0]))\n\t\t\telse:\n\t\t\t\tchild_pop.append(copy.deepcopy(tourn[1]))\n\t\t\tchild_pop[-1]['id'] = self.child_id\n\t\t\tself.child_id += 1\n\n\t\t\n\t\t#Crossover\n\t\tchild_pop = single_point_crossover(child_pop, cross_over_prob)\n\n\t\t# Mutate genes in the child genomes.\n\t\tchild_pop = random_value_mutation(child_pop, mutation_prob)\n\t\t\n\n\t\tself.genomes = child_pop\n\t\tself.id_map = {k:v for k,v in zip([x['id'] for x in self.genomes],[i for i in range(self.pop_size)])}", "title": "" }, { "docid": "6f38b7e6e15a59f8cf539fcbbeeafe5a", "score": "0.5736271", "text": "def _get_next_id(self):\n # type: () -> int\n ret = self._next_id\n self._next_id = ret + 1\n return ret", "title": "" }, { "docid": "37634bb616b66f045df9680feeae7e15", "score": "0.5727054", "text": "def _get_next_id(self) -> int:\n ret = self._next_id\n self._next_id = ret + 1\n return ret", "title": "" }, { "docid": "40a299ce12e16024815467a07dfb0c1b", "score": "0.5615426", "text": "def _next_id():\n global iid\n with iid_clock:\n result = iid\n iid = iid + 1\n return result", "title": "" }, { "docid": "56aa3c0e549c48cbda6d3f2f8fbffd99", "score": "0.556686", "text": "def _next_id(self):\n self._id += 1\n return self._id", "title": "" }, { "docid": "85adb3359737c232f867186583565de6", "score": "0.55654854", "text": "def get_next_id(self):\n return None", "title": "" }, { "docid": "85adb3359737c232f867186583565de6", "score": "0.55654854", "text": "def get_next_id(self):\n return None", "title": "" }, { "docid": "99f4a68fa89f59e504394e9a4acedeff", "score": "0.55591255", "text": "def get_next_individual(self):\n pass", "title": "" }, { "docid": "4dae28d305f3ef0ea65bae77858d9abb", "score": "0.5550575", "text": "def get_new_network_id(db):", "title": "" }, { "docid": "280681a733c378915ac4d33e3ebd1648", "score": "0.5548189", "text": "def next_id(cls, sess):\n current = sess.query(func.max(cls.id)).one()[0] or -1\n return current + 1", "title": "" }, { "docid": "13de5cd23b40fdb5369784b153b638ca", "score": "0.55082417", "text": "def init_individual():\n genome = create_genome()\n\n # Create 
i/p and o/p neurons\n nid = 0\n for i in range(INPUTS):\n neuron = create_neuron(layer=Layer.INPUT)\n neuron['id'] = nid\n genome['neurons'][nid] = neuron\n genome['ip_neurons'].append(nid)\n nid += 1\n\n for i in range(OUTPUTS):\n neuron = create_neuron(layer=Layer.OUTPUT)\n neuron['id'] = nid\n genome['neurons'][nid] = neuron\n genome['op_neurons'].append(nid)\n nid += 1\n\n for i in range(BIAS):\n neuron = create_neuron(layer=Layer.BIAS)\n neuron['id'] = nid\n genome['neurons'][nid] = neuron\n genome['bias_neurons'].append(nid)\n nid += 1\n\n genome['last_neuron'] = nid - 1\n # Create a gene for every ip, op pair\n innov_no = 0\n for i in range(INPUTS):\n for j in range(OUTPUTS):\n gene = create_gene(innov_no=innov_no)\n gene['ip'] = genome['ip_neurons'][i]\n gene['op'] = genome['op_neurons'][j]\n gene['wt'] = random.random() * 2 - 1\n genome['genes'][innov_no] = gene\n #genome['genes'][(gene['ip'], gene['op'])] = gene\n innov_no += 1\n\n for i in range(BIAS):\n for j in range(OUTPUTS):\n gene = create_gene(innov_no=innov_no)\n gene['ip'] = genome['bias_neurons'][i]\n gene['op'] = genome['op_neurons'][j]\n gene['wt'] = random.random() * 2 - 1\n genome['genes'][innov_no] = gene\n #genome['genes'][(gene['ip'], gene['op'])] = gene\n innov_no += 1\n\n return genome", "title": "" }, { "docid": "6ab163bda76c30d4fb577da427063129", "score": "0.5506506", "text": "def next_uid(self) -> int:\n ...", "title": "" }, { "docid": "04dcabceebe67b1184031249ec9bf6fe", "score": "0.55025584", "text": "def assign_id(self):\n ev_data, pie, prev_dict = self.reset_env()\n for frame_id in range(1+self.stepSize, self.ds.image_count+1, self.stepSize): # Upto last but one\n image_id = str(frame_id).zfill(6)\n cur_dict = ev_data[image_id] # Current frame is detection, previous frame is groundtruth\n cur_dict = self.two_frame_reid(pie, cur_dict, prev_dict)\n ev_data[image_id] = cur_dict\n self.save_frame(image_id, cur_dict)\n # for the next iteration\n prev_dict = cur_dict\n # Save the updated ev\n #self.visualize_reid(ev_data)", "title": "" }, { "docid": "4fab2187bcf913e5a373a86b857b6013", "score": "0.54925704", "text": "def _nextId(self):\n res = self._nextIdInt\n self._nextIdInt += 1\n return res", "title": "" }, { "docid": "78792f9e9ec173a6dc2d13b056224ec6", "score": "0.54902375", "text": "def get_next_id(self):\n self.id_ctr += 1\n return self.id_ctr", "title": "" }, { "docid": "45fb7a4beff8057d754747832d6a843c", "score": "0.5488286", "text": "def get_next_id(self):\n if len(self.sensors):\n next_id = max(self.sensors.keys()) + 1\n else:\n next_id = 1\n if next_id <= 254:\n return next_id\n return None", "title": "" }, { "docid": "1e1e282ff41437918e540a75aac6ac3c", "score": "0.5481234", "text": "def get_next_id(self):\n self.next_id += 1\n return self.next_id - 1", "title": "" }, { "docid": "0157f55109d7523a4b7573bf07dbac57", "score": "0.54343045", "text": "def _get_node_id(self):\n pointer = self.last_node_id\n self.last_node_id += 1\n return pointer", "title": "" }, { "docid": "bc1be17843be1dccad868dc950fa7d99", "score": "0.5433168", "text": "def _get_next_id(self):\n if self.sensors:\n next_id = max(self.sensors.keys()) + 1\n else:\n next_id = 1\n if next_id <= self.const.MAX_NODE_ID:\n return next_id\n return None", "title": "" }, { "docid": "e9d4cc5e07b1fefbecae8c2e7fa22a7e", "score": "0.5424672", "text": "def next_id(self):\n return (max(self.instances) + 1) if self.instances else 1", "title": "" }, { "docid": "b5960fcb28ee9baf6e6d20b1175fd29c", "score": "0.5406518", "text": "def get_next_id(cls):\n 
cls.next_id += 1\n return cls.next_id", "title": "" }, { "docid": "22b6e640104f2c81be927c7c4cf7b0b1", "score": "0.53898156", "text": "def advance_next_gen(self) -> None:\n leader_board = self.get_leader_board()\n breeders = [self.players[int(i)] for i in leader_board[\"model\"]]\n players = self.breed(breeders)\n self.players = players\n\n # Metadata\n self.gen_number += 1", "title": "" }, { "docid": "03cf49241e23b0384dc2c7a02066c22a", "score": "0.5383871", "text": "def _next_id(self):\n DbMO._ID += 1\n return DbMO._ID", "title": "" }, { "docid": "d20f8767c6444e5f810dc16287d39715", "score": "0.5378135", "text": "def get_next_id(cls):\n cls.current_id += 1\n return cls.current_id", "title": "" }, { "docid": "9ce299df45bb495ff00db4703239920b", "score": "0.5376336", "text": "def get_next_id(self):\n\n self.current_id += 1\n\n return self.current_id", "title": "" }, { "docid": "f8598f6b6218c3b380d8cdf211c7fc8f", "score": "0.5354867", "text": "def node_id_get():\n NMLayer1.nodeID += 1\n return NMLayer1.nodeID", "title": "" }, { "docid": "179595cbf2b794c8a2d5cd4ed3059107", "score": "0.532394", "text": "def get_next_announcement_id(self, origin_node_id):\n logger.debug(\"Getting next announcement ID.\")\n\n # Let's not hit the disk if we don't have to\n if self.next_picture_id is not None:\n logger.debug(\"Updating announcement ID in memory.\")\n rtn = self.next_announcement_id\n self.next_announcement_id += 1\n return \"%d-%d\"%(rtn, origin_node_id)\n\n logger.debug(\"Updating announcement ID from disk.\")\n # Query the database to see what the last id was?\n SQL = \"SELECT announcement_id FROM db_announcements WHERE \" \\\n \"origin_node_id = ?\"\n results = self._sql(SQL, [origin_node_id]).fetchall()\n\n # If it's empty, this is a new database\n if not results:\n self.next_announcement_id = 2\n return \"1-%d\"%origin_node_id\n else:\n id_list = [int(r[0].split(\"-\")[0]) for r in results]\n id_list.sort(reverse=True)\n current_id = id_list[0]\n rtn = current_id+1\n self.next_announcement_id = rtn+1\n return \"%d-%d\"%(rtn, origin_node_id)", "title": "" }, { "docid": "a5d7b171a0ccd22d2f731fbf440dce62", "score": "0.5291159", "text": "def _get_next_id(self):\n\n current_id = self._last_id + 1\n self._last_id = current_id\n\n return current_id", "title": "" }, { "docid": "bb473350bb521071a5773b184cb0ae7c", "score": "0.52792424", "text": "def step(self):\n _neighbor_iter = self.model.grid.iter_neighbors((self._row, self._col), True)\n neighbors_opinion = Counter(n.get_state() for n in _neighbor_iter)\n # Following is a a tuple (attribute, occurrences)\n polled_opinions = neighbors_opinion.most_common()\n tied_opinions = []\n for neighbor in polled_opinions:\n if neighbor[1] == polled_opinions[0][1]:\n tied_opinions.append(neighbor)\n\n self._next_state = self.random.choice(tied_opinions)[0]", "title": "" }, { "docid": "0aeebf2c450d1bdc2a5f4de753136927", "score": "0.5275587", "text": "def graph_id_get():\n NMLayer1.graphID += 1\n return NMLayer1.graphID", "title": "" }, { "docid": "fe1a070f7af027a79fc380cd57455364", "score": "0.5274608", "text": "def next_member(band):", "title": "" }, { "docid": "1b435f0ea70680e927323e8a8e52024c", "score": "0.52724135", "text": "def next_object_id():\n id = Transition._next_id\n Transition._next_id += 1\n return id", "title": "" }, { "docid": "2c14ccac2b4a55feb0ceba1ff506da38", "score": "0.5267359", "text": "def __next(self, number=1):\n idd = self.uqsetting.lastid + number\n self.uqsetting.lastid = idd\n return idd", "title": "" }, { "docid": 
"1e9e156b738ac594654b9a7b5feab3ca", "score": "0.52543956", "text": "def get_next_person_id(self, origin_node_id):\n logger.debug(\"Getting next person ID.\")\n \n # Let's not hit the disk if we don't have to\n if self.next_person_id is not None:\n logger.debug(\"Updating person ID in memory.\")\n rtn = self.next_person_id\n self.next_person_id += 1\n return \"%d-%d\"%(rtn, origin_node_id)\n \n logger.debug(\"Updating person ID from disk.\")\n \n SQL = \"SELECT person_id FROM db_person_bio WHERE origin_node_id = ?\"\n results = self._sql(SQL, [origin_node_id]).fetchall()\n \n # If it's empty, this is a new database\n if not results:\n self.next_person_id = 2\n return \"1-%d\"%origin_node_id\n else:\n id_list = [int(r[0].split(\"-\")[0]) for r in results]\n id_list.sort(reverse=True)\n current_id = id_list[0]\n rtn = current_id+1\n self.next_person_id = rtn+1\n return \"%d-%d\"%(rtn, origin_node_id)", "title": "" }, { "docid": "5f19933e84ff7e22fc91507cbd1c4d64", "score": "0.5252095", "text": "def _generate_vertex_id() -> int:\r\n result = DefaultMultiDirectedHyperGraph.next_id\r\n DefaultMultiDirectedHyperGraph.next_id += 1\r\n return result", "title": "" }, { "docid": "55155342d5584b32a3bab379c31f802b", "score": "0.52378625", "text": "def _get_next_id(self, event):\n return self.redis.incr(self.NEXT_ID_KEY % event)", "title": "" }, { "docid": "54dcdd5df73aa78b8e26ab740521c9db", "score": "0.52281964", "text": "def get_next_id(self, table, start_id=0):\n all_ids = self.lookup_all('id', table)\n next_id = start_id\n while next_id in all_ids:\n next_id += 1\n return next_id", "title": "" }, { "docid": "c94dcd05ec60aacd8976f504a9ba3203", "score": "0.52199024", "text": "def update_identity_pool(self):\n for camID, stracker_state in self.stracker_states.items():\n while len(stracker_state.tracklets_finished) > 0:\n tracklet = stracker_state.tracklets_finished.pop()\n if tracklet.globalID >= 0:\n # Tracklet has been re-identified\n identity = self.identity_pool[tracklet.globalID]\n\n # Update Identity features using this tracklet with randomly chosen features\n # recent_detections = tracklet.detection_history[-len(tracklet.feature_history):]\n # p = np.array([detection.score for _, detection in recent_detections])\n # p = p / np.sum(p)\n # inds = np.random.choice(range(len(tracklet.feature_history)), self.n_feature_samples, replace=False,\n # p=p)\n # new_features = np.stack([tracklet.feature_history[ind][1]['openreid'].copy() for ind in inds])\n\n # New update method: Shrink the feature history size by HCC\n if tracklet.sample_features is None:\n tracklet.update_sample_features()\n identity.features = np.vstack((identity.features, tracklet.sample_features))\n\n else:\n # Tracklet hasn't been re-identified yet, start a new identity\n if tracklet.time_lived >= self.n_feature_samples:\n self.max_id += 1\n tracklet.globalID = self.max_id\n self.identity_pool[self.max_id] = Identity(self.max_id, camID, tracklet.id, tracklet,\n n_feature_samples=self.n_feature_samples)\n self.logger.info('Initiating global ID #{} from tracklet (cam = {}, localID = {})'.format(\n self.max_id, camID, tracklet.id\n ))", "title": "" }, { "docid": "ea4626a2d452b395943068c33104d404", "score": "0.52120024", "text": "def next_reg_sequence(self):\r\n self.registration_sequence += 1\r\n return self.registration_sequence", "title": "" }, { "docid": "8ba9d425a2db5575bb097c4e1e4acc50", "score": "0.5199645", "text": "def next_job_id():\n return state.next_job_id()", "title": "" }, { "docid": "f2f7b97476b9561deb3ed77db3545d87", 
"score": "0.5194709", "text": "def _get_unique_node_id(self):\n self.__unique_node_counter += 1\n return f\"unique_{self.__unique_node_counter}\"", "title": "" }, { "docid": "bf579b94d1a8187ba1aa97b5db6aeaae", "score": "0.51920646", "text": "def nextcid(self):\n self.cidseq += 1\n return self.cidseq", "title": "" }, { "docid": "f9d3ba22ad1f2860a6293b32e2df36ab", "score": "0.5183412", "text": "def gen_id(self):\n self._id_counter += 1\n return self._id_counter", "title": "" }, { "docid": "3b68605cc45a6c8f40db4b24ac26dd0a", "score": "0.51816994", "text": "def run(self, id_to_follow):\n id_is_in_msg = True\n max_lost_iterations = 30\n curr_lost_iterations = 0\n dist_to_human = 0.6\n last_time = rospy.Time.now()\n while not rospy.is_shutdown() and id_is_in_msg:\n if self.new_msg:\n self.new_msg = False\n found_id_in_msg = False\n for person in self.last_pipol_msg.peopleSet:\n #print \"person is: \" + str(person)\n if person.targetId == id_to_follow:\n found_id_in_msg = True\n # Create nav goal to there if its the id we want\n# ps = createGoal(person.x, person.y, dist_to_human)\n ps = PoseStamped()\n ps.header.frame_id = \"base_link\"\n ps.header.stamp = rospy.Time.now()\n ps.pose.position.x = person.x - dist_to_human\n ps.pose.position.y = person.y\n # Compute orientation from the current robot pose to the point\n #ps.pose.orientation = compute_orientation(PoseStamped(), ps)\n ps.pose.orientation = Quaternion(0.0, 0.0, 0.0, 1.0)\n #Publish\n if rospy.Time.now() - last_time > rospy.Duration(1.0):\n self.move_base_pub.publish(ps)\n last_time = rospy.Time.now()\n# else:\n# print \"person.targetId != id_to_follow\"\n# print str(person.targetId) + \" != \" + str(id_to_follow)\n if not found_id_in_msg: # If we lost the id stop\n curr_lost_iterations += 1\n if curr_lost_iterations > max_lost_iterations:\n id_is_in_msg = False\n else:\n rospy.sleep(0.1)\n \n rospy.logerr(\"We lost the id! 
Stopping the tracking!\")", "title": "" }, { "docid": "aa552f86d18ae478191f448abe5a1326", "score": "0.51689225", "text": "def _next_street(self):\n if self._street == RIVER:\n self._current = TERMINAL_ID\n return\n\n for p in self._players:\n if p.has_folded or p.is_allin:\n self._current = TERMINAL_ID\n return\n\n # TODO add deal cards\n self._street += 1\n self._current = PLAYER_1_ID\n return", "title": "" }, { "docid": "f6f3b025b8c166d4e99fcd5adb8b026c", "score": "0.51657444", "text": "def get_node_id(self):\n with self.lock:\n node_id = self.next_id\n self.next_id += 1\n return node_id", "title": "" }, { "docid": "5b52a056d10c857aef389bf1f0b88e80", "score": "0.5165147", "text": "def tell(self, x, y):\n # print(f\"in tell {x}, {y}\")\n self.seen_count[x] += 1\n (snr,) = y\n self.seen_snr[x] = float(snr)\n if snr > 500:\n target = 10\n else:\n target = 1\n if self.seen_count[x] == 1:\n self.next_point = (x + 1) % self.num_samples\n else:\n self.next_point = self.agent(x, max(target - self.seen_count[x], 0))", "title": "" }, { "docid": "3d3e97d517d5f3393639a494a87af7c1", "score": "0.5161672", "text": "def _GetNextFlowId(self):\n ret = self._next_flow_id\n self._next_flow_id += 1\n return ret", "title": "" }, { "docid": "37666ccebcf2af0117760392db375409", "score": "0.5156098", "text": "def nextId(self):\n\tself.nextid += 1\n\treturn (\"_%d\" % self.nextid)", "title": "" }, { "docid": "32eacb0541e8aa5a03504e250c5d4f7f", "score": "0.51528364", "text": "def _get_gml_id(self):\n self._gml_id += 1\n return 'gml{0}'.format(self._gml_id)", "title": "" }, { "docid": "56d3e58a9e5cd9e22e5d3ff09bf6ebad", "score": "0.51495326", "text": "def _generateEventID ():\r\n global _nextEventID\r\n _nextEventID += 1\r\n return _nextEventID", "title": "" }, { "docid": "83ca8f62cd700450785e6ac89cfebd0b", "score": "0.51428884", "text": "def tag ( self ):\n\n self.id = Node.Ni\n Node.Ni += 1", "title": "" }, { "docid": "fb61f81742c00bacb7e014c3bc64b3fc", "score": "0.51401377", "text": "def test_next_id_property(stw, donor1, donor2):\n assert stw.next_id == 1\n\n \"\"\"given a blank donor collection\n when two donors are added and next_id called\n integer above max is returned\"\"\"\n stw.create_donor(donor2)\n assert stw.next_id == 1\n\n stw.create_donor(donor1)\n assert stw.next_id == 3", "title": "" }, { "docid": "fb61f81742c00bacb7e014c3bc64b3fc", "score": "0.51401377", "text": "def test_next_id_property(stw, donor1, donor2):\n assert stw.next_id == 1\n\n \"\"\"given a blank donor collection\n when two donors are added and next_id called\n integer above max is returned\"\"\"\n stw.create_donor(donor2)\n assert stw.next_id == 1\n\n stw.create_donor(donor1)\n assert stw.next_id == 3", "title": "" }, { "docid": "a96c9af225c3136f234a4392c3fab238", "score": "0.51401204", "text": "def _next_intermediate_id(self):\r\n # Note the format of intermediate uncertain-number UIDs \r\n # differs from elementary uncertain-number UIDs (changed in v1.3.7).\r\n # GTC code only tests UIDs for order and equality, it does not make \r\n # use of the internal structure of a UID. However, previously an elementary \r\n # UID and an intermediate UID could be equal, although they were intended \r\n # to identify different types of node object. 
Now intermediate UIDs are \r\n # 3-tuples and elementary UIDs 2-tuples, so there can be no confusion.\r\n self._intermediate_id_counter += 1\r\n return (\r\n self._id,\r\n self._intermediate_id_counter,\r\n 0\r\n )", "title": "" }, { "docid": "b45ca4ea35000ddd5f4531a00408b6e4", "score": "0.51346064", "text": "def next_neuron(self, simulation_step, area):\n\t\tself.i += 1\n\t\treturn TrivialNeuron(self.i - 1)", "title": "" }, { "docid": "c0eccd2428da7a89d470b499fb7bc226", "score": "0.5133675", "text": "def next_obs_ph(self):\n pass", "title": "" }, { "docid": "a5cb1171a68a34ccdeb6f9951a96d236", "score": "0.5128901", "text": "def increase_gen(self):\n\n self.current_gen += 1", "title": "" }, { "docid": "75bc712f5d6ccfd6b1d6ab3e0efcc687", "score": "0.51256377", "text": "def nextId(self):\n if self.__nextId__ is False:\n # Request the next urn\n self.__prev__, self.__nextId__ = self.getPrevNextUrn(reference=self.urn.reference)\n return self.__nextId__", "title": "" }, { "docid": "af3e1dc9407c2b953b4d0a7750aa723a", "score": "0.511992", "text": "async def _generate_next_id(self):\n async with self._lock:\n next_id = self._next_id\n self._next_id += 1\n return next_id", "title": "" }, { "docid": "211d168409c03e9a8a34c82d31630a67", "score": "0.51158303", "text": "def generatePointID(self):\n self.idGenerator += 1\n return self.idGenerator", "title": "" }, { "docid": "d44cc0aa03b18297f21e5ef69fa5f760", "score": "0.5108937", "text": "def __init__(self):\n self.current_id = 0\n self.current_gen = 1", "title": "" }, { "docid": "c5eb56d14addb404548c42491e8bc074", "score": "0.5105979", "text": "def fetch_next_txids():", "title": "" }, { "docid": "fee7d6608c83137df6f817eb95c05cf5", "score": "0.5101974", "text": "def acquire_ident(self):\n ident = self.next_id\n self.next_id += 2\n return ident", "title": "" }, { "docid": "8905666531a722e34d7ed16338db1fb8", "score": "0.50991255", "text": "def next_dataset(self):", "title": "" }, { "docid": "6e9ecd758ca49ec64f7de656f6bacd6a", "score": "0.50908786", "text": "def step(self,automaton):\n self.nextLocation = self.getBestLocation(automaton)", "title": "" }, { "docid": "90ca01cecc8bd87433929e40e538eff7", "score": "0.5081366", "text": "def _unique_id():\n result = _unique_id.next_id\n _unique_id.next_id += 1\n return result", "title": "" }, { "docid": "4a578f09cf120889702534894f5080e4", "score": "0.50787044", "text": "def getnextid(session, qry):\n nextid = qry.one()\n session.commit()\n data = nextid[0]\n if data is not None:\n return data + 1\n else:\n return 1", "title": "" }, { "docid": "53c120172dd5444f830fcbdfb8c1a3a2", "score": "0.50754595", "text": "def generate_new_trader_id(self):\n _id = self.get_num_records(self.table) + 1\n print(_id)\n return _id", "title": "" }, { "docid": "1cf44098498cad7fdad42786e09d57de", "score": "0.50678444", "text": "def next(self, session_id):\n \n self.dbc.find_update_increment(1, session_id)", "title": "" }, { "docid": "65f104cf45cf2bfd2b17ef859adfa70e", "score": "0.50670964", "text": "def next_track(self, message):\n pass", "title": "" }, { "docid": "a593d09ffe7e6f04ad41c6366dcc56f4", "score": "0.50659955", "text": "def _next_image(self, image_id, action):\n return self._all_graph[image_id][action]", "title": "" }, { "docid": "13197ba22271b8d1791464c5b6c7a39d", "score": "0.5048989", "text": "def get_next_picture_id(self, origin_node_id):\n logger.debug(\"Getting next picture ID.\")\n \n # Let's not hit the disk if we don't have to\n if self.next_picture_id is not None:\n logger.debug(\"Updating picture ID in memory.\")\n 
rtn = self.next_picture_id\n self.next_picture_id += 1\n return \"%d-%d\"%(rtn, origin_node_id)\n \n logger.debug(\"Updating picture ID from disk.\")\n # Query the database to see what the last id was?\n SQL = \"SELECT picture_id FROM db_pictures WHERE origin_node_id = ?\"\n results = self._sql(SQL, [origin_node_id]).fetchall()\n \n # If it's empty, this is a new database\n if not results:\n self.next_picture_id = 2\n return \"1-%d\"%origin_node_id\n else:\n id_list = [int(r[0].split(\"-\")[0]) for r in results]\n id_list.sort(reverse=True)\n current_id = id_list[0]\n rtn = current_id+1\n self.next_picture_id = rtn+1\n return \"%d-%d\"%(rtn, origin_node_id)", "title": "" }, { "docid": "5fbb5f21aaaaf4b336db9f8af2db7b95", "score": "0.5043903", "text": "def db_next_hid( self ):\n conn = object_session( self ).connection()\n table = self.table\n trans = conn.begin()\n try:\n next_hid = select( [table.c.hid_counter], table.c.id == self.id, for_update=True ).scalar()\n table.update( table.c.id == self.id ).execute( hid_counter = ( next_hid + 1 ) )\n trans.commit()\n return next_hid\n except:\n trans.rollback()\n raise", "title": "" }, { "docid": "4cfbb0c28553f6f613b2309a6430b022", "score": "0.5041089", "text": "def train_one_generation(s):\n s.fitness_computation_on_population()\n s.create_roulette_wheel()\n s.select_chromosomes_from_roulette_wheel()\n s.track_best_chromosomes()\n s.track_performance_over_generations()\n s.generate_next_generation()\n s.generation += 1", "title": "" }, { "docid": "ec5d2a9b96693755e1b7ae31fa2aebd1", "score": "0.5040065", "text": "def next_station(self):\n if self.__connected:\n try:\n data = f\"{self.__line_number} {self.__station + 1} {self.__id}\"\n self.__send_to_server(data)\n self.__station += 1\n if self.__station in self.__stations.keys():\n self.__total_people_count += self.__stations[self.__station]\n except:\n self.__connected = False\n self.asking_user_to_reconnect = False\n\n if int(self.__station) + 1 > Bus.MAX_STATION: # stop the bus client when he reaches his final station\n self.stop = True\n self.__gui.display_finished()\n return\n else:\n print(\"user trying to send update the server about change in the station but i am offline.\")", "title": "" }, { "docid": "2e89662d09ae9601c638a0c4416bec0b", "score": "0.5038427", "text": "def _get_next_entrust_no(self):\n return str(self.seq_gen.get_next('entrust_no'))", "title": "" }, { "docid": "f307b129bdb0994e8f8c147081bf79b6", "score": "0.5036329", "text": "def next_id(self):\n return self._next_id.value", "title": "" }, { "docid": "9ab93c3fe0b2d893c69feac9f7ec64b2", "score": "0.50349325", "text": "def alloc_id(self):\n self._nextid += 1\n return self._nextid", "title": "" }, { "docid": "9ab93c3fe0b2d893c69feac9f7ec64b2", "score": "0.50349325", "text": "def alloc_id(self):\n self._nextid += 1\n return self._nextid", "title": "" }, { "docid": "56ed7336019bd5d31f65dfd835ae0066", "score": "0.5033949", "text": "async def get_next_id(db: asyncpg.pool.Pool):\n query = \"\"\"\n SELECT id FROM giveaways ORDER BY id DESC LIMIT 1\n \"\"\"\n res = await db.fetch(query)\n if not res:\n return 0\n res = res[0]['id']\n return res + 1", "title": "" }, { "docid": "4fb2ebafc2c77352ded5966611649cdf", "score": "0.5028199", "text": "def findCounterExample(self):\n\t\twhile True:\n\t\t\tcurrent_state_h = self.resetExploration()\n\n\t\t\tfor ep in range(STEPS_PER_EPISODE):\n\t\t\t\ta = int(random.random()//(1/(len(self.world.map.availableActions(self.world.map.current)))))\n\t\t\t\t(r_h,r_m,next_state_h,next_state_m,obs) = 
self.executeOneStepExploration(current_state_h,a)\n\n\t\t\t\tif obs != 'null':\n\t\t\t\t\tself.observation_seq.append(obs)\n\t\t\t\t\tself.reward_trace.append(r_m)\n\n\t\t\t\tif self.isCounterExample(r_h,r_m):\n\t\t\t\t\tself.nuof_counter_examples += 1\n\t\t\t\t\treturn None\n\t\t\t\telse:\n\t\t\t\t\tcurrent_state_h = next_state_h\n\t\t\t\t\tself.world.map.current = next_state_m", "title": "" }, { "docid": "5d66b23ab5cbefdff261d26a989c9d7b", "score": "0.5024713", "text": "def eval_genomes(genomes, config):\n global gen\n #win = WIN\n gen += 1\n\n # start by creating lists holding the genome itself, the\n # neural network associated with the genome and the\n # bird object that uses that network to play\n nets = []\n birds = []\n ge = []\n for genome_id, genome in genomes:\n index = 0\n genome.fitness = 0 # start with fitness level of 0\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n nets.append(net)\n birds.append(Bird(230,350))\n birds[index].index = index\n ge.append(genome)\n\n base = Base(FLOOR)\n pipes = [Pipe(700)]\n score = 0\n\n run = True\n while run and len(birds) > 0:\n\n pipe_ind = 0\n if len(birds) > 0:\n if len(pipes) > 1 and birds[0].x > pipes[0].x: # determine whether to use the first or second\n pipe_ind = 1\n\n top_pipe = pipes[pipe_ind].height\n bot_pipe = pipes[pipe_ind].bottom\n pipe = pipes[pipe_ind]\n\n #decide_birds_parallel(nets, birds, pipe)\n\n for x, bird in enumerate(birds):\n ge[x].fitness += 0.1\n decide_birds(nets, birds, bird, top_pipe, bot_pipe)\n\n base.move()\n\n rem = []\n add_pipe = False\n for pipe in pipes:\n pipe.move()\n # check for collision\n for bird in birds:\n if pipe.collide(bird):\n ge[birds.index(bird)].fitness -= 1\n nets.pop(birds.index(bird))\n ge.pop(birds.index(bird))\n birds.pop(birds.index(bird))\n\n if pipe.x < 0:\n rem.append(pipe)\n\n if not pipe.passed and pipe.x < bird.x:\n pipe.passed = True\n add_pipe = True\n\n if add_pipe:\n score += 1\n # can add this line to give more reward for passing through a pipe (not required)\n for genome in ge:\n genome.fitness += 5\n pipes.append(Pipe(WIN_WIDTH))\n\n for r in rem:\n pipes.remove(r)\n\n for bird in birds:\n if bird.y - 10 >= FLOOR or bird.y < -50:\n nets.pop(birds.index(bird))\n ge.pop(birds.index(bird))\n birds.pop(birds.index(bird))\n\n # break if score gets large enough\n if score > 20:\n pickle.dump(nets[0], open(\"best.pickle\", \"wb\"))\n break", "title": "" }, { "docid": "46919608a184fdb5a8bb3678bfdb487c", "score": "0.5022995", "text": "def next_index():\n random_index = 0\n while random_index in prev:\n random_index = r.randint(0, len(taunts) - 1)\n prev[0] = prev[1]\n prev[1] = random_index\n return random_index", "title": "" }, { "docid": "06111e3f96a3e41a4f5974d0e12635bc", "score": "0.5019364", "text": "def nextNet(self):\n self.cIndex += 1\n if self.cIndex < len(self.generation.pop):\n self.updateCurrentNet()\n else:\n print '-' * 30\n log = \"GENERATION {}, AVERAGE {}\"\n print log.format(self.generation.generationNumber, self.generation.averageFitness())\n print '#' * 30\n self.generation.generateNextPop()\n self.cIndex = 0\n self.updateCurrentNet()", "title": "" }, { "docid": "65d224f19df66218edaabea8d12d20ec", "score": "0.5015222", "text": "def take_id(self):\n result = self.id\n self.id += 1\n return result", "title": "" }, { "docid": "7ac5a1c01050d3d728929b048205f9a6", "score": "0.5014364", "text": "def receive_connection(self, origin_neuron, axon):\n\t\tself.ingoing += 1", "title": "" }, { "docid": "f6d465a727dd3deda8e4da13d6606442", "score": 
"0.5013583", "text": "def relabel_id(self, g, old, new):\n self.id_to_node[g.nodes[new]['mob_id']] = new", "title": "" }, { "docid": "feb058a52296401b50aba8d8829d4828", "score": "0.501338", "text": "def pick_next_cell(self):\n maximal_connectivity = self.get_maximal_connectivity()\n assert(maximal_connectivity > 1)\n not_yet_visited_cells = np.logical_and( self.already_inspected_cells == False, self.actual_connectivity_tested == False )\n possible_indices = np.where( np.logical_and(self.connectivity_vector == maximal_connectivity,\n not_yet_visited_cells ) )\n next_frame_id = self.mesh_one.elements[possible_indices[0][0]].id_in_frame\n\n return next_frame_id", "title": "" }, { "docid": "2de9345060555d20e1eba7c446e43429", "score": "0.5010251", "text": "def get_id():\n new_id = ID_COUNTER.count\n ID_COUNTER.count += 1\n return new_id", "title": "" }, { "docid": "6820a33a0a3c66bcdaa7af87d378fbb7", "score": "0.500999", "text": "def tracker():\n pass", "title": "" }, { "docid": "89ce60f1ef54d88310b0aa2b7a9f3785", "score": "0.5008181", "text": "def visit (self):\n self.count += 1", "title": "" }, { "docid": "520825c61e25b15f491698d63d1ffe7d", "score": "0.5003811", "text": "def _get_next_tree_id(self):\r\n qs = self.get_queryset()\r\n max_tree_id = list(qs.aggregate(Max(self.tree_id_attr)).values())[0]\r\n max_tree_id = max_tree_id or 0\r\n return max_tree_id + 1", "title": "" }, { "docid": "22f4040859d3cc90afc9eb835faebd15", "score": "0.50011396", "text": "def next_id(self):\n for n in itertools.count(start=len(self._all) + 1):\n id_str = \"{}{}{}\".format(LAYER_ID, core.Node.ID_SEPARATOR, n)\n try:\n self._root.by_id(id_str)\n except KeyError:\n return id_str", "title": "" } ]
a32121a9940e96988968f7fa32f121dc
Load training data from file in ``.npz`` format.
[ { "docid": "f8d684abd15e1b6a8abcd32d14e978da", "score": "0.0", "text": "def load_training_data(file, validation_split=0, axes=None, n_images=None, verbose=False):\n\n f = np.load(file)\n X, Y = f['X'], f['Y']\n if axes is None:\n axes = f['axes']\n axes = axes_check_and_normalize(axes)\n\n # assert X.shape == Y.shape\n assert X.ndim == Y.ndim\n assert len(axes) == X.ndim\n assert 'C' in axes\n if n_images is None:\n n_images = X.shape[0]\n assert X.shape[0] == Y.shape[0]\n assert 0 < n_images <= X.shape[0]\n assert 0 <= validation_split < 1\n\n X, Y = X[:n_images], Y[:n_images]\n channel = axes_dict(axes)['C']\n\n if validation_split > 0:\n n_val = int(round(n_images * validation_split))\n n_train = n_images - n_val\n assert 0 < n_val and 0 < n_train\n X_t, Y_t = X[-n_val:], Y[-n_val:]\n X, Y = X[:n_train], Y[:n_train]\n assert X.shape[0] == n_train and X_t.shape[0] == n_val\n X_t = move_channel_for_backend(X_t,channel=channel)\n Y_t = move_channel_for_backend(Y_t,channel=channel)\n\n X = move_channel_for_backend(X,channel=channel)\n Y = move_channel_for_backend(Y,channel=channel)\n\n axes = axes.replace('C','') # remove channel\n if backend_channels_last():\n axes = axes+'C'\n else:\n axes = axes[:1]+'C'+axes[1:]\n\n data_val = (X_t,Y_t) if validation_split > 0 else None\n\n if verbose:\n ax = axes_dict(axes)\n n_train, n_val = len(X), len(X_t) if validation_split>0 else 0\n image_size = tuple( X.shape[ax[a]] for a in axes if a in 'TZYX' )\n n_dim = len(image_size)\n n_channel_in, n_channel_out = X.shape[ax['C']], Y.shape[ax['C']]\n\n print('number of training images:\\t', n_train)\n print('number of validation images:\\t', n_val)\n print('image size (%dD):\\t\\t'%n_dim, image_size)\n print('axes:\\t\\t\\t\\t', axes)\n print('channels in / out:\\t\\t', n_channel_in, '/', n_channel_out)\n\n return (X,Y), data_val, axes", "title": "" } ]
[ { "docid": "ace84a1c87b92c64a54709a4a12c05a0", "score": "0.72628057", "text": "def load_npy(self, filename):\n self.set_data(np.load(filename))", "title": "" }, { "docid": "7b2e39750ff5265c68e2283630227104", "score": "0.70330673", "text": "def load(self, filename: str) -> None:\n with np.load(filename + \".npz\") as data:\n self._data = data['data']\n self._labels = data['labels']", "title": "" }, { "docid": "80721b5885f107b7052ad18eebe80629", "score": "0.6879551", "text": "def load_data(self):\n try:\n data = np.load(\"pregen_dataset.npz\")\n except:\n raise Exception(\"There is no pregen_dataset.npz to load in this folder!\")\n self.N = data[\"N\"]\n self.x_1d = data[\"x\"]\n self.y_1d = data[\"y\"]\n self.z_1d = data[\"z\"]\n self.shape = data[\"shape\"]\n self.terrain = data[\"terrain\"]", "title": "" }, { "docid": "2e3e2a22c18986c95a54977869e82c9d", "score": "0.68498284", "text": "def load_npz(npz_file):\n data = np.load(npz_file, encoding=\"latin1\")\n return data['arr_0']", "title": "" }, { "docid": "b3db8401bdcd57a1cb5d2817ddcf6568", "score": "0.6806034", "text": "def load_npz(file, obj, path='', strict=True):\n with numpy.load(file) as f:\n d = NpzDeserializer(f, path=path, strict=strict)\n d.load(obj)", "title": "" }, { "docid": "2a01fc0f0ff22fcd0aacf1a8e98a44e4", "score": "0.6738641", "text": "def load_model(self, filename):\n [self.num_layers, self.sizes, self.weights, self.biases] = np.load(\n filename, allow_pickle=True)", "title": "" }, { "docid": "17d67c9b95add5d73d297ced46d0333d", "score": "0.6724067", "text": "def load_data(path='mnist.npz'):\n \n # 这是原本的路径\n '''path = get_file(path,\n origin='https://s3.amazonaws.com/img-datasets/mnist.npz',\n file_hash='8a61469f7ea1b51cbae51d4f78837e45')'''\n \n # 这是我修改后保存数据的路径\n path = 'G:/desktop/1/mnist.npz'\n \n f = np.load(path)\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n f.close()\n return (x_train, y_train), (x_test, y_test)", "title": "" }, { "docid": "cd773c7908f4fa0c1db1dddea829ec72", "score": "0.6700202", "text": "def load_npz(filepath):\n d = np.load(filepath)\n return d['Xtr'], d['Xte'], d['ytr'], d['yte'], d['Ztr'], d['Zte']", "title": "" }, { "docid": "0ca55a0695d91ea4fbad798158c2e647", "score": "0.66919774", "text": "def load_npz(file_name):\n if not file_name.endswith('.npz'):\n file_name += '.npz'\n with np.load(file_name) as loader:\n loader = dict(loader)\n adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],\n loader['adj_indptr']), shape=loader['adj_shape'])\n\n if 'attr_data' in loader:\n attr_matrix = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],\n loader['attr_indptr']), shape=loader['attr_shape'])\n else:\n attr_matrix = None\n\n labels = loader.get('labels')\n\n return adj_matrix, attr_matrix, labels", "title": "" }, { "docid": "2a19c99fc850a03c304dcc53ac4aeb8a", "score": "0.6670107", "text": "def loadnpy(filename):\n return np.load(filename)", "title": "" }, { "docid": "6a6f4918998a0185fd54080bd3e24ca0", "score": "0.66639733", "text": "def load_neuraldata(filename):\r\n data = np.load(filename)[()];\r\n return np.array(data)", "title": "" }, { "docid": "032f79ff45f83d05dcb873441ca0b45f", "score": "0.66138434", "text": "def load_data(path='mnist.npz'):\n f = np.load(path)\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n f.close()\n return (x_train, y_train), (x_test, y_test)", "title": "" }, { "docid": "e42cf43aaa617c575ebcb3df1e1782c3", "score": "0.65952045", "text": "def 
load_model(self, file_name):\n self.w_nodes = np.loadtxt(file_name)", "title": "" }, { "docid": "c1fb319a6fbec5bd1beb14469babac78", "score": "0.6530733", "text": "def test_load_file(self):\n loader = Loader('./tests/example.npz')\n loader.load_file()\n self.assertIsNotNone(loader.data)", "title": "" }, { "docid": "615af61615ccfe8a9bb26c0f059872ad", "score": "0.6502738", "text": "def load_data(file_name):\n f = gzip.open(file_name, 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)", "title": "" }, { "docid": "56c6fb07374646eabd1f85788818d592", "score": "0.64150727", "text": "def loadFile(self, filename):\n f = open(filename, 'r')\n dataset = f.readlines()\n for data in dataset[1:]:\n d = data.split()\n self.lexiVector[d[0]] = np.array(d[1:], dtype=float)", "title": "" }, { "docid": "fd1f65995732d3d772c34efedf6388ce", "score": "0.64106315", "text": "def load_dat(filename):\n fh = open(filename, \"rb\")\n spam = fh.read(12)\n _, _, sampSize, _ = unpack(\">IIHH\", spam)\n veclen = int(sampSize / 4)\n fh.seek(12, 0)\n dat = np.fromfile(fh, dtype=np.float32)\n dat = dat.reshape(int(len(dat) / veclen), veclen)\n dat = dat.byteswap()\n fh.close()\n return dat", "title": "" }, { "docid": "7653420abf9d9c75ed95c3ac15d4eff3", "score": "0.63710654", "text": "def loadTrainingData(filename, images, facePoints=None, delim=None, offset=None): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "575420bb93113e982d44dd375352af96", "score": "0.6348388", "text": "def read_npzdata(folder, file, *arg):\n #import pdb; pdb.set_trace()\n full_path = os.path.join(folder, file)\n fold.file_exists(full_path)\n npz_data = np.load(full_path)\n \n parameters = []\n for param in arg:\n param_read = npz_data[param]\n parameters.append(param_read)\n \n del npz_data, param_read\n return parameters", "title": "" }, { "docid": "b2a271732c9d2a6e42c3b6f875e5a569", "score": "0.633575", "text": "def load_data(filename):\n with open(filename) as training_file:\n # Split the lines on commas and convert data to floats.\n data = np.array([list(map(float, line.split(','))) for line in training_file.readlines()])\n # Extract label from dataset and return.\n return np.transpose(data[:, :-1]), np.array([data[:, -1]])", "title": "" }, { "docid": "6fc5abb82acf3c8a5ed2016cbf82d5b7", "score": "0.6335043", "text": "def _load_npz(fname):\n npz = np.load(fname, allow_pickle=True)\n return (npz['info'].tolist(), npz['data'], npz['blinks'], \n npz['saccades'], npz['messages'], \n tuple(npz['ch_names']), tuple(npz['eye_names']))", "title": "" }, { "docid": "14f396d185668f51919034415eff9289", "score": "0.6321453", "text": "def fromfilename(cls, filename):\n data = pickle.load(open(filename, 'rb'))\n data.data = np.load(filename + '_data.npy')\n data.randoms = np.load(filename + '_randoms.npy')\n return data", "title": "" }, { "docid": "1b82281dcf6942f347bf3d898d432140", "score": "0.6290441", "text": "def load(filename) :\n # determine filename\n dir = os.path.dirname(__file__)\n f = os.path.join(dir, '', filename)\n\n # load data\n with open(f, 'r') as fid :\n data = np.loadtxt(fid, delimiter=\",\")\n\n return data", "title": "" }, { "docid": "ba67897a34f3249ce8f2cda7ac7361a3", "score": "0.6229675", "text": "def load_data(data_file):\n print('loading data ...')\n f = gzip.open(data_file, 'rb')\n train_set, valid_set, test_set = load_pickle(f)\n f.close()\n\n train_set_x, train_set_y = make_numpy_array(train_set)\n valid_set_x, 
valid_set_y = make_numpy_array(valid_set)\n test_set_x, test_set_y = make_numpy_array(test_set)\n\n return [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)]", "title": "" }, { "docid": "bbf663ea51019c667153f44e40cd648a", "score": "0.6216395", "text": "def load_z(self):\n self.z = self.read_var(self.zvar)\n self.test_shape(self.zvar, self.z.shape, 2)", "title": "" }, { "docid": "db1a06499b5ab8896a2966449680898f", "score": "0.61855006", "text": "def load_npy(self, path):\n self.matrix = np.load(os.path.join(path, \"vectors.npy\"))\n # self.load_with_alpha(0.6)\n self.vocabulary = Vocabulary_simple()\n self.vocabulary.load(path)\n self.name += os.path.basename(os.path.normpath(path))", "title": "" }, { "docid": "059e3c3596be62da7eb85430098a3f98", "score": "0.6182488", "text": "def load(self, f):\n # see if there are header rows\n with possibly_open_file(f, 'rb') as g:\n header_lines = 0\n for i, line in enumerate(g):\n try:\n nums = [float(tok) for tok in\n re.split('\\s|,', line.decode('utf-8'))\n if len(tok)]\n if len(nums) >= 2:\n header_lines = i\n break\n except ValueError:\n continue\n\n self.data = np.loadtxt(f, unpack=True, skiprows=header_lines)\n\n if hasattr(f, 'read'):\n fname = f.name\n else:\n fname = f\n\n self.filename = fname\n self.name = os.path.splitext(os.path.basename(fname))[0]", "title": "" }, { "docid": "52b7fdd7c14db900284bc41b428334f3", "score": "0.6151441", "text": "def load(self, filename):\n\n return np.loadtxt(filename, dtype='int32', skiprows=1)", "title": "" }, { "docid": "27a4b2ad7e04fdbd2aa151197a44c794", "score": "0.6146876", "text": "def _load_npz(self, path):\n npzfile = np.load(path)\n return dict(npzfile)", "title": "" }, { "docid": "d36f26a108bf6d4ac18731a826f892e0", "score": "0.61197513", "text": "def read_dataset(filename):", "title": "" }, { "docid": "db0ec23fa5238767ed1209db8b0787d4", "score": "0.6106835", "text": "def load(filename):\n filepath = reader_filepath(sample, filename, pathfunc)\n return np.load(filepath, allow_pickle=allow_pickle)", "title": "" }, { "docid": "fa32036f63f638d39900c1c427cee9e8", "score": "0.6099805", "text": "def load(self, file_name):\n super(NeuralNet, self).load(file_name)\n self._model = models.load_model(file_name)\n read_list = joblib.load('{}_nn'.format(file_name))\n self._stopped_epoch = read_list[0]\n self._inference_model = None\n self._inference_batch_size = None", "title": "" }, { "docid": "7781e744a66c65540be2ade22a3050d0", "score": "0.60986054", "text": "def load_data(self) -> None:\n if self.extension == \"parquet\":\n train = pd.read_parquet(self.paths[0])\n validation = pd.read_parquet(self.paths[1])\n test = pd.read_parquet(self.paths[2])\n elif self.extension == \"csv\":\n train = pd.read_csv(self.paths[0])\n validation = pd.read_csv(self.paths[1])\n test = pd.read_csv(self.paths[2])\n\n self.data = [train, validation, test]", "title": "" }, { "docid": "486288060f31ca8f7409e3bc944f4eb6", "score": "0.60809344", "text": "def loadz(filepath):\n y = np.load(filepath)\n return coo_matrix((y['data'],(y['row'],y['col'])),shape=y['shape'])", "title": "" }, { "docid": "e802d90ca7dfd06842c6411fe0dddbd8", "score": "0.6066158", "text": "def loadz(file):\r\n y = np.load(file)\r\n return coo_matrix((y['data'],(y['row'],y['col'])),shape=y['shape'])", "title": "" }, { "docid": "94d5aa51a262e460cdf91ef35932adfe", "score": "0.6058451", "text": "def laod(cls, file):\n data = np.load(file)\n return cls(data)", "title": "" }, { "docid": "72c436c14565634415b71f48f4be5c72", "score": 
"0.6052672", "text": "def loadz(file):\n y = np.load(file)\n return coo_matrix((y['data'],(y['row'],y['col'])),shape=y['shape'])", "title": "" }, { "docid": "d99af401d5ab8e21152bd62dc29d876d", "score": "0.6047507", "text": "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f,encoding='bytes')\n f.close()\n return (training_data, validation_data, test_data)", "title": "" }, { "docid": "bacb4ffb5016017493f02e756fe43fd0", "score": "0.6047478", "text": "def load_training_data(data_file: str) -> list:\n # NOTE: torch expects float data;\n # default numpy.loadtxt reads as float64,\n # so specify dtype=numpy.single\n raw = numpy.loadtxt(data_file, dtype=numpy.single, delimiter=',')\n data = list()\n for i in range(raw.shape[0]):\n data.append((raw[i][1:].tolist(), [raw[i][0]]))\n return data", "title": "" }, { "docid": "dc3a9ab8373050b343cde23889da46de", "score": "0.60469246", "text": "def load(file: str) -> PointData:\n data = np.load(file)\n point = data['point']\n point_data_data = data['point_data']\n\n point_data = PointData(point, point_data_data)\n return point_data", "title": "" }, { "docid": "857a010f8123fad97e19a4750cbef747", "score": "0.60373116", "text": "def load(cls, filepath, name=None):\n if isinstance(filepath, np.lib.npyio.NpzFile):\n npfile = filepath\n else:\n npfile = np.load(filepath, allow_pickle=True)\n arrs = dict(npfile.items())\n state = arrs.pop(BrainBase.STATE).item()\n if name is not None:\n state[BrainBase.NAME] = name\n return cls.fromState(state, arrs=arrs)", "title": "" }, { "docid": "4d830a93d1a478acfede48538a4b4940", "score": "0.6033458", "text": "def load_npy(name):\n\twith open(name, \"rb\") as fr:\n\t\treturn np.load(fr)", "title": "" }, { "docid": "df92d4ef1e2151c831440b54f4384728", "score": "0.60331804", "text": "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n train_set, valid_set, test_set = pickle.load(f, encoding='latin')\n f.close()\n return train_set, valid_set, test_set", "title": "" }, { "docid": "2310924c4c5f13215ff44a224ee4d39e", "score": "0.6031938", "text": "def load_training_data():\n\n train_data_numpy = np.empty((NUM_TRAINING_SAMPLES,) + tuple(GEOMETRY.sinogram_shape))\n train_labels_numpy = np.empty((NUM_TRAINING_SAMPLES,) + tuple(GEOMETRY.volume_shape))\n i = 0\n for index in TRAIN_INDEX:\n train_data_file = '../data_preprocessing/sinograms/sinogram_' + str(index) + '.npy'\n train_data_numpy[i, :, :, :] = np.load(train_data_file)[:GEOMETRY.number_of_projections, :, :]\n train_label_file = '../data_preprocessing/recon_360/recon_' + str(index) + '.npy'\n train_labels_numpy[i, :, :, :] = np.load(train_label_file)\n i = i + 1\n\n return train_data_numpy, train_labels_numpy", "title": "" }, { "docid": "ecda2166afacf6eff60eb0fab315f04a", "score": "0.60268885", "text": "def loadpklz(filename):\n import gzip\n import pickle\n with gzip.open(filename, 'rb') as f:\n obj = pickle.load(f)\n return obj", "title": "" }, { "docid": "3ef256b86a224cc8cffb86b6bcdbbd3f", "score": "0.6021615", "text": "def load_experiment(filename):\r\n data = np.load(filename)[()];\r\n return np.array(data)", "title": "" }, { "docid": "c105f55ab89203b6bf1a0f216afd7038", "score": "0.60150373", "text": "def load_data(input_file):\n data = np.genfromtxt(input_file, delimiter=',', skip_header=0, names=None)\n return data", "title": "" }, { "docid": "6aebd43cd6aaff39d95ea9f24ed45d24", "score": "0.600216", "text": "def load_data(filename):\n file = open(filename, \"r\")\n lines = file.readlines()\n\n data = 
[]\n for line in lines:\n data.append(line.split(\"\\n\")[0].split(\"\\t\"))\n\n return np.array(data, dtype=float)", "title": "" }, { "docid": "ae7e8e7ad585c106b79125af464021c7", "score": "0.59909415", "text": "def read_model_data(model, filename):\n filename = os.path.join('models/', '%s.%s' % (filename, PARAM_EXTENSION))\n with open(filename, 'r') as f:\n data = pickle.load(f)\n\n savemat('dir_est_weights_raw.mat', {'data': data})\n\n lasagne.layers.set_all_param_values(model, data)", "title": "" }, { "docid": "c37811897f738402f426a77f5a09bb08", "score": "0.599009", "text": "def load_model(self, filename):\n model = np.load(f\"models/{filename}\", allow_pickle=True)\n self.beta = model[\"beta\"].reshape(-1, 1)", "title": "" }, { "docid": "05abe7a3b4f714d2f51298f149738005", "score": "0.5985719", "text": "def import_spyview_dat(data_dir, filename):\n with open(os.path.join(data_dir, filename)) as f:\n dat = np.loadtxt(f)\n return dat", "title": "" }, { "docid": "14d02e680fda012bc52728947ea891cf", "score": "0.59768766", "text": "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n return (training_data, validation_data, test_data)", "title": "" }, { "docid": "208fe2a5225fe658ec1ceada52a3cd3e", "score": "0.5968667", "text": "def load_data():\r\n f = gzip.open('mnist.pkl.gz', 'rb')\r\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\r\n f.close()\r\n return (training_data, validation_data, test_data)", "title": "" }, { "docid": "1c760dc8e7129b5052d64bd9a7f5bd99", "score": "0.596175", "text": "def from_file(cls,data,dist='rice',**kwargs):\n\n kwargs.update(unpack=False)\n data = np.loadtxt(data,**kwargs)\n\n return cls(data,dist=dist)", "title": "" }, { "docid": "7bc07adea956cb3ffe7425ad81a92b4b", "score": "0.5960422", "text": "def load_model(self, weight_file): \n\t\tself.w = np.load(weight_file)", "title": "" }, { "docid": "1639d1ac2515c942689c0eb7ebfeb4ea", "score": "0.5933849", "text": "def load(self, file):\n if isinstance(file, basestring):\n with open(file, \"rb\") as file:\n self.load(file)\n else:\n loaded = load(file)\n self.model = loaded[0]\n self.priorCount = loaded[1]\n self.countPerFeature = loaded[2]", "title": "" }, { "docid": "4b3a6605369c364262917a78bacba547", "score": "0.593033", "text": "def load_data():\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)", "title": "" }, { "docid": "2ee123f1c040051be2fbb70289ff7f9e", "score": "0.59225124", "text": "def load_data():\n\n # Get the data.\n train_data_filename = maybe_download('train-images-idx3-ubyte.gz')\n train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')\n test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')\n test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')\n\n # Extract it into numpy arrays.\n train_data = extract_data(train_data_filename, FLAGS.train_size + FLAGS.validation_size)\n train_labels = extract_labels(train_labels_filename, FLAGS.train_size + FLAGS.validation_size)\n test_data = extract_data(test_data_filename, FLAGS.test_size)\n test_labels = extract_labels(test_labels_filename, FLAGS.test_size)\n\n validation_data = train_data[:FLAGS.validation_size, ...]\n validation_labels = train_labels[:FLAGS.validation_size]\n train_data = train_data[FLAGS.validation_size:, ...]\n train_labels = 
train_labels[FLAGS.validation_size:]\n\n return train_data, train_labels, validation_data, validation_labels, test_data, test_labels", "title": "" }, { "docid": "6323602c8d12dfa5f36e2121f7cc31be", "score": "0.59224015", "text": "def load_train_idx():\n with open('traindata.pik', 'r') as f:\n traindata = pik.load(f)\n \n return traindata", "title": "" }, { "docid": "8607a80e8cdd7460f35cbcd55f1ce353", "score": "0.5921994", "text": "def load_dataset(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "bc107e65e57376dd5776990c8db7056b", "score": "0.591993", "text": "def load_checkpoint(self, file_path):\n checkpoint = torch.load(file_path)\n model = RNNClassifier(\n checkpoint[\"input_size\"],\n checkpoint[\"hidden_size\"],\n checkpoint[\"output_size\"],\n checkpoint[\"n_layers\"],\n )\n model.load_state_dict(checkpoint[\"state_dict\"])\n super().leverage_model(model)", "title": "" }, { "docid": "624b307a6ac4b0cc7a039d01770b85ab", "score": "0.59192276", "text": "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "title": "" }, { "docid": "2608962fc6fd854fdec3f32eb31e4e10", "score": "0.5918931", "text": "def load_cached_data(filename='/tmp/smallnorb.pkl.gz'):\n\n with gzip.open(filename, 'r') as file:\n train_data, test_data, train_labels, test_labels, train_info, test_info = \\\n cPickle.load(file)\n\n return train_data, test_data, train_labels, test_labels, train_info, test_info", "title": "" }, { "docid": "ecc039a44fd96899259dfc34630abe27", "score": "0.59123504", "text": "def load_state(self, file):\n tmp_A = sp.load(file)\n\n self.D[0] = 1\n for n in xrange(self.N):\n self.D[n + 1] = tmp_A[n + 1].shape[2]\n self._init_arrays()\n self.A = tmp_A", "title": "" }, { "docid": "6a71c000542fcf0545fc50d3f0773d1b", "score": "0.59035015", "text": "def load_file(filename):", "title": "" }, { "docid": "eeee226d2aeec56ee8580b0b5c49a6b5", "score": "0.5896797", "text": "def load(exr_path):\n from time import time\n from subprocess import call\n\n # Convert to .npz\n npz_f = '/tmp/%s_t%s.npz' % \\\n (basename(exr_path).replace('.exr', ''), time())\n call(['python2',\n '%s/../../commandline/exr2npz.py' % dirname(abspath(__file__)),\n exr_path,\n npz_f])\n\n # Load this .npz\n data = np.load(npz_f)\n return data", "title": "" }, { "docid": "da161d2355a03244fc70e86199b4893e", "score": "0.5891124", "text": "def load_data(fn):\n\tdata = []\n\twith open(FILE_DIR + fn, \"r\") as f:\n\n\t\tfor line in f.readlines():\n\n\t\t\tsample = line.split()\n\n\t\t\t# Strip punctuation from sentence\n\t\t\tfor i in range(len(sample)):\n\n\t\t\t\tsample[i] = \"\".join(\n\t\t\t\t\tl for l in sample[i] if l not in string.punctuation)\n\n\t\t\tdata.append((sample[:-1], int(sample[-1])))\n\n\treturn np.array(data)", "title": "" }, { "docid": "6dca2ec325b48c4d370cace3b9d48e4f", "score": "0.5890306", "text": "def load_vectors(file_name):\n fin = io.open(file_name, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n # The first line contains the number of rows (n) and the dimensionality (d)\n n, d = map(int, fin.readline().split())\n data = dict()\n for line in fin:\n tokens = line.rstrip().split(' ')\n data[tokens[0]] = map(float, tokens[1:])\n fin.close()\n return data", "title": "" }, { "docid": "f71ae65ce70481e859fbc2f28e428d8d", "score": "0.58823687", "text": "def read_training_data():\n data_file = open('../RPCRunner/data/data', 'rb')\n labels_file = open('../RPCRunner/data/labels', 'rb')\n labels = 
np.loadtxt(labels_file, dtype=np.int8)\n data = np.fromstring(np.array([data_file.read(650) for i in labels]),\n dtype=np.uint8)\n return np.reshape(data, (-1, 650)), labels", "title": "" }, { "docid": "75b5b71358a496bcfdb84c7b28d92b28", "score": "0.58816165", "text": "def load_data(self,filename=\"data.pickle\"):\n with open(filename,\"r\") as f:\n self.feature_vectors,self.classifications,self.keys = pickle.load(f)", "title": "" }, { "docid": "ce4c53f05e7764f08259748bad35d17c", "score": "0.5881304", "text": "def load(cls, name, file_, **kwargs):\n dict_data = np.load(file_)\n return cls.from_dict(name, dict_data, **kwargs)", "title": "" }, { "docid": "8f2553b3cd84c4269780b655966f3a38", "score": "0.5879019", "text": "def load(cls,path):\n fpath=Path(path)\n prm=loads(open(fpath+'prm.dat','r'))\n data=dict(np.load(fpath+'data.npz'))\n results=dict(np.load(fpath+'results.npz'))\n return cls(data=data,results=results,parameters=prm)", "title": "" }, { "docid": "1b4ea148bd9c9522995b340647fcb100", "score": "0.5878805", "text": "def load_model(self, weight_file): \n\n\t\tself.w = np.load(weight_file)", "title": "" }, { "docid": "0e47e854901d749ecd0073c16742bbdd", "score": "0.5869626", "text": "def load_binary_data(filename, dtype=np.float32):\n f = open(filename, \"rb\")\n data = f.read()\n f.close()\n _data = np.fromstring(data, dtype)\n if sys.byteorder == 'big':\n _data = _data.byteswap()\n return _data", "title": "" }, { "docid": "d97b26bade4261bf6630570a2d167c41", "score": "0.5863161", "text": "def load_pkl(file):\n f = open(file, 'rb')\n data = pickle.load(f)\n f.close()\n return data", "title": "" }, { "docid": "b646d37a7d7cdc25a2e436037d455497", "score": "0.5855168", "text": "def dataLoader(file):\n data = []\n with open(file) as file:\n for line in file:\n data.append(line.strip().split(\" \"))\n file.close()\n return np.array(data)", "title": "" }, { "docid": "4555609604f3f468467ccb76c64a5b78", "score": "0.58483803", "text": "def _load_training_data(self):\n self._save_training_data()", "title": "" }, { "docid": "75c93b7a546dc78fd7d5bf1baf192e9a", "score": "0.584714", "text": "def load_data(path, kind='train'):\n labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)\n return images, labels", "title": "" }, { "docid": "77099e667ba7de507acd0ec1d609a32e", "score": "0.5845702", "text": "def load_data():\n data_file = \"./data/mnist.pkl.gz\"\n # Note: it is around 17MB\n check_and_download(data_file,\n \"https://github.com/mnielsen/neural-networks-and-deep-learning/raw/master/data/mnist.pkl.gz\")\n f = gzip.open(data_file, 'rb')\n training_data, validation_data, test_data = pickle.load(\n f, encoding=\"latin1\")\n f.close()\n return training_data, validation_data, test_data", "title": "" }, { "docid": "d4083e04b3cdfeff7ff87e7012e69b3e", "score": "0.5833193", "text": "def read_data_pkl(data_file):\n with open(data_file, \"rb\") as fp:\n data = pkl.load(fp)\n return data", "title": "" }, { "docid": "d6a63f9e0d156ca44eacada673598541", "score": "0.5832519", "text": "def load_data(path):\n kwargs = {'num_workers': 1, 'pin_memory': True, 'drop_last': True} if args.cuda else {'drop_last': True}\n\n # normalize = transforms.Normalize((0.957, 0.647, 0.349), (0.080, 
0.148, 0.153))\n # normalize = transforms.Normalize((0.640, 0.435, 0.240, 0.440), (0.475, 0.342, 0.214, 0.380))\n train_transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.RandomCrop(args.output_size),\n # transforms.RandomHorizontalFlip(),\n # transforms.ToTensor(),\n # normalize,\n ])\n center_transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.CenterCrop(args.output_size),\n # transforms.ToTensor(),\n # normalize,\n ])\n # train_set = Dataset(class_num=2, data_path=os.path.join(path, 'train.txt'),\n if args.data_repeat:\n train_set = Dataset(class_num=2, data_path=os.path.join(path, 'train_new.txt'),\n file_path=path, grayscale=False, p=0.5,transform=train_transform)\n else:\n train_set = Dataset(class_num=2, data_path=os.path.join(path, 'train.txt'),\n file_path=path, grayscale=False, p=0.5,transform=train_transform)\n train_loader = torch.utils.data.DataLoader(train_set,\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_set = Dataset(class_num=2, data_path=os.path.join(path, 'test.txt'),\n file_path=path, grayscale=False, transform=center_transform)\n test_loader = torch.utils.data.DataLoader(test_set,\n batch_size=args.test_batch_size, shuffle=False, **kwargs)\n return train_loader, test_loader", "title": "" }, { "docid": "5a77584e7d869ab0144d148d41370502", "score": "0.5830943", "text": "def read_dta_file(filename):\n data = np.fromfile(filename, dtype='>f8')\n return data", "title": "" }, { "docid": "1074bf89110f4eb577e00cbf26d558d4", "score": "0.5827046", "text": "def load_training_file(file):\n with open(file) as f:\n haiku = f.read()\n return haiku", "title": "" }, { "docid": "ceae20025d93e4cf5c43e2feca116183", "score": "0.582278", "text": "def load_poses(file_name):\n\tpose_file = os.path.join(poses_path+ file_name)\n\tposes = []\n\t# Read and parse the poses\n\twith open(pose_file, 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\tT = np.fromstring(line, dtype=float, sep=' ')\n\t\t\tT = T.reshape(3, 4)\n\t\t\tposes.append(T)\n\t\t\t# print T\n\tposes_array = np.array(poses)\n\treturn poses_array", "title": "" }, { "docid": "4d1ecbb0a854744c276050008f2a1bf2", "score": "0.5818108", "text": "def train(self, train_file, verbose=False):\n f = open(train_file, 'r')\n f.close()", "title": "" }, { "docid": "68a580887e4de92a770a702ebf25f687", "score": "0.58161604", "text": "def read_train_data_f(filename2read, dim_p, h_prev, h_post, idx_f_use, data_size, sp_x=False, sp_y=False, is_npz=False):\n idx_f_use = make_numpy_array(idx_f_use, keep_1dim=True)\n len_filename = len(filename2read)\n _data_size = 0\n _data_size_list = []\n for nidx_d in range(0, len_filename):\n filename2read_sel = filename2read[nidx_d]\n data_read_tmp = np.load(filename2read_sel, allow_pickle=True)\n f_train_in = data_read_tmp['data_f'] if is_npz else data_read_tmp[()]['data_f']\n _data_size = _data_size + f_train_in.shape[0]\n _data_size_list.append(f_train_in.shape[0])\n\n _ratio = data_size / float(_data_size)\n data_size_list = [int(x * _ratio) for x in _data_size_list]\n data_size_list[-1] = data_size - np.sum(data_size_list[0:-1])\n\n dim_x, dim_y = dim_p * h_prev, dim_p * h_post\n dim_x_3 = 3 * h_prev\n if sp_y:\n dim_y_3 = 3 * (h_post + 1)\n else:\n dim_y_3 = 3 * (h_post + 2)\n dim_f = idx_f_use.shape[0] if len(idx_f_use) > 0 else 0\n\n idx_xin_tmp, idx_yin_tmp = np.arange(0, dim_x_3), np.arange(0, dim_y_3)\n if sp_y:\n idx_y0 = np.arange(0, dim_p * h_post)\n idx_y1 = np.arange(dim_p, dim_p * (h_post + 1))\n else:\n idx_y0 = 
np.arange(dim_p, dim_p * (h_post + 1))\n idx_y1 = np.arange(dim_p * 2, dim_p * (h_post + 2))\n\n h_prev_ref, h_post_ref = dim_x_3, dim_y_3\n\n x_train = np.zeros((data_size, dim_x), dtype=np.float32)\n y0_train = np.zeros((data_size, dim_y), dtype=np.float32)\n y1_train = np.zeros((data_size, dim_y), dtype=np.float32)\n f_train = np.zeros((data_size, dim_f), dtype=np.float32) if dim_f > 0 else []\n\n cnt_data = 0\n idx_sel_list = []\n for nidx_d in range(0, len_filename):\n filename2read_sel = filename2read[nidx_d]\n data_read_tmp = np.load(filename2read_sel, allow_pickle=True)\n\n if is_npz:\n x_train_in = data_read_tmp['data_x_sp'] if sp_x else data_read_tmp['data_x']\n y_train_in = data_read_tmp['data_y_sp'] if sp_y else data_read_tmp['data_y']\n else:\n x_train_in = data_read_tmp[()]['data_x_sp'] if sp_x else data_read_tmp[()]['data_x']\n y_train_in = data_read_tmp[()]['data_y_sp'] if sp_y else data_read_tmp[()]['data_y']\n\n if nidx_d == 0:\n h_prev_ref, h_post_ref = x_train_in.shape[1], y_train_in.shape[1]\n\n if dim_p == 2:\n idx_xin_tmp = np.setdiff1d(idx_xin_tmp, np.arange(2, h_prev_ref, 3))\n idx_yin_tmp = np.setdiff1d(idx_yin_tmp, np.arange(2, h_post_ref, 3))\n\n x_train_in = x_train_in[:, idx_xin_tmp]\n y_train_in = y_train_in[:, idx_yin_tmp]\n\n if dim_f > 0:\n f_train_in = data_read_tmp['data_f'] if is_npz else data_read_tmp[()]['data_f']\n f_train_in = f_train_in[:, idx_f_use]\n\n # Update\n size_before = int(x_train_in.shape[0])\n idx_rand_tmp_ = np.random.permutation(size_before)\n size_after = int(data_size_list[nidx_d])\n idx_rand_tmp = idx_rand_tmp_[np.arange(0, size_after)]\n idx_sel_list.append(idx_rand_tmp)\n\n idx_update_tmp = np.arange(cnt_data, cnt_data + size_after)\n x_train[idx_update_tmp, :] = x_train_in[idx_rand_tmp, :]\n y_train_in_tmp = y_train_in[idx_rand_tmp, :]\n\n y0_train[idx_update_tmp, :] = y_train_in_tmp[:, idx_y0]\n y1_train[idx_update_tmp, :] = y_train_in_tmp[:, idx_y1]\n\n if dim_f > 0:\n f_train[idx_update_tmp, :] = f_train_in[idx_rand_tmp, :]\n\n cnt_data = cnt_data + size_after\n\n idx_update = np.arange(0, cnt_data)\n x_train = x_train[idx_update, :]\n y0_train = y0_train[idx_update, :]\n y1_train = y1_train[idx_update, :]\n\n if dim_f > 0:\n f_train = f_train[idx_update, :]\n else:\n f_train = []\n\n return x_train, y0_train, y1_train, f_train, idx_sel_list", "title": "" }, { "docid": "dd56c0d88fd9b1f4f836a40e37582211", "score": "0.5786443", "text": "def load(cls, filename):\n print('Loading', filename)\n \n data = np.load(filename)\n \n dataset = Dataset(data['input_interval'], \n data['prediction_interval'], \n data['categories'].tolist())\n \n dataset.train_set = data['train_set_input'], data['train_set_label']\n dataset.test_set = data['test_set_input'], data['test_set_label']\n \n dataset.generation_done = True\n \n return dataset", "title": "" }, { "docid": "14124338f8ba0286bfb477eb25df46da", "score": "0.5786158", "text": "def load_data(fp, ts):\n\n # have timestamp?\n if ts:\n dt = np.dtype([\n ('event', np.int, 2),\n ('score', np.float),\n ('event_feature', np.dtype([('timestamp', np.int)]))\n ])\n else:\n dt = np.dtype([\n ('event', np.int, 2),\n ('score', np.float)\n ])\n\n # load training data\n x = np.genfromtxt(fname=fp, delimiter='\\t', dtype=dt)\n\n # close file\n if fp is not sys.stdin:\n fp.close()\n\n return x", "title": "" }, { "docid": "1d21555dfa6d658f565f8dbde1082828", "score": "0.5783919", "text": "def load(self,file):\n self.set_defaults()\n if \"+\" in file:\n files = file.split(\"+\")\n else:\n files = [file]\n for 
file in files:\n if \".pymodel\" in file:\n with open(file,\"r\") as stream:\n obj = cPickle.load(stream)\n if type(obj)==LineRecognizer:\n for k,v in obj.__dict__:\n self.__dict__[k] = v\n else:\n self.cmodel = obj\n elif \".cmodel\" in file:\n self.cmodel = ocropy.load_IModel(file)\n elif \".csize\" in file:\n self.linemodel = SimpleLineModel()\n self.linemodel.load(file)\n else:\n raise Exception(\"unknown extension\")", "title": "" }, { "docid": "bf45305415b0d697663c8429126a09ff", "score": "0.5781997", "text": "def load_data(filename):\n if \"images\" in filename:\n offset = IMAGE_OFFSET\n length = IMAGE_SIZE * IMAGE_SIZE\n else:\n offset = LABEL_OFFSET\n length = 1\n\n filepath = os.sep.join((DATA_DIR, filename))\n if not os.path.exists(filepath):\n print(\"Downloading {}\".format(filename))\n urllib.request.urlretrieve(URL+filename, filepath)\n print(\"Done!\")\n with gzip.open(filepath, \"rb\") as file_:\n data = np.frombuffer(file_.read(), np.uint8, offset=offset)\n return data.reshape(-1, length)", "title": "" }, { "docid": "e03bace59271a35937392a1ca7ab59ad", "score": "0.5781892", "text": "def load_data(self):\n np.random.seed(1234)\n self.input_data = np.random.rand(self.config.n_samples, self.config.n_features)\n self.input_labels = np.ones((self.config.n_samples,), dtype=np.int32)", "title": "" }, { "docid": "3204e624fdd38b8b664d02c05cef657b", "score": "0.5778889", "text": "def load_labels(filename):\n with gzip.open(filename, 'rb') as f:\n magic, _ = struct.unpack('>ii', f.read(8))\n if magic != MNIST_LABEL:\n raise ValueError(\"Error reading labels\")\n array = np.frombuffer(f.read(), dtype='uint8')\n array = array.reshape(array.size, 1)\n return array", "title": "" }, { "docid": "ae777a5694f99a2088335ac9080037fa", "score": "0.57754767", "text": "def read_npy(fname, **kwargs):\n if path.splitext(fname)[-1].lower() == \".npy\":\n data = np.load(fname)\n color = kwargs.get('color', None)\n if color is not None:\n data = retrieve_color_section(data, color)\n return data\n else:\n return None", "title": "" }, { "docid": "5d16b85d0b79c7dcf82438956931c838", "score": "0.5772619", "text": "def load_data_file(self, filename, decimal_point='.'):\n # self.raw_data = pd.DataFrame.from_csv(filename, sep=';')\n self.raw_data = pd.read_csv(filename, sep=';', index_col = 0, \n parse_dates = True, decimal=decimal_point)\n self.cpi_predictions = self.find_data(['cpi'])\n self.cpi_jae_predictions = self.find_data(['jae','xe'], \n exclude_cols=['CPI-jae'])\n self.gap_predictions = self.find_data(['gap'], \n exclude_cols=['Produksjonsgap'])", "title": "" }, { "docid": "7bd9c735a737d7cb6df08ba85fb8bd5c", "score": "0.5759066", "text": "def initialize_data_from_file(self, filepath):\n with open(filepath, 'r') as f:\n for line in f:\n terms = line.strip().split(',')\n self.examples.append(Feature(terms))", "title": "" }, { "docid": "a9620849daf21bb0d109b2e5fe298b24", "score": "0.5751678", "text": "def read_compas(filepath='../data/compas_preprocessed.npz',\n random_state=42):\n if filepath.endswith('ready.npz') and os.path.exists(filepath):\n return load_npz(filepath)\n\n d = np.load(filepath)\n X, y, Z = d['X'], d['y'], d['Z']\n\n n = X.shape[0] # Number of examples\n\n # Get train test split\n tr_idx, te_idx = _get_train_test_split(n, train_frac, random_state)\n Xtr, Xte, ytr, yte, Ztr, Zte = _apply_train_test_split(X, y, Z,\n tr_idx, te_idx)\n\n # Whiten feature data\n Xtr, Xte = _whiten_data(Xtr, Xte)\n\n # Center sensitive data\n Ztr, Zte = _center_data(Ztr, Zte)\n\n # Add intercept\n Xtr, Xte = 
_add_intercept(Xtr, Xte)\n\n # Labels are already 0/1\n\n return Xtr, Xte, ytr, yte, Ztr, Zte", "title": "" }, { "docid": "606f32d24c8b666a456f3342a12b7ce2", "score": "0.5751383", "text": "def load_data(filename):\n\tud_graph = grew.graph(filename)\n\treturn ud_graph", "title": "" }, { "docid": "ccfe99bf810b14318106e6c15e9d8067", "score": "0.5750405", "text": "def load_train_data(self):\n for r in self.train_data:\n self.train_label.append(r.pop(0))\n self.train_data = np.array(self.train_data)", "title": "" }, { "docid": "ffab2a5c5e62ed614e7b545cadc2d134", "score": "0.5749849", "text": "def extract_features(filename, train_dir):\n print('Extracting', filename)\n with open(train_dir+filename) as file:\n data = numpy.genfromtxt(file, delimiter=',')\n num_datapoints = len(data)\n rows = 1\n cols = len(data[0])\n data = data.reshape(num_datapoints, rows, cols, 1)\n return data", "title": "" }, { "docid": "900ab0d0f5568fe4e2492c84d2464f77", "score": "0.574869", "text": "def save_training_data(file, X, Y, axes):\n isinstance(file,(Path,string_types)) or _raise(ValueError())\n file = Path(file).with_suffix('.npz')\n file.parent.mkdir(parents=True,exist_ok=True)\n\n axes = axes_check_and_normalize(axes)\n len(axes) == X.ndim or _raise(ValueError())\n np.savez(str(file), X=X, Y=Y, axes=axes)", "title": "" }, { "docid": "e5cffd8d644bcd015251bb977bf4327f", "score": "0.57472396", "text": "def load_test_data(model_name, dir_name):\n print(\"Loading test data...\")\n filename = GRU_DATA_DIRECTORY + dir_name + '/' + model_name + '_test_data.npz'\n npzfile = np.load(filename)\n return npzfile[\"test_data\"]", "title": "" } ]
55581e6316965b25df6fc0a49bab86fe
Check pickling of an object across another process. python is the path to the python interpreter (defaults to sys.executable) Set verbose=True to print the unpickled object in the other process.
[ { "docid": "03fa3ae7b2e67f43a813be7bd9c17298", "score": "0.63948804", "text": "def check(obj, *args, **kwds):\n # == undocumented ==\n # python -- the string path or executable name of the selected python\n # verbose -- if True, be verbose about printing warning messages\n # all other args and kwds are passed to dill.dumps #FIXME: ignore on load\n verbose = kwds.pop('verbose', False)\n python = kwds.pop('python', None)\n if python is None:\n import sys\n python = sys.executable\n # type check\n isinstance(python, str)\n import subprocess\n fail = True\n try:\n _obj = dumps(obj, *args, **kwds)\n fail = False\n finally:\n if fail and verbose:\n print(\"DUMP FAILED\")\n #FIXME: fails if python interpreter path contains spaces\n # Use the following instead (which also processes the 'ignore' keyword):\n # ignore = kwds.pop('ignore', None)\n # unpickle = \"dill.loads(%s, ignore=%s)\"%(repr(_obj), repr(ignore))\n # cmd = [python, \"-c\", \"import dill; print(%s)\"%unpickle]\n # msg = \"SUCCESS\" if not subprocess.call(cmd) else \"LOAD FAILED\"\n msg = \"%s -c import dill; print(dill.loads(%s))\" % (python, repr(_obj))\n msg = \"SUCCESS\" if not subprocess.call(msg.split(None,2)) else \"LOAD FAILED\"\n if verbose:\n print(msg)\n return", "title": "" } ]
[ { "docid": "be5a0d70305d6329b2a939b438987978", "score": "0.57015604", "text": "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n except Exception:\n return False\n return True", "title": "" }, { "docid": "f261160b7dd7376478f85f0b1a0037bc", "score": "0.554075", "text": "def test_pickle():\n pass", "title": "" }, { "docid": "7727c93a2da3438d527808186874317f", "score": "0.5514787", "text": "def _try_to_pickle(obj):\n pickle.dumps(obj)", "title": "" }, { "docid": "b19488131b14b85ec8157ee1f75d034b", "score": "0.5501831", "text": "def check():\n try_it = pickle.load(open(\"data/pickleLilEvery.pkl\", 'rb'))\n print(try_it)", "title": "" }, { "docid": "aedb80a1e4d83b69e3c6544676598ab4", "score": "0.55003035", "text": "def check_pickable(obj):\n # TESTME!\n temp_file = join(const.TEMP_DIR, str(timestamp()) + \"tmp\")\n try:\n pick_dump(obj, temp_file)\n except:\n return False\n finally:\n try:\n removefile(temp_file)\n except Exception as e:\n log.error(\"The file %s could not be removed: %s\",\n temp_file, e)\n return True", "title": "" }, { "docid": "fa8bb88cdee9b89335723b70f787eef4", "score": "0.5398693", "text": "def test_pickle() -> None:\n pack_unpack_test(\n Decimal(42),\n use_pickle=True,\n )", "title": "" }, { "docid": "0c7473e624a360822433760d27e76291", "score": "0.5341918", "text": "def load_obj_pickle(adress, name, load_type=None, load_way=None):\n import cPickle\n\n print 'loading data...', adress + name + '.cpkl'\n\n if load_type == 'serial' or load_type == None:\n if load_way == None or load_way == 'cpickle':\n with open(adress + name + '.cpkl', 'rb') as f:\n return cPickle.load(f)", "title": "" }, { "docid": "3972f5540230fb1dbb5c5ef30791cc20", "score": "0.5327268", "text": "def pickles(obj,exact=False,safe=False,**kwds):\n if safe: exceptions = (Exception,) # RuntimeError, ValueError\n else:\n exceptions = (TypeError, AssertionError, NotImplementedError, PicklingError, UnpicklingError)\n try:\n pik = copy(obj, **kwds)\n #FIXME: should check types match first, then check content if \"exact\"\n try:\n #FIXME: should be \"(pik == obj).all()\" for numpy comparison, though that'll fail if shapes differ\n result = bool(pik.all() == obj.all())\n except (AttributeError, TypeError):\n warnings.filterwarnings('ignore')\n result = pik == obj\n warnings.resetwarnings()\n if hasattr(result, 'toarray'): # for unusual types like sparse matrix\n result = result.toarray().all()\n if result: return True\n if not exact:\n result = type(pik) == type(obj)\n if result: return result\n # class instances might have been dumped with byref=False\n return repr(type(pik)) == repr(type(obj)) #XXX: InstanceType?\n return False\n except exceptions:\n return False", "title": "" }, { "docid": "a8417e7002339d6400ade2ba7d06fc43", "score": "0.53073394", "text": "def test_can_pickle(self):\n settings = UploadSettings(None, FakeDataServiceApi(), None, ProjectNameOrId.create_from_name('mouse'), None)\n params = ('one', 'two', 'three')\n context = UploadContext(settings, params, multiprocessing.Manager().Queue(), 12)\n pickle.dumps(context)", "title": "" }, { "docid": "f777cad13eb4904124f6a91ec22a7088", "score": "0.5282426", "text": "def test_pickle_round_trip(tmpdir, acetone):\n with tmpdir.as_cwd():\n acetone.pickle(state=\"test\")\n mols = unpickle()\n pickle_mol = mols[\"test\"]\n for atom in acetone.atoms:\n pickle_atom = pickle_mol.get_atom_with_name(atom.atom_name)\n assert pickle_atom.__dict__ == atom.__dict__\n assert acetone.bonds == pickle_mol.bonds\n assert acetone.angles == pickle_mol.angles\n assert 
acetone.dihedrals == pickle_mol.dihedrals", "title": "" }, { "docid": "a51c893e14f8495e1e28bf6d0156475a", "score": "0.52675366", "text": "def test_save_load_pickle(self):\n pickle.loads(pickle.dumps(self.V))\n return True", "title": "" }, { "docid": "f7431ee7caa0c5cd6ca7bf2fe3ff3ed2", "score": "0.52216977", "text": "def test_pickle(self):\n obj = pickle.dumps(self.config)\n config2 = pickle.loads(obj)\n self.assertEqual(self.config.db.sqlite_path, config2.db.sqlite_path)\n self.assertEqual(self.config.server.port, config2.server.port)", "title": "" }, { "docid": "497f7bcc480f85ebeac11610ba95b49d", "score": "0.52023524", "text": "def jarIt(file,obj):\n file = open(file,'w')\n cPickle.dump(obj,file)\n print \"pickled \",obj\n file.close()\n del file\n return", "title": "" }, { "docid": "e0e31a8ac5ec574a861b250bf7488b5a", "score": "0.5148917", "text": "def test_store_object_can_be_serialized_by_pickle():\n pickle.dump(_get_store(\"https://example.com\"), io.BytesIO())\n pickle.dump(_get_store(\"databricks\"), io.BytesIO())\n # pickle.dump(_get_store(f\"sqlite:///{tmpdir.strpath}/mlflow.db\"), io.BytesIO())\n # This throws `AttributeError: Can't pickle local object 'create_engine.<locals>.connect'`", "title": "" }, { "docid": "81dbd9931acc68c6916659d12628ae72", "score": "0.51201713", "text": "def manage_pickle(path, fun, args=None, verbose=True) :\n\n\t# Test if the file already exists\n\tif os.path.exists(path) :\n\t\t# The file exists: load with pickle, and return\n\t\tif verbose==True:\n\t\t\tprint(\"manage_pickle: the file '{}' has been loaded\"\n\t\t\t\t.format(path))\n\t\twith open(path, 'rb') as file:\n\t\t\treturn pickle.load(file)\n\telse:\n\t\tif args==None: result = fun()\n\t\telse: result = fun(args)\n\t\twith open(path, 'wb') as file:\n\t\t\tpickler = pickle.Pickler(file)\n\t\t\tpickler.dump(result)\n\t\treturn result", "title": "" }, { "docid": "f725c6c115d4e1559ece22523001ca33", "score": "0.50983566", "text": "def test_repr_using_jsonpickle():\n thing = ObjWithJsonPickleRepr()\n thing.child = ObjWithJsonPickleRepr()\n thing.child.parent = thing\n encoded = jsonpickle.encode(thing)\n decoded = jsonpickle.decode(encoded)\n assert id(decoded) == id(decoded.child.parent)", "title": "" }, { "docid": "34a919347f2a8859995cc4f97dca79b6", "score": "0.50923824", "text": "def test_pickle(self, trigger, jitter):\n trigger.jitter = jitter\n data = pickle.dumps(trigger, 2)\n trigger2 = pickle.loads(data)\n\n for attr in BaseCombiningTrigger.__slots__:\n assert repr(getattr(trigger2, attr)) == repr(getattr(trigger, attr))", "title": "" }, { "docid": "34a919347f2a8859995cc4f97dca79b6", "score": "0.50923824", "text": "def test_pickle(self, trigger, jitter):\n trigger.jitter = jitter\n data = pickle.dumps(trigger, 2)\n trigger2 = pickle.loads(data)\n\n for attr in BaseCombiningTrigger.__slots__:\n assert repr(getattr(trigger2, attr)) == repr(getattr(trigger, attr))", "title": "" }, { "docid": "e25eff53c700776937033b01e54d0baa", "score": "0.50833565", "text": "def test_double_pickle(tmpdir, acetone):\n with tmpdir.as_cwd():\n acetone.pickle(state=\"input\")\n # remove all coords\n acetone.coords[\"input\"] = []\n acetone.pickle(state=\"after\")\n\n # now check we have both states\n mols = unpickle()\n assert \"after\" in mols\n assert \"input\" in mols", "title": "" }, { "docid": "7f1eeb74bb049a15354626157337988c", "score": "0.5041047", "text": "def test_pickle(self):\n data1 = {1, 2, 3}\n data2 = {4, 5}\n logoutputpickle = io.BytesIO()\n pickle.dump(data1, logoutputpickle)\n pickle.dump(data2, 
logoutputpickle)\n instream = io.BytesIO(logoutputpickle.getvalue())\n self.assertEquals (data1, pickle.load(instream))\n self.assertEquals (data2, pickle.load(instream))", "title": "" }, { "docid": "41675bb2c22d538adccda0d279e7e1ed", "score": "0.5021402", "text": "def load_python_obj(file_name:str) -> Any:\n with open(file_name,'rb') as f:\n obj = pickle.load(f)\n return obj", "title": "" }, { "docid": "4ed860389b3110898ca3f0d9ebc10c2a", "score": "0.5017657", "text": "def test_pickle():\n utils.pickle_save(d, 'd')\n test = utils.pickle_load('d')\n\n subprocess.check_call(['rm', 'd.pkl'])\n\n assert isinstance(d, dict)\n assert test['entry1'] == 'test'\n assert test['entry2'] == 'also_test'", "title": "" }, { "docid": "a22301d7508879c587d61f9b136dfc33", "score": "0.5012499", "text": "def test_pickle(level_name):\n observations = [\"RGBD\"]\n config = {\"width\": \"640\", \"height\": \"480\", \"botCount\": \"2\", \"random_seed\": \"42\"}\n renderer = \"hardware\"\n\n env_1 = deepmind_lab.Lab(level_name, observations, config=config, renderer=renderer)\n env_1 = DmLabCompatibilityV0(env_1)\n\n env_2 = pickle.loads(pickle.dumps(env_1))\n\n obs_1, info_1 = env_1.reset()\n obs_2, info_2 = env_2.reset()\n assert data_equivalence(obs_1, obs_2)\n assert data_equivalence(info_1, info_2)\n for _ in range(100):\n actions = env_1.action_space.sample()\n obs_1, reward_1, term_1, trunc_1, info_1 = env_1.step(actions)\n obs_2, reward_2, term_2, trunc_2, info_2 = env_2.step(actions)\n # assert data_equivalence(obs_1, obs_2)\n assert reward_1 == reward_2\n assert term_1 == term_2 and trunc_1 == trunc_2\n assert data_equivalence(info_1, info_2)\n\n env_1.close()\n env_2.close()", "title": "" }, { "docid": "00ac57b69eedb9e322441fcefa9198a7", "score": "0.5003634", "text": "def test_pickle_simple():\n t = task(actions=[\"echo\"], name=\"t\")\n pkl = pickle.dumps(t)\n t2 = pickle.loads(pkl)\n assert t2.actions == [\"echo\"]", "title": "" }, { "docid": "932e57724b5c81e71d8f6e9777dc100b", "score": "0.500185", "text": "def pickleme(obj, filename, protocol=2):\n with open(filename, 'w') as f:\n cPickle.dump(obj, f, protocol=protocol)", "title": "" }, { "docid": "f1deb625d50a9ce49d9a817bf92127ab", "score": "0.49529862", "text": "def saveToPickleFile(python_object, path_to_file='object.pkl'):\n out_file = open(path_to_file, 'wb')\n pkl.dump(python_object, out_file)\n out_file.close()", "title": "" }, { "docid": "9ec1c3ca17254909c67fd95a957e2296", "score": "0.49479043", "text": "def test_pickling(simulation_factory, two_particle_snapshot_factory):\n sim = simulation_factory(two_particle_snapshot_factory())\n mc = hoomd.hpmc.integrate.Sphere(d=0.1, a=0.1)\n mc.shape['A'] = dict(diameter=1.1)\n mc.shape['B'] = dict(diameter=1.3)\n sim.operations.integrator = mc\n\n cl = hoomd.hpmc.update.Clusters(trigger=hoomd.trigger.Periodic(5),\n pivot_move_ratio=0.1)\n operation_pickling_check(cl, sim)", "title": "" }, { "docid": "a88f96038af3edd13950cf60e602c0cd", "score": "0.49454793", "text": "def test_pickle(self):\n x,s = self.create_testdata()\n X = Container()\n X.add(x)\n task = mmCCZI()\n task.fit(X)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)", "title": "" }, { "docid": "fa4cd65f92a716bbe641988333a3e24f", "score": "0.4927952", "text": "def test_pickle():\n\n dct = ODict([\n (\"c\", 3),\n (\"d\", 4),\n ])\n data = pickle.dumps(dct)\n dct2 = pickle.loads(data)\n assert dct == dct2", "title": "" }, { "docid": "15f47236e456522515d6ae0c33db045a", "score": "0.49261117", "text": "def 
save_obj_pickle(obj, adress, name, save_type='single_file'):\n\n print 'saving pickle data...', adress + name\n grow(adress)\n if save_type == 'single_file':\n with open(adress + name + '.cpkl', 'wb') as f:\n cPickle.dump(obj, f, cPickle.HIGHEST_PROTOCOL)\n\n #\"\"\"\n # Serial divided\n if save_type == 'serial_divided':\n os.system(\"rm \" + adress + \"/*.cpkl\" + \"> /dev/null 2>&1\")\n beg = time.time()\n for dic in obj:\n # separating test_dic of xs_dic\n if dic['name'] == 'xs_dic':\n with open(adress + name + '/' + dic['name'] + '.cpkl', 'wb') as f:\n cPickle.dump(dic, f, cPickle.HIGHEST_PROTOCOL)\n else:\n # separating each test\n for test in dic['test'].keys():\n with open(adress + name + '/' + dic['name'] + '_' + test + '.cpkl', 'wb') as f:\n cPickle.dump(dic['test'][test], f, cPickle.HIGHEST_PROTOCOL)\n print time.time() - beg # 0.51", "title": "" }, { "docid": "80269120c76a9348b8811add40e150b8", "score": "0.49125978", "text": "def zodb_pickle(obj):\n f = BytesIO()\n p = PersistentPickler(_persistent_id, f, _protocol)\n klass = obj.__class__\n assert not hasattr(obj, '__getinitargs__'), \"not ready for constructors\"\n args = None\n\n mod = getattr(klass, '__module__', None)\n if mod is not None:\n klass = mod, klass.__name__\n\n state = obj.__getstate__()\n\n p.dump((klass, args))\n p.dump(state)\n return f.getvalue()", "title": "" }, { "docid": "bde011e49d12df0fe6742652a1b73838", "score": "0.49102893", "text": "def MeasurePickle(python, options, extra_args):\n bm_path = Relative(\"performance/bm_pickle.py\")\n return MeasureGeneric(python, options, bm_path, extra_args=extra_args)", "title": "" }, { "docid": "44d6940d10ee29c368d91669198158cf", "score": "0.4906187", "text": "def load(self, pickled_blackbox) -> None:\n loaded_data = None\n\n try:\n loaded_data = pickle.loads(pickled_blackbox)\n except pickle.UnpicklingError as e:\n loaded_data = None\n print(\"UnpicklingError: \", str(e))\n\n if loaded_data:\n try:\n self.models = loaded_data[\"models\"]\n self.scaler = loaded_data[\"scaler\"]\n self.verbose = loaded_data[\"verbose\"]\n except KeyError:\n print(\"Keys not found in pickle!\")", "title": "" }, { "docid": "6a888fe0bf1136a22b79f39d24addedc", "score": "0.49028245", "text": "def test_pickle_and_unpickle_no_usage(token_response):\n pickled_versions = [\n pickle.dumps(token_response, protocol=n)\n for n in range(pickle.HIGHEST_PROTOCOL + 1)\n ]\n unpickled_versions = [pickle.loads(x) for x in pickled_versions]\n for x in unpickled_versions:\n assert x.by_resource_server == token_response.by_resource_server", "title": "" }, { "docid": "9f9c0f1f5bc3aeb5971596ed5461043f", "score": "0.4900738", "text": "def test_builtin_realizations(name, protocol):\n # get class instance\n original = getattr(cosmology, name)\n\n # pickle and unpickle\n f = pickle.dumps(original, protocol=protocol)\n unpickled = pickle.loads(f)\n\n # test equality\n assert unpickled == original\n assert unpickled.meta == original.meta", "title": "" }, { "docid": "e3c53598aa52428cf0f1954efb520e30", "score": "0.48850453", "text": "def test_is_pickleable(self, obs_dim, action_dim):\n env = MetaRLEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))\n with mock.patch(('metarl.tf.policies.'\n 'continuous_mlp_policy.MLPModel'),\n new=SimpleMLPModel):\n policy = ContinuousMLPPolicy(env_spec=env.spec)\n\n env.reset()\n obs, _, _, _ = env.step(1)\n\n with tf.compat.v1.variable_scope('ContinuousMLPPolicy/MLPModel',\n reuse=True):\n return_var = tf.compat.v1.get_variable('return_var')\n # assign it to all one\n 
return_var.load(tf.ones_like(return_var).eval())\n output1 = self.sess.run(\n policy.model.outputs,\n feed_dict={policy.model.input: [obs.flatten()]})\n\n p = pickle.dumps(policy)\n with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n policy_pickled = pickle.loads(p)\n output2 = sess.run(\n policy_pickled.model.outputs,\n feed_dict={policy_pickled.model.input: [obs.flatten()]})\n assert np.array_equal(output1, output2)", "title": "" }, { "docid": "627be76d59515ae06cab73dd5b567cc5", "score": "0.48549765", "text": "def unpickle_something(pickle_filename):\n if not os.path.isfile(pickle_filename):\n return f\"{pickle_filename} doesn't exist!\"\n with open(pickle_filename, 'rb') as file:\n something_unpickled = pickle.load(file)\n print(f\"Loaded '{pickle_filename}'\")\n return something_unpickled", "title": "" }, { "docid": "0a93746c6048116f345d7eef7890244e", "score": "0.48459494", "text": "def _dump(obj, fname) :\n\ttry :\n\t\tf = open(fname, \"w\")\n\t\tpickle.dump(obj, f, _Pickle_Protocol)\n\t\tf.flush()\n\tfinally :\n\t\tf.close()", "title": "" }, { "docid": "0f17921d510c2f8674dc0d4f74a7be9c", "score": "0.4827176", "text": "def test_1234_pickle():\n __location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__))\n )\n with open(os.path.join(__location__, \"spec-1234.json\")) as f:\n d = json.loads(f.read())\n spec = CombinatorialSpecification.from_dict(d)\n spec.count_objects_of_size(10)\n s = pickle.dumps(spec)\n new_spec = pickle.loads(s)\n assert new_spec == spec", "title": "" }, { "docid": "8903443b7e1967093113856f7a2763b2", "score": "0.48209557", "text": "def unpickleObject(fname):\n f = open(os.path.expanduser(fname),'rb')\n try:\n obj = cPickle.load(f)\n except:\n f.close()\n raise\n return obj", "title": "" }, { "docid": "92969e44078803a08f86567d27e6cf82", "score": "0.48031557", "text": "def test_pickle(self, timezone):\n\n trigger = CronTrigger(year=2016, month='5-6', day='20-28', hour=7, minute=25, second='*',\n timezone=timezone)\n data = pickle.dumps(trigger, 2)\n trigger2 = pickle.loads(data)\n\n for attr in CronTrigger.__slots__:\n assert getattr(trigger2, attr) == getattr(trigger, attr)", "title": "" }, { "docid": "a309e8a098b5040f4ee223353e3794e2", "score": "0.47961402", "text": "def readFromPickleFile(path_to_file='object.pkl'):\n in_file = open(path_to_file, 'rb')\n python_object = pkl.load(in_file)\n return python_object", "title": "" }, { "docid": "86856721b4bc23c7c7775359132ceed4", "score": "0.47818154", "text": "def big_dump(file_path, object_to_dump):\n SimPickler.dump(file_path+\".new\", object_to_dump)\n shutil.move(file_path+\".new\", file_path)", "title": "" }, { "docid": "bef54f4987ef06f223c65aea0eb56521", "score": "0.4778086", "text": "def worker_load(adress):\n with open(adress, 'rb') as f:\n return cPickle.load(f)", "title": "" }, { "docid": "b4b5afa73652769a0de4886561fc09ba", "score": "0.47728184", "text": "def load_object(filename):\n\twith open(filename, \"rb\") as f:\n\t\ttry:\n\t\t\treturn pickle.load(f)\n\t\texcept EOFError:\n\t\t\tpass", "title": "" }, { "docid": "3378626c3f14906f335a248c94ea59d5", "score": "0.476833", "text": "def mp_with_object():\n g = object()\n return MetaProcessor(g), g, ext", "title": "" }, { "docid": "b35efe333d9e09761e50fb0f93137b84", "score": "0.47649705", "text": "def pickleObject(obj,fname):\n f = open(os.path.expanduser(fname),'wb')\n cPickle.dump(obj,f,1)", "title": "" }, { "docid": "fdd350400703e1b1bdc6de7f5186c25e", "score": "0.475868", "text": "def is_picklable(self, event):\n try:\n 
pickle.dumps(event)\n except Exception as e:\n message = \"(event - %s) is not picklable, Reason: %s\" % (\n event.identify(), e)\n assert False, message", "title": "" }, { "docid": "520a35af30c7254cac14ab7f67c463b1", "score": "0.47565234", "text": "def evaluate_result(pickled_result):\n # its a pickled object with four parts\n code, result, stdout, stderr = pickle.loads(pickled_result)\n # just dump the stderr and stdout\n sys.stderr.write(stderr)\n sys.stdout.write(stdout)\n # code indicates whether we had an exception or not\n if code:\n return result\n else:\n raise result", "title": "" }, { "docid": "9912c01da21e04d133f2fb5730bd4c85", "score": "0.47526616", "text": "def dump(file_path, object_to_dump):\n\n if(os.path.exists(file_path)):\n os.remove(file_path)\n\n output_pkl = open(file_path, 'wb')\n pickle.dump(object_to_dump,output_pkl)\n output_pkl.close()", "title": "" }, { "docid": "d4c0bc1f80923506249b523dc6ecb1ec", "score": "0.47362074", "text": "def picklable_wrapper(obj):\n try:\n pickle.dumps(obj)\n return obj\n except:\n return PicklableWrapper(obj)", "title": "" }, { "docid": "ca30c336bd12ccfdeddb39e9f8362a8f", "score": "0.47143298", "text": "def load_pickle(filename):\n print(f\"Loaded from utils/{filename}\")\n with open(\"utils/\"+filename, 'rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "6533f4212d27e3a2af01ea5549b32097", "score": "0.4710462", "text": "def writepickle(obj, filepath, verbose=False, protocol = -1):\n\tif os.path.splitext(filepath)[1] == \".gz\":\n\t\tpkl_file = gzip.open(filepath, 'wb')\n\telse:\n\t\tpkl_file = open(filepath, 'wb')\n\t\n\tpickle.dump(obj, pkl_file, protocol)\n\tpkl_file.close()\n\tif verbose: print \"Wrote %s\" % filepath", "title": "" }, { "docid": "17d5b989a5f284f5dbdaaa1fb0580307", "score": "0.47098103", "text": "def dump(obj, file, protocol=None, byref=None, fmode=None, recurse=None, **kwds):#, strictio=None):\n from .settings import settings\n protocol = settings['protocol'] if protocol is None else int(protocol)\n _kwds = kwds.copy()\n _kwds.update(dict(byref=byref, fmode=fmode, recurse=recurse))\n Pickler(file, protocol, **_kwds).dump(obj)\n return", "title": "" }, { "docid": "505f818acd73142c2d5080e426591872", "score": "0.470276", "text": "def __init__(self, do_pickle: bool = False) -> None:\n super().__init__()\n self.do_pickle = do_pickle", "title": "" }, { "docid": "505f818acd73142c2d5080e426591872", "score": "0.470276", "text": "def __init__(self, do_pickle: bool = False) -> None:\n super().__init__()\n self.do_pickle = do_pickle", "title": "" }, { "docid": "892b80348acd0372b1ee525b33f4ee1e", "score": "0.47005093", "text": "def __init__(self, lookupdir):\n lookupdir = lookupdir.strip('/')\n self.__pickle_dir__ = lookupdir\n self.__full_dir__ = '%s/%s' % (os.getcwd(), lookupdir)\n sys.path.append(self.__pickle_dir__)\n self.check_dir()\n \"\"\"\n okay, dir exists\n \"\"\"\n try:\n self.__pickle_files__ = os.listdir(self.__pickle_dir__)\n except:\n self.__pickle_files__ = []\n\n for filename in self.__pickle_files__:\n if filename.endswith('.pick'):\n self.process_import(filename)", "title": "" }, { "docid": "81031b1d5e000b19dde895207f017740", "score": "0.46926653", "text": "def get_pickleable_etype(cls, loads=..., dumps=...):\n ...", "title": "" }, { "docid": "5d4a1c0bde43a81a33f768ffb9824d9a", "score": "0.46892667", "text": "def load_object(self, value):\n if value is None:\n return None\n return pickle.loads(value)", "title": "" }, { "docid": "c761528771917e6bdae60d52752776df", "score": "0.46856233", "text": 
"def load_obj(name):\n with open(name, 'rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "cd9aefa76418517c4519c64eeb6fea6b", "score": "0.46820268", "text": "def loadPickle(name, picklepath=datapath, num=None, red=0):\n \n if red:\n if num == None:\n # Check if there is more than one\n flist = filelist(picklepath)\n if (name + '_red_1.pkl') in flist:\n print 'LOADPICKLE: Warning! There is more than one pickle file for this object! Make sure it is the right one!'\n f = open(picklepath+name+'_red.pkl', 'rb')\n pickle = cPickle.load(f)\n f.close()\n elif num != None:\n f = open(picklepath+name+'_red_'+numCheck(num)+'.pkl', 'rb')\n pickle = cPickle.load(f)\n f.close()\n return pickle\n else:\n if num == None:\n # Check if there is more than one\n flist = filelist(picklepath)\n if (name + '_obs_1.pkl') in flist:\n print 'LOADPICKLE: Warning! There is more than one pickle file for this object! Make sure it is the right one!'\n f = open(picklepath+name+'_obs.pkl', 'rb')\n pickle = cPickle.load(f)\n f.close()\n elif num != None:\n f = open(picklepath+name+'_obs_'+numCheck(num)+'.pkl', 'rb')\n pickle = cPickle.load(f)\n f.close()\n return pickle", "title": "" }, { "docid": "30a11a0a7f623d25c621367d2c967a97", "score": "0.46734425", "text": "def loadVariable(filename='objs.pkl',verbose=False):\r\n with open(filename, 'rb') as f:\r\n variable = pickle.load(f)\r\n if(verbose):\r\n print(\"It has been successfully loaded in \"+filename)\r\n return variable", "title": "" }, { "docid": "ab72955193d7bf1c6b15a278695094f1", "score": "0.4672109", "text": "def test_clone(self):\n obj = self.binning\n self.assertEqual(obj, obj.clone())", "title": "" }, { "docid": "ab72955193d7bf1c6b15a278695094f1", "score": "0.4672109", "text": "def test_clone(self):\n obj = self.binning\n self.assertEqual(obj, obj.clone())", "title": "" }, { "docid": "00e26ab19fde795ff1322b9c73737d51", "score": "0.4671238", "text": "def pickle_load(filename: str) -> object:\n with open(f'../pickles/{filename}.pickle', 'rb') as f:\n print(f'Loading {filename}.pickle')\n return pickle.load(f)", "title": "" }, { "docid": "615ad0e3d0e622cb14882227bcc326d0", "score": "0.46633083", "text": "def save_pickle(filename, obj):\n with open(\"utils/\"+filename, 'wb') as f:\n pickle.dump(obj, f)\n print(f\"Saved to utils/{filename}\")", "title": "" }, { "docid": "cf9ab55fcdfa40fae8d9ccef76c0b033", "score": "0.46621472", "text": "def test_parse_pickle(self):\n data = {'key': 'value'}\n raw = pickle.dumps(data)\n res = self.encoder_pickle.parse(raw)\n assert res['key'] == 'value'", "title": "" }, { "docid": "a60edf50a1fbceede21f61dda780ed38", "score": "0.4647641", "text": "def write_pickle(self, obj, protocol= -1):\n dump_pickle(obj, self.file, protocol)", "title": "" }, { "docid": "e7465d4b306995a8668e9c105406711a", "score": "0.46459946", "text": "def test_pickle(frozen_thawed_angle: AngleClass) -> None:\n Angle = frozen_thawed_angle\n test_data = 38.0, 257.125, 0.0\n\n orig = Angle(test_data)\n pick = pickle.dumps(orig)\n thaw = pickle.loads(pick)\n\n assert orig is not thaw\n assert orig == thaw\n\n # Ensure both produce the same pickle - so they can be interchanged.\n cy_pick = pickle.dumps(getattr(vec_mod, 'Cy_' + Angle.__name__)(test_data))\n py_pick = pickle.dumps(getattr(vec_mod, 'Py_' + Angle.__name__)(test_data))\n\n assert cy_pick == py_pick == pick", "title": "" }, { "docid": "c12d392929beb2d3cffebdcb947c61cb", "score": "0.46389517", "text": "def find_pickleable_exception(exc, loads=..., dumps=...):\n ...", "title": "" }, { "docid": 
"e4c9c079c48017b82ceb1a87b34abfec", "score": "0.46337512", "text": "def test_prevents_core_dump(self):\n instance = self.test_instance\n instance.open()\n self.mock_module_daemon.prevent_core_dump.assert_called_with()", "title": "" }, { "docid": "a9325060d11afe2981900175a96b8962", "score": "0.46261737", "text": "def dumpit(obj, fname):\n pklname = fname + '.pkl'\n with open(os.path.join('tmp2', pklname), 'wb') as f:\n pickle.dump(obj, f, -1)", "title": "" }, { "docid": "974b32305ff7c728624b3af7ee1f983e", "score": "0.46063992", "text": "def test_pickle_current(self):\r\n dump = pickle.dumps(NodeSet(\"foo[1-100]\"))\r\n self.assertNotEqual(dump, None)\r\n nodeset = pickle.loads(dump)\r\n self.assertEqual(nodeset, NodeSet(\"foo[1-100]\"))\r\n self.assertEqual(str(nodeset), \"foo[1-100]\")\r\n self.assertEqual(nodeset[0], \"foo1\")\r\n self.assertEqual(nodeset[1], \"foo2\")\r\n self.assertEqual(nodeset[-1], \"foo100\")", "title": "" }, { "docid": "decadd7795fb0a5737abc7654935e473", "score": "0.46051767", "text": "def readpickle(filepath, verbose=False):\n\tif os.path.splitext(filepath)[1] == \".gz\":\n\t\tpkl_file = gzip.open(filepath,'rb')\n\telse:\n\t\tpkl_file = open(filepath, 'rb')\n\tobj = pickle.load(pkl_file)\n\tpkl_file.close()\n\tif verbose: print \"Read %s\" % filepath\n\treturn obj", "title": "" }, { "docid": "f68f7aad5632a77af3002be65879818d", "score": "0.4602073", "text": "def save_python_obj(file_name:str,obj:Any) -> None:\n with open(file_name,'wb') as f:\n pickle.dump(obj,f,protocol=4)", "title": "" }, { "docid": "27963cddebbe7962e7e4a5a84563c335", "score": "0.4594329", "text": "def read_object(filename):\n with open(filename, 'rb', buffering=2000000) as f:\n obj = pickle.load(f)\n return obj", "title": "" }, { "docid": "36b190e2a74d18a6ab3ff7f2b5fd4087", "score": "0.459425", "text": "def loads_either(cloudpickle_out, pickle_out):\n if CLOUDPICKLE_AVAILABLE and cloudpickle_out is not None:\n return cloudpickle.loads(cloudpickle_out)\n elif pickle_out is not None:\n return pickle.loads(pickle_out)\n else:\n raise ValueError('Not able to load the pickled object')", "title": "" }, { "docid": "ecb3a73a86d89304e7c94fedc6ca3f8a", "score": "0.45935196", "text": "def pick_dump(obj, path):\n with open(path, \"wb\") as f:\n pick.dump(obj, f)", "title": "" }, { "docid": "64d48446781f45c2f3cde3123010fc45", "score": "0.45796266", "text": "def _PickleBenchmark(base_python, changed_python, options, extra_args):\n return SimpleBenchmark(MeasurePickle,\n base_python, changed_python, options, extra_args)", "title": "" }, { "docid": "84aa5d19309c1e5ab875b0d2d9d4adbd", "score": "0.45681998", "text": "def pickleChecker():\n PICKLE_FOLDER = \"../../pickles/\"\n PICKLE_PATH_X = Path(PICKLE_FOLDER + 'x_hand.npy')\n PICKLE_PATH_Y = Path(PICKLE_FOLDER + 'y_hand.npy')\n try:\n X = np.load(PICKLE_PATH_X)\n Y = np.load(PICKLE_PATH_Y)\n return X, Y\n except IOError as ioe:\n print(ioe)\n return ioe", "title": "" }, { "docid": "8aa2f106fd0f272d3f6ad3b73cf0576d", "score": "0.45653313", "text": "def pickle_dump(obj, f, ext='.pkl'):\n if isinstance(f, Path):\n f = f.as_posix()\n \n with open(f+ext, 'wb') as file:\n return cloudpickle.dump(obj=obj, file=file, protocol=pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "a76dac2185a0cf73735e3efd9518493a", "score": "0.4564233", "text": "def test_thing_with_fd():\n fd = open(__file__, 'r')\n fd.close()\n obj = Thing(fd)\n jsonstr = jsonpickle.encode(obj)\n newobj = jsonpickle.decode(jsonstr)\n assert newobj.name is None", "title": "" }, { "docid": 
"55203861360f9fc3fbfd0a1e2227d85b", "score": "0.4557539", "text": "def pickle_dumps(obj: Any, error_msg: str):\n try:\n return pickle.dumps(obj)\n except TypeError as e:\n sio = io.StringIO()\n inspect_serializability(obj, print_file=sio)\n msg = f\"{error_msg}:\\n{sio.getvalue()}\"\n raise TypeError(msg) from e", "title": "" }, { "docid": "523740e83de61ba54e2cc2cbce22659c", "score": "0.45540762", "text": "def test_omits_prevent_core_dump_if_prevent_core_false(self):\n instance = self.test_instance\n instance.prevent_core = False\n instance.open()\n self.assertFalse(self.mock_module_daemon.prevent_core_dump.called)", "title": "" }, { "docid": "497f21f0f4a7926c810884623178d1e3", "score": "0.4553845", "text": "def register_pickle():\n def pickle_dumps(obj, dumper=pickle.dumps):\n return dumper(obj, protocol=pickle_protocol)\n\n registry.register('pickle', pickle_dumps, unpickle,\n content_type='application/x-python-serialize',\n content_encoding='binary')", "title": "" }, { "docid": "88b92510d640a6e5b96a9e1dc044da7a", "score": "0.4553397", "text": "def load_obj(path, filename):\n file_path = os.path.join(path, filename)\n with open(file_path, 'rb') as handle:\n return pickle.load(handle)", "title": "" }, { "docid": "d68319a44208d8f0598e609497cc7bb4", "score": "0.45527503", "text": "def pickle_obj(obj: Any, outfile: str = \"temp_dir/temp.p\") -> None:\n with open(outfile, \"wb\") as fp:\n pickle.dump(obj, fp)", "title": "" }, { "docid": "2ae208e023dc8a4972a817cda2dc53d5", "score": "0.45501328", "text": "def test_pickle(self, timezone):\n trigger = DateTrigger(date(2016, 4, 3), timezone=timezone)\n data = pickle.dumps(trigger, 2)\n trigger2 = pickle.loads(data)\n assert trigger2.run_date == trigger.run_date", "title": "" }, { "docid": "2dd135372283e36681574f34f142d653", "score": "0.45471847", "text": "def test4CrossInheritanceObject(self):\n from derived import Derived, DerivedCustomReduce\n\n d1 = Derived()\n attr = 7\n d2 = DerivedCustomReduce(attr)\n\n # Generic __reduce__ should raise an exception\n self.assertRaises(IOError, pickle.dumps, d1)\n\n # Custom __reduce__ should allow pickling\n d2_2 = pickle.loads(pickle.dumps(d2))\n self.assertEqual(d2_2.attr, attr)", "title": "" }, { "docid": "e2da4fe72f69feb81018663390a259e1", "score": "0.4539921", "text": "def test3PickleFacadeCheck(self):\n\n def get_root_facade():\n return ROOT\n\n facade = pickle.loads(pickle.dumps(get_root_facade()))\n\n # Check attributes of the unserialized facade\n self.assertEqual(facade.__name__, ROOT.__name__)\n self.assertEqual(facade.__file__, ROOT.__file__)", "title": "" }, { "docid": "123b5f3a3a4cb2174ca1041864fb71f4", "score": "0.45366675", "text": "def read_pickle_object(pickle_file_name):\n with open(pickle_file_name, 'rb') as inn:\n m = pickle.load(inn)\n return m", "title": "" }, { "docid": "9ffd4fcb430cf8d133eace520f9732df", "score": "0.45343965", "text": "def syncPickle(self,token,obj):\r\n hashValue = hashlib.md5(token.encode(\"utf8\")).hexdigest()\r\n objectpath = self.doc.module_path+\"/objects/\"\r\n \r\n if not os.path.isdir(objectpath):\r\n os.makedirs(objectpath)\r\n logger.warning(\"[Sync pickle] make folder: %s \"%objectpath)\r\n \r\n with open(\"{}{}.pkl\".format(objectpath,hashValue),\"wb\") as pfile:\r\n pickle.dump(obj,pfile)\r\n pfile.close()\r\n logger.info(\"[Sync pickle] pickled %s \"%hashValue)\r\n \r\n # if exists, append to pickle file\r", "title": "" }, { "docid": "13dae520758b46974b5b558c43e9140e", "score": "0.45284578", "text": "def test_comp_pickle_save():\n data = ['test', 
'list']\n filename = 'test_comp_pkl_save'\n comp_pickle_save(data, filename)\n\n assert os.path.isfile(filename) is True", "title": "" }, { "docid": "d19bf2704921a1b0315c41c84a5cd390", "score": "0.45246783", "text": "def loadPickle(filename):\n file = open(filename, 'rb')\n obj = pickle.load(file)\n file.close()\n #logging.debug(\"Loaded \" + filename + \" with object \" + str(type(obj)))\n\n return obj", "title": "" }, { "docid": "8eec84ec8fc7df78915a8f9e6b3b2c3e", "score": "0.45126012", "text": "def clone_via_serialize(obj):\n s = cPickle.dumps(obj, get_pickle_protocol())\n return cPickle.loads(s)", "title": "" }, { "docid": "ab8b7b7dcc21890f118e2cfef56c1bfc", "score": "0.45121163", "text": "def no_match(py_obj, h_group, call_id=0, **kwargs):\n pickled_obj = pickle.dumps(py_obj)\n d = h_group.create_dataset('data_%i' % call_id, data=[pickled_obj])\n d.attrs[\"type\"] = [b'pickle']\n\n warnings.warn(\"%s type not understood, data have been serialized\" % type(py_obj),\n SerializedWarning)", "title": "" }, { "docid": "a95b87f99b0e50c66ee999c88a37f0a8", "score": "0.4506766", "text": "def load_obj(name):\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "a95b87f99b0e50c66ee999c88a37f0a8", "score": "0.4506766", "text": "def load_obj(name):\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "763b1e654318eee177e0aead9dfc665a", "score": "0.4494884", "text": "def _to_picklefile(self, outprefix, verbose=False):\r\n if verbose:\r\n s = \"Exporting cross-correlations in binary format to file: {}.pickle\"\r\n print(s.format(outprefix))\r\n\r\n f = psutils.openandbackup(outprefix + '.pickle', mode='wb')\r\n pickle.dump(self, f, protocol=2)\r\n f.close()", "title": "" }, { "docid": "576efd210816e2c291cd3aff1983414a", "score": "0.44890103", "text": "def load():\n\n with open(\"example.pickle\", \"rb\") as f:\n return pickle.load(file=f)", "title": "" } ]
cfba5384395dd909785d5d1aa4c50363
Load recurrent neural network from h5.file
[ { "docid": "091e5e3e7c9e117ec65308a08f85f7cb", "score": "0.60439736", "text": "def load_RNN(self, model=None):\n\n if model != None:\n return models.load_model(model)", "title": "" } ]
[ { "docid": "a472291e19ca01e1c6c4f112fa1d57ec", "score": "0.7449959", "text": "def load(path: str):\n with h5py.File(path, 'r') as net_file:\n net = TensorNetwork(backend=net_file[\"backend\"][()])\n nodes = list(net_file[\"nodes\"].keys())\n edges = list(net_file[\"edges\"].keys())\n\n for node_name in nodes:\n node_data = net_file[\"nodes/\" + node_name]\n node_type = get_component(node_data['type'][()])\n node_type._load_node(net, node_data)\n\n nodes_dict = {node.name: node for node in net.nodes_set}\n\n for edge in edges:\n edge_data = net_file[\"edges/\" + edge]\n Edge._load_edge(edge_data, nodes_dict)\n return net", "title": "" }, { "docid": "7ee11035c32d78a0774d9aafc2156b52", "score": "0.6647814", "text": "def load(filepath, load_weights=True):\n # TODO: support loading LambdaLayer that includes parametric self defined function with outside variables\n M = utils.load_hdf5_graph(filepath=filepath, load_weights=load_weights)\n return M", "title": "" }, { "docid": "1d2a45d91cd2380dc051484acca2d50b", "score": "0.6638091", "text": "def load_model(self):\n self.online_network.total_model.load_weights(\"models/{}.h5\".format(self.name))\n self.target_network.total_model.load_weights(\"models/{}.h5\".format(self.name))\n # load memory\n self.memory.samples = []\n self.memory.read_from_file(\"{}{}.mem\".format(self.save_big_path, self.name))\n # load steps and epsilon\n mylogfile = open(\"models/{}.csv\".format(self.name), 'r')\n firstRow = mylogfile.readline()\n fieldnames = firstRow.strip('\\n').split(\";\")\n self.steps = int(fieldnames[0])\n self.epsilon = float(fieldnames[1])", "title": "" }, { "docid": "29183a0011a8b619fefeac47d13cca0c", "score": "0.6504803", "text": "def load(cls, filename):\n data = np.load(filename, allow_pickle=True)\n neurons = [w.shape[0] for w in data[\"weights\"]]\n neurons.append(data[\"weights\"][-1].shape[1])\n network = cls(neurons)\n network.weights = data[\"weights\"]\n network.biases = data[\"biases\"]\n return network", "title": "" }, { "docid": "0a27968244c4e77e7422b8fe518551d1", "score": "0.6485048", "text": "def load_example(file_path):\n data = h5py.File(file_path, 'r')\n rgb_images = list(data['image'])\n depth_images = list(data['depth_image'])\n frame_indices = np.arange(len(rgb_images))\n gripper_status = list(data['gripper'])\n action_status = list(data['label'])\n gripper_action_goal_idx = []\n\n # print(\"gripper \",gripper_status)\n # print(\"frames \", frame_indices)\n\n # generate new action labels and goal action indices\n gripper_action_label, gripper_action_goal_idx = generate_gripper_action_label(data, action_status, gripper_status, gripper_action_goal_idx)\n\n rgb_images = ConvertImageListToNumpy(np.squeeze(rgb_images), format='list')\n depth_images = ConvertImageListToNumpy(np.squeeze(depth_images), format='list')\n numpy_data = {\n 'rgb_images': rgb_images,\n 'depth_images': depth_images,\n 'frame_indices': frame_indices,\n 'gripper_status': gripper_status,\n 'action_status': action_status,\n 'gripper_action_label': gripper_action_label,\n 'gripper_action_goal_idx': gripper_action_goal_idx\n }\n\n return data, numpy_data", "title": "" }, { "docid": "b132f70c1944a3fb40d32205e40ecc67", "score": "0.64657694", "text": "def load_model(self, filename):\n if \"/\" in filename:\n basename = \"/\".join(filename.split(\"/\")[:-1]) + \"/\" + filename.split(\"/\")[-1].split(\".\")[0]\n else:\n basename = filename.split(\".\")[0]\n serializers.load_hdf5(filename + '.weights', self)", "title": "" }, { "docid": 
"b0eb3eb7df6ea26390f936d3b13e8978", "score": "0.64656603", "text": "def reload_net(path):\n return keras.models.load_model(os.path.normpath(path))", "title": "" }, { "docid": "22be45c34475cfec1667433e2a31deac", "score": "0.64272606", "text": "def load_model(self, model_dir: str) -> \"OneClassNeuralNetwork\":\n params = np.load(f'{model_dir}/params.npz')\n w = params['w']\n V = params['V']\n nu = params['nu'].tolist()\n model = load_model(f'{model_dir}/model.h5',\n custom_objects={'custom_hinge': self.custom_ocnn_loss(nu, w, V)})\n return model", "title": "" }, { "docid": "a9fcacf30761b31a91c3aa57c955d8e4", "score": "0.6398747", "text": "def load(path):\r\n net = torch.load(path)\r\n return net", "title": "" }, { "docid": "cc9057976e1fd6334d78a3616bef8eec", "score": "0.63440007", "text": "def load_model(self, filename):\n summary = torch.load(filename)\n gen = Generator(self.args)\n gen.load_state_dict(summary['gnet'])", "title": "" }, { "docid": "abd314ad5cc9da2e0a547244052e1387", "score": "0.62680495", "text": "def load_h5(fname):\n with h5py.File(fname, 'r') as h5f:\n return h5f['data'][:]", "title": "" }, { "docid": "cceb570ec15af08c764ea25d1849252a", "score": "0.62357044", "text": "def load(self, file_name):\n with self.graph.as_default():\n saver = tf.train.Saver(tf.trainable_variables())\n saver.restore(self.session, self.io_nn.model_dir_checkpoints + file_name + '.ckpt')\n print(\"Loaded network from file \" + file_name)", "title": "" }, { "docid": "0e2585576168677c2cfba83b27970410", "score": "0.6230742", "text": "def read_data(self):\n file = h5py.File(self.fname, 'r')\n \n self.x_train = np.float32(file['x_train'][:])\n self.y_train = np.float32(file['y_train'][:, 0])\n self.x_test = np.float32(file['x_test'][:])\n self.y_test = np.float32(file['y_test'][:, 0])\n \n file.close()", "title": "" }, { "docid": "bd7bc8ac8f3b1d36a9e565230c42ddfa", "score": "0.62071514", "text": "def load(cls, filename):\n data = np.load(filename)\n network = cls(data[\"neuron_counts\"])\n network.W[...] = data[\"W\"]\n network.b[...] = data[\"b\"]\n index = 0\n for cell in network.cells:\n for p in cell.parameters():\n p[...] 
= data[\"arr_\" + str(index)]\n index += 1\n return network", "title": "" }, { "docid": "ceca592c1b2d4eb082fb8efc3fcd179f", "score": "0.62018853", "text": "def load_network(path, separator=\";,\"):\n\n layers = []\n weights = []\n biases = []\n\n file = open(path, \"r\")\n\n layers_line = file.readline()\n\n for size in layers_line.split(separator[0]):\n layers.append(int(size))\n\n for k in range(len(layers) - 1):\n weight = numpy.zeros((layers[k+1], layers[k]), dtype=float)\n line = file.readline()\n nodes = line.split(separator[0])\n\n for i in range(len(nodes)):\n node = nodes[i].split(separator[1])\n\n for j in range(len(node)):\n weight[i, j] = float(node[j])\n\n weights.append(weight)\n\n for k in range(len(layers) - 1):\n bias = numpy.zeros((layers[k+1], 1), dtype=float)\n line = file.readline()\n nodes = line.split(separator[0])\n\n for i in range(len(nodes)):\n bias[i, 0] = float(nodes[i])\n\n biases.append(bias)\n\n network = MultiPerceptron(layers)\n network.set_weights_and_biases(weights, biases)\n\n return network", "title": "" }, { "docid": "4cba79333cbabfb4dd6d42374806bd87", "score": "0.6201638", "text": "def model_from_file(cls, model_path, input_shape, n_dense_neurons=1000,\n dropout_ratio=0.5, pool_size=(2, 2), activation_name='sigmoid',\n depth=4, n_base_filters=16, n_classes=2,\n batch_normalization=False, min_receptive_field_size=None):\n net_name = os.path.splitext(os.path.basename(model_path))[0]\n net = cls(input_shape, net_name, n_dense_neurons=n_dense_neurons,\n dropout_ratio=dropout_ratio, pool_size=pool_size,\n activation_name=activation_name, depth=depth,\n n_base_filters=n_base_filters, n_classes=n_classes,\n batch_normalization=batch_normalization,\n min_receptive_field_size=min_receptive_field_size)\n net.model = net._get_model()\n net.model.load_weights(model_path)\n return net", "title": "" }, { "docid": "237fc295d7127bdbf31d2a606a593214", "score": "0.6157838", "text": "def load_network(filepath):\n\n with open(filepath, 'rb') as input_f:\n network = pickle.load(input_f)\n\n return network", "title": "" }, { "docid": "bf653656691fd5022fc48c0783ea3b27", "score": "0.61289394", "text": "def load_model(self, filename):\n summary = torch.load(filename)\n discriminator = Discriminator(self.args)\n discriminator.load_state_dict(summary['dnet'])", "title": "" }, { "docid": "f444d81bef1d15e11564c7d21f871464", "score": "0.6128591", "text": "def load_from_file(model_fname):\n log_info(\"Loading reranker from %s...\" % model_fname)\n with file_stream(model_fname, 'rb', encoding=None) as fh:\n data = pickle.load(fh)\n ret = RerankingClassifier(cfg=data['cfg'])\n ret.load_all_settings(data)\n\n # re-build TF graph and restore the TF session\n tf_session_fname = os.path.abspath(re.sub(r'(.pickle)?(.gz)?$', '.tfsess', model_fname))\n ret._init_neural_network()\n ret.saver.restore(ret.session, tf_session_fname)\n return ret", "title": "" }, { "docid": "58f777ba64a0883012697793bf3e9c37", "score": "0.6106363", "text": "def load(self, file_name):\n with self.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(self.session, io.tf_save_path + file_name + '.ckpt')\n print \"Loaded network from file \" + file_name", "title": "" }, { "docid": "b4ccc7a477a89c3a24efa7aa8a05f7bc", "score": "0.6103693", "text": "def load_model(model_weights_path):\n\n model = models.resnet50(pretrained=False)\n for param in model.parameters():\n param.requires_grad = False\n in_fea = model.fc.in_features\n model.fc = nn.Linear(in_fea, 120)\n model_weights = torch.load(model_weights_path, 
map_location=torch.device('cpu'))\n model.load_state_dict(model_weights)\n return model", "title": "" }, { "docid": "cec903bb713cc8fd2fb2a89f08498220", "score": "0.61029655", "text": "def load_network_from_file(self, model_path = \"model.bin\") -> str:\n # TODO: change assert to Raise.\n assert os.path.exists(model_path), \"File does not exist. You can download network with download_network()\"\n self.network.load_state_dict(torch.load(model_path, map_location = torch.device(self.device)))\n self.network.device = self.device\n return f'Weights for network loaded from {model_path}'", "title": "" }, { "docid": "152d8dff1732ec69c7223ce4caf9f8c8", "score": "0.60978454", "text": "def load_model(self,filename):\r\n file_name_model = \"best_DNN_models\\\\\" + filename + '.json'\r\n file_name_weights = \"best_DNN_models\\\\\" + filename + '.h5'\r\n print(\"Loading model from: \" + file_name_model)\r\n json_file = open(file_name_model, 'r')\r\n loaded_model_json = json_file.read()\r\n json_file.close()\r\n loaded_model = model_from_json(loaded_model_json)\r\n loaded_model.load_weights(file_name_weights)\r\n self.model = loaded_model\r\n print(\"Model loaded successfully!\")", "title": "" }, { "docid": "74290cd0501ece4fbb1377c9f46397d9", "score": "0.60837543", "text": "def load(file):\n model_file=open(file,'rb') \n nn= pickle.load(model_file)\n model_file.close()\n return nn", "title": "" }, { "docid": "3c609b96fd6939402b2ae44091b2c130", "score": "0.60827225", "text": "def load_model_from_h5(run_hyperparameters: Dict, model_filepath: str) -> keras.Model:\n custom_objects = _get_custom_objects(run_hyperparameters[\"metrics\"])\n return keras.models.load_model(model_filepath, custom_objects=custom_objects)", "title": "" }, { "docid": "f93bd5b97a5442c08a4249157074e87b", "score": "0.6067554", "text": "def __init__(self, model_name, anchor_box_ball=(5, 5), anchor_box_post=(2, 5)):\r\n self.network = load_model(model_name + '.hdf5')\r\n self.network.summary() # prints the neural network summary\r\n self.anchor_box_ball = anchor_box_ball\r\n self.anchor_box_post = anchor_box_post", "title": "" }, { "docid": "0db44088eb263bf1f38bc915ec2c8080", "score": "0.60656494", "text": "def readNNFile(filename):\n f = open(filename, 'r')\n nInput = int(f.readline())\n nHidden = int(f.readline())\n nOutput = int(f.readline())\n nn = NN(nInput, nHidden, nOutput)\n for i in range(nHidden):\n for j in range(nInput):\n nn.w_hid[i, j] = float(f.readline())\n for i in range(nHidden):\n nn.b_hid[i] = float(f.readline())\n for i in range(nOutput):\n for j in range(nHidden):\n nn.w_out[i, j] = float(f.readline())\n for i in range(nOutput):\n nn.b_out[i] = float(f.readline())\n f.close()\n return nn", "title": "" }, { "docid": "8ca78dd1ad7a22c06375f35062ed4537", "score": "0.60544026", "text": "def load_model():\n Resnet50_model = Sequential()\n Resnet50_model.add(GlobalAveragePooling2D(input_shape=(7, 7, 2048)))\n Resnet50_model.add(Dense(133, activation='softmax'))\n\n Resnet50_model.load_weights('../application_data/weights.best.Resnet50.hdf5')\n global model\n model = Resnet50_model", "title": "" }, { "docid": "e321dd6a6f61f1dd28f2109888fea53f", "score": "0.60408264", "text": "def load_resnet50(scopes):\n filename = 'resnet50.h5'\n weights_path = get_file(\n filename, __model_url__ + 'resnet/' + filename,\n cache_subdir='models',\n file_hash='9df0843bdadb58ed24d360564c45b119')\n return load_keras_weights(scopes, weights_path)", "title": "" }, { "docid": "5950db59b80351e2c84e867d2ba18eeb", "score": "0.60384166", "text": "def 
read_network(fname, full_output=False):\n # read GRN files\n data_columns = [\"tf_target\", \"prob\"]\n if full_output:\n data_columns = [\n \"tf_target\",\n \"prob\",\n \"tf_expression\",\n \"target_expression\",\n \"weighted_binding\",\n \"activity\",\n ]\n # read the GRN file\n rnet = pd.read_csv(\n fname,\n sep=\"\\t\",\n usecols=data_columns,\n dtype=\"float64\",\n converters={\"tf_target\": str},\n index_col=\"tf_target\",\n )\n\n return rnet", "title": "" }, { "docid": "34a8245456d56d22510d33dffcc76a13", "score": "0.60269505", "text": "def load(self, path):\n checkpoint = torch.load(path, map_location='cpu')\n self.net.load_state_dict(checkpoint['net'])", "title": "" }, { "docid": "0308effb9da6c83aca16b503f486d73f", "score": "0.60229796", "text": "def load(self, path_to_model):\n chemml_model = pd.read_csv(path_to_model,index_col=0)\n self.model = load_model(chemml_model.loc['path_to_file'][0])\n # optimizer config\n opt = self.model.optimizer.get_config()\n opt_list = [opt['name']]\n del opt['name']\n opt_list.append(opt)\n self.opt = self.parse_opt_config(opt_list)\n \n self.nepochs=int(chemml_model.loc['nepochs'][0])\n self.batch_size=int(chemml_model.loc['batch_size'][0])\n self.loss=chemml_model.loc['loss'][0]\n self.is_regression=eval(chemml_model.loc['is_regression'][0])\n self.nclasses=chemml_model.loc['nclasses'][0]\n \n if str(self.nclasses).lower() == 'nan':\n self.nclasses = None\n else:\n self.nclasses = int(self.nclasses)\n \n self.feature_size = int(chemml_model.loc['feature_size'][0])\n \n # layer config\n self.layers = [(n['class_name'],n['config']) for n in self.model.get_config()['layers']]\n self.nhidden = None\n self.nneurons = None\n self.activations = None\n \n return self", "title": "" }, { "docid": "cf10103822709db63448604d9ba3c97f", "score": "0.6021567", "text": "def load_model_networks(self, directory, extension=\"_final\"):\n raise NotImplementedError", "title": "" }, { "docid": "cd011126757d029cef4564014f276363", "score": "0.6019663", "text": "def load_model(path):\n if not path.endswith('.h5'):\n path = '{}.h5'.format(path)\n\n with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D}):\n model = keras.models.load_model(path)\n return model", "title": "" }, { "docid": "c1f4485399791997dd44b5f220c356d4", "score": "0.60178465", "text": "def load_model(filepath, neural_network):\n print (\"Loading model from %s ...\" % filepath)\n if neural_network:\n from keras.models import load_model\n model = load_model(filepath)\n else:\n import pickle\n model = pickle.load(open(filepath, 'rb'))\n print (\"Loading model done.\")\n return model", "title": "" }, { "docid": "c96bddae6ee8c8fb4cf7a27149a9c2d7", "score": "0.60125995", "text": "def read_file_into_graph(self):\n\n assert_or = False\n assert_and = False\n input_bounds = {}\n input_elements = []\n\n with open(self.filepath, \"r\") as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n elements = line.split()\n if elements[0] == \"Input\":\n input_bounds[elements[1]] = {\"lb\": None, \"ub\": None}\n self.graph.add_node(elements[1], node_type=\"input\")\n\n if elements[0] == \"ReLU\":\n\n relu_in_name = elements[1] + \"_in\"\n relu_out_name = elements[1]\n bias = float(elements[2])\n variables, coeffs = self.get_vars_and_coefficients(elements, str_only=True)\n\n relu_entry = namedtuple(\"ReLU\", [\"relu_in_name\", \"relu_out_name\", \"bias\", \"variables\", \"coeffs\"])\n self.relu_nodes[elements[1]] = relu_entry(relu_in_name, 
relu_out_name, bias, variables, coeffs)\n # relu_in node is created and added to self.relu_in_nodes later, other variables also created later\n\n self.graph.add_node(relu_in_name, node_type=\"relu_in\", bias=bias)\n self.graph.add_node(relu_out_name, node_type=\"relu_out\")\n self.graph.add_edge(relu_in_name, relu_out_name)\n for v, w in zip(variables, coeffs):\n self.graph.add_edge(v, relu_in_name, weight=w)\n\n\n if elements[0] == \"MaxPool\":\n\n self.max_pool_nodes[elements[1]] = elements[2:]\n self.graph.add_node(elements[1], node_type=\"max_pool\")\n self.graph.add_edges_from(((v, elements[1]) for v in elements[2:]), weight=1)\n\n\n if elements[0] == \"Linear\":\n variables, coeffs = self.get_vars_and_coefficients(elements, str_only=True)\n bias = float(elements[2])\n self.linear_nodes[elements[1]] = (bias, variables, coeffs)\n\n #self.graph.add_edges_from((v.name, linear.name) for v in variables)\n self.graph.add_node(elements[1], node_type=\"linear\", bias=bias)\n for v, w in zip(variables, coeffs):\n self.graph.add_edge(v, elements[1], weight=w)\n\n\n if elements[0] == \"Assert\":\n input_elements.append(elements)\n\n # explicit bounds for input neurons\n if len(elements) == 5 and elements[-1] in input_bounds:\n if elements[1] == \"<=\":\n new_lb = float(elements[2]) / float(elements[3])\n if input_bounds[elements[-1]][\"lb\"] is None or input_bounds[elements[-1]][\"lb\"] < new_lb:\n input_bounds[elements[-1]][\"lb\"] = new_lb\n\n elif elements[1] == \">=\":\n new_ub = float(elements[2]) / float(elements[3])\n if input_bounds[elements[-1]][\"ub\"] is None or input_bounds[elements[-1]][\"ub\"] > new_ub:\n input_bounds[elements[-1]][\"ub\"] = new_ub\n\n if elements[0] == \"AssertOut\":\n assert elements[1] in [\"<=\", \">=\"] and not assert_or\n assert_and = True\n cons = namedtuple(\"output_cons\", [\"lhs\", \"operator\", \"elements\"])\n self.output_cons.append(cons(float(elements[2]), True if elements[1] == \">=\" else False, elements))\n #print(\"assertout\", elements)\n\n if elements[0] == \"AssertOr\":\n # assertOr properties are basically the same as AND, just the \"imaginary\" operator is turned around\n # we assume that all subsequent ORs in the file form one disjunction\n assert elements[1] in [\"<=\", \">=\"] and not assert_and\n assert_or = True\n cons = namedtuple(\"output_cons\", [\"lhs\", \"operator\", \"elements\"])\n self.output_cons.append(cons(float(elements[2]), False if elements[1] == \">=\" else True, elements))\n\n for var_name, bounds in input_bounds.items():\n self.vars[var_name] = self.model.addVar(name=var_name, lb=bounds[\"lb\"], ub=bounds[\"ub\"])\n self.input_nodes[var_name] = self.vars[var_name]\n\n for assert_input_count, elements in enumerate(input_elements):\n if elements[1] == \"<=\":\n self.model.addCons(float(elements[2]) <= self.quicksum_coeff_var(elements),\n name=\"input_cons_\" + str(assert_input_count))\n elif elements[1] == \">=\":\n self.model.addCons(float(elements[2]) >= self.quicksum_coeff_var(elements),\n name=\"input_cons_\" + str(assert_input_count))\n else:\n raise NotImplementedError(\"This property cannot be verified: \" + elements[1])\n\n self.model.hideOutput()\n for var_name, bounds in input_bounds.items():\n if bounds[\"lb\"] is None:\n self.model.setObjective(self.vars[var_name])\n self.model.optimize()\n if self.model.getStatus() != \"optimal\":\n raise ValueError(\"LP lower bound of input cannot be solved to optimality\")\n else:\n bound = self.model.getDualbound()\n self.model.freeTransform()\n 
self.model.chgVarLbGlobal(self.vars[var_name], bound - 10 * self.eps)\n if bounds[\"ub\"] is None:\n self.model.setObjective(self.vars[var_name], sense=\"maximize\")\n self.model.optimize()\n if self.model.getStatus() != \"optimal\":\n raise ValueError(\"LP upper bound of input cannot be solved to optimality\")\n else:\n bound = self.model.getDualbound()\n self.model.freeTransform()\n self.model.chgVarUbGlobal(self.vars[var_name], bound + 10 * self.eps)\n\n self.model.setObjective(0.0)\n\n self.model.hideOutput(quiet=False)\n\n return self.model, self.vars", "title": "" }, { "docid": "59dc3681f30d8f9111428804473895bd", "score": "0.6010998", "text": "def load_model(self):\n \n self.model = Darknet(self.config_path, img_size=self.img_size)\n\n #check if yolov3.weights file exists else download it\n if not os.path.exists(self.weights_path):\n print(\"downloading weights from web\")\n filename=self.weights_path\n url=\"https://pjreddie.com/media/files/yolov3.weights\"\n chunkSize = 1024\n r = requests.get(url, stream=True)\n with open(filename, 'wb') as f:\n pbar = tqdm( unit=\"B\", total=int( r.headers['Content-Length'] ) )\n for chunk in r.iter_content(chunk_size=chunkSize): \n if chunk: # filter out keep-alive new chunks\n pbar.update (len(chunk))\n f.write(chunk)\n\n\n\n self.model.load_weights(self.weights_path)\n self.model.cuda()\n self.model.eval()\n self.classes = utils.load_classes(self.class_path)\n self.Tensor = torch.cuda.FloatTensor", "title": "" }, { "docid": "467f91546c16df05fadba1adefbbc01f", "score": "0.60007584", "text": "def _load_model(self, model, load_file):\n\n vars = {}\n def _gather(name, obj):\n if isinstance(obj, h5py.Dataset):\n vars[name] = obj[...]\n\n with h5py.File(load_file) as f:\n f.visititems(_gather)\n\n model.assign(vars)", "title": "" }, { "docid": "6611594accf2d6e237abb9ab117e50da", "score": "0.5979233", "text": "def load(self, filepath):\n var_dict = torch.load(filepath)\n self.rnn_model.load_state_dict(var_dict['rnn_state_dict'])\n self.rnn_init_hidden = nn.Parameter(\n torch.from_numpy(var_dict['rnn_init_hidden']).to(self.device))\n self.transition_bias = float(var_dict['transition_bias'])\n self.transition_bias_denominator = float(\n var_dict['transition_bias_denominator'])\n self.crp_alpha = float(var_dict['crp_alpha'])\n self.sigma2 = nn.Parameter(\n torch.from_numpy(var_dict['sigma2']).to(self.device))\n\n self.logger.print(\n 3, 'Loaded model with transition_bias={}, crp_alpha={}, sigma2={}, '\n 'rnn_init_hidden={}'.format(\n self.transition_bias, self.crp_alpha, var_dict['sigma2'],\n var_dict['rnn_init_hidden']))", "title": "" }, { "docid": "0a5186ae4250bb01f62c3cd51ec1b86b", "score": "0.5949441", "text": "def load_network(self, which_epoch, train=True):\n save_filename = '%s_net.pth' % which_epoch\n load_path = join(self.save_dir, save_filename)\n net = self.net\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n checkpoint = torch.load(load_path, map_location=self.device)\n if hasattr(checkpoint['model_state_dict'], '_metadata'):\n del checkpoint['model_state_dict']._metadata\n net.load_state_dict(checkpoint['model_state_dict'])\n if train:\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n self.opt.epoch_count = checkpoint[\"epoch\"]\n else:\n net.eval()", "title": "" }, { "docid": "9796def368ca05d71d277dea78bd5832", "score": "0.59286076", "text": "def load_HUXt_run(filepath):\n if 
os.path.isfile(filepath):\n\n data = h5py.File(filepath, 'r')\n\n cr_num = np.int32(data['cr_num'])\n cr_lon_init = data['cr_lon_init'][()] * u.Unit(data['cr_lon_init'].attrs['unit'])\n simtime = data['simtime'][()] * u.Unit(data['simtime'].attrs['unit'])\n simtime = simtime.to(u.day)\n dt_scale = data['dt_scale'][()]\n v_boundary = data['_v_boundary_init_'][()] * u.Unit(data['_v_boundary_init_'].attrs['unit'])\n r = data['r'][()] * u.Unit(data['r'].attrs['unit'])\n lon = data['lon'][()] * u.Unit(data['lon'].attrs['unit'])\n nlon = lon.size\n map_inwards = np.int32(data['_map_inwards_'])\n if map_inwards == 1:\n map_inwards = True\n else:\n map_inwards = False\n\n if cr_num != 9999:\n if nlon == 1:\n model = HUXt(cr_num=cr_num, cr_lon_init=cr_lon_init, r_min=r.min(), r_max=r.max(),\n lon_out=lon, simtime=simtime, dt_scale=dt_scale, map_inwards=map_inwards)\n elif nlon > 1:\n model = HUXt(cr_num=cr_num, cr_lon_init=cr_lon_init, r_min=r.min(), r_max=r.max(),\n lon_start=lon.min(), lon_stop=lon.max(), simtime=simtime, dt_scale=dt_scale,\n map_inwards=map_inwards)\n else:\n if nlon == 1:\n model = HUXt(v_boundary=v_boundary, cr_lon_init=cr_lon_init, r_min=r.min(), r_max=r.max(),\n lon_out=lon, simtime=simtime, dt_scale=dt_scale, map_inwards=map_inwards)\n elif nlon > 1:\n model = HUXt(v_boundary=v_boundary, cr_lon_init=cr_lon_init, r_min=r.min(), r_max=r.max(),\n lon_start=lon.min(), lon_stop=lon.max(), simtime=simtime, dt_scale=dt_scale,\n map_inwards=map_inwards)\n\n model.v_grid_cme[:, :, :] = data['v_grid_cme'][()] * u.Unit(data['v_boundary'].attrs['unit'])\n model.v_grid_amb[:, :, :] = data['v_grid_amb'][()] * u.Unit(data['v_boundary'].attrs['unit'])\n\n # Create list of the ConeCMEs\n cme_list = []\n all_cmes = data['ConeCMEs']\n for k in all_cmes.keys():\n cme_data = all_cmes[k]\n t_launch = cme_data['t_launch'][()] * u.Unit(cme_data['t_launch'].attrs['unit'])\n lon = cme_data['longitude'][()] * u.Unit(cme_data['longitude'].attrs['unit'])\n lat = cme_data['latitude'][()] * u.Unit(cme_data['latitude'].attrs['unit'])\n width = cme_data['width'][()] * u.Unit(cme_data['width'].attrs['unit'])\n thickness = cme_data['thickness'][()] * u.Unit(cme_data['thickness'].attrs['unit'])\n thickness = thickness.to('solRad')\n v = cme_data['v'][()] * u.Unit(cme_data['v'].attrs['unit'])\n cme = ConeCME(t_launch=t_launch, longitude=lon, latitude=lat, v=v, width=width, thickness=thickness)\n\n # Now sort out coordinates.\n # Use the same dictionary structure as defined in ConeCME._track_2d_\n coords_group = cme_data['coords']\n coords_data = {j: {'lon_pix': np.array([]) * u.pix, 'r_pix': np.array([]) * u.pix,\n 'lon': np.array([]) * model.lon.unit, 'r': np.array([]) * model.r.unit}\n for j in range(len(coords_group))}\n\n for time_key, pos in coords_group.items():\n t = np.int(time_key.split(\"_\")[2])\n coords_data[t]['lon_pix'] = pos['lon_pix'][()] * u.Unit(pos['lon_pix'].attrs['unit'])\n coords_data[t]['r_pix'] = pos['r_pix'][()] * u.Unit(pos['r_pix'].attrs['unit'])\n coords_data[t]['lon'] = pos['lon'][()] * u.Unit(pos['lon'].attrs['unit'])\n coords_data[t]['r'] = pos['r'][()] * u.Unit(pos['r'].attrs['unit'])\n\n cme.coords = coords_data\n cme_list.append(cme)\n\n # Update CMEs in model output\n model.cmes = cme_list\n\n else:\n # File doesnt exist return nothing\n print(\"Warning: {} doesnt exist.\".format(filepath))\n cme_list = []\n model = []\n\n return model, cme_list", "title": "" }, { "docid": "0c8c3bdc0236c1bb9152f24953709ed7", "score": "0.5927499", "text": "def 
deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone='imagenet'):\n return load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)", "title": "" }, { "docid": "f9214d7a2ccd32be28190440d06178cb", "score": "0.5900768", "text": "def load(self,fn=None,deep=False,load='full'):\n fh=self._h5(fn=fn,mode='r')\n self._fromh5(fh,deep=deep,load=load)\n if load!='demand':\n self._h5close()", "title": "" }, { "docid": "184d670690f9a64cd1f92957b6e5b40d", "score": "0.5899939", "text": "def start_network(model_data_path):\n # Default input size\n height = 228\n width = 304\n channels = 3\n batch_size = 1\n\n # Create a placeholder for the input image\n input_node = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, height, width, channels))\n\n # Construct the network\n net = models.ResNet50UpProj({'data': input_node}, batch_size, 1, False, trainable=False)\n\n sess = tf.compat.v1.Session()\n\n # Load the converted parameters\n print('Loading the model')\n\n net.load(model_data_path, sess)\n\n #using a namedtuple to return the network state.\n Network = collections.namedtuple('Network', ['sess', 'net', 'input_node'])\n network = Network(sess, net, input_node)\n print('returning the model')\n\n return network", "title": "" }, { "docid": "f404a002e23a44b07ce49746917d3678", "score": "0.5894621", "text": "def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone='imagenet'):\n return load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)", "title": "" }, { "docid": "46b3dc3e5ecf606ead22b23c2d12ee9d", "score": "0.58853614", "text": "def load_dnn(dnnFilename):\n with h5py.File(dnnFilename, 'r') as h5f:\n dnn_layers = list(h5f.keys())\n W = []\n b = []\n print(\"reading in the DNN parameters ...\")\n for l in range(len(dnn_layers)//2):\n W.append(h5f['w'+str(l)][:])\n print(\"layer {}: [{}]\".format(l, W[l].shape))\n b.append(h5f['b'+str(l)][:])\n print(\"done.\")\n return b, W", "title": "" }, { "docid": "bc3da1ea45a92c18a86f3907e17aa7a3", "score": "0.5879295", "text": "def load(self, brainFile):", "title": "" }, { "docid": "f761f10f59bfa221202a1bd4eb15abdf", "score": "0.5877103", "text": "def read_neutron_data(pfolder, fname):\n with h5py.File(pfolder + \"\\\\\" + fname, 'r') as file:\n datasetnames = list(file.keys())\n itemnames = list(file[datasetnames[0]].keys())\n datanames = list(file[datasetnames[0]][itemnames[2]].keys())\n # data are on the third layer of these .h5 files\n x_vals = np.array(file[datasetnames[0]][itemnames[2]][datanames[2]])\n y_vals = np.array(file[datasetnames[0]][itemnames[2]][datanames[0]])\n ydev_vals = np.array(file[datasetnames[0]][itemnames[2]][datanames[1]])\n return x_vals, y_vals, ydev_vals", "title": "" }, { "docid": "f7a44d9e471e26b29d2cb0dd6ea2e27f", "score": "0.5875665", "text": "def load(self):\r\n self.mean=np.load(\"results/models/CNN1D/mean.npy\")\r\n self.aplt=np.load(\"results/models/CNN1D/aplt.npy\")\r\n self.d_threshold=np.load(\"results/models/CNN1D/threshold.npy\")\r\n self.model = load_model('results/models/CNN1D/cnn_.h5')\r\n self.model.summary()", "title": "" }, { "docid": "470c93ba861b860fa0e9118277ba8411", "score": "0.5864153", "text": "def from_legacy_h5(file_path, filename=None):\n with h5py.File(file_path, 'r') as f:\n if filename is None:\n filename = f.attrs['microstructure_name']\n micro = Microstructure(name=filename, overwrite_hdf5=True)\n if 'symmetry' in 
f['EnsembleData/CrystalStructure'].attrs:\n sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']\n parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]\n micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym),\n parameters))\n if 'data_dir' in f.attrs:\n micro.data_dir = f.attrs['data_dir']\n # load feature data\n if 'R_vectors' in f['FeatureData']:\n print('some grains')\n avg_rods = f['FeatureData/R_vectors'][()]\n print(avg_rods.shape)\n if 'grain_ids' in f['FeatureData']:\n grain_ids = f['FeatureData/grain_ids'][()]\n else:\n grain_ids = range(1, 1 + avg_rods.shape[0])\n if 'centers' in f['FeatureData']:\n centers = f['FeatureData/centers'][()]\n else:\n centers = np.zeros_like(avg_rods)\n # add all grains to the microstructure\n grain = micro.grains.row\n for i in range(avg_rods.shape[0]):\n grain['idnumber'] = grain_ids[i]\n grain['orientation'] = avg_rods[i, :]\n grain['center'] = centers[i]\n grain.append()\n micro.grains.flush()\n # load cell data\n if 'grain_ids' in f['CellData']:\n micro.set_grain_map(f['CellData/grain_ids'][()],\n f['CellData/grain_ids'].attrs['voxel_size'])\n micro.recompute_grain_bounding_boxes()\n micro.recompute_grain_volumes()\n if 'mask' in f['CellData']:\n micro.set_mask(f['CellData/mask'][()],\n f['CellData/mask'].attrs['voxel_size'])\n return micro", "title": "" }, { "docid": "4c1060779239e0bb3a046ba2fc94db09", "score": "0.58499646", "text": "def __init__(self, weight_file='model/weights_40-0.56.h5'):\n\n\n print(\"Loading model...\")\n self.model = self.get_model()\n self.model.load_weights(weight_file)\n# self.combined_model = Model(inputs=self.model.input, outputs=NoiseAwareLayer(name='noise')(self.model.output))\n# self.combined_model.compile(loss='categorical_crossentropy',\n# optimizer='adam',\n# metrics=['accuracy'])\n# self.combined_model.load_weights('model/combined_weights_31-0.49.h5')\n print(\"Model loaded!\")", "title": "" }, { "docid": "73d7fb95578f796a58679b57e1c2500a", "score": "0.5814465", "text": "def load_gradient(file_path):\n obj = load_hdf5(filename=file_path, obj_class=Gradient_calculator, ignore_attrs_if_err=[])\n\n\n return obj", "title": "" }, { "docid": "abae4b9c3254d1201f4e9d8e40dc6e83", "score": "0.58138245", "text": "def loadModel(self, filepath):\n saved = torch.load(filepath, map_location='cpu')\n state_dict = saved['state_dict']\n config = XLNetConfig(num_labels = 2)\n model = XLNetForSequenceClassification(config)\n # loading the trained parameters with model\n model.load_state_dict(state_dict)\n return model", "title": "" }, { "docid": "fc8fdfcd8a564ba20b4bdcbcbe31e284", "score": "0.58108306", "text": "def reload_net(self):\n import caffe\n self.net = caffe.Net(self.prototxt, 1, weights=self.caffemodel)", "title": "" }, { "docid": "17e72d422b0e4ed3462237a79c0ff135", "score": "0.5799166", "text": "def test_file_hdf5(self, file_name):\n sess = tf.Session()\n self.load_model(sess, log_dir = config.log_dir)\n mix_stft, feats = self.read_hdf5_file(file_name)\n out_feats = self.process_file(mix_stft, sess)\n self.plot_features(feats, out_feats)", "title": "" }, { "docid": "ca0d68eb63250e72d9c9022a837a8954", "score": "0.5793921", "text": "def h5_to_ml_ready_numpy(file_path: str) -> np.ndarray:\n h5_file = h5py.File(file_path, \"r\")\n latent_rep = np.asarray(h5_file.get(\"latent_space\"))\n print(\"raw data: \" + tcols.OKBLUE + f\"{file_path}\" + tcols.ENDC + \", \", end=\"\")\n latent_rep_flat = reshaper(latent_rep)\n return latent_rep_flat", "title": "" }, { "docid": 
"187e6d810484733ce23761dba6f18add", "score": "0.5792707", "text": "def loadFromFile(self, kpath):\n kpath = os.path.abspath(kpath)\n (dirname, basename) = os.path.split(kpath)\n self.name = basename.rpartition('.')[0]\n self.opath = os.path.join(dirname, self.name+\".onnx\")\n self.kmodel = Kmodels.load_model(kpath, custom_objects={ 'tf': tf, 'relu6': helper.relu6 })", "title": "" }, { "docid": "2e43e708b6b1c0cd698f78b4cb7bd645", "score": "0.5788703", "text": "def load_model(self, model_file):\n net_params = torch.load(model_file)\n for action_type in self.action_types:\n self.nets[action_type]['net'].load_state_dict(net_params[action_type])", "title": "" }, { "docid": "db97dea8774addf1211cc98ec865b680", "score": "0.57791567", "text": "def load_model(self, modelpath):\n try:\n self.model = model_from_json(open(modelpath + \".json\").read())\n self.model.load_weights(modelpath + \".h5\")\n except Exception, e:\n self.log(\"Failed to load CNN model %s. (%s)\" % (modelpath,str(e)) )\n return\n\n self.log(\"Loaded CNN model %s.\" % modelpath)\n return", "title": "" }, { "docid": "f96e24d2cdc207c95d2dd3cd76d1d52a", "score": "0.5769877", "text": "def load_graph(filename):\r\n with tf.gfile.GFile(filename, 'rb') as f:\r\n graph_def = tf.GraphDef()\r\n graph_def.ParseFromString(f.read())\r\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "f2b96ca746274fb7c1215f04b712c4af", "score": "0.5768957", "text": "def load_model(self, model_name, network):\n if model_name[-4:] != '.npz':\n log.error(\"Model not found\")\n raise ValueError\n\n with np.load(os.path.join(self.path_to_model_dir, model_name)) as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n\n lasagne.layers.set_all_param_values(network, param_values, trainable=True)", "title": "" }, { "docid": "3b3d171071dc7c3e93a6801d5ab8ae5c", "score": "0.5761669", "text": "def __load_model_from_file__(self, h5file):\n if not \"Models shape\" in h5file.attrs.keys() or \\\n not \"Num models\" in h5file.attrs.keys() or \\\n not \"Cell size\" in h5file.attrs.keys():\n return False\n \n self.CELL_SIZE = h5file.attrs[\"Cell size\"]\n \n self.models = np.empty(shape=h5file.attrs[\"Models shape\"], dtype=object)\n\n with tqdm(desc=\"Loading models\", total=h5file.attrs[\"Num models\"], file=sys.stderr) as pbar:\n def _add_model(name, g):\n if \"v\" in g.attrs.keys() and \"u\" in g.attrs.keys():\n v = g.attrs[\"v\"]\n u = g.attrs[\"u\"]\n self.models[v, u] = self.CREATE_ANOMALY_MODEL_FUNC()\n self.models[v, u].__load_model_from_file__(g)\n pbar.update()\n\n h5file.visititems(_add_model)\n \n if isinstance(self.patches, PatchArray):\n self.patches.calculate_rasterization(self.CELL_SIZE)\n\n return True", "title": "" }, { "docid": "4a6e2b523e79bc9dfc224b245bd125e3", "score": "0.57497185", "text": "def load_h5(fname):\n f = h5py.File(fname, 'r')\n data = dict()\n for k in f.keys():\n data[k] = f[k][:]\n return data", "title": "" }, { "docid": "cf81b5d3ae9044c2bddec3b4f03509d2", "score": "0.57453305", "text": "def load_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net' + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n None\n state_dict = torch.load(load_path, map_location=self.device)\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n net.load_state_dict(state_dict)", "title": "" }, { "docid": 
"94f3b7c7f83cdaaec9aea35fd4ea7bac", "score": "0.5736626", "text": "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n fix_graph_def(graph_def)\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "f455f4d7f1073c9f70bf76e3c4d2bf0e", "score": "0.5728204", "text": "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "f455f4d7f1073c9f70bf76e3c4d2bf0e", "score": "0.5728204", "text": "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "77f1feedc87b1494cdc26b98c12fa545", "score": "0.5725236", "text": "def load_darknet_weights(self, weights_path):\n # Open the weights file\n with open(weights_path, \"rb\") as f:\n header = np.fromfile(\n f, dtype=np.int32, count=5\n ) # First five are header values\n self.header_info = header # Needed to write header when saving weights\n self.seen = header[3] # number of images seen during training\n weights = np.fromfile(f, dtype=np.float32) # The rest are weights\n # Establish cutoff for loading backbone weights\n cutoff = None\n if \"darknet53.conv.74\" in weights_path:\n cutoff = 75\n\n ptr = 0\n for i, (module_def, module) in enumerate(\n zip(self.module_defs, self.module_list)\n ):\n if i == cutoff:\n break\n if module_def[\"type\"] == \"convolutional\":\n conv_layer = module[0]\n if module_def[\"batch_normalize\"]:\n # Load BN bias, weights, running mean and running variance\n bn_layer = module[1]\n num_b = bn_layer.bias.numel() # Number of biases\n # Bias\n bn_b = torch.from_numpy(weights[ptr: ptr + num_b]).view_as(\n bn_layer.bias\n )\n bn_layer.bias.data.copy_(bn_b)\n ptr += num_b\n # Weight\n bn_w = torch.from_numpy(weights[ptr: ptr + num_b]).view_as(\n bn_layer.weight\n )\n bn_layer.weight.data.copy_(bn_w)\n ptr += num_b\n # Running Mean\n bn_rm = torch.from_numpy(weights[ptr: ptr + num_b]).view_as(\n bn_layer.running_mean\n )\n bn_layer.running_mean.data.copy_(bn_rm)\n ptr += num_b\n # Running Var\n bn_rv = torch.from_numpy(weights[ptr: ptr + num_b]).view_as(\n bn_layer.running_var\n )\n bn_layer.running_var.data.copy_(bn_rv)\n ptr += num_b\n else:\n # Load conv. bias\n num_b = conv_layer.bias.numel()\n conv_b = torch.from_numpy(weights[ptr: ptr + num_b]).view_as(\n conv_layer.bias\n )\n conv_layer.bias.data.copy_(conv_b)\n ptr += num_b\n # Load conv. 
weights\n num_w = conv_layer.weight.numel()\n conv_w = torch.from_numpy(weights[ptr: ptr + num_w]).view_as(\n conv_layer.weight\n )\n conv_layer.weight.data.copy_(conv_w)\n ptr += num_w", "title": "" }, { "docid": "462eebb3635eebb63e58969151eeccbc", "score": "0.57218695", "text": "def load_graph(filename):\n with tf.gfile.GFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "title": "" }, { "docid": "22aa906e9ef7f13b161df66fd5f3471d", "score": "0.5718611", "text": "def __init__(self, path_model='./data/models/facenet/facenet_keras.h5'):\n self.path_model = path_model\n self.model = tf.keras.models.load_model(self.path_model, compile=False)", "title": "" }, { "docid": "552a3157f02c58c63171db3a1cf21a83", "score": "0.57166165", "text": "def LoadH5(file_name):\n obj = {}\n with h5py.File(file_name, 'r') as hf:\n for k in hf.keys():\n obj[k] = np.asarray(hf.get(k))\n\n return obj", "title": "" }, { "docid": "b499d517ad6ab73566d2d54c69f6a64f", "score": "0.5703583", "text": "def load_weights(self, filename):\r\n f = open(filename)\r\n rawData = [[float(n) for n in line.split()] for line in f.read().splitlines()]\r\n\r\n self.hiddenLayer = np.array(rawData[:self.K])\r\n self.outputLayer = np.array(rawData[self.K])\r\n\r\n self.hiddenBias = np.array(rawData[self.K+1])\r\n self.outputBias = rawData[self.K+2][0]\r\n\r\n f.close()", "title": "" }, { "docid": "b965b7e7a90f68232174fbb44bee41b3", "score": "0.57018536", "text": "def load_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = 'epoch_%d_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.config.checkpoints_dir, load_filename)\n if not os.path.exists(load_path):\n continue\n net = getattr(self, 'net' + name)\n print('loading the models from %s' % load_path)\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n net.load_state_dict(state_dict)", "title": "" }, { "docid": "c281a275bf1818c138701f3180ce2d38", "score": "0.5700677", "text": "def scNT_seq_neuron_labeling(\n url=\"https://www.dropbox.com/s/lk9cl63yd28mfuq/neuron_labeling.h5ad?dl=1\",\n filename=\"neuron_labeling.h5ad\",\n):\n adata = get_adata(url, filename)\n\n return adata", "title": "" }, { "docid": "25e9afc9df40ca200835ce0817d1cf9e", "score": "0.5690368", "text": "def read_network(file, config):\n SPEED_OF_LIGHT = 299792458 # meter per second\n PROPAGATION_FACTOR = 0.77 # https://en.wikipedia.org/wiki/Propagation_delay\n\n # default values\n default_link_delay = 3\n\n if not file.endswith(\".graphml\"):\n raise ValueError(\"{} is not a GraphML file\".format(file))\n graphml_network = nx.read_graphml(file, node_type=int)\n networkx_network = nx.Graph()\n\n # Setting the nodes of the NetworkX Graph\n for n in graphml_network.nodes(data=True):\n node_id = \"pop{}\".format(n[0])\n\n if config['node_parameter_mode'] == 'probabilistic_continuous':\n cap = np.random.normal(config['node_cap_mean'], config['flow_dr_stdev'])\n elif config['node_parameter_mode'] == 'probabilistic_discrete':\n cap = np.random.choice(config['node_cap_values'], p=config['node_cap_weights'])\n cap = n[1].get('NodeCap', cap)\n\n node_type = n[1].get(\"NodeType\", \"Normal\")\n node_name = n[1].get(\"label\", None)\n if cap is None:\n raise ValueError(\"No NodeCap. 
set for node{} in file {} (as cmd argument or in graphml)\".format(n, file))\n # Adding a Node in the NetworkX Graph\n # Type of node. For now it is either \"Normal\" or \"Ingress\"\n # Init 'remaining_cap' to the node capacity\n networkx_network.add_node(node_id, name=node_name, type=node_type, cap=cap, available_sf={},\n remaining_cap=cap)\n\n # set links\n # calculate link delay based on geo positions of nodes;\n for e in graphml_network.edges(data=True):\n # Check whether LinkDelay value is set, otherwise default to None\n source = \"pop{}\".format(e[0])\n target = \"pop{}\".format(e[1])\n # As edges are undirectional, only LinkFwdCap determines the available data rate\n if config['link_cap_parameter_mode'] == 'probabilistic_continuous':\n link_fwd_cap = np.random.normal(config['link_cap_mean'], config['link_cap_stdev'])\n elif config['link_cap_parameter_mode'] == 'probabilistic_discrete':\n link_fwd_cap = np.random.choice(config['link_cap_values'], p=config['link_cap_weights'])\n link_fwd_cap = e[2].get(\"LinkFwdCap\", link_fwd_cap)\n\n # Setting a default delay of 3 incase no delay specified in GraphML file\n # and we are unable to set it based on Geo location\n if config['link_delay_parameter_mode'] == 'geo_location':\n n1 = graphml_network.nodes(data=True)[e[0]]\n n2 = graphml_network.nodes(data=True)[e[1]]\n n1_lat, n1_long = n1.get(\"Latitude\", None), n1.get(\"Longitude\", None)\n n2_lat, n2_long = n2.get(\"Latitude\", None), n2.get(\"Longitude\", None)\n if n1_lat is None or n1_long is None or n2_lat is None or n2_long is None:\n log.warning(f'Unable to calc based on Geo Location,'\n f' Now using default delay {default_link_delay} for edge: ({source},{target})')\n link_delay = default_link_delay\n else:\n distance = dist((n1_lat, n1_long), (n2_lat, n2_long)).meters # in meters\n # round delay to int using np.around for consistency with emulator\n link_delay = (distance / SPEED_OF_LIGHT * 1000) * PROPAGATION_FACTOR # in milliseconds\n elif config['link_delay_parameter_mode'] == 'probabilistic_continuous':\n link_delay = np.random.normal(config['link_delay_mean'], config['link_delay_stdev'])\n elif config['link_delay_parameter_mode'] == 'probabilistic_discrete':\n link_delay = np.random.choice(config['link_delay_values'], p=config['link_delay_weights'])\n link_delay = e[2].get(\"LinkDelay\", link_delay)\n\n # Adding the undirected edges for each link defined in the network.\n # delay = edge delay , cap = edge capacity\n networkx_network.add_edge(source, target, delay=link_delay, cap=link_fwd_cap, remaining_cap=link_fwd_cap)\n\n # setting the weight property for each edge in the NetworkX Graph\n # weight attribute is used to find the shortest paths\n for edge in networkx_network.edges.values():\n edge['weight'] = weight(edge['cap'], edge['delay'])\n # Setting the all-pairs shortest path in the NetworkX network as a graph attribute\n shortest_paths(networkx_network)\n\n # Filter ingress nodes\n p_ingress = config['node_ingress_probability']\n p_egress = config['node_egress_probability']\n log.info(f'Ingress node probability {p_ingress}.')\n log.info(f'Egress node probability {p_egress}.')\n ing_nodes = []\n eg_nodes = []\n for node in networkx_network.nodes.items():\n if node[1][\"type\"] == \"Ingress\" or np.random.choice([True, False], p=[p_ingress, 1-p_ingress]):\n ing_nodes.append(node[0])\n if node[1][\"type\"] == \"Egress\" or np.random.choice([True, False], p=[p_egress, 1-p_egress]):\n eg_nodes.append(node[0])\n log.info(\"Total of {} ingress nodes 
available\".format(len(ing_nodes)))\n log.info(\"Total of {} egress nodes available\".format(len(eg_nodes)))\n log.info(nx.info(networkx_network))\n\n return networkx_network, ing_nodes, eg_nodes", "title": "" }, { "docid": "9eba6489c691e6709af3040dda7f2152", "score": "0.5689411", "text": "def load_from_checkpoint(self, checkpoint_file):\n assert isinstance(checkpoint_file, str) and checkpoint_file.endswith('.h5'), \"Wrong checkpoint file argument\"\n self.model = Models.load_from_cpt(checkpoint_file)", "title": "" }, { "docid": "d96b9ac5c760aaea2be32ce65f43e66d", "score": "0.56861275", "text": "def load_checkpoint(self, folder, filename_no):\n 'no37.neural.data'\n filename = f\"no{filename_no}.neural.data\"\n filepath = os.path.join(folder, filename)\n if os.path.exists(filepath):\n self.model.load_weights(filepath)\n else:\n print(\"No model in path '{}'\".format(filepath))", "title": "" }, { "docid": "da98671e8670ed64ead085282d110de4", "score": "0.56785977", "text": "def load_h5_file(file_path):\n # load\n fr = h5py.File(file_path, \"r\")\n a_group_key = list(fr.keys())[0]\n data = list(fr[a_group_key])\n # transform to appropriate numpy array\n data = data[0:]\n data = np.stack(data, axis=0)\n return data", "title": "" }, { "docid": "b1c1b9226f3cad90cf60a03d27c38c64", "score": "0.56785023", "text": "def load_darknet_weights(self, weights_path):\n\n # Open the weights file\n with open(weights_path, \"rb\") as f:\n header = np.fromfile(f, dtype=np.int32, count=5) # First five are header values\n self.header_info = header # Needed to write header when saving weights\n self.seen = header[3] # number of images seen during training\n weights = np.fromfile(f, dtype=np.float32) # The rest are weights\n\n # Establish cutoff for loading backbone weights\n cutoff = None\n if \"darknet53.conv.74\" in weights_path:\n cutoff = 75\n\n ptr = 0\n for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n if i == cutoff:\n break\n if module_def[\"type\"] == \"convolutional\":\n conv_layer = module[0]\n if module_def[\"batch_normalize\"]:\n # Load BN bias, weights, running mean and running variance\n bn_layer = module[1]\n num_b = bn_layer.bias.numel() # Number of biases\n # Bias\n bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)\n bn_layer.bias.data.copy_(bn_b)\n ptr += num_b\n # Weight\n bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)\n bn_layer.weight.data.copy_(bn_w)\n ptr += num_b\n # Running Mean\n bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)\n bn_layer.running_mean.data.copy_(bn_rm)\n ptr += num_b\n # Running Var\n bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)\n bn_layer.running_var.data.copy_(bn_rv)\n ptr += num_b\n else:\n # Load conv. bias\n num_b = conv_layer.bias.numel()\n conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)\n conv_layer.bias.data.copy_(conv_b)\n ptr += num_b\n # Load conv. 
weights\n num_w = conv_layer.weight.numel()\n conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)\n conv_layer.weight.data.copy_(conv_w)\n ptr += num_w", "title": "" }, { "docid": "cade5accc2125511f88d25ee6c9f12ba", "score": "0.56767017", "text": "def test_file_hdf5(self, file_name):\n\t\tsess = tf.Session()\n\t\tself.load_model(sess, log_dir = config.log_dir)\n\t\tmix_stft, feats = self.read_hdf5_file(file_name)\n\t\tout_feats = self.process_file(mix_stft, sess)\n\t\tself.plot_features(feats, out_feats)", "title": "" }, { "docid": "8a1da216e63d000006943e3abc9b2549", "score": "0.5672409", "text": "def __init__(self, file_path):\n assert os.path.exists(file_path), \"Config file doesn't exist, filepath: {}\".format(file_path)\n self._file_path = file_path\n super(NeuralNetConfig, self).__init__(self._file_path)\n # object.__init__(self._file_path)\n self.__net_data = self.get_net_data('CNN')\n\n assert os.path.exists(self._train_dataset_dir), f'train_dataset_dir \\'{self._train_dataset_dir}\\' does not exist'\n\n if 'version' in self.__net_data.keys():\n # if this is wrong, then it will fail when initializing neuralnet\n self.__net = self.__net_data['version']\n\n if 'model_dir' in self.__net_data:\n self.model_dir = self.__net_data['model_dir']\n\n if 'checkpoint_dir' in self.__net_data:\n self._checkpoint_dir = self.__net_data['checkpoint_dir']\n\n if 'num_classes' in self.__net_data:\n self.num_classes = int(self.__net_data['num_classes'])\n\n if 'optimizer' in self.__net_data:\n assert self.__net_data['optimizer'].lower() in optimizer,\\\n \"Supplied optimizer, \\'{}\\', doesn\\'t exist, available options are {}\".format(\n self.__net_data['optimizer'], list(optimizer))\n self._optimizer = self.__net_data['optimizer'].lower()\n\n if 'learning_rate' in self.__net_data:\n self.learning_rate = float(self.__net_data['learning_rate'])\n\n if 'momentum' in self.__net_data:\n self.momentum = float(self.__net_data['momentum'])\n\n if 'loss' in self.__net_data:\n assert self.__net_data['loss'].lower() in loss_function,\\\n \"Supplied loss fn, \\'{}\\', doesn\\'t exist, available options are {}\".format(\n self.__net_data['loss'], list(loss_function))\n self._loss_fn = self.__net_data['loss'].lower()\n\n if 'metrics' in self.__net_data:\n if self.__net_data['metrics'].lower() != 'none':\n for m in self.__net_data['metrics'].lower().split(','):\n assert m.strip() in metrics, \"Supplied metric, \\'{}\\', doesn\\'t exist, available options are {}\".format(\n m, list(metrics))\n self._metrics = self.__net_data['metrics'].lower()\n\n if 'evaluate_metrics' in self.__net_data:\n for m in self.__net_data['evaluate_metrics'].lower().split(','):\n assert m in metrics, \"Supplied evaluate_metrics, \\'{}\\', doesn\\'t exist, available options are {}\"\\\n .format(m, list(metrics))\n self._evaluate_metrics = self.__net_data['evaluate_metrics'].lower()\n\n if 'distributed_strategy' in self.__net_data:\n assert self.__net_data['distributed_strategy'].lower() in distributed_strategy, \\\n \"Supplied distributed_strategy, \\'{}\\', doesn\\'t exist, available options are {}\".format(\n self.__net_data['distributed_strategy'], list(distributed_strategy))\n self._distributed_strategy = self.__net_data['distributed_strategy'].lower()\n\n if 'batch_size' in self.__net_data:\n self.batch_size = int(self.__net_data['batch_size'])\n\n if 'log_dir' in self.__net_data:\n self.log_dir = self.__net_data['log_dir']\n\n if 'log_every_n_steps' in self.__net_data:\n self.log_every_n_steps = 
int(self.__net_data['log_every_n_steps'])\n\n if 'max_steps_per_epoch' in self.__net_data:\n self._max_steps_per_epoch = self.__net_data['max_steps_per_epoch']\n\n if 'num_validation_images' in self.__net_data:\n self.num_validation_images = int(self.__net_data['num_validation_images'])\n\n if 'epochs' in self.__net_data:\n self.__epochs = int(self.__net_data['epochs'])\n\n if 'bench_timings' in self.__net_data:\n assert bool('False') # I have this to remind me of what python does here\n self.bench_timings = self.__net_data['bench_timings'].lower() in ['true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh']\n\n if 'bench_file' in self.__net_data:\n self.bench_path = self.__net_data['bench_file']\n\n if 'input_shape' in self.__net_data:\n self.__input_shape = self.__net_data['input_shape']", "title": "" }, { "docid": "3eeeb5c881b62288050a1993bececc3b", "score": "0.56712586", "text": "def load(path):\n with open(path, 'rb') as f:\n qrnn = pickle.load(f)\n backend = importlib.import_module(qrnn.backend)\n model = backend.load_model(f, qrnn.quantiles)\n qrnn.model = model\n return qrnn", "title": "" }, { "docid": "d0443dcdd5e135df9a98592b64b89a94", "score": "0.5662143", "text": "def load_from_file(self, net_file):\n self.ann.create_from_file(net_file)", "title": "" }, { "docid": "cb7040d99593af28d4b2ec0cf7564c61", "score": "0.5661993", "text": "def load_model(self, weight_file):\n self.W = np.fromfile(weight_file, dtype=np.float32)\n print ('model loaded from', weight_file)", "title": "" }, { "docid": "ef01248c9779053af420065729ea3d2b", "score": "0.5660996", "text": "def load_model(self, filename):\n import numpy as np\n from lasagne.layers import set_all_param_values\n\n if not os.path.isfile(filename):\n print_warning(\"Could not find '.npz' weights file at path: {}\".format(filename))\n return False\n \n try:\n ### Load the generator model's weights\n print_info(\"Attempting to load model...\")\n with np.load(filename) as fp:\n param_values = [fp['arr_%d' % i] for i in range(len(fp.files))]\n set_all_param_values(self.network_out, param_values)\n except Exception as e:\n print_warning(\"Found '.npz' weights file, but it must be corrupted, cannot read or parse it properly: {}.\".format(filename))\n return False\n return True", "title": "" }, { "docid": "5f144205c857950b9c47c73ed9f1c0c3", "score": "0.5658707", "text": "def load_model(self, filename):\n self.load_state_dict(torch.load(filename))", "title": "" }, { "docid": "699dfc98cee5382e7c34941a98802785", "score": "0.56567544", "text": "def load_model(cls, path):\n self = cls.__new__(cls)\n with h5py.File(path, mode=\"r\") as f:\n # Default to output_length of 1 for backward compatibility,\n # but allow override.\n # TODO: should figure out how to deprecate this.\n self.metadata = {\"output_length\": 1}\n for k, v in f.attrs.items():\n if k.startswith(\"metadata_\"):\n _, k = k.split(\"_\", 1)\n self.metadata[k] = json.loads(v)\n self.keras_model = tf.keras.models.load_model(\n f, custom_objects=custom_layers\n )\n self.preprocessor = self.create_preprocessor()\n return self", "title": "" }, { "docid": "caa94d28cf8d7979f0735d4063e970e5", "score": "0.5650151", "text": "def load_model(self, filename):\n \n self.load_state_dict(torch.load(filename))", "title": "" }, { "docid": "2bd69316944d9a6032cd1e45ab48f87b", "score": "0.56500286", "text": "def load(path):\n if not os.path.exists(path):\n raise FileNotFoundError(f\"Unable to locate file at {path}\")\n extension = path.split(\".\")[-1]\n if not extension == \"pth\":\n raise 
ValueError(f\"Expected pth extension, got {extension}\")\n\n model = CNN()\n model.load_state_dict(torch.load(path))\n model.to(DEVICE)\n return model", "title": "" }, { "docid": "203a9a8b83af38bf2d4e3de6a17ead56", "score": "0.56495863", "text": "def simulation_with_previous_h5m_file(self):\n\n os.system('rm *.h5m')\n\n my_model = paramak.NeutronicsModel(\n geometry=self.my_shape,\n source=self.source,\n materials={'center_column_shield_mat': 'WC'},\n )\n\n my_model.create_neutronics_geometry(method='pymoab')\n\n my_model.simulate(method=None)\n\n my_model.results is not None", "title": "" }, { "docid": "411e498be2fc4343d09dedea24717d03", "score": "0.5648832", "text": "def resnet50(pretrained=False):\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n model.load_state_dict(torch.load('./resnet50-19c8e357.pth'))\n return model", "title": "" }, { "docid": "a904ff9356fa886e4f871d4d41c67b55", "score": "0.56369436", "text": "def set_nn(self, nn_file):\n n = read_yaml_file(nn_file)['neural_network'][0] # Currently we consider only 1 layer\n self.nn = NeuralNetwork(n['name'], n['nn_type'], n['dimensions'], n['start'], n['end'])", "title": "" }, { "docid": "57b086ea0aa2214441f249b6d332ac9f", "score": "0.5635668", "text": "def load_model(model_name):\n hyperparams = json.loads(open(model_name+'.json').read())\n parser = RNNGparser(hyperparams['brown_file'],\n vocab_thresh=hyperparams['vocab_thresh'],\\\n stack_memory_size=hyperparams['stack_hidden_size'],\\\n word_embedding_size=hyperparams['word_embedding_size'],\\\n char_embedding_size=hyperparams['char_embedding_size'],\\\n char_memory_size=hyperparams['char_memory_size'])\n\n parser.lexicon = SymbolLexicon.load(model_name+'.lex') \n parser.nonterminals = SymbolLexicon.load(model_name+'.nt') \n parser.charset = SymbolLexicon.load(model_name+'.char')\n parser.code_struct_actions()\n parser.allocate_structure()\n parser.model.populate(model_name+\".weights\")\n return parser", "title": "" }, { "docid": "ceb4539ace71690119e99072db495745", "score": "0.56335205", "text": "def load(cls, filename) :\n \n f = open(filename)\n pkl = cPickle.load(f)\n f.close()\n\n for l1, l2 in pkl[\"network\"][\"edges\"] :\n network = pkl[\"layers\"][l1] > pkl[\"layers\"][l2]\n\n network.log = pkl[\"network\"][\"log\"]\n network.notes = pkl[\"network\"][\"notes\"]\n\n return network", "title": "" }, { "docid": "7a2c886378ff53151a46b0b184f811b3", "score": "0.5630964", "text": "def load_weights(self, weight_file: str) -> None:\n requires_grad = False\n\n with h5py.File(weight_file, 'r') as fin:\n for i_layer, lstms in enumerate(\n zip(self.forward_layers, self.backward_layers)\n ):\n for j_direction, lstm in enumerate(lstms):\n # lstm is an instance of LSTMCellWithProjection\n cell_size = lstm.cell_size\n\n dataset = fin['RNN_%s' % j_direction]['RNN']['MultiRNNCell']['Cell%s' % i_layer\n ]['LSTMCell']\n\n # tensorflow packs together both W and U matrices into one matrix,\n # but pytorch maintains individual matrices. In addition, tensorflow\n # packs the gates as input, memory, forget, output but pytorch\n # uses input, forget, memory, output. 
So we need to modify the weights.\n tf_weights = numpy.transpose(dataset['W_0'][...])\n torch_weights = tf_weights.copy()\n\n # split the W from U matrices\n input_size = lstm.input_size\n input_weights = torch_weights[:, :input_size]\n recurrent_weights = torch_weights[:, input_size:]\n tf_input_weights = tf_weights[:, :input_size]\n tf_recurrent_weights = tf_weights[:, input_size:]\n\n # handle the different gate order convention\n for torch_w, tf_w in [[input_weights, tf_input_weights],\n [recurrent_weights, tf_recurrent_weights]]:\n torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :]\n torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :]\n\n lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))\n lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))\n lstm.input_linearity.weight.requires_grad = requires_grad\n lstm.state_linearity.weight.requires_grad = requires_grad\n\n # the bias weights\n tf_bias = dataset['B'][...]\n # tensorflow adds 1.0 to forget gate bias instead of modifying the\n # parameters...\n tf_bias[(2 * cell_size):(3 * cell_size)] += 1\n torch_bias = tf_bias.copy()\n torch_bias[(1 * cell_size):(2 * cell_size)\n ] = tf_bias[(2 * cell_size):(3 * cell_size)]\n torch_bias[(2 * cell_size):(3 * cell_size)\n ] = tf_bias[(1 * cell_size):(2 * cell_size)]\n lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))\n lstm.state_linearity.bias.requires_grad = requires_grad\n\n # the projection weights\n proj_weights = numpy.transpose(dataset['W_P_0'][...])\n lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))\n lstm.state_projection.weight.requires_grad = requires_grad", "title": "" }, { "docid": "abe2146cf941b9172433096750596bff", "score": "0.56304467", "text": "def load_checkpoint(self, folder, filename_no):\n 'no37.neural.data'\n with self.lock:\n filename = f\"no{filename_no}.neural.data\"\n filepath = os.path.join(folder, filename)\n if os.path.exists(filepath):\n self.model.load_weights(filepath)\n else:\n print(\"No model in path '{}'\".format(filepath))", "title": "" }, { "docid": "f1427f05cba2505d04aee0482312c171", "score": "0.5617891", "text": "def load_from_pickle( cls, filename, catmaid_interface = None ):\n f = gzip.GzipFile(filename, 'rb')\n nrn_list = pickle.load(f)\n f.close()\n return NeuronList( nrn_list.neurons,\n CatmaidInterface = catmaid_interface,\n project_name = nrn_list.project_name,\n export_date = nrn_list.export_date\n )", "title": "" }, { "docid": "83d70915515cde877f9f8e49406a3701", "score": "0.5614241", "text": "def load_file(self, file_path):\n self.f = h5py.File(file_path)\n try:\n self.sampling_rate = int(self.f['raw_data'].attrs['sampling_rate'])\n except:\n print \"Sampling rate could not be loaded from HDF5 file. 
Certain functions cannot be used without sampling rate.\"\n self.staged_dataset = None\n print \"File \" + `file_path` + \" has been loaded.\"", "title": "" }, { "docid": "ef14266bd2b64cedc20201a1576faa4f", "score": "0.5602503", "text": "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "ca8c817bf9783001410cf632332794a0", "score": "0.55959725", "text": "def inferLeNet5(Directory_infer='/images/user' ,ModelPath='/models/model3/saved/'):\n TypeOfImage=2\n X_test,Y_test,inferFolder=LeNet5.getImagesAndLabels(Directory_infer,TypeOfImage)\n with tf.Session() as sess: \n loader = tf.train.import_meta_graph(os.getcwd()+ModelPath+'.meta')\n loader.restore(sess, tf.train.latest_checkpoint(os.getcwd()+ModelPath))\n graph = tf.get_default_graph()\n X=graph.get_tensor_by_name(\"X:0\")\n Logits=graph.get_tensor_by_name(\"logits:0\")\n cont=0\n DictClasses=GetImages.getDictClasses()\n for i in X_test:\n i=i.reshape(1,32,32,1)\n Proba=sess.run(Logits, feed_dict={X: i})\n plt.figure(str(Y_test[cont])+' '+str(np.argmax(Proba,1)))\n im = Image.open(inferFolder+'/'+Y_test[cont])\n plt.text(-1,-1,'file'+Y_test[cont]+' belongs to class '+DictClasses[int(np.argmax(Proba,1))])\n plt.imshow(im,vmin = 0, vmax = 255)\n# plt.imshow(i[0,:,:,0],cmap='gray', vmin = 0, vmax = 1)\n# plt.show()\n cont=cont+1\n plt.show()\n return", "title": "" } ]
0026b325ddb64c5a21bdaac90ec27543
custom list display field with detail page link
[ { "docid": "b9a3176761efd1812c32b02f6fd6589b", "score": "0.6043342", "text": "def name_with_detail_link(self, obj):\n detail_url = reverse(\"admin:program_program_detail\", kwargs={\"object_id\": obj.pk})\n return format_html(f\"<a href='{detail_url}'>{obj.name}</a>\")", "title": "" } ]
[ { "docid": "f84c2a47205e9e79022c971200627e3c", "score": "0.6725328", "text": "def UserListShow(self, listType, itemList):", "title": "" }, { "docid": "60551363c68ef31e6434f1d0b4dca0d0", "score": "0.6423562", "text": "def detail_page(self, obj):\n url = reverse('materials_inventory', args=(obj.id,))\n\n return format_html('<a href=\"{}\">Mostrar detalle</a>', url)", "title": "" }, { "docid": "0b1b7e347c45ade502f1e638b61e6bb3", "score": "0.6376594", "text": "def detail_page(self, obj):\n url = reverse('products_inventory', args=(obj.id,))\n\n return format_html('<a href=\"{}\">Mostrar detalle</a>', url)", "title": "" }, { "docid": "5c2061456c9de554bbb2e9a2b2393b02", "score": "0.63654363", "text": "def detail_page(self, obj):\n url = reverse('consumables_inventory', args=(obj.id,))\n\n return format_html('<a href=\"{}\">Mostrar detalle</a>', url)", "title": "" }, { "docid": "c10d0ca59bd6632b5152cce175cd5887", "score": "0.6363512", "text": "def detail_page(self, obj):\n url = reverse('durable_goods_inventory', args=(obj.id,))\n\n return format_html('<a href=\"{}\">Mostrar detalle</a>', url)", "title": "" }, { "docid": "54307a6a3e21b6157b33355c2354ef95", "score": "0.59984475", "text": "def linkify(field_name):\n\n # def _linkify(obj):\n # app_label = obj._meta.app_label\n # linked_obj = getattr(obj, field_name)\n # # model_name = linked_obj._meta.model_name\n # view_name = f\"admin:{app_label}_{model_name}_change\"\n # link_url = reverse(view_name, args=[linked_obj.id])\n # return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n # _linkify.short_description = field_name # Sets column name\n # return _linkify", "title": "" }, { "docid": "192c42383930e738e939880b54d18d7a", "score": "0.5941618", "text": "def linkify(field_name):\n\n def _linkify(obj):\n if obj.ordered and obj is not None:\n app_label = obj._meta.app_label\n linked_obj = getattr(obj, field_name)\n model_name = linked_obj._meta.model_name\n view_name = f\"admin:{app_label}_{model_name}_change\"\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n else:\n return 'Nill'\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "title": "" }, { "docid": "c4396cf5e7fc36d78fe96e39934e09f8", "score": "0.5622063", "text": "def get_list_display(self, request):\n email = request.user.email\n if email in [\"[email protected]\", \"[email protected]\", \"[email protected]\"]:\n return self.list_display + (\"hijack_field\",)\n return self.list_display", "title": "" }, { "docid": "b0903928d4f98297d85ecb1d0bb1566b", "score": "0.56031096", "text": "def __str__(self):\n return self.detail", "title": "" }, { "docid": "b0903928d4f98297d85ecb1d0bb1566b", "score": "0.56031096", "text": "def __str__(self):\n return self.detail", "title": "" }, { "docid": "b0903928d4f98297d85ecb1d0bb1566b", "score": "0.56031096", "text": "def __str__(self):\n return self.detail", "title": "" }, { "docid": "6e151bf4f3bfb1ebe748d5674fb8dcf5", "score": "0.55662256", "text": "def DisplayIndex(self):", "title": "" }, { "docid": "6e151bf4f3bfb1ebe748d5674fb8dcf5", "score": "0.55662256", "text": "def DisplayIndex(self):", "title": "" }, { "docid": "a0065d5c86c3fcdfa69cbd7664d3fc79", "score": "0.5555436", "text": "def linkify(field_name):\n # docs.djangoproject.com/en/2.1/ref/contrib/admin/#reversing-admin-urls\n def _linkify(obj):\n app_label = obj._meta.app_label\n linked_obj = getattr(obj, field_name)\n model_name = linked_obj._meta.model_name\n view_name = 
f\"admin:{app_label}_{model_name}_change\"\n link_url = reverse(view_name, args=[str(linked_obj.id)])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name\n return _linkify", "title": "" }, { "docid": "62c26a4e5ed1c25c6a546099cd8a0c72", "score": "0.5551432", "text": "def detail(request, listing_id):\n listing = Listing.objects.get(id=listing_id)\n return render(request, 'detail.html', {'listing': listing})", "title": "" }, { "docid": "73b1951bc0a6103aae3cb080bbd1fab9", "score": "0.5526", "text": "def fetch_details(self, search_url, listing):\n pass", "title": "" }, { "docid": "486e9b5837d287a8cb8db36f891ef793", "score": "0.5523238", "text": "def web_get_detail_url(self):\n try:\n return reverse(\n \"%s-detail\" % slugify(self._meta.verbose_name),\n kwargs={\"slug\": slugify(self.db_key)},\n )\n except:\n return \"#\"", "title": "" }, { "docid": "d47b1de5943dfe1d38a3669a920e48be", "score": "0.5520827", "text": "def extractLink(model, item):", "title": "" }, { "docid": "2f9a05b61b43b2887869802ea1b88ae3", "score": "0.5508284", "text": "def bmfmodule_detail(self):\n return ('%s:detail' % self._bmfmeta.url_namespace, (), {\"pk\": self.pk})", "title": "" }, { "docid": "2a17b37efcd5a7e7d490d37dcb35c92f", "score": "0.54910445", "text": "def test_list_display_links_check_skipped_if_get_list_display_overridden(self):\n\n class TestModelAdmin(ModelAdmin):\n list_display_links = [\"name\", \"subtitle\"]\n\n def get_list_display(self, request):\n pass\n\n self.assertIsValid(TestModelAdmin, ValidationTestModel)", "title": "" }, { "docid": "79c09b6920b886d4cd1979ed337fb2d3", "score": "0.5461656", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['page'].to_field_name = self.link_type.id", "title": "" }, { "docid": "cc5be1380e74cf8643a28d50aa3e0bf5", "score": "0.54097587", "text": "def display_content(self, item):\r\n self.selected_item = \"Relationship\"\r\n self.details_list.clear()\r\n # Look for the selected relation in our list of relationships\r\n found_relation = None\r\n for relation_loop in self.controller_object.relationships_main:\r\n if item.text() == relation_loop.name:\r\n found_relation = relation_loop\r\n\r\n self.relation_selected = found_relation\r\n count = 0\r\n\r\n if found_relation != None:\r\n for observation in found_relation.observation_list:\r\n self.details_list.addItem(observation.show())\r\n\r\n # sets font to gray\r\n if self.relation_selected.observation_list[count].ignore == 1:\r\n self.details_list.item(count).setForeground(QtCore.Qt.gray)\r\n\r\n # sets salient artifact color to red\r\n if self.relation_selected.observation_list[count].artifact == 1 and \\\r\n self.relation_selected.observation_list[count].ignore != 1:\r\n self.details_list.item(count).setForeground(QtCore.Qt.red)\r\n count += 1\r\n\r\n disable_button(self.edit_button)\r\n disable_button(self.ignore_button)", "title": "" }, { "docid": "a406f9187ee253b19314ff64a45efdb2", "score": "0.5357906", "text": "def get_absolute_url(self):\n return reverse('food:item_detail_view', kwargs={'pk': self.pk})", "title": "" }, { "docid": "b71c6fc7fbfd0b71028049052a6da9ae", "score": "0.5354236", "text": "def on_showDetailsButton_clicked(self):\n item = self.searchResultList.selectedItems()[0]\n self.__showDetails(item)", "title": "" }, { "docid": "f46dc4497bebeaa2d63dbdd5d427dfa8", "score": "0.53189486", "text": "def additional_detail(self) -> str:\n return pulumi.get(self, \"additional_detail\")", "title": "" }, { 
"docid": "786171954e186607b1faf36eb25aff11", "score": "0.52787507", "text": "def detail_url(self):\n\n return reverse('favorite-detail', kwargs={'pk': self.favorte_thing.pk})", "title": "" }, { "docid": "68c453999953232f074ebb533fbb73bd", "score": "0.5275353", "text": "def object_list (request, **kwargs):\n return list_detail.object_list (request, **kwargs)", "title": "" }, { "docid": "2b1b07fba06343d78328a92413cf390a", "score": "0.52725506", "text": "def link_relation(field_name):\n\n def _link_relation(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj:\n model_name = linked_obj._meta.model_name\n app_label = linked_obj._meta.app_label\n view_name = f\"admin:{app_label}_{model_name}_change\"\n link_url = reverse(view_name, args=[linked_obj.id])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n else:\n return \"-\"\n\n _link_relation.short_description = field_name # Sets column name\n return _link_relation", "title": "" }, { "docid": "0d33770b7c93fa7a2636881950c18b94", "score": "0.527234", "text": "def detail_list(self, request, templates):\n return self._list_view(self.detail, request, templates)", "title": "" }, { "docid": "29cfb733eea619f0bc5cc860b60ea702", "score": "0.5269969", "text": "def test_list_display_first_item_in_list_editable(self):\n\n class ProductAdmin(ModelAdmin):\n list_display = [\"name\", \"slug\", \"pub_date\"]\n list_editable = [\"slug\", \"name\"]\n list_display_links = [\"pub_date\"]\n\n self.assertIsValid(ProductAdmin, ValidationTestModel)", "title": "" }, { "docid": "73e42579eed3907e07d2d06721fb9d51", "score": "0.5248537", "text": "def get_detail_url(id):\n\treturn f'{TODO_URL}{id}'", "title": "" }, { "docid": "b6b92feb40255eda4f8525eb24cd6c2c", "score": "0.5245222", "text": "def callback():\n query = db.recipe.title.contains(request.vars.keyword)\n recipes = db(query).select(orderby=db.recipe.id)\n links = [A(p.title, _href=URL('show',args=r.id)) for r in recipes]\n return UL(*links)", "title": "" }, { "docid": "6ee73d47e4ad2725eb5df0c27794542d", "score": "0.52327496", "text": "def list_tag(self, event):\n\n eadd = self.ui.builder.get_object(\"EAdd\")\n val = eadd.get()\n eadd.delete(0, END)\n\n self.ui.builder.get_object(\"ListSelected\").insert(END, val)", "title": "" }, { "docid": "8890a63d8bfdd6c3a0b299c57050e7ea", "score": "0.52312136", "text": "def snippet_detail(request, pk):\n return handle_crud(snippet_crud, request, pk)", "title": "" }, { "docid": "aa887b286a1ab4c3d18b4a7054e43014", "score": "0.5207898", "text": "def print_details(self):\n self.__linked_list.print_list()", "title": "" }, { "docid": "e7537c04481458e2d081244a1cfe50bb", "score": "0.5201767", "text": "def detail_view(request):\n md = markdown.Markdown(safe_mode='replace', html_replacement_text='NO')\n this_id = request.matchdict['this_id']\n entry = DBSession.query(Entry).get(this_id)\n text = md.convert(entry.text)\n return {'entry': entry, 'text': text}", "title": "" }, { "docid": "6234b166d96ab5fff042c4fbbe67ebcc", "score": "0.52010375", "text": "def test_list_display_link_checked_for_list_tuple_if_get_list_display_overridden(\n self,\n ):\n\n class TestModelAdmin(ModelAdmin):\n list_display_links = \"non-list/tuple\"\n\n def get_list_display(self, request):\n pass\n\n self.assertIsInvalid(\n TestModelAdmin,\n ValidationTestModel,\n \"The value of 'list_display_links' must be a list, a tuple, or None.\",\n \"admin.E110\",\n )", "title": "" }, { "docid": "efe871546d430bf97c9c3452230ecd2b", "score": "0.51984733", "text": "def field_show(field_id):\n field = 
fields.find_one({'_id': ObjectId(field_id)})\n field_reviews = reviews.find({'field_id': ObjectId(field_id)})\n return render_template('field_show.html', field=field)", "title": "" }, { "docid": "a26f0e4826f0a6aca048e51271c3f6b5", "score": "0.516312", "text": "def list_item(self):\n return self._list_item", "title": "" }, { "docid": "c3513901f4e495a77a37e379360f97c8", "score": "0.5162738", "text": "def show(self, **kwargs):\n if self.molecular:\n return display_molecular(self, **kwargs)\n else:\n return display_atomic(self, **kwargs)", "title": "" }, { "docid": "8280c00090557ff3cd6b4d1dfc584b1a", "score": "0.5158203", "text": "def listitem_form(**kwargs):\n # Tags must be joined back into a comma-delimited string\n if 'values' in kwargs and kwargs['values'] and 'tags' in kwargs['values']:\n kwargs['values']['tags'] = ','.join(kwargs['values']['tags'])\n\n return gen_doc_as_div(ListItem, **kwargs)", "title": "" }, { "docid": "49fb60a7655fff55668356e01bdc189d", "score": "0.51439613", "text": "def get_admin_url(self):\r\n return mark_safe(u\"%s/%s/%s/\" % (self.content_type.app_label, self.content_type.model, quote(self.object_id)))", "title": "" }, { "docid": "a2345f469ab049330438bee2b25e1b0c", "score": "0.51439095", "text": "def convert_fields_to_detail_url(self, data, field_array):\n for field in field_array:\n if isinstance(data[field], list):\n data['{}_urls'.format(field)] = [d.detail_url\n for d in data[field]]\n else:\n data['{}_url'.format(field)] = data[field].detail_url\n\n del data[field]\n\n return data", "title": "" }, { "docid": "cf8dac92c1e5d24779ec0cf836b54da5", "score": "0.5140119", "text": "def get_details(self): \r\n return(self.name)", "title": "" }, { "docid": "56bae459fb871b48710a393a3a78d9a4", "score": "0.5123366", "text": "def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path('<int:object_id>/detail/', self.admin_site.admin_view(self.detail_view), name=\"program_program_detail\")\n ]\n return custom_urls + urls", "title": "" }, { "docid": "4bcf50c48c25c0c5086de31c2bb1ec27", "score": "0.51169133", "text": "def get_form_field(self, **kwargs):\r\n defaults = {'form_class': forms.URLField}\r\n defaults.update(kwargs)\r\n return super(LinkProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "7a72b713c4f6a9731ba34c6a9fc34a63", "score": "0.510741", "text": "def get_admin_url(self):\r\n return \"%s/%s/%s/\" % (self.content_type.app_label, self.content_type.model, self.object_id)", "title": "" }, { "docid": "30729231b57eaf90ba3a3e53683bc716", "score": "0.51051366", "text": "def templatePath(self):\n return 'soc/list/lists.html'", "title": "" }, { "docid": "d94dfb8f3760dbac8903a2b7b3f37f30", "score": "0.5104054", "text": "def get_display_name(self):\n return \"Item %d\" % self.index", "title": "" }, { "docid": "0e9fef8666763a881fdc7e976628a763", "score": "0.5102591", "text": "def HomeDisplayExtend(self):", "title": "" }, { "docid": "38d9f9cd245f687bde56d3ae1d89cbfd", "score": "0.5080766", "text": "def todolist_show ( request, item_container ):\n\n def get_section_view(items, sections):\n \"\"\" erzeugt die Section-Ansicht der im Ordner enthaltenen Objekte \"\"\"\n from django.template.loader import get_template\n from django.template import Context\n tSection = get_template('app/todolist/section.html')\n t_link = get_template('app/todolist/linkitem.html')\n content = ''\n unknown = _(u'Unbekannter Zwischentitel')\n section = '--START--'\n links = []\n comment_counts = get_visible_comment_count_by_item_containers(items)\n for i in items :\n 
if section != i.section :\n if section != unknown :\n if section != '--START--' and links != [] :\n cSection = Context ( { 'section': section, 'links': links } )\n content += tSection.render ( cSection)\n if i.section in sections :\n section = i.section\n else :\n section = unknown\n links = []\n if i.item.url_more != '':\n title_link = show_link(i.item.url_more, i.item.title)\n else:\n title_link = i.item.title\n cSection = Context ({\n 'id' : i.item.id,\n 'title' :i.item.title,\n 'text' : i.item.text,\n 'text_more' : i.item.text_more,\n 'user_name' : i.item.string_1,\n 'email' : i.item.string_2,\n 'date' : i.get_last_modified(),\n 'image_url' : i.item.image_url,\n 'image_url_url': i.item.image_url_url,\n 'image_extern' : i.item.image_extern,\n 'last_modified': i.get_last_modified(),\n 'show_item' : i.get_absolute_url(),\n 'comments' : comment_counts[i.item.id]\n })\n links.append(t_link.render(cSection))\n\n if section != '--START--' and links != []:\n cSection = Context ( { 'section': section, 'links': links } )\n content += tSection.render ( cSection)\n return content\n\n app_name = 'todolist'\n items, sections, d_sections = get_folder_content(item_container)\n vars = get_folderish_vars_show(request, item_container, app_name, get_section_view(items, sections),\n get_user_support(item_container))\n return render_to_response ( 'app/base_folderish.html', vars )", "title": "" }, { "docid": "35500789e35ce04955cd6877a1f1896f", "score": "0.50806147", "text": "def test_list_display_links_is_none(self):\n\n class ProductAdmin(ModelAdmin):\n list_display = [\"name\", \"slug\", \"pub_date\"]\n list_editable = list_display\n list_display_links = None\n\n self.assertIsValid(ProductAdmin, ValidationTestModel)", "title": "" }, { "docid": "06887960a695f96f572256da98e00cbb", "score": "0.5078089", "text": "def flatpages_link_list(request):\n from django.contrib.flatpages.models import FlatPage\n link_list = [(page.title, page.url) for page in FlatPage.objects.all()]\n return render_to_link_list(link_list)", "title": "" }, { "docid": "ae679839a1234345ce8b0992b93744fc", "score": "0.5074564", "text": "def list(request):\n pass", "title": "" }, { "docid": "0f79dafedfebbed68050f21fc88b36fa", "score": "0.5071394", "text": "def list_name(self):\n if self.addToListCheck.isChecked() and self.listBox.currentText():\n return self.listBox.currentText()\n else:\n return None", "title": "" }, { "docid": "157be649ad27df433b5c6e980b40c483", "score": "0.5067877", "text": "def GetLink(self):", "title": "" }, { "docid": "fb08848ff9a795405ee814061da0634f", "score": "0.5062921", "text": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n for obj in context['object_list']:\n obj.materiaalklasse_str = makl2str[obj.materiaalklasse]\n # obj.leeftijdscategorie_str = lcat2str[obj.leeftijdscategorie]\n\n obj.url_details = reverse('Records:specifiek', kwargs={'discipline': obj.discipline,\n 'nummer': obj.volg_nr})\n # for\n\n context['kruimels'] = (\n (reverse('Records:overzicht'), 'Records'),\n (None, self.kruimel)\n )\n\n menu_dynamics(self.request, context)\n return context", "title": "" }, { "docid": "c664919e9218109658de796861705666", "score": "0.50435853", "text": "def contentitem_detail(request, template='browse/detail.html'):\n if request.GET and request.is_ajax():\n item = ContentItem.objects.get(pk=request.GET['id'])\n\n context = {\n 'item': item,\n }\n\n return render_to_response(template, context, context_instance=RequestContext(request))", "title": "" }, { "docid": 
"81c44727fa729632e70ce3263553f8ea", "score": "0.5042079", "text": "def __str__(self) -> str:\n return self.link", "title": "" }, { "docid": "23b0736abf24d33bd2741a78448d81b0", "score": "0.50390184", "text": "def __unicode__(self):\n return self.desc", "title": "" }, { "docid": "f68b829bb6983c27b540b7994fd64f2e", "score": "0.50320196", "text": "def __str__(self):\r\n return \"[\" + str(self.item.id) + \" \" + self.label() + \"]\"", "title": "" }, { "docid": "72d5b1d1e1fcbaebc9da26642e1ea5b3", "score": "0.50230783", "text": "def PopupList(self):\r\n pass", "title": "" }, { "docid": "cc3d7b373678e798d980ec9be014e157", "score": "0.50203204", "text": "def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])", "title": "" }, { "docid": "cc3d7b373678e798d980ec9be014e157", "score": "0.50203204", "text": "def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])", "title": "" }, { "docid": "de0f7ba07f1a3652d8e6c42bc72f3f4f", "score": "0.50079083", "text": "def get_absolute_url(self):\n return reverse('inventory:detail_product', self.id)", "title": "" }, { "docid": "e2355f7d4718beb82a09aa24d8c9d653", "score": "0.5007547", "text": "def edit_link(self, obj):\n return \"<a href='%s'>%s</a>\" % (reverse(\"edit-category\", args=[obj.id]),\n _(\"Edit\"))", "title": "" }, { "docid": "d26ca9a0e5a0187496e2b060bc84dd27", "score": "0.5004382", "text": "def show_name(self, o):\n from django.utils.safestring import SafeUnicode\n return html.format_html(\n '<a href=\"{url}\" title=\"{comment}\">{name}</a>',\n url=o.get_absolute_url(),\n name=T.truncate(o.name, 40),\n comment=SafeUnicode(o.comment))", "title": "" }, { "docid": "ccd60bac485bb5db95fe97e3675f83ba", "score": "0.49974918", "text": "def chapter_list(self, manga_link):", "title": "" }, { "docid": "65cf91bbf7e8cd238d509260bee1d83a", "score": "0.49908912", "text": "def test_PrettyLinkWithAdditionalInfosColumn(self):\n table = self.faceted_z3ctable_view\n column = PrettyLinkWithAdditionalInfosColumn(self.portal, self.portal.REQUEST, table)\n # attrName is set during table.setUpColumns\n column.attrName = 'Title'\n # this column use 'sortable_title' as sort_index\n self.assertEqual(column.sort_index, 'sortable_title')\n # this column only works with DX content types\n tt = api.content.create(\n container=self.eea_folder,\n type='testingtype',\n title='My testing type',\n description='My description',\n bool_field=False)\n brain = self.portal.portal_catalog(UID=tt.UID())[0]\n # no additional informations defined so nothing more than pretty link is returned\n self.assertTrue(column.renderCell(brain).startswith(IPrettyLink(tt).getLink()))\n # define some informations\n tt.afield = u'My field content'\n rendered = column.renderCell(brain)\n self.assertTrue(\n '<span id=\"form-widgets-afield\" class=\"text-widget textline-field\">My field content</span>'\n in rendered)\n # ai_extra_fields, by default id, UID and description\n self.assertTrue(\n '<div class=\"discreet\"><label class=\"horizontal\">Id</label>'\n '<div class=\"type-textarea-widget\">my-testing-type</div></div>'\n in rendered)\n self.assertTrue(\n '<div class=\"discreet\"><label class=\"horizontal\">Uid</label>'\n '<div class=\"type-textarea-widget\">{0}</div></div>'.format(brain.UID)\n in rendered)\n self.assertTrue(\n '<div class=\"discreet\"><label class=\"horizontal\">Description</label>'\n '<div class=\"type-textarea-widget\">My description</div></div>'\n in rendered)", "title": "" }, { "docid": "237df58f6fd43fd67d3b045f340603cf", 
"score": "0.4990774", "text": "def __unicode__(self):\n return self.display_name", "title": "" }, { "docid": "a8a124f09750b72dfa823631b2fd14ff", "score": "0.4988007", "text": "def _get_actions(self, obj, **kwargs):\n # ctx = super().get_context_data(**kwargs)\n t = get_template(\"customadmin/partials/list_basic_actions.html\")\n # ctx.update({\"obj\": obj})\n # print(ctx)\n return t.render({\"o\": obj})", "title": "" }, { "docid": "695492582da8a37cffdc7ace1cbb6138", "score": "0.49803072", "text": "def test_list_display_first_item_same_as_list_editable_first_item(self):\n\n class ProductAdmin(ModelAdmin):\n list_display = [\"name\", \"slug\", \"pub_date\"]\n list_editable = [\"name\", \"slug\"]\n list_display_links = [\"pub_date\"]\n\n self.assertIsValid(ProductAdmin, ValidationTestModel)", "title": "" }, { "docid": "f8195c3d01337bc5a3a8aae4beb5981f", "score": "0.49724123", "text": "def get_absolute_url(self):\n return reverse('tipoProducto-detail', args=[str(self.id)])", "title": "" }, { "docid": "ac1566b6bfd9ce61c01bd127a1e38a02", "score": "0.49715075", "text": "def get_admin_url(self):\n return mark_safe(u\"%s/%s/%s/\" % (self.content_type.app_label, self.content_type.model, quote(self.object_id)))", "title": "" }, { "docid": "0eedb4c5e228c35af78a6c140956193b", "score": "0.49593964", "text": "def link(self) -> str:\n\t\treturn self.details.get(\"htmlLink\")", "title": "" }, { "docid": "32eb49dffc241f01efcc7d3ce42d0ee9", "score": "0.49593285", "text": "def _get_detail_options(self, request, obj=None, **kwargs):\n actions = []\n\n url_index = self.get_url_reverse('changelist')\n\n\n if obj:\n url_delete = self.get_url_reverse('delete', obj.pk)\n actions.append(\n Button('delete', 'Delete', url_delete, 'danger', icon='trash')\n )\n\n actions.append(\n Button('cancel', 'Back', url_index, icon='close')\n )\n\n actions += copy.copy(self.get_detail_actions(request=request, obj=obj))\n actions.append(\n Button('_continue', 'Save', 'submit', 'primary', icon='save')\n )\n\n return actions", "title": "" }, { "docid": "763db36ca3adbd16ffa7428fa589ee82", "score": "0.49584964", "text": "def display_doc(self):\n selected = self.documentation_tab.doc_list.currentItem().text()\n self.documentation_tab.detailed_view.setHtml(self.docs[selected.lower()]['content'])", "title": "" }, { "docid": "3783e76c445d92ed893141dcaa690065", "score": "0.49526608", "text": "def genHTMLListItem(f, link, linkname):\r\n f.write(\"<li>\")\r\n genHTMLLink(f, link, linkname)\r\n f.write(\"\\n\")", "title": "" }, { "docid": "c38ea33ab7785b4b95e5bc743e75ec54", "score": "0.49498272", "text": "def get_absolute_url(self):\n return reverse('pelicula-detail', args=[str(self.id)])", "title": "" }, { "docid": "3ae4884eebb5785f330b6b13cb146599", "score": "0.49496", "text": "def _edit(self, request, entity, context, params):\n\n context['page_name'] = \"%s titled '%s'\" % (context['page_name'],\n entity.title)\n\n return super(View, self)._edit(request, entity, context, params)", "title": "" }, { "docid": "d5e2730c9b5e6d5c20924d14bfcee1f0", "score": "0.4948899", "text": "def fields_new():\n return render_template('fields_new.html', field={}, title='New field')", "title": "" }, { "docid": "1d6ab390b2be88f304d6b4d1203cd354", "score": "0.49440944", "text": "def __getattr__(self, name):\n return getattr(self.objectListView, name)", "title": "" }, { "docid": "bb8627944a42dc5ef587a33d8863a6d4", "score": "0.49407375", "text": "def item_details(request, slug):\n item = get_object_or_404(Item, slug=slug)\n\n context = {'result': item}\n\n return 
render(request, 'database_view/detail_pages/item_details.html', context)", "title": "" }, { "docid": "02c33c86440af3c1133dce84eccc201a", "score": "0.4940715", "text": "def viewitems(self, *args):\n pass", "title": "" }, { "docid": "77d39190568be9d58b4bbbf64f07e9bc", "score": "0.4931772", "text": "def object_link(self, obj):\n if obj.action_flag == DELETION:\n link = escape(obj.object_repr)\n else:\n try:\n ct = obj.content_type\n url = reverse(f'admin:{ct.app_label}_{ct.model}_change',\n args=[obj.object_id])\n link = f'<a href=\"{url}\">{escape(obj.object_repr)}</a>'\n except Exception as e:\n logger.debug(e)\n link = escape(obj.object_repr)\n return mark_safe(link)", "title": "" }, { "docid": "f4c35283773e544e8a67eb12bb54639d", "score": "0.49304467", "text": "def detail(self, *args, **kwargs):\r\n self.obj = self._load_obj(*args, **kwargs)\r\n context = self._get_detail_context(object=self.obj)\r\n response = self._render(\"detail\", **context)\r\n # # TODO: 19.11.13 wooyek this is silly, tiw ill not work with all models from SQLAlchemy\r\n # http://stackoverflow.com/questions/6745189/how-do-i-get-the-name-of-an-sqlalchemy-objects-primary-key\r\n # headers = {\r\n # 'X-PrimaryKey': self.obj.id, #\r\n # 'X-Key': self.obj.key.urlsafe(),\r\n # }\r\n # response.headers.extend(headers)\r\n return response", "title": "" }, { "docid": "dda5ef6bf49458e31d5409445c88c07e", "score": "0.4929825", "text": "def __str__(self):\n url = self.resolve()\n if len(self.getData) < 3:\n return '<a href=\"%s\" %s>%s</a>' % (url, self.strAttr(), self.vals)\n\n data = json.dumps(self.getData, cls=AresHtml.SetEncoder).replace('\"$(', '$(').replace('.val()\"', '.val()')\n jsDef = '''\n %s.on(\"click\", function (event){\n var baseUrl = \"%s\";\n if (baseUrl.indexOf(\"?\") !== -1) { var ullUrl = baseUrl + \"&\" + %s ; }\n else { var ullUrl = baseUrl + \"?\" + %s ; }\n window.location.href = ullUrl ;\n }\n ) ;\n ''' % (self.jqId, url, data, data)\n self.get('click', url, data, '')\n #self.aresObj.jsOnLoadFnc.add(jsDef)\n return '<a href=\"#\" %s>%s</a>' % (self.strAttr(), self.vals)", "title": "" }, { "docid": "44be2e0102478e1fd69b15cbddc6b879", "score": "0.49279672", "text": "def oneforty_list_all(request):", "title": "" }, { "docid": "620bfb098f39c97c5bbe646acbba1056", "score": "0.4926012", "text": "def _select(self, r, **attr):\n\n if r.component_id or not r.component:\n # No List View\n return {}\n\n output = self._datatable(r, **attr)\n\n if r.representation == \"aadata\":\n return output\n\n resource = self.method.resource\n if resource.get_config(\"insertable\", True) and \"showadd_btn\" not in output:\n # Add a button to activate the add form which gets hidden in the view\n output[\"showadd_btn\"] = crud_button(None,\n tablename = resource.tablename,\n name = \"label_create\",\n icon = \"add\",\n _id = \"show-add-btn\",\n )\n \n return output", "title": "" }, { "docid": "8b18e082cd1e0007a7553f451aba027e", "score": "0.4924166", "text": "def show_title(self, o):\n n = T.truncate(o.product.name, 40)\n v = o.product.vendor.name\n r = html.format_html('<a href=\"{}\">{}', o.get_absolute_url(), n)\n r += '<br>' if len(n) + len(v) > 37 else ' '\n r += html.format_html('[{}]</a>', v)\n return html.mark_safe(r)", "title": "" }, { "docid": "129230cb42233f8f7ddff579d32a2d7f", "score": "0.4920084", "text": "def test_list_display_first_item_in_list_editable_no_list_display_links(self):\n\n class ProductAdmin(ModelAdmin):\n list_display = [\"name\", \"slug\", \"pub_date\"]\n list_editable = [\"slug\", \"name\"]\n\n 
self.assertIsInvalid(\n ProductAdmin,\n ValidationTestModel,\n \"The value of 'list_editable[1]' refers to the first field \"\n \"in 'list_display' ('name'), which cannot be used unless \"\n \"'list_display_links' is set.\",\n id=\"admin.E124\",\n )", "title": "" }, { "docid": "a9c8de5677f1ff5f48c52f6a2b91f1b6", "score": "0.49196154", "text": "def detail_template(self):\n return \"wheelcms_spokes/image_detail.html\"", "title": "" }, { "docid": "593f75a0ee9a7f5c35538a4fac191a13", "score": "0.4919407", "text": "def list_name(self):\n return self.listBox.currentText()", "title": "" }, { "docid": "a61ef0cd4fcb5a1254e95f4f2f18a7d6", "score": "0.4919314", "text": "def url_links(record):\n return apply_template_on_array(\n [url[\"value\"] for url in record.get('urls', [])],\n 'inspirehep_theme/format/record/field_templates/link.tpl')", "title": "" }, { "docid": "c299eb897d4da990064bd5bd0ec7b5c2", "score": "0.49177134", "text": "def __getitem__(*args):\n return _coin.SoDetailList___getitem__(*args)", "title": "" }, { "docid": "9a027004bb767b6b76d330b5a18a3c28", "score": "0.49177012", "text": "def __init__(self, \n collection, recordfield, view_context=None, \n field_property=None, field_placement=None, \n field_list=None, field_ids_seen=[],\n field_placement_classes=None\n ):\n self._collection = collection\n # log.debug(\"FieldDescription recordfield: %r\"%(recordfield,))\n field_id = recordfield.get(ANNAL.CURIE.id, \"_missing_id_\")\n field_name = recordfield.get(ANNAL.CURIE.field_name, field_id) # Field name in form\n field_label = recordfield.get(RDFS.CURIE.label, \"\")\n field_help = recordfield.get(RDFS.CURIE.comment, \"\")\n field_property = field_property or recordfield.get(ANNAL.CURIE.property_uri, \"\")\n field_placement = field_placement or recordfield.get(ANNAL.CURIE.field_placement, \"\")\n field_placement_c = field_placement_classes or get_placement_classes(field_placement)\n field_placeholder = recordfield.get(ANNAL.CURIE.placeholder, \"\")\n field_tooltip = recordfield.get(ANNAL.CURIE.tooltip, \"\")\n field_render_type = extract_entity_id(recordfield.get(ANNAL.CURIE.field_render_type, \"\"))\n field_value_mode = extract_entity_id(recordfield.get(ANNAL.CURIE.field_value_mode, \"@@FieldDescription:value_mode@@\"))\n field_ref_type = extract_entity_id(recordfield.get(ANNAL.CURIE.field_ref_type, None))\n field_entity_type = recordfield.get(ANNAL.CURIE.field_entity_type, None)\n field_group_ref = extract_entity_id(recordfield.get(ANNAL.CURIE.group_ref, None))\n self._field_desc = (\n { 'field_id': field_id\n , 'field_name': field_name\n , 'field_instance_name': field_name\n , 'field_render_type': field_render_type\n , 'field_value_mode': field_value_mode\n , 'field_value_type': recordfield.get(ANNAL.CURIE.field_value_type, \"\")\n , 'field_label': field_label\n , 'field_help': field_help\n , 'field_property_uri': field_property\n , 'field_placement': field_placement_c\n , 'field_placeholder': field_placeholder\n , 'field_tooltip': field_tooltip\n , 'field_tooltip_test': field_tooltip or (field_help) or \"\"\n , 'field_default_value': recordfield.get(ANNAL.CURIE.default_value, None)\n , 'field_ref_type': field_ref_type\n , 'field_ref_field': recordfield.get(ANNAL.CURIE.field_ref_field, None)\n , 'field_ref_restriction': recordfield.get(ANNAL.CURIE.field_ref_restriction, \"ALL\")\n , 'field_entity_type': field_entity_type\n , 'field_choices': None\n , 'field_group_ref': field_group_ref\n , 'group_label': None\n , 'group_add_label': None\n , 'group_delete_label': None\n , 
'group_field_list': None\n , 'group_field_descs': None\n , 'field_renderer': FieldRenderer(field_render_type, field_value_mode)\n , 'field_value_mapper': get_value_mapper(field_render_type) # Used by fieldvaluemap.py\n })\n self._field_suffix_index = 0 # No dup\n self._field_suffix = \"\"\n # If field references type, pull in copy of type id and link values\n type_ref = self._field_desc['field_ref_type']\n if type_ref:\n restrict_values = self._field_desc['field_ref_restriction']\n entity_finder = EntityFinder(collection, selector=restrict_values)\n entities = entity_finder.get_entities_sorted(\n type_id=type_ref, context=view_context, altscope=\"select\"\n )\n # Note: the options list may be used more than once, so the id generator\n # returned must be materialized as a list\n # Uses collections.OrderedfDict to preserve entity ordering\n self._field_desc['field_choices'] = OrderedDict()\n if field_render_type in [\"Enum_optional\", \"Enum_choice_opt\"]:\n # Add blank choice for optional selections\n self._field_desc['field_choices'][''] = FieldChoice('', label=field_placeholder)\n for e in entities:\n eid = e.get_id()\n val = e.get_type_entity_id()\n if eid != layout.INITIAL_VALUES_ID:\n self._field_desc['field_choices'][val] = FieldChoice(\n val, label=e.get_label(), link=e.get_view_url_path()\n )\n # If field references or contains field list, pull in field details\n if field_list:\n if field_id in field_ids_seen:\n raise Annalist_Error(field_id, \"Recursive field reference in field group\")\n field_ids_seen = field_ids_seen + [field_id]\n group_label = field_label\n add_label = recordfield.get(ANNAL.CURIE.repeat_label_add, None) or \"Add \"+field_id\n remove_label = recordfield.get(ANNAL.CURIE.repeat_label_delete, None) or \"Remove \"+field_id\n group_field_descs = []\n for subfield in field_list:\n f = field_description_from_view_field(collection, subfield, view_context, field_ids_seen)\n group_field_descs.append(f)\n self._field_desc.update(\n { 'group_id': field_id\n , 'group_label': group_label\n , 'group_add_label': add_label\n , 'group_delete_label': remove_label\n , 'group_field_list': field_list # Description from field/group\n , 'group_field_descs': group_field_descs # Resulting field description list\n })\n # log.debug(\"FieldDescription: %s\"%field_id)\n # log.info(\"FieldDescription._field_desc %r\"%(self._field_desc,))\n # log.info(\"FieldDescription.field_placement %r\"%(self._field_desc['field_placement'],))\n return", "title": "" }, { "docid": "0fddfef6258defa7729725569ce0043d", "score": "0.4911086", "text": "def edit_list(model):\n commands = {\n Model.HP_8753D: 'EDITLIST',\n }\n return commands.get(model)", "title": "" }, { "docid": "b94005567f9701056e0d0b57c15d253e", "score": "0.49073035", "text": "def GetLinkInfo(self):", "title": "" }, { "docid": "8ae6970dc23cc3c838db421817efd9ab", "score": "0.48991945", "text": "def labels(self):\n\n if type(self.object_list) == type([]):\n model = self.formset.model\n else:\n model = self.object_list.model\n\n for field in self.visible_fields:\n name = None\n if self.formset:\n f = self.formset.empty_form.fields.get(field, None)\n if f:\n name = f.label\n\n if name is None:\n name = label_for_field(field, model)\n\n if name == model._meta.verbose_name:\n name = self.model_name and self.model_name or \\\n model._meta.verbose_name\n\n stype = None\n cur_sorted = False\n\n sortable = False\n\n if self.order_type:\n sortable = get_sort_field(field, model)\n stype = self.ASC\n\n # change order_type so that next sorting on the same\n # 
field will give reversed results\n if sortable and field == self.sort_field:\n cur_sorted = True\n if self.order_type == self.ASC:\n stype = self.DESC\n elif self.order_type == self.DESC:\n stype = self.ASC\n else:\n stype = self.ASC\n\n\n yield AdminListLabel(name, field, stype, cur_sorted, bool(sortable))", "title": "" }, { "docid": "2f90a7f8d3ec9f3455f8c1188594fb80", "score": "0.48915747", "text": "def parse_detail(self, response):\n modal, make = self.model, self.make\n data = {\"hyperlink\":response.url}\n description = response.css('.navbar>.nav-caption>strong::text').get()\n data[\"description\"] = description\n\n for item in make:\n if item in (description):\n data[\"manufacturer\"] =item\n temp_list_item = []\n for item in model:\n for sub_item in item.split(\"\\n\"):\n check = sub_item.split(\":\")[1][2:-2]\n if check in description:\n temp_list_item.append(check)\n if temp_list_item:\n data['model'] = (max(temp_list_item, key=len))\n data[\"yearOfManufacture\"] = description.split(\" \")[0]\n if \"**\" in description:\n data[\"vrt\"] = ((description.split(\"**\"))[1].split(\"**\")[0])\n else: data[\"vrt\"] = \"\"\n for technical_item in response.css('.technical-params>.row'):\n title = technical_item.css('.technical-headers::text').get()\n detail = technical_item.css('.technical-info::text').get()\n if(title != None):\n #Tax Cost\n if(title == \"Tax Cost \"):\n title = \"yearlyTax\"\n detail = self.formatting_text(technical_item.css('.technical-info>a::text').get())[1:]\n detail = self.check_int_float(re.sub(\"[^0-9]\", \"\", detail))\n\n #Seller Section // Features\n elif(\"Seller\" in title):\n title = \"sellerType\"\n if technical_item.css('.technical-info>span::text').get():\n detail = technical_item.css('.technical-info>span::text').get()\n elif technical_item.css('.technical-info>img').get():\n detail = \"Dealer\"\n else:detail = \"\"\n\n #History check section\n if(technical_item.css('.technical-headers>.technical-p-headers::text').get() == \"History Check\"):\n title = technical_item.css('.technical-headers>.technical-p-headers::text').get()\n detail = {}\n for i in [1,3]:\n for item in technical_item.css(\".technical-info>div:nth-child({})>div\".format(i)):\n if(item.css(\"span:nth-child(1)::text\") != None and item.css(\"span:nth-child(2)>a::text\").get() != None):\n detail[item.css(\"span:nth-child(1)::text\").get()] = self.formatting_text(item.css(\"span:nth-child(2)>a::text\").get())\n\n #Engine Size\n if title == \"Engine Size\":\n title = \"engineSize\"\n #fuelType\n if title == \"Fuel Type\":\n title = \"fuelType\"\n #bodyStyle\n if title == \"Body Style\":\n title = \"bodyStyle\"\n #transmissionType\n if title == \"Transmission\":\n title = \"transmissionType\"\n #mileage\n if title == \"Mileage\":\n title = \"mileage\"\n #colour\n if title == \"Colour\":\n title = \"colour\"\n #co2emission\n if title == \"CO2 Emission\":\n title = \"co2emission\"\n #doors\n if title == \"Doors\":\n title = \"doors\"\n #location\n if title == \"Location\":\n title = \"location\"\n\n #Price Section\n if(len(technical_item.css('.finance-purchase__purchase-content')) > 0):\n title = \"price\"\n detail = technical_item.css(\".finance-purchase__payment-content>.finance-purchase__caption>span::text\").get()[1:]\n detail = self.check_int_float(re.sub(\"[^0-9]\", \"\", detail))\n\n if(title != None and detail != None):\n if(isinstance(detail,dict)):\n for item in detail.keys():\n if \"was\" in detail[item]:\n data[\"was\"+item[:5]] = True\n else: data[\"was\"+item[:5]] = False\n break\n 
if(title[0] != \"\\n\") & (\"MOT Expiry\" not in title) & (\"Insurance\" not in title) & (\"Warranty\" not in title):\n number_list = [\"mileage\",\"engineSize\",\"co2emission\",\"Towing Weight\",\"Payload\",\"doors\"]\n if (title in number_list):\n data[title] = self.check_int_float((re.sub(\"[^0-9]\", \"\", detail)))\n else:data[title] = detail\n if (self.update):\n self.update_csv(data)\n return data", "title": "" } ]
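The query above ("custom list display field with detail page link") and its positive passage describe a Django ModelAdmin callable that renders each object's name as a link to a custom admin detail view. A minimal, hedged sketch of that pattern follows; the Program model, the ProgramAdmin class, and the admin:program_program_detail URL name are assumptions mirroring the passage, and the custom detail view is assumed to be registered via a get_urls override as in the related passages:

from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html

from .models import Program  # hypothetical model, stands in for whatever the project defines


@admin.register(Program)
class ProgramAdmin(admin.ModelAdmin):
    # The callable's name goes into list_display like any ordinary field name.
    list_display = ("name_with_detail_link",)

    def name_with_detail_link(self, obj):
        # Resolve the custom admin detail URL and wrap the object's name in an anchor tag.
        detail_url = reverse(
            "admin:program_program_detail", kwargs={"object_id": obj.pk}
        )
        return format_html("<a href='{}'>{}</a>", detail_url, obj.name)

    name_with_detail_link.short_description = "Name"  # column header in the change list

Because format_html escapes its arguments, building the anchor this way avoids injecting unescaped object names into the change-list HTML.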
bbd2618f260fa87d81098f2ff45fac81
Calculate mean value of each individual ensemble. Output is array of values, with the row denoting the ensemble size, and the column the ensemble number
[ { "docid": "b82ed16086ee487a802c2e45ce774e8b", "score": "0.85388535", "text": "def get_ensemble_mean(self):\n #edit 08/08/2019: new function, virtually the same as get_ensemble_min and get_ensemble_max, but for mean value of each ensemble\n\t\t\n self.ensemble_mean=np.zeros((np.size(self.data),np.shape(self.data)[0]))\t\t\n for i in range(np.size(self.data)):\n if i<2:\n self.ensemble_mean[i,:]=np.nan\n else: \n puff_numbers=self.get_ensembles(ensemble_size=i)\t\n self.ensemble_mean[i,:]=np.matlib.repmat(self.data,np.shape(self.data)[0],1)[0,puff_numbers].mean(axis=1)", "title": "" } ]
[ { "docid": "50290dcda63e53e00a4ec5bc68d2e814", "score": "0.71021867", "text": "def get_ensemble_average(self, outputs):\n predictions = [output for output in outputs]\n predictions = torch.stack(predictions, dim=1)\n average_prediction = predictions.mean(1)\n\n return average_prediction", "title": "" }, { "docid": "bb33ee0aa954a88e7e4180831e1f65fd", "score": "0.6930079", "text": "def mean(self):\n vec = self.data\n numrows = len(vec) \n numcols = len(vec[0]) \n m_vec = []\n sum = 0.0;\n for i in range(0,numrows):\n for j in range(0,numcols):\n sum += vec[i][j]\n m_vec = sum/(numrows*numcols)\n return m_vec", "title": "" }, { "docid": "acdbce169e103ad3293f6311102cfc75", "score": "0.67743635", "text": "def mean(self) -> chex.Array:\n return jnp.sum(self.probs_parameter() * self._values, axis=-1)", "title": "" }, { "docid": "7a4e9bda4f5c6d97dfd185649ca92ee1", "score": "0.6698942", "text": "def mean(self):\n return np.mean(self.facepicsrep, axis=0)", "title": "" }, { "docid": "71c1a0c2ceb04cdcd30bfbe947b40f1d", "score": "0.6663481", "text": "def mean(self) -> pa.FloatingPointArray:\n return ListChunk.reduce(self, pc.mean, pa.float64())", "title": "" }, { "docid": "d41a60d3c94c4c0724e536686d2fea37", "score": "0.66515636", "text": "def average(x, nbins):\n n = len(x); \n nm = n/nbins * nbins;\n x_mean = x[:nm];\n x_mean = x_mean.reshape([nm/nbins, nbins]);\n x_mean = np.nanmean(x_mean, axis = 1);\n x_mean = np.hstack([x_mean, [np.nanmean(x[nm:])]]);\n return x_mean;", "title": "" }, { "docid": "f9e18a46a24a31777bec5f0c02910aee", "score": "0.66183436", "text": "def calculate_mean(num_array):\n mean = 0\n for elem in num_array:\n mean += elem\n\n return mean / len(num_array)", "title": "" }, { "docid": "bb4b1d55949d7ed98bc0d4f9a0d00e45", "score": "0.6616439", "text": "def mean(X) :\r\n\r\n return np.sum(X, axis = 0) / X.shape[0]", "title": "" }, { "docid": "c4671b0711227660db1afd3802b066bf", "score": "0.6594574", "text": "def compute_mean(data):\n\n\tm = np.mean(data,axis=0)\n\treturn m", "title": "" }, { "docid": "977fecd74e8156325666a091153fb66a", "score": "0.657964", "text": "def _ave(self):\n\n return np.mean(self.full_estim.X, axis=0)", "title": "" }, { "docid": "17d129ddc9498494a840092a843293a8", "score": "0.6578661", "text": "def mean(self):\n return self.apply(lambda x: np.mean(x, axis=0))", "title": "" }, { "docid": "5c955f14959e30100cae106a81828594", "score": "0.6569316", "text": "def __mean(self):\n for elm, elm_wls in self.data.elements.items():\n count = len(elm_wls)\n total_x_var = 0\n total_1_var = 0\n for wl_label in elm_wls:\n total_x_var += self.results[wl_label][2] \\\n / self.results[wl_label][3]**2\n total_1_var += 1 / self.results[wl_label][3]**2\n self.means[elm] = (total_x_var / total_1_var,\n np.sqrt(count / total_1_var))", "title": "" }, { "docid": "9fe9f7aea58c817d251fbe479e422fd0", "score": "0.65499353", "text": "def _emprical_mean(self,x_value,mb):\n value = 0\n sum_mean = sum([x_value[k] for k in range(1, mb)])\n return sum_mean/mb", "title": "" }, { "docid": "3e4dc30cdbef0ec0dd08472793d9d505", "score": "0.6549698", "text": "def mean(container):\n avg = 0\n \n for i in container:\n avg+=i/float(len(container))\n return(avg)", "title": "" }, { "docid": "02f179d041b3b0cec8d1071124ae4fb1", "score": "0.6522487", "text": "def mean(self):\n return self._calculate_stats(np.mean)", "title": "" }, { "docid": "de1c49452d034d4175259b4af808531c", "score": "0.6516505", "text": "def manual_mean(arr):\n sum = 0\n for i in xrange(0, arr.shape[0]):\n for j in xrange(0, 
arr.shape[1]):\n sum += arr[i, j]\n return sum / arr.size", "title": "" }, { "docid": "58bbba4ac73aa076fb4543c90a09f81c", "score": "0.65153676", "text": "def multi_mean(data):\n return np.mean(data, axis=0)", "title": "" }, { "docid": "fa5722e560b65ebe6cd3067419efd73b", "score": "0.6515073", "text": "def mean(values):\r\n return sum(values) / len(values)", "title": "" }, { "docid": "729b9055921410cb095e73f0fe9c060e", "score": "0.65086097", "text": "def mean_accum(data):\n result = np.zeros(data[0].shape, np.float64) # init output array\n for dataslice in data:\n # this for loop isn't such a bad thing cuz the massive add step inside the loop\n # is the limiting factor\n result += dataslice\n result /= len(data)\n return result", "title": "" }, { "docid": "afd094e912a15a288e485a20d67cf39a", "score": "0.65070194", "text": "def mean(X):\n # mean = np.sum(X, axis=0) / X.shape[0]\n mean = np.mean(X, axis=0)\n return mean", "title": "" }, { "docid": "d46e3473e6c7dfcc3696414eccb96d0d", "score": "0.6460672", "text": "def _mean_filter(eda):\n mean_arr = np.array([])\n for i in range(MEAN_KERNEL_WIDTH, len(eda) - MEAN_KERNEL_WIDTH):\n mean = np.mean(eda[i - MEAN_KERNEL_WIDTH: i + MEAN_KERNEL_WIDTH + 1])\n mean_arr = np.append(mean_arr, mean)\n return mean_arr", "title": "" }, { "docid": "69fb3ee387953a7b7b4062a3ecc2b0e4", "score": "0.645016", "text": "def ensemble_average():\n fig, ax = plt.subplots(figsize=(6*0.7, 3.5*0.7))\n\n # Ensure graph is the same if needed\n np.random.seed(1)\n\n npts = 200\n navg = 20000\n maxt = 10\n\n x = np.linspace(0, maxt, npts)\n\n # Ensemble averaging\n y = np.random.random(npts) * 0.5*np.sin(np.pi/5*x) + \\\n 0.5*np.random.random(npts) + 0.75\n ax.plot(x, y, 'k', lw=1, label=r'$u_i(\\vec{x}, t)$')\n\n ymean = 0\n for i in range(navg):\n np.random.seed(i)\n ymean += np.random.random(npts) * 0.5*np.sin(np.pi/5*x) + \\\n 0.5*np.random.random(npts) + 0.75\n ymean = ymean/navg\n ax.plot(x, ymean, '--k', lw=1, label=r'$u_E(\\vec{x}, t)$')\n\n ax.set_ylim([0, 2])\n ax.set_xlim([0, maxt])\n ax.set_xlabel(r'$t$')\n ax.set_ylabel(r'$u$')\n\n fig, ax = convert_to_graph(fig, ax)\n plt.legend()\n plt.savefig('ensemble_averaging.png')\n plt.close(fig)", "title": "" }, { "docid": "72fb105a127ec34efdae84337fff051e", "score": "0.6445197", "text": "def average(self):\n return np.average(self)", "title": "" }, { "docid": "457e486da5a24897762e720c78ac55d3", "score": "0.6443342", "text": "def _ave(self):\n\n return np.mean(self.model_estim.X, axis=0)", "title": "" }, { "docid": "457e486da5a24897762e720c78ac55d3", "score": "0.6443342", "text": "def _ave(self):\n\n return np.mean(self.model_estim.X, axis=0)", "title": "" }, { "docid": "cce6d95e891a91a0aa26a2c2e90d20fc", "score": "0.6434196", "text": "def intra_mean(self, values):\n # TODO Check that quantity is valid\n if values.ndim > 1:\n return np.array([np.mean(values[x,:], axis=0) for x in self.allocations_])\n else:\n return np.array([np.mean(values[x]) for x in self.allocations_])", "title": "" }, { "docid": "25fddd5d7e634105fc44552ae28e9521", "score": "0.6406734", "text": "def _genaverage(self, parray, **kwargs):\n output = np.empty((0, 5))\n for array in parray:\n output = np.append(output, np.mean(array, axis=0).reshape((1, 5,)),\n axis=0)\n\n return output", "title": "" }, { "docid": "b454ffb63d60ee8248b1b6ff6401f062", "score": "0.6396374", "text": "def _ave(self):\n return self.X.mean(axis=0)", "title": "" }, { "docid": "372cd0068dba53320967f135b5cc612e", "score": "0.6390503", "text": "def array_mean(array):\n \n width = 
len(array[0])\n return np.sum(array, axis = 1)/width", "title": "" }, { "docid": "0aebc878553a5f6c96142889e305ad27", "score": "0.6371311", "text": "def mean(self):\n return self.results.mean()", "title": "" }, { "docid": "842175806b3596f834f63e3609a4316c", "score": "0.63698643", "text": "def X_means(self):\n return np.average(self.arrX_input, axis=0).reshape(1,-1)", "title": "" }, { "docid": "842175806b3596f834f63e3609a4316c", "score": "0.63698643", "text": "def X_means(self):\n return np.average(self.arrX_input, axis=0).reshape(1,-1)", "title": "" }, { "docid": "dd9b4e3a65610f4b01359c5f84e522cb", "score": "0.63667053", "text": "def mean(values):\n return sum(values) / float(len(values))", "title": "" }, { "docid": "4e20e27b7aa6d8ead42fc1b3c4fb262a", "score": "0.6364998", "text": "def mean(self):\n return np.average(self.state_vector, axis=1)", "title": "" }, { "docid": "c0efbe98fd6b8c40ab2f3afd7f34575c", "score": "0.6343549", "text": "def _calc_mean(self):\n return sum(self.data, 0.0) / len(self.data)", "title": "" }, { "docid": "0fb87aa65e6db861bb9a911ad046dd56", "score": "0.6339938", "text": "def mean_vector(self):\n return self._mean", "title": "" }, { "docid": "ffcf6bed0b4fd44a42af0fe399ac80f4", "score": "0.63343066", "text": "def mean(self):\n data = self.data.values()\n sum = 0.\n for item in data:\n sum += float(item)\n return sum/len(data)", "title": "" }, { "docid": "94b1dacc77c588f33ce8f1f4d4bee328", "score": "0.63334805", "text": "def _calc_mean(self, chunk):\n\n # for sample in chunk:\n # self.n += 1.0\n # self.av = (self.n - 1) / self.n * self.av + sample / self.n\n\n # nx + sum(m) n sum(m)\n # -------------- = x ------------ + ------------\n # n + len(m) n + len(m) n + len(m)\n \n self.n += float(len(chunk))\n self.av = (self.n - len(chunk)) / self.n * self.av + sum(chunk) / self.n\n\n return self.av", "title": "" }, { "docid": "8362935d5690f14dea59d63b2ed5fcc7", "score": "0.63212097", "text": "def mean_naive(X):\n N, D = X.shape\n mean = np.zeros(D)\n for m in range(D):\n k=0\n for n in range(N):\n k+=X[n,m]\n smean=k/N\n mean[m]=smean\n return mean", "title": "" }, { "docid": "ce774e034bd522f93539b62f0617d811", "score": "0.6318303", "text": "def Mean(self):\n return self._sum_x / self._n", "title": "" }, { "docid": "1f2a25b70bfbb1d3fa2fcd369700c8b8", "score": "0.63081944", "text": "def get_ensemble_variance(self):\n #edit 08/08/2019: new function, virtually the same as get_ensemble_min, get_ensemble_max, and get_ensemble_mean but for variance of each ensemble.\n\t\t\n self.ensemble_var=np.zeros((np.size(self.data),np.shape(self.data)[0]))\t\t\n for i in range(np.size(self.data)):\n if i<2:\n self.ensemble_var[i,:]=np.nan\n else: \n puff_numbers=self.get_ensembles(ensemble_size=i)\t\n self.ensemble_var[i,:]=np.matlib.repmat(self.data,np.shape(self.data)[0],1)[0,puff_numbers].var(axis=1)", "title": "" }, { "docid": "cf4662fa5bfbd10d143a51dd779873aa", "score": "0.6300281", "text": "def mean_(X: ndarray) -> ndarray:\n return mean(X, axis=0).reshape(1, X.shape[1])", "title": "" }, { "docid": "3f2b8ee0c1ce4978c681b73688863a84", "score": "0.6296941", "text": "def get_mean(data):\n mean = sum(data)/len(data)\n return mean", "title": "" }, { "docid": "f2e4934041268fd50894ba1f0f1f862a", "score": "0.6278936", "text": "def get_average(data):\n \n n_meas = len(data)\n \n if len(data.shape) != 1:\n n_obs = len(data[0])\n avg = np.zeros((n_obs))\n for i in range(n_meas):\n for j in range(n_obs):\n avg[j] += data[i,j]\n \n else:\n avg = 0.0\n for i in range(n_meas):\n avg += data[i]\n \n 
avg /= 1.0*n_meas\n return avg", "title": "" }, { "docid": "bfc50aac7e5900bc88c6cd7faaad92cd", "score": "0.6268079", "text": "def calculate_mean(self): \r\n\r\n return self.n * self.p", "title": "" }, { "docid": "ffba68cbecec76c419a5de7d72945d8f", "score": "0.6266025", "text": "def find_mean(array):\n mean = np.mean(array,axis=0)\n return mean", "title": "" }, { "docid": "d6acfedf6d551d33dd203d91fd0116fc", "score": "0.62597954", "text": "def calculate_mean(self):\n self.mean = st.mean(self.data)\n return self.mean", "title": "" }, { "docid": "60d0c8e24ee6350dfdd03df2c809500a", "score": "0.6253372", "text": "def calc_mean(values):\n values_sum = 0\n for value in values:\n values_sum += value\n return values_sum / len(values)", "title": "" }, { "docid": "4de2021e018a4266b60a8c8b5fc2dfd1", "score": "0.6252569", "text": "def mean(data): \n return pd.Series(data).mean()", "title": "" }, { "docid": "d4188792dc15c6b377ab2026f69bf0e3", "score": "0.62512314", "text": "def gmean(self):\n return self.apply(lambda x: gmean(x, axis=0))", "title": "" }, { "docid": "e9685be676ca6c7aba40c5e5043248fe", "score": "0.62506455", "text": "def approx_mean(self):\n\n for i in range(self.n):\n self.means[self.y[i]:self.y[i] + 1] += self.X[i:i + 1]\n\n for k in range(self.k):\n self.means[k] = np.multiply((1 / self.counts[k]), self.means[k])", "title": "" }, { "docid": "2b94b540dab95bbe0e384d1b3a6403da", "score": "0.62366754", "text": "def mean(self, x):\n return np.mean(x)", "title": "" }, { "docid": "318bca156854da8891ab573ec3fc4e64", "score": "0.62319255", "text": "def find_average(feature_list):\n avg_feature_list = [np.mean(np.array(feature_list[i])) for i in range(6)]\n \n return avg_feature_list", "title": "" }, { "docid": "304a8ed2c9ef58ed766505371508f42a", "score": "0.6229052", "text": "def mean(x):\n return sum(x) / len(x)", "title": "" }, { "docid": "43a8ffd0f20993286fe450cbad661f66", "score": "0.6220122", "text": "def _getmean(self):\n b = [self.coefs[term] for term in self.terms]\n return np.sum(np.array(b)*self.terms)", "title": "" }, { "docid": "ab4acd971c491cbdb5b056e8408b14be", "score": "0.6219637", "text": "def mean(self, values):\n return sum(values) / len(values)", "title": "" }, { "docid": "d82668af57077e65156dfac86ab4228f", "score": "0.6206405", "text": "def mean(datasets):\n return [np.mean(x, axis=0) for x in datasets]", "title": "" }, { "docid": "06493b962bede5a55c76ed176e7cb943", "score": "0.6204836", "text": "def average(self):\n return self._data[\"sum\"]/self._data[\"nValues\"]", "title": "" }, { "docid": "e0964800cf41c745a5ed8488cc17d42b", "score": "0.61913675", "text": "def mean(X):\n mean = np.zeros(X.shape[1]) # EDIT THIS\n mean = np.mean(X, axis=0) \n return mean", "title": "" }, { "docid": "f6cda23dadabff4916e0a7fcddaf6795", "score": "0.6184613", "text": "def get_mean(self, X):\n\n #handles both vectors and scalars\n if len(X) > 0:\n return [0.5] * len(X)\n else:\n return 0.5", "title": "" }, { "docid": "72d9a49c2aad009c939c1e34c37b984f", "score": "0.61800694", "text": "def mean(self):\n return self.m", "title": "" }, { "docid": "bd8553fcfe97646e18952a7dfe4b2d0c", "score": "0.6178019", "text": "def get_ensemble_variance(self):\n #edit 08/08/2019: new function, virtually the same as get_ensemble_min, get_ensemble_max, and get_ensemble_mean but for variance of each ensemble.\n\t\t\n self.ensemble_std=np.zeros((np.size(self.data),np.shape(self.data)[0]))\t\t\n for i in range(np.size(self.data)):\n if i<2:\n self.ensemble_std[i,:]=np.nan\n else: \n 
puff_numbers=self.get_ensembles(ensemble_size=i)\t\n self.ensemble_std[i,:]=np.matlib.repmat(self.data,np.shape(self.data)[0],1)[0,puff_numbers].std(axis=1)", "title": "" }, { "docid": "ac290414ef04d0c5029f0fe629ffb9cf", "score": "0.616326", "text": "def mean(self):\n return self.value", "title": "" }, { "docid": "2b98ec184148cbfe91e697c1827469ee", "score": "0.61389476", "text": "def mean(self):\n return np.mean(self.space_array, axis=0)", "title": "" }, { "docid": "1e6ef76d71d681745eb51dfb5ab4560c", "score": "0.6127086", "text": "def mean(X):\n # given a dataset of size (D, N), the mean should be an array of size (D,1)\n # you can use np.mean, but pay close attention to the\n # shape of the mean vector you are returning.\n D, N = X.shape\n # Edit the code to compute a (D,1) array `mean` for the mean of dataset.\n mean = X.mean(axis=1).reshape(D, 1)\n return mean", "title": "" }, { "docid": "e5d6aa72bc675001a51c11d4742dccaa", "score": "0.61259", "text": "def mean(self):\n total = 0.0\n for bin in self:\n total += bin.count\n return total / len(self)", "title": "" }, { "docid": "169ef6060fca7202f5294284892771a5", "score": "0.61168236", "text": "def average(values):\n return sum(values) / len(values)", "title": "" }, { "docid": "169ef6060fca7202f5294284892771a5", "score": "0.61168236", "text": "def average(values):\n return sum(values) / len(values)", "title": "" }, { "docid": "450b63cec789fe650e01ac87b61f94ff", "score": "0.60981137", "text": "def getAverageAggregator(self):\n return self._average", "title": "" }, { "docid": "d3309eb7434c32e65cc0e40ab159c2bd", "score": "0.6097863", "text": "def calculate_mean(self):\n return self.p * self.n", "title": "" }, { "docid": "95634e9921a71924ae80a9aee345a015", "score": "0.60886467", "text": "def mean(input, dim):\n pass", "title": "" }, { "docid": "3372df6bd10e495cde44dac7fa82f0aa", "score": "0.6084342", "text": "def reduce_mean(input):", "title": "" }, { "docid": "7ff4e0e37a0f46f7a915997bd6b1959e", "score": "0.6083269", "text": "def mean(self,*,axis=1):\n try:\n means = np.nanmean(self.data, axis=axis).squeeze()\n if means.size == 1:\n return np.asscalar(means)\n return means\n except IndexError:\n raise IndexError(\"Empty BinnedEventArray; cannot calculate mean.\")", "title": "" }, { "docid": "5e94e5a2246b4d5d628eb1cc8450b7bd", "score": "0.6075295", "text": "def running_mean(x, N):\n out = np.zeros_like(x, dtype=np.float64)\n dim_len = x.shape[0]\n for i in range(dim_len):\n if N%2 == 0:\n a, b = i - (N-1)//2, i + (N-1)//2 + 2\n else:\n a, b = i - (N-1)//2, i + (N-1)//2 + 1\n\n #cap indices to min and max indices\n a = max(0, a)\n b = min(dim_len, b)\n out[i] = np.mean(x[a:b])\n return out", "title": "" }, { "docid": "e1ec77fef07d85e916e2239912b32661", "score": "0.60750586", "text": "def mean(self):\n data = self.get_data(None)\n if data is not None:\n return data.mean(squeeze=True)\n\n raise ValueError(\n \"ERROR: Can't get the mean when there is no data array\"\n )", "title": "" }, { "docid": "9afc4876748aaf5319ce13e997a150a6", "score": "0.6072623", "text": "def grand_mean(x):\r\n return np.mean(x)", "title": "" }, { "docid": "40f6abf5871b408ca94d04ba009a734d", "score": "0.6066953", "text": "def meanAge(self):\n data = self.data\n ds = data.groupby(['PARTICIPANT_ID']).agg('mean').reset_index()\n ndr_age = ds.AvgAge.tolist()\n return np.array(ndr_age).reshape(-1,1)", "title": "" }, { "docid": "7038629cbfc18db2ec95bf10702225c0", "score": "0.6066128", "text": "def avg_fn (data):\n\treturn np.mean(data)", "title": "" }, { "docid": 
"198f98a357ca69b088578613ea2092f4", "score": "0.605865", "text": "def get_mean(sampling_fun, N, M):\n ans = []\n for m in range(M):\n ans.append(np.mean(sampling_fun(N)))\n return np.asarray(ans)", "title": "" }, { "docid": "afd8538865ea38c80c958ec9766548ba", "score": "0.6057457", "text": "def mean(self):\n return self.copy(coefficients=numpy.mean(self.coefficients, axis=0))", "title": "" }, { "docid": "366abb996c2a3ef360be8aac9ca1f184", "score": "0.6054785", "text": "def mean_pooling(img, scale):\n h_, w_ = 0, 0\n save_mean = []\n for h in range(10, 110, scale):\n h_ = h_ + 1\n for w in range(10, 189, scale):\n save_mean.append(img[h - 10: h, w - 10: w].mean())\n w_ = len(save_mean) // h_\n save_mean = np.asarray(save_mean).reshape(h_, w_)\n return save_mean", "title": "" }, { "docid": "473d1aa731f91bdcf3067f4f80126c7f", "score": "0.60534716", "text": "def mean(inp_list):\n return np.mean(inp_list)", "title": "" }, { "docid": "9533f63a83a4599134664d8460e515ec", "score": "0.6048591", "text": "def avg(self):\n return sum(range(1,self.num+1))/self.num", "title": "" }, { "docid": "083c4edec51bf0509599a23ba0e65ec4", "score": "0.604489", "text": "def mean_face(faces):\n mean_face = mean(faces)\n return mean_face", "title": "" }, { "docid": "cd9c634216bdb2c3c94e43ec383c5350", "score": "0.6044063", "text": "def _mean(listvalue):\n return sum(listvalue) / len(listvalue)", "title": "" }, { "docid": "5d5c3660ffcb7d8ccb59f81e1c0f6757", "score": "0.6042098", "text": "def mean(self):\n if len(self) == 1: # No need to calculate mean\n return self.state_vector\n return np.average(self.state_vector, axis=1, weights=np.exp(self.log_weight))", "title": "" }, { "docid": "5119f6654fe1245ffd96889f32389c56", "score": "0.60396236", "text": "def arithmetic_mean(array, axis=0):\n import numpy as np\n \n return np.mean(array, axis=axis)", "title": "" }, { "docid": "baf359db4df5958ea0e021860786ddcf", "score": "0.60371935", "text": "def _mean(x, mode=\"arithmetic\"):\r\n assert mode in [\"arithmetic\", \"geometric\"]\r\n if mode == \"arithmetic\":\r\n x_mean = x.mean(axis=1)\r\n else:\r\n x_mean = np.exp(np.log(x + 1e-7).mean(axis=1))\r\n x_mean = x_mean / x_mean.sum(axis=1, keepdims=True)\r\n return x_mean", "title": "" }, { "docid": "fcb287891fe196ff64a90390b55c5b31", "score": "0.6029203", "text": "def get_mean(data):\r\n if not data:\r\n raise Exception(\"Empty data.\")\r\n sum_x = sum(data)\r\n mean = float(sum_x) / len(data)\r\n return mean", "title": "" }, { "docid": "3adf1d3982acadc7cc1389cb85931105", "score": "0.6026552", "text": "def mean(self) -> Tensor:\n return self.values", "title": "" }, { "docid": "00baf3f8c60b1b0ea0688869f1d2af54", "score": "0.60240227", "text": "def mean(self):\n return self._mean", "title": "" }, { "docid": "00baf3f8c60b1b0ea0688869f1d2af54", "score": "0.60240227", "text": "def mean(self):\n return self._mean", "title": "" }, { "docid": "43ffd54d71d8812bf00f0eafb286e3c8", "score": "0.60188514", "text": "def mean(self):\n return self._impl.stats(self).mean()", "title": "" }, { "docid": "12ee1ffd1900504a7e43ed83bae9ba40", "score": "0.6016554", "text": "def mean(self):\n return self.concentration / self.concentration.sum(-1, keepdim=True)", "title": "" }, { "docid": "ee2966ef3a8081d3fa00a827acff5e4b", "score": "0.6014511", "text": "def meanColor(self):\n return np.array([f.meanColor() for f in self])", "title": "" }, { "docid": "fa631caf7d48781222ba181383cb5d36", "score": "0.6010814", "text": "def get_mean(a, axis=None):\n return np.mean(a, axis=axis)", "title": "" }, { "docid": 
"f2979dccbb7f3249a966968060d49cc0", "score": "0.60059345", "text": "def mean(numbers):\n return 1.0 * summation(numbers)/len(numbers)", "title": "" }, { "docid": "74310ea3eeb998929bfc072a04d743f5", "score": "0.59986997", "text": "def mean(self, values, axis=0):\n values = np.asarray(values)\n if axis: values = np.rollaxis(values, axis)\n count = self.count.reshape(-1,*(1,)*(values.ndim-1))\n return self.unique, self.reduce(values) / count", "title": "" }, { "docid": "32275121eb7f909a17d9db009af0f566", "score": "0.5996093", "text": "def average(values):\n return sum(values, 0.0) / len(values)", "title": "" }, { "docid": "32275121eb7f909a17d9db009af0f566", "score": "0.5996093", "text": "def average(values):\n return sum(values, 0.0) / len(values)", "title": "" } ]
b457f85545bdbe626233021296bda052
Inserts current datetime into self.text_field from menu
[ { "docid": "95ccaab5628f955541d65e71bfae1ca1", "score": "0.78224796", "text": "def do_time_date(self) -> None:\n text = datetime.datetime.now().isoformat()\n self.text_field.buffer.insert_text(text)", "title": "" } ]
[ { "docid": "504bed008c61e56657817a7d85fd8522", "score": "0.68654907", "text": "def OnInsertDate(self, event):\n self.AddText(str(time.ctime()))\n event.Skip()", "title": "" }, { "docid": "41bb243f0f260b6a653da25124d49ba4", "score": "0.66584486", "text": "def changeText(self):\n text = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n self.setText(text)", "title": "" }, { "docid": "34bbafb9222c3c3dc47ac7a1336383e2", "score": "0.6424974", "text": "def update_time(self):\n now = time.localtime()\n hour = now[3]\n minute = now[4]\n year = now[0]\n month = now[1]\n day = now[2]\n time_format_str = \"%d:%02d\"\n date_format_str = \"%d-%02d-%02d\"\n if self.am_pm:\n if hour >= 12:\n hour -= 12\n time_format_str = time_format_str+\" PM\"\n else:\n time_format_str = time_format_str+\" AM\"\n if hour == 0:\n hour = 12\n time_str = time_format_str % (hour, minute)\n print(time_str)\n date_str = date_format_str % (year, month, day)\n self.date_text.text = date_str\n self.time_text.text = time_str", "title": "" }, { "docid": "459c4b8da60de833cd8628cbe5eb6a13", "score": "0.625469", "text": "def menu_time(self, time=None):\r\n if (time == None):\r\n time = float(self.widgets['update_time'].get())\r\n else:\r\n self.widgets['update_time'].setentry(str(time))\r\n self.update_time = time\r\n if (self.after_id != None): self.top.after_cancel(self.after_id)\r\n self.timer()", "title": "" }, { "docid": "614f5b59c36d76d550255642537bd82a", "score": "0.6088324", "text": "def update_value(self, inst):\n\n self.text = \"%s.%s.%s\" % tuple(self.cal.active_date)\n self.dispatch('on_confirm', self.text)", "title": "" }, { "docid": "5a10b97e8dd6999b11e6e38a03d3bbb5", "score": "0.60697716", "text": "def show_time_and_date(self, t=None):\n\n current_time = rtc.get_time()\n current_date = rtc.get_date()\n\n lcd.text(lcd.CENTER, 20, \"%02d:%02d:%02d\" % current_time, color=lcd.WHITE)\n\n if current_date[2] != self.date:\n lcd.font(\"data/fonts/arial16.fon\")\n\n lcd.textClear(lcd.CENTER, 55, \"00-00-00\")\n lcd.text(lcd.CENTER, 55, \"%02d-%02d-%02d\" % (current_date[2] + 1, current_date[1] + 1, current_date[0]), color=lcd.WHITE)\n\n lcd.font(\"data/fonts/ariblk28.fon\") # Restore Time font.\n\n self.date = current_date[2]", "title": "" }, { "docid": "98f35949e0ec54edf07bc790584e22fd", "score": "0.6013537", "text": "def get_date(self, widget):\n pass", "title": "" }, { "docid": "bf1f09fa27bfb2d5fc12959a6c3614b8", "score": "0.5967441", "text": "def init_date(self):\n now = datetime.datetime.now()\n day = now.day\n month = now.month\n year = now.year\n today = QtCore.QDate(year, month, day)\n self.master_date_value = today\n self.ui.master_date.setDate(today)", "title": "" }, { "docid": "7ba7eb6e064970c9f4f34b7cce4ef631", "score": "0.58008516", "text": "def change_date(self, widget, date):\n pass", "title": "" }, { "docid": "aee839aad9bd10cee3f9d93a88cbccef", "score": "0.5800275", "text": "def get_date(self):\n\n self.date = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")", "title": "" }, { "docid": "4c2025be5152d677bbba37afb1428385", "score": "0.5787586", "text": "def updtTime(self):\n currentTime = QDateTime.currentDateTime().toString('hh:mm:ss')\n self.myTimeDisplay.display(currentTime)", "title": "" }, { "docid": "5dca56a9ec99896aa382fa7f823e55a2", "score": "0.57127273", "text": "def set_service_level_begin_date(self):\n self.set_value_into_input_field(self.begin_date_inputbox_locator, self.get_date(current_date=True))", "title": "" }, { "docid": "1c76f605a0413729efd27caf8d12b1d0", "score": "0.5697221", "text": "def 
set_datetime_field(self, num, loading = False):\n self.editor.paramSizer.Clear(True)\n if num == 1:\n self.editor.paramWidget = DateTimeCtrl( parent = self.editor.parent, width = 450, \\\n update_state = self.whereController.change_made, \\\n condition = self.condition, isLoading = loading)\n else:\n self.editor.paramWidget = DateBetweenValue(self.editor.parent, width = 450, update_state = self.whereController.change_made,\\\n condition = self.condition, typeDetails = self.typeDetails, isLoading = loading)", "title": "" }, { "docid": "4783ab693cac81391d16221a6fed6ae6", "score": "0.5694268", "text": "def add_time(self):\n calendar_time = str(self.dockwidget.calendar.selectedDate().toPyDate())\n\n if self.active_time == 'time0' and (self.dockwidget.exactDate.isChecked() or not self.time1 or\n calendar_time <= self.time1):\n self.time0 = calendar_time\n self.dockwidget.time0.setText(calendar_time)\n elif self.active_time == 'time1' and (not self.time0 or self.time0 <= calendar_time):\n self.time1 = calendar_time\n self.dockwidget.time1.setText(calendar_time)\n else:\n self.show_message('Start date must not be larger than end date', Message.INFO)", "title": "" }, { "docid": "6993b86355515d35c9b3c461323e07f3", "score": "0.5678011", "text": "def _update(self):\n time = QTime.currentTime().toString()\n self.display(time)", "title": "" }, { "docid": "080d4700b2294b1e88a413c45e2027b2", "score": "0.56624186", "text": "def set_now_str(self, is_start_time):\n pass", "title": "" }, { "docid": "b855fe350b346887468d7e6b360c78cb", "score": "0.56537503", "text": "def clicked(self):\n\n # creates string for time display\n timer = strftime('Day: %d %b %Y\\nTime: %H:%M:%S %p\\n', localtime())\n\n # display time in a pop-up window\n showinfo(message=timer)", "title": "" }, { "docid": "8f75dcaabc86df4a25e0206e089e6f1f", "score": "0.5652776", "text": "def arrive(self):\n\n def callback(*args):\n \"\"\"Callback to update start_date of the destination\"\"\"\n if args[1] is None:\n return # cancel\n\n try:\n start_date = iso_date(args[1])\n except:\n Alert(title='Arrive Date', text=\"Please input a date in yyyy-mm-dd format\")\n return\n\n self._update(start_date=iso_date_string(start_date))\n\n InputPopup(title='Arrive Date',\n text='When did you arrive {}?'.format(self.item.name),\n value=self.item.start_date,\n initial=iso_date_string(today()),\n on_value=callback)", "title": "" }, { "docid": "e3804309414bdad9022ddaa54ad4cfaa", "score": "0.5650339", "text": "def set_now(self):\n pass", "title": "" }, { "docid": "8916c9db2835f80084b9d4db9bd6d910", "score": "0.5643638", "text": "def get_current_date_time(self, choice):\n\n if choice in ('1', '2', '3'):\n\n # Get current date and time.\n now = datetime.now()\n # Get the time stamp.\n timestamp = datetime.timestamp(now)\n\n if choice == '1':\n # Display current date and time.\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n print(\"current date and time =\",date_time)\n elif choice == '2':\n # Dispay the time stamp.\n print(\"time stamp =\", timestamp)\n elif choice == '3':\n # Get current date and time from the time stamp.\n # and display the result.\n dt_object = datetime.fromtimestamp(timestamp)\n print(\"(dt_object - date and time from time stamp =\", dt_object)\n print(\"type(dt_object) =\", type(dt_object))\n else:\n print(\"invalid choice\")", "title": "" }, { "docid": "4005b68441fc23d03b38e680ca033edf", "score": "0.55390495", "text": "def timestamp_changed(self):\n self.text_settings.timestamps = self.CBTime.isChecked()", "title": "" }, { "docid": 
"daa525cac4f1f64651f57ad640012b78", "score": "0.5507961", "text": "def set_current_date(self, date):\r\n self._current_date = date", "title": "" }, { "docid": "3339be49ee8bc8c4aaf44ef2d46b798b", "score": "0.5499776", "text": "def get_add_new_transaction_date(self):\n return self.get_text_from_element(self.transaction_date_locator, is_a_input_field=True)", "title": "" }, { "docid": "7aaea09cb0911bf113b23bf1afbf2e99", "score": "0.54724926", "text": "def set_current_date(self, input_date=date.today()):\n if not isinstance(input_date, date):\n raise ValueError(\"input date must be of type datetime.date\")\n td = timedelta(days=0)\n self.current_date = input_date + td", "title": "" }, { "docid": "b44f90f7bfc8313a01cd72a202580762", "score": "0.5467083", "text": "def current_date():\n dt_string = datetime.now().strftime(\"%A, %B\")\n date_ = engine().ordinal(datetime.now().strftime(\"%d\"))\n year = datetime.now().strftime(\"%Y\")\n if time_travel.has_been_called:\n dt_string = dt_string + date_\n else:\n dt_string = dt_string + date_ + ', ' + year\n speaker.say(f\"It's {dt_string}\")\n if event and event == 'Birthday':\n speaker.say(f\"It's also your {event} sir!\")\n elif event:\n speaker.say(f\"It's also {event} sir!\")\n if report.has_been_called or time_travel.has_been_called:\n speaker.say(f'The current time is, ')", "title": "" }, { "docid": "98405e14b178d0700fc258ee6ec4108f", "score": "0.54468", "text": "def __init__(self, *args, **kwargs):\n super(InlineReminderForm, self).__init__(*args, **kwargs)\n if self.instance.sent_time is not None:\n self.fields['scheduled_time'].disabled = True\n sent_time = self.instance.sent_time.strftime(DATETIME_FORMAT)\n self.fields['scheduled_time'].help_text = 'Sent %s' % sent_time", "title": "" }, { "docid": "ae40379c47333c21b9fcfed20b36dd52", "score": "0.54321253", "text": "def update_time(screen):\n _, width = screen.getmaxyx()\n time_str = pscheduler.datetime_as_iso8601(pscheduler.time_now())\n if len(time_str) > width:\n time_str = \"%s%s\" % (time_str[:width-1], \"+\")\n if width > 2:\n screen.addstr(0, 0, time_str, color_pair(\"default\"))\n screen.refresh()", "title": "" }, { "docid": "fc51f3defbba433e9a2534cb94eb0191", "score": "0.5424865", "text": "def timeDisplay(self):\n\n print(datetime.datetime.now)", "title": "" }, { "docid": "dafc41b2f6c3bcc5d0fe41fdfba44537", "score": "0.54218936", "text": "def update_dates(self):\n new_time0 = self.parse_date(self.dockwidget.time0.text())\n new_time1 = self.parse_date(self.dockwidget.time1.text())\n\n if new_time0 is None or new_time1 is None:\n self.show_message('Please insert a valid date in format YYYY-MM-DD', Message.INFO)\n elif new_time0 and new_time1 and new_time0 > new_time1 and not self.dockwidget.exactDate.isChecked():\n self.show_message('Start date must not be larger than end date', Message.INFO)\n else:\n self.time0 = new_time0\n self.time1 = new_time1\n Settings.parameters['time'] = self.get_time()\n\n self.dockwidget.time0.setText(self.time0)\n self.dockwidget.time1.setText(self.time1)", "title": "" }, { "docid": "667be556a5c2c6f9cf662d5f225ea5a4", "score": "0.5418961", "text": "def trigger_setTSNow(self, record, fldname,**kwargs):\n if not getattr(record, '_notUserChange', None):\n record[fldname] = datetime.datetime.today()", "title": "" }, { "docid": "5f00d4644e738282a2bde1e79551fff7", "score": "0.5416571", "text": "def __init__(self, item_date, item_time, item_name, item_details):\n\n from datetime import datetime\n now = datetime.now()\n\n # item_date = date of today\n self.item_date 
= now.year + now.month + now.day\n self.item_time = now.hour\n\n print(\"Schedule of \" + item_date )", "title": "" }, { "docid": "5483f432eec3f8582963ab02dbf190f9", "score": "0.54099405", "text": "def get_current_date(self): \n return strftime(\"%Y-%m-%d\", time.gmtime())", "title": "" }, { "docid": "bebb17ff1eb01d9aff1e585d6e77ff9b", "score": "0.54091305", "text": "def set_new_date(self):\n min = (time.daylight and time.altzone or time.timezone) // 60\n hr = min // 60\n min = -(min % 60 + hr * 100)\n self.new_date = (time.strftime('%a, %d %b %Y %X', self.release_date + (0, 0, 0))\n + ' %+.4d' % min)", "title": "" }, { "docid": "7c102bb74da0b88aa9ed5f3b68b7e577", "score": "0.5403179", "text": "def date(self, control_def):\n if control_def.appearance == \"\":\n control_def.appearance = '{\"format\":\"dd-mm-yyyy\",\"viewmode\":\"days\",\"minviewmode\":\"days\", \"minviewmode\":\"years\"}'\n self.controls_js += '\\n dateControlCreate(\"' + control_def.control_id + '\",\"' + self.parent_div_id + '\",\"' + control_def.control_name + '\",\"' + control_def.control_label + '\",' + control_def.appearance + ', \"\" );'", "title": "" }, { "docid": "a8c33bcd1df06b35a4a7da8fcf37a002", "score": "0.536979", "text": "def set_begin_date_for_add_new_rate(self):\n self.wait_for_ajax_spinner_load()\n begin_date = self.get_current_date()\n self.set_value_into_input_field(self.add_new_rate_begin_date_locator, begin_date)\n return begin_date", "title": "" }, { "docid": "8948df95eb737c5896344ad0c790025e", "score": "0.5356948", "text": "def selected_date():\n day = parser.parse(wrf_init_date_input.value)\n hour = wrf_init_time_select.value\n hour = timedelta(hours=int(hour[:-1]))\n return day + hour", "title": "" }, { "docid": "495ce7cd72c907ac9b3b4967d61dee53", "score": "0.5320208", "text": "def get_current_date(self):\r\n return self._current_date", "title": "" }, { "docid": "d1c3a6e8b2c1c3b16472d01f8c0ad073", "score": "0.5317576", "text": "def set_time(self,time_now):\n self.start_time = time_now\n # changing the time label widget after 10 second.\n self._label_time.config(text = self.start_time)", "title": "" }, { "docid": "a366a0569c90e3254bde33c382fb6be8", "score": "0.5313397", "text": "def update_label():\n current_time = strftime(\"%H: %M: %S\\n %d-%m-%Y \")\n clock_label.configure(text=current_time)\n clock_label.after(80, update_label)\n clock_label.pack(anchor=\"center\")", "title": "" }, { "docid": "1f3a3f4d4645abc9b48800370ab97ef9", "score": "0.52927345", "text": "def set_begin_date_for_add_new_rate_plan(self, begin_date):\n begin_date_textbox_element = self.wait().until(EC.presence_of_element_located(self.begin_date_textbox_locator), 'begin date textbox locator not found before specified time out')\n begin_date_textbox_element.clear()\n if begin_date == \"\":\n begin_date = self.get_current_date()\n begin_date_textbox_element.send_keys(begin_date)\n global current_begin_date\n current_begin_date = datetime.datetime.strptime(begin_date, \"%m/%d/%Y\")", "title": "" }, { "docid": "8aa046ffb665291a70d96c3513ef44b6", "score": "0.5290201", "text": "def set_time_field(self, num, loading = False):\n self.editor.paramSizer.Clear(True)\n if num == 1:\n self.editor.paramWidget = TimeCtrl( parent = self.editor.parent, width = 450, \\\n update_state = self.whereController.change_made, \\\n condition = self.condition, isLoading = loading)\n else:\n self.editor.paramWidget = DateBetweenValue(self.editor.parent, width = 450, update_state = self.whereController.change_made,\\\n condition = self.condition, 
typeDetails = self.typeDetails, isLoading = loading)", "title": "" }, { "docid": "4f99ee9af6c334aa9f9a74b8aab26cb3", "score": "0.52899706", "text": "def change_exact_date(self):\n\n if self.dockwidget.exactDate.isChecked():\n self.dockwidget.time1.hide()\n self.dockwidget.timeLabel.hide()\n self.move_calendar('time0')\n self.add_time()\n else:\n if self.time0 and self.time1 and self.time0 > self.time1:\n self.time1 = ''\n Settings.parameters['time'] = self.get_time()\n self.dockwidget.time1.setText(self.time1)\n\n self.dockwidget.time1.show()\n self.dockwidget.timeLabel.show()", "title": "" }, { "docid": "0d254e3bb996c42fc379c51965d42a8e", "score": "0.5271335", "text": "def setPublicationDate(datetime):", "title": "" }, { "docid": "34ab5eea380c06ca8f05b84f5eb34932", "score": "0.5268397", "text": "def update_datetime_text(\n node,\n node_tree,\n frame,\n time_text=\"\",\n decode=False,\n):\n if not time_text:\n time = str(bnc_gutils.get_time(node, frame))[:10]\n else:\n time = time_text\n # TODO allow user to define format.\n\n if \"Camera\" in bpy.data.objects.keys() and time:\n Camera = bpy.data.objects.get(\"Camera\")\n size = 0.03\n coords = (-0.35, 0.17, -1)\n children_name = [children.name for children in Camera.children]\n if \"BlenderNC_time\" not in children_name:\n bpy.ops.object.text_add(radius=size)\n text = bpy.context.object\n text.name = \"BlenderNC_time\"\n text.parent = Camera\n text.location = coords\n mat = bnc_pyfunc.ui_material()\n text.data.materials.append(mat)\n else:\n children = Camera.children\n text = [c for c in children if c.name == \"BlenderNC_time\"][-1]\n text.data.body = time\n if text.select_get():\n text.select_set(False)", "title": "" }, { "docid": "61376a29067fe91b0af8a1af90f756c1", "score": "0.5268081", "text": "def currentDate():\n return(datetime.now())", "title": "" }, { "docid": "345e26d4b83efbdef69fbbc42eac4881", "score": "0.5267875", "text": "def date():\n year = int(datetime.datetime.now().year)#obtener año\n month = int(datetime.datetime.now().month)#obtener mes\n day = int(datetime.datetime.now().day)#obtener dia\n speech.speak(\"The current date is\")\n speech.speak(day)#decir dia\n speech.speak(month)#decir mes\n speech.speak(year)#decir año", "title": "" }, { "docid": "33b6b2816551d6f892b83cf77453719a", "score": "0.52652377", "text": "def isoDate():\n time.sleep(0.1) # wait a little in order to be sure that user not pressing a hotkey anymore\n output = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n storedClipboard = ClipboardUtil.pasteFromClipboardXsel()\n sendText(output)\n ClipboardUtil.copyToClipboardXclip(storedClipboard)", "title": "" }, { "docid": "86f82c0afd14b03583b7ada5e24f471c", "score": "0.52589375", "text": "def set_date(self):\n self.date = datetime.fromisoformat(self.time[:-1])", "title": "" }, { "docid": "2aa451c71859736b9ebf61f260ddcb18", "score": "0.5250717", "text": "def __init__(self):\n super().__init__()\n self.control = libui.uiNewDateTimePicker()", "title": "" }, { "docid": "e4c671a0b4e6875683906dcfefb52db7", "score": "0.5240346", "text": "def _current_datetime_string(self):\n datetime_object = datetime.datetime.now()\n datetime_string = datetime_object.strftime(self.DATETIME_FORMAT)\n\n return datetime_string", "title": "" }, { "docid": "01ead656edf74760c9011169e782e1af", "score": "0.52229536", "text": "def set_date(self, date):\n self.datetime = date", "title": "" }, { "docid": "f35f0e5523bec52fe604aeecdedcbd78", "score": "0.52226985", "text": "def current_date():\n date = datetime.datetime.now()\n return 
f'{date:%Y-%m-%d}'", "title": "" }, { "docid": "599e8da9d20062e134ba1bb67b5bd50c", "score": "0.52187943", "text": "def add_new_computer_introduce_date(self):\n self.driver.find_element_by_id('introduced').send_keys(self.introduced)", "title": "" }, { "docid": "ca05ec6dd72b3e34499a859a45d4efe2", "score": "0.52115065", "text": "def get_now_datetime(self):\n return datetime.now().strftime(\"%a %I:%M %p\")", "title": "" }, { "docid": "bbc3b1d1ae4a68d47b987de04d1e2fde", "score": "0.52006274", "text": "def editTime(self, datetime=False):\n if self._editTime and datetime:\n import datetime\n return datetime.datetime.strptime(str(self._editTime), '%Y%m%d%H%M%S')\n\n return self._editTime", "title": "" }, { "docid": "7a15205df637e6ff8ebc944480aa3cfe", "score": "0.5197612", "text": "def get_now(self):\n pass", "title": "" }, { "docid": "87e829c6e004e25f9fedb75c91970aa6", "score": "0.5184889", "text": "def registration_datetime_text(course, date):\n\n strftime = course.runtime.service(course, \"i18n\").strftime\n date_time = strftime(date, \"SHORT_DATE\")\n return date_time", "title": "" }, { "docid": "494377c7c79d50b784ef6481f9cd0c4f", "score": "0.51836145", "text": "def _current_date_string(self):\n datetime_object = datetime.datetime.now()\n datetime_string = datetime_object.strftime(self.DATE_FORMAT)\n\n return datetime_string", "title": "" }, { "docid": "2cc2acdca56dc78448e923b1c9b68284", "score": "0.5178246", "text": "def set_begin_date_for_add_number_plan(self, begin_date):\n if begin_date == \"\":\n begin_date = datetime.date.today()\n begin_date = \"%d/%d/%d\" % (begin_date.month, begin_date.day, begin_date.year)\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)", "title": "" }, { "docid": "464381afc7e9e7071dffa0fab0c9e580", "score": "0.51722026", "text": "def datetime_now():\n return datetime.now()", "title": "" }, { "docid": "f1f814833b1089c3baf412429d98d005", "score": "0.5170177", "text": "def set_date_field(self, num, loading = False):\n self.editor.paramSizer.Clear(True)\n if num == 1:\n self.editor.paramWidget = DateCtrl( parent = self.editor.parent, width = 450, \\\n update_state = self.whereController.change_made, \\\n condition = self.condition, isLoading = loading)\n else:\n self.editor.paramWidget = DateBetweenValue(self.editor.parent, width = 450, update_state = self.whereController.change_made,\\\n condition = self.condition, typeDetails = self.typeDetails, isLoading = loading)", "title": "" }, { "docid": "f2d90c1f60397f7cdf9517c3db0fc73e", "score": "0.51671505", "text": "def change_date(self, widget, date):\n self.widget.setDate(date)", "title": "" }, { "docid": "ccb995688a4c03aff03c6725dd412427", "score": "0.5151949", "text": "def default(self, event, entry, texto_insertado):\n self.informacion_entry = entry.get()\n if (self.informacion_entry == \"Numero de caja\" or self.informacion_entry == \"Nombre de usuario\" or self.informacion_entry == \"DD/M/AAAA\" or self.informacion_entry == \"Hora\"):\n entry.delete(0, tk.END)\n entry.config(fg=\"black\")\n\n elif (self.informacion_entry == \"\"):\n\n entry.insert(0, texto_insertado)\n entry.config(fg=\"grey\")", "title": "" }, { "docid": "bc78c39dda9564dee1ed0690b6e58f1e", "score": "0.5150202", "text": "def set_yob_text_to_field(self, value):\n\n WebDriverWait(self.driver, 100).until(\n lambda driver: driver.find_element(*MainPageLocators.YOB_DATE_PICK))\n element = self.driver.find_element(*MainPageLocators.YOB_DATE_PICK)\n element.send_keys(value)", "title": "" }, { "docid": 
"a6a97b7db7ed6e3ca467a2ed3a88ded1", "score": "0.5147219", "text": "def format_datetime(self, request, obj, fieldname, *args, **kwargs):\n model = make_model(obj)\n return html.div(style='min-width:130px;')(\n getattr(model, fieldname).strftime(\n app.config.get('ADMIN_DATE_FORMAT', '%Y-%m-%d')\n )\n )", "title": "" }, { "docid": "0c8636bacf337f43f28f49838767cde4", "score": "0.5139439", "text": "def current_datetime():\n timeoffset = datetime.timedelta(hours=app.config['TIMEZONE'])\n return datetime.datetime.now() + timeoffset", "title": "" }, { "docid": "79cd2ce8e200051af0a0f07caff6575f", "score": "0.5136384", "text": "async def _cmdf_time(self, substr, msg, privilege_level):\n await self._client.send_msg(msg, datetime.datetime.utcnow().strftime(\"My current system time: %c UTC\"))\n return", "title": "" }, { "docid": "5dcfb77f2c2f47a33b80eff881617671", "score": "0.5135393", "text": "def current_date(request):\n dt = datetime.now() #Return the current local date and time.\n return HttpResponse(dt.strftime('Today is %d, %B %Y')) #Return a string representing the date and time\n # Today is 10, January 2018", "title": "" }, { "docid": "1ea5cff10e5fac23b9ac67688571ec00", "score": "0.5135189", "text": "def test_get_modelform_datetimefield(self):\n form = get_modelform(model=Song, fields=[\"recording_time\"])\n self.assertIsInstance(\n form().fields[\"recording_time\"].widget, VegaDateTimeWidget\n )\n self.assertHTMLEqual(\n \"\"\"<p><label for=\"id_recording_time\">Recording Time:</label> <input type=\"datetime-local\" name=\"recording_time\" required id=\"id_recording_time\"></p>\"\"\", # noqa pylint: disable=line-too-long\n form().as_p(),\n )", "title": "" }, { "docid": "a5e43f528e248ed5ff08a8af89a85a93", "score": "0.5128961", "text": "def on_save(self, init):\n super().on_save(init)\n\n self.meta.date = Time.now().isot", "title": "" }, { "docid": "8ab7596f8306100e47b11982ebd2ed65", "score": "0.512536", "text": "def set_select_date(self, select_date):\n self.set_value_into_input_field(self.select_date_textbox_locator, select_date)", "title": "" }, { "docid": "95176884859ea2abf24e2154703e0cb0", "score": "0.511812", "text": "def get_now_str(self):\n pass", "title": "" }, { "docid": "7ddc49d3991e4c28701032f0afc76e44", "score": "0.51154304", "text": "def _today_date(self):\n return time.strftime('%Y-%m-%d')", "title": "" }, { "docid": "732170b624aec930e9eb09b91105af2e", "score": "0.5097639", "text": "def log_text(self, text):\n text_tr =text\n\n text_tr = \"\".join([datetime.datetime.now().strftime(\"%H:%M:%S: \"), text_tr])\n text_tr = text_tr + '\\n' \n self.activity_log_text.AppendText(text_tr)\n self.is_saved = False", "title": "" }, { "docid": "3ef1e3a73522e136fb56b91a6b832da8", "score": "0.5093067", "text": "def show_entry_fields():\r\n\r\n set_alarm_timer = e1.get()\r\n my_main_function(set_alarm_timer)", "title": "" }, { "docid": "ed6a4a675572b82f58abb5edef0be283", "score": "0.5091672", "text": "def _add_date_stamp(self, fig):\n image_size = self.input_data.get_value(InputType.IMAGE_SIZE)\n\n if image_size == 900:\n font_size = 10\n elif image_size == 1200:\n font_size = 12\n else: # 2400\n font_size = 24\n\n datetime_obj = datetime.now()\n date_str = datetime_obj.strftime(\"%H:%M %d/%m/%y\")\n\n fig.text(0.02, 0.02, date_str, fontsize=font_size)", "title": "" }, { "docid": "cf08a082024276146a3ed8b032eabe0d", "score": "0.5090117", "text": "def get_time(self):\n if self.dockwidget.exactDate.isChecked():\n return '{}/{}/P1D'.format(self.time0, self.time0)\n if self.time0 == '':\n return 
self.time1\n if self.time1 == '':\n return '{}/{}/P1D'.format(self.time0, datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n return '{}/{}/P1D'.format(self.time0, self.time1)", "title": "" }, { "docid": "173bac03a418c45ab908e5c3afe50d62", "score": "0.508486", "text": "def set_to_date(self, to_date):\n self.set_value_into_input_field(self.to_date_textbox_locator, to_date)", "title": "" }, { "docid": "7fce48d53bdc35c78d1267d00bdda296", "score": "0.5078583", "text": "def set_transaction_date(self, date):\n self.set_value_into_input_field(self.transaction_date_locator, date)", "title": "" }, { "docid": "8313f65084297b5a97acb75f0da4a968", "score": "0.5075052", "text": "def test_date_time(self, inquiry=Interface()):\n result = Interface.ask(inquiry, \"What time is it?\")\n self.assertEqual(result, 'The current time is :' +\n datetime.utcnow().strftime(\"%m-%d-%Y %H:%M\"))", "title": "" }, { "docid": "55fcf36cd3667b12800accd16746c2f9", "score": "0.507464", "text": "def render_date_time_field_into(into, field_added, date_time, *, optional = True, title = 'Date'):\n if (not optional) or (date_time is not None):\n if field_added:\n into.append('\\n')\n else:\n field_added = True\n \n into.append(title)\n into.append(': ')\n if (date_time is None):\n into.append('*none*')\n else:\n into.append(format(date_time, DATETIME_FORMAT_CODE))\n \n return into, field_added", "title": "" }, { "docid": "601b500f35d2b9fcbf7668ee61a1e167", "score": "0.50739926", "text": "def save(self, *args, **kwargs):\n self.dt = self.timecalc\n super(ZeitErfassung, self).save(*args, **kwargs) # Call the \"real\" save() method.", "title": "" }, { "docid": "2ad43f26b3db5e9d75c12944ec03e156", "score": "0.5063896", "text": "def __init__(self, dateformat=None):\n self.current_date = datetime.datetime.now()\n self.date_format = dateformat if dateformat else \"%d.%m.%Y\"", "title": "" }, { "docid": "ef8dc713321a789408e8d1bdab843a89", "score": "0.5063799", "text": "def bot_time(self, mess, args):\n return str(datetime.datetime.now()) + \" EST/EDT\"", "title": "" }, { "docid": "8a75589c121911836a21d95eedfbcffe", "score": "0.50584316", "text": "def default(self, event, entry, texto_insertado):\n self.informacion_entry = entry.get()\n if(self.informacion_entry == \"Nombre\" or self.informacion_entry == \"Codigo de producto\" or self.informacion_entry == \"Precio\"\n or self.informacion_entry == \"Cantidad\" or self.informacion_entry == \"Marca\" or self.informacion_entry == \"Proveedor\"\n or self.informacion_entry == \"Fecha de entrega\"):\n entry.delete(0, tk.END)\n entry.config(fg=\"black\")\n\n elif(self.informacion_entry == \"\"):\n\n entry.insert(0,texto_insertado)\n entry.config(fg=\"grey\")", "title": "" }, { "docid": "081cbcd3344dfffc38deb5c820cf2aad", "score": "0.5057015", "text": "def format_time(self, record):\n record.dbtime = time.strftime(\n \"%Y-%m-%d %H:%M:%S\", time.localtime(record.created)\n )", "title": "" }, { "docid": "cce69a9a361a1e2a38017542a99025f2", "score": "0.50563437", "text": "def edition_date(self) -> datetime:\n return None", "title": "" }, { "docid": "94c3ea42bc688f8a3e7bd361b2634426", "score": "0.5051675", "text": "def set_date_or_time(css, date_or_time):\n world.css_fill(css, date_or_time)\n e = world.css_find(css).first\n # hit Enter to apply the changes\n e._element.send_keys(Keys.ENTER)", "title": "" }, { "docid": "a5c52b1e2ec722faeb00a1b093a1638e", "score": "0.50502485", "text": "def __str__(self):\n return f\"{self._date}\"", "title": "" }, { "docid": "eb9a36602a8b271c982198be65f206c6", "score": 
"0.5037006", "text": "def default_date():\n return datetime.now().strftime(\"%y%m%d\")", "title": "" }, { "docid": "7d9ec2a166e9a32ae41a43f737548e14", "score": "0.5025333", "text": "def onUpdateText(self, text):\n cursor = self.log_textEdit.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n cursor.insertText(text)\n self.log_textEdit.setTextCursor(cursor)\n self.log_textEdit.ensureCursorVisible()", "title": "" }, { "docid": "74818bce52bb4ec41603a8da437dc3e3", "score": "0.50235665", "text": "def get_date_as_string(self, widget):\n pass", "title": "" }, { "docid": "3215e624a65b5cd631718cc14ba53cdf", "score": "0.5021368", "text": "def display_simple(self):\n if type(self.date) == datetime.datetime:\n self.date = self.date.strftime('%m/%d/%Y %H:%M')\n self.dis_simp_print(self.date, self.username, self.task_name, \n self.time_spent, self.notes, self.task_number)", "title": "" }, { "docid": "59703b4dee8c6167e1447525730bb42f", "score": "0.5016059", "text": "def current_time_changed(self, *args):\n if not self.dragging:\n self.set(self.player.current_time.get())", "title": "" }, { "docid": "1310d9e39f98240abb6091e1ca63f4de", "score": "0.5011271", "text": "def get_date(self, widget):\n date = widget.date()\n return qdate_to_python(date)", "title": "" }, { "docid": "b6a174c2b242ff4f48aa1c341fe468fd", "score": "0.5010716", "text": "def get_current_date(self):\n td = timedelta(days=0)\n return date.today() + td", "title": "" }, { "docid": "b43d11ac91cc5dcaa4e4c47a44604363", "score": "0.49877292", "text": "def new_date(*args):\n return dt.now()", "title": "" }, { "docid": "3dc591fe474b71940db312923b86dd64", "score": "0.49858266", "text": "def pull_current_acc_text(self, d):\n \n widge_text = self.desc_text.get(1.0, tk.END)\n if len(widge_text) <= 1:\n return\n \n else:\n c = self.conn.cursor()\n c.execute(\"\"\"INSERT INTO ? VALUES(?)\"\"\", (d, widge_text,))\n self.conn.commit()\n\n return", "title": "" }, { "docid": "de9172dc9f7c015665e1a918dfe661dc", "score": "0.49800152", "text": "def update(self):\n self.birthdayFormatted = self.context.start.strftime(\"%d %b %Y\")\n self.birthdayYear = self.context.start.year\n self.dimensions = \"%d&nbsp;&times;&nbsp;%d&nbsp;cm\" % (self.context.width, self.context.height)\n# self.context.image = self.context.picture # to be able to treat illustration as image in TinyMCE\n# self.context.picture = self.context.image # temporary before I fixed templates\n# self.context.description = \"%s %s %s %s %d\" % (self.dimensions, self.context.genre, self.context.style, self.context.technique, self.context.start.year)\n body = \"\"\n if self.context.body:\n body = self.context.body.output\n self.context.description = \"%s &emsp;&nbsp; %s &emsp;&nbsp; %d %s\" % (self.context.translate(_(u'%s'% self.context.technique.capitalize())), self.dimensions, self.context.start.year, body)", "title": "" }, { "docid": "9600babe3a110b68f80511fdfcb35079", "score": "0.49761078", "text": "def edit_due_date(update, context):\n print(context.chat_data)\n\n # Get the user reply (new name)\n new_task_due_date = dateparser.parse(update.message.text)\n\n if new_task_due_date is None or len(update.message.text) < 4:\n # Tell the user it's an invalid date and prompt again.\n text = \"Invalid Date! 
Reenter a correct date\"\n context.bot.sendMessage(update.message.chat_id, text=text)\n return\n\n # Retrieve the chat data and get database object\n taskid = context.chat_data['taskid']\n task = Task.objects.get(id=taskid)\n \n # Change the deadline and save\n task.deadline = new_task_due_date\n task.save()\n\n # Reply to user that process is completed\n text = \"✅Task saved!\"\n\n # Send the message\n context.bot.sendMessage(update.message.chat_id, text=text)\n\n return ConversationHandler.END", "title": "" } ]
b86cfeadcd0e778c255b943bd13ea05c
This method translates a single line of comma separated values to a dictionary which can be loaded into BigQuery.
[ { "docid": "0b4dc123bd3df140da9c0d4e624e267c", "score": "0.0", "text": "def parse_method(self, string_input):\n # strip out the return characters and quote characters.\n values = re.split(\",\",\n re.sub('\\r\\n', '', re.sub(u'\"', '', string_input)))\n row = dict(zip(('permlink', 'numEmps', 'category', 'city', 'state',\n 'fundedDate', 'raisedAmt', 'raisedCurrency', 'round'),\n values))\n # hash over the `fundedDate` column\n if 'fundedDate' in row:\n row['hash_code'] = hashlib.sha256(row['fundedDate']).hexdigest()\n return row", "title": "" } ]
[ { "docid": "ac1b5d6fb18bcf081f1eb7a62476847b", "score": "0.6674348", "text": "def parse_line_to_dict(line, header):\n\tfields = line.strip().split(',')\n\tif len(header) != len(fields):\n\t\traise DataImportFormatError(\"Mismatch in header and data line in incoming data file.\")\n\trecord = {}\n\tfor i in range(len(header)):\n\t\tvalue = fields[i] if header[i] not in INT_TYPES else 0 if len(fields[i]) < 1 else int(float(fields[i])) \n\t\trecord[header[i]] = value\n\treturn record", "title": "" }, { "docid": "b6ad32b853f4494e14e4219300315e02", "score": "0.6514467", "text": "def _create_dict_instance(self, line):\n new_dict = {}\n for item in line:\n if \"=\" in item:\n # creating list from value and key\n # if \"=\" found\n new_arg = item.split(\"=\", 1)\n key = new_arg[0]\n value = new_arg[1]\n if value[0] == '\"' == value[-1]:\n value = value.replace('\"', \"\").replace(\"_\", \" \")\n else:\n try:\n value = int(value)\n except Exception:\n try:\n value = float(value)\n except Exception:\n continue\n new_dict[key] = value\n return new_dict", "title": "" }, { "docid": "5b1529a873aa25b3ccde288631c2963a", "score": "0.64161897", "text": "def dictFromString(src, seperator=\",\"):\n lst = src.split(\",\")\n dct = {}\n name = None\n for val in lst:\n if name is None:\n name = val\n else:\n dct[name] = val\n name = None\n return dct", "title": "" }, { "docid": "814c4e966457f6d614e33ff4f42cf5c6", "score": "0.59911543", "text": "def getData()->dict:\r\n datas = {}\r\n with open(\"../data/data.txt\", \"r\") as f:\r\n data_list = f.readlines()\r\n for i in range(len(data_list)):\r\n str = data_list[i].replace(\"\\n\", \"\")\r\n temp = str.split(',')\r\n datas[i] = temp\r\n return datas", "title": "" }, { "docid": "9e929bf5a9686ec6beca9576b8542bea", "score": "0.59718925", "text": "def parse_line(self, line):\n try:\n key, value = [x.strip() for x in line.split('=', 1)]\n except ValueError:\n return\n value = value.strip('\"')\n return self.parse_value(key, value)", "title": "" }, { "docid": "f12f28635d949ca98c8a0a8afaf2625f", "score": "0.5938792", "text": "def str_handler(l):\n l = l.rstrip(\"\\n\").strip(\" ;\")\n cols = l.split(\";\")\n dic = {}\n for col in cols:\n key, value = col.split(\"=\")\n value = parse_escape_char(value)\n if key == \"fields\":\n fields_list = get_fields(value)\n dic[key] = fields_list\n elif key == \"col_cnt\":\n value = int(value)\n dic[key] = value\n elif key == \"ratio\":\n value = float(value)\n dic[key] = value\n elif key == \"args\":\n values = value.split(\",\")\n dic[key] = values\n elif key == \"sep\":\n value = parse_escape_char(value)\n dic[key] = value\n else:\n values = value.split(\",\")\n if len(values) == 1:\n values = values[0]\n dic[key] = values\n \n return dic", "title": "" }, { "docid": "d9495ad20a23f2c41da34b45a96d2304", "score": "0.59204066", "text": "def parse_file_line(line: str) -> dict:\n\n ret = {}\n\n product_attrs = line.split(',')\n\n # Product PK (id)\n ret['pk'] = int(product_attrs[PK].strip())\n\n # Product description\n ret['description'] = product_attrs[DESC].strip()\n\n # Product last sold date\n ret['last_sold'] = datetime.strptime(product_attrs[LAST_SOLD].strip(), \"%m/%d/%Y\").date()\n\n # Product shelf life in days\n ret['shelf_life'] = int(product_attrs[SHELF_LIFE].strip()[0])\n\n # Product department\n ret['department'] = product_attrs[DEPT].strip().upper()\n\n # Product price\n ret['price'] = Decimal(product_attrs[PRICE].strip()[1:])\n\n # Product unit\n ret['unit'] = product_attrs[UNIT].strip().upper()\n\n # Product units for 
price\n ret['x_for'] = int(product_attrs[XFOR].strip())\n\n # Product cost\n ret['cost'] = Decimal(product_attrs[COST].strip()[1:])\n\n return ret", "title": "" }, { "docid": "4f93c6bcb8bde217674294c940a8ae71", "score": "0.58666193", "text": "def _line_to_dict(self, file, field_names):\n for line in file:\n decoded_line = line.decode(settings.IPGEOBASE_FILE_ENCODING)\n delimiter = settings.IPGEOBASE_FILE_FIELDS_DELIMITER\n yield self._extract_data_from_line(decoded_line, field_names, delimiter)", "title": "" }, { "docid": "68f24174137d77795449f94f5c1de03b", "score": "0.5853324", "text": "def turn_line_into_map(line, column_to_enum, dropdown_map):\n line_map = {}\n\n for i, local_e in enumerate(column_to_enum):\n # Split if enumerator is present.\n n = line[i].count(ENUMERATOR + ' ')\n items = np.empty(n+1, dtype=object)\n items = line[i].split(ENUMERATOR + ' ')\n\n res = ''\n for item in items:\n if res != '':\n res = res + ENUMERATOR + ' '\n\n if local_e in dropdown_map:\n # Remove \"1. \"\n item = re.sub(\"^\\d+\\. \", \"\", item)\n # Turn \"1 Interesting\" into \"1 - Interesting\"\n item = re.sub(\"^(\\d+) ([^-])\", r\"\\1 - \\2\", item)\n # item = re.sub(\"^(\\d+)\\. (\\d) ([^-])\", r\"\\1. \\2 - \\3\", item)\n # if local_e == Fields.Quality:\n # print(\"item\", item)\n\n if local_e in NUMERICAL_HEADINGS:\n item = api.turn_text_into_dropdown(item, enum_text_to_id[local_e])\n\n res = res + str(item)\n # str(api.turn_text_into_dropdown(item, dropdown_map[local_e]))\n else:\n res = res + item\n\n # To be consistent with Affinity.\n if local_e == Fields.OrganizationURL and res == '':\n res = 'None'\n\n line_map[local_e] = res\n\n return line_map", "title": "" }, { "docid": "2a55712dbe13bc40609e1d3657941a60", "score": "0.5787207", "text": "def import_dict(d: str = None) -> dict:\n if not d:\n cpath = os.path.dirname(os.path.realpath(__file__))\n d = os.path.join(cpath, \"sentiment_dict_en.txt\")\n with open(d, 'r') as rf:\n reader = csv.reader(rf, delimiter=\",\")\n # Header has no commas (manually checked)\n reader = [r for r in reader if len(r) == 2]\n ret = dict(zip([r[0] for r in reader], [r[1] for r in reader]))\n return ret", "title": "" }, { "docid": "95d411fd0ad1522a840b2b7b83071bbb", "score": "0.57542294", "text": "def convert_csv_to_dict(csv_file):\n # Holds all corresponding lines as a list of lines\n csv_dict = {\"SUPERBLOCK\": [],\n \"GROUP\": [],\n \"BFREE\": [],\n \"IFREE\": [],\n \"INODE\": [],\n \"DIRENT\": [],\n \"INDIRECT\": []}\n\n for line in csv_file:\n line = line.split(\",\")\n csv_dict[line[0]].append(line)\n \n return csv_dict", "title": "" }, { "docid": "af4ee47ddce43d69e3e70d0a2f4ca1c7", "score": "0.5753948", "text": "def _ParseBluefaceLogs(label, line):\n output = {}\n header = label.split(\",\")\n data = line.split(\",\")\n for k, v in zip(header, data):\n output[k.lower()] = v\n return output", "title": "" }, { "docid": "f7908f309b52921160bf8b65d288f7a4", "score": "0.57516104", "text": "def parse_csv(lines):\n data =[]\n # pdb.set_trace()\n names = [item.strip(' ,\\n') for item in lines[0].split(',') if item!=\"#\"]\n # endlines = [line[-100:] for line in lines]\n for i,line in enumerate(lines[1:]):\n # if line[-100:] in endlines[:i+1]:\n # continue\n items = line.split(',')\n tmp = {}\n for i in range(len(names)):\n try:\n value = int(items[i].strip())\n except ValueError:\n try:\n value = float(items[i].strip())\n except ValueError:\n value = items[i].strip()\n except IndexError:\n print items \n tmp[names[i]] = value\n data.append(tmp)\n return data", 
"title": "" }, { "docid": "bc13adad67f692c0ac96075062e5b6d9", "score": "0.5728059", "text": "def specific2kv(s):\r\n ret = {}\r\n for e in s.strip().split(','):\r\n if e:\r\n k,v = e.split(\":\")\r\n ret[k] = v\r\n\r\n return ret", "title": "" }, { "docid": "a27afc39bc31acb2d7504d6247769737", "score": "0.5621865", "text": "def mapper(self, _, line):\n tweet=line.replace(\"\\,\",\"\")\n tweet=tweet.split(\",\")\n date=tweet[4]\n hour=tweet[5]\n #Return date/hour tuple as key\n yield (date,hour),1", "title": "" }, { "docid": "a3e29bf1d45a903a59806284207aff56", "score": "0.5603128", "text": "def parse_line(line):\n return [element.strip() for element in line.strip().split('=')[1].strip()[1:-1].split(',')]", "title": "" }, { "docid": "9d4f843ff4443fd82faefca874f8c8d5", "score": "0.5588743", "text": "def parse_line(line):\n entries = line.split(\",\")\n entries = Publisher.clean_entries(entries)\n if len(entries) == 2:\n name = entries[0]\n email = entries[1]\n if validate_email(email) and name:\n data = dict(name=name, email=email)\n return True, data\n return False, {}", "title": "" }, { "docid": "b0be551aecc98989d370c732e64f7338", "score": "0.55834085", "text": "def parse_line(line):\n tmp_res = OrderedDict()\n # split line into array of \"key=value\" metrics\n metrics = line.split()\n for metric in metrics:\n key, value = metric.split('=')\n tmp_res[key] = value\n\n return tmp_res", "title": "" }, { "docid": "3a4f329145a857a20a4f1fb6fb5beb83", "score": "0.55626476", "text": "def load_line(self, line: str) -> JsonDict: # pylint: disable=no-self-use\n return json.loads(line)", "title": "" }, { "docid": "fe6f239feba69ecb3e79462802fcdb06", "score": "0.54828405", "text": "def deserialize(self, data):\n values = data.split(',')\n return self.deserializeHelper(values)", "title": "" }, { "docid": "cfffc17d34900dde4f1c82fc5802fd7f", "score": "0.5478965", "text": "def parse_line(parts):\n\tline_parts = dict()\n\tfor pair in parts:\n\t\tpair = pair.split(\"=\", 1)\n\t\tline_parts[pair[0]] = pair[1]\n\treturn line_parts", "title": "" }, { "docid": "a5513e63f2e4c7b54b0f2f2f74cfbf1d", "score": "0.54685783", "text": "def parseline(line):\n fields = line.split(',')\n custid = int(fields[0])\n order_amount = float(fields[2])\n return (custid, order_amount)", "title": "" }, { "docid": "ae5148d8088c2fb3afe050c3054d8d00", "score": "0.5463482", "text": "def processData(contents):\n my_dictionary = {}\n csv_file=csv.reader(contents)\n csv_file.next()\n\n for row in csv_file:\n try:\n row[2] = datetime.datetime.strptime(row[2], \"%d/%m/%Y\").date()\n except ValueError:\n number = int(row[0])\n line = int(row[0])+1\n logger = logging.getLogger(\"assignment 2\")\n logger.error(\"Error processing line#{} for ID #{}.\".format(line, number))\n my_dictionary[int(row[0])] = (row[1], row[2])\n return my_dictionary", "title": "" }, { "docid": "4e447e64755674a22f7a5e994fb6d02e", "score": "0.5447638", "text": "def parseentry(row, cols):\n entry = {}\n for c, r in zip(cols, row):\n if r:\n if c in helper.namefields:\n a = [k.split(',') for k in r.split(\";\")]\n if a:\n entry[c] = a\n pass\n else:\n entry[c] = r\n\n return entry", "title": "" }, { "docid": "6f5b4d34960271e3331e55a0fa3e91e9", "score": "0.54375726", "text": "def tokenize(self, line):\n tokens = line.split()\n return {'address': tokens[1], 'taken': int(tokens[2])}", "title": "" }, { "docid": "25095d05e102425d3227c6b031d412c5", "score": "0.542845", "text": "def parse_line(line):\n return {m.group('label'): m.group('contents')\n for m in 
ARGUMENT_TAG.finditer(line)}", "title": "" }, { "docid": "400715f865f28c2ff5b1563f43be228b", "score": "0.54214805", "text": "def read_csv_as_dict(path, delimiter=','):\n\n with open(path, 'r', newline='', encoding='utf-8') as csv_file:\n data = []\n reader = csv.DictReader(csv_file, delimiter=delimiter)\n for line in reader:\n data.append(line) # OrderedDict()\n # data.append(dict(line)) # convert OrderedDict() to dict\n\n return data", "title": "" }, { "docid": "a001914d55a555b62e7b92e2d4891945", "score": "0.5420997", "text": "def create_ordered_dictionary_from(self, raw):\n li = self.get_list_by_newline_from(raw)\n if len(li) == 0:\n return OrderedDict()\n orDic = OrderedDict()\n # To ensure the key row is only used here, pop it\n for key in li.pop(0).split(','):\n orDic[key.strip()] = []\n for row in li:\n i = 0 # Used for accessing the coloumn of a row.\n for key in orDic:\n orDic[key].append(row.split(',')[i].strip())\n i += 1\n return orDic", "title": "" }, { "docid": "b3a65356e98c611c9754d320ea6b4a6f", "score": "0.54040056", "text": "def decode_csv(string_like): # type: (str) -> np.array\n stream = StringIO(string_like)\n request_list = list(csv.DictReader(stream))\n if \"inputs\" in request_list[0].keys():\n return {\"inputs\": [entry[\"inputs\"] for entry in request_list]}\n else:\n return {\"inputs\": request_list}", "title": "" }, { "docid": "10a019731ad019378439b0f2ffa96ad2", "score": "0.5396028", "text": "def _parse_record(self, record, customization=None):\n d = {}\n\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.')\n return {}\n\n # prepare record\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n record, rubbish = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rsplit('}\\n', 1)\n\n # if a string record, put it in the replace_dict\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('\"').strip('{').strip('}').replace('\\n', ' ') for i in record.split('{', 1)[1].strip('\\n').strip(',').strip('}').split('=')]\n self.replace_dict[key] = val\n logger.debug('Return a dict')\n return d\n\n # for each line in record\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = \"\"\n inval = \"\"\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n if kv.startswith('@') and not inkey:\n # it is the start of the record - set the bibtype and citekey (id)\n logger.debug('Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n elif '=' in kv and not inkey:\n # it is a line with a key value pair on it\n logger.debug('Line contains a key-pair value and the key is not stored yet.')\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n # if it looks like the value spans lines, store details for next loop\n if (val.count('{') != val.count('}')) or (val.startswith('\"') and not val.replace('}', '').endswith('\"')):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug('Continues the previous line to complete the key pair value...')\n # if this line continues the value from a previous line, append\n inval += ', ' + kv\n # if it looks like this line finishes the value, store it and clear for next loop\n 
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('\"') and inval.endswith('\"')):\n logger.debug('This line represents the end of the current key-pair value')\n d[inkey] = self._add_val(inval)\n inkey = \"\"\n inval = \"\"\n else:\n logger.debug('This line does NOT represent the end of the current key-pair value')\n\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n\n # put author names into persons list\n if 'author_data' in d:\n self.persons = [i for i in d['author_data'].split('\\n')]\n del d['author_data']\n\n d['type'] = bibtype\n d['id'] = id\n if not self.has_metadata and 'type' in d:\n if d['type'] == 'personal bibliography' or d['type'] == 'comment':\n self.has_metadata = True\n\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n # apply any customizations to the record object then return it\n logger.debug('Apply customizations and return dict')\n return customization(d)", "title": "" }, { "docid": "4816d7ca57b6fae0d1e7b21f7eeb8503", "score": "0.5379898", "text": "def csv_to_dict(filename,ret={},key=\"Specific Implementation\"):\n csvtd = csv_translation_dict\n with open(filename,\"r\") as f:\n header = None\n for row in csv.reader(f):\n if header is None:\n header = row\n else:\n cur = {}\n for k,v in zip(header,row):\n if k in csvtd and csvtd[k][0] not in cur:\n cur[csvtd[k][0]] = parse_num(v)*csvtd[k][1]\n elif k == key: ret[v] = cur\n elif k == \"Submission\": cur[k] = v\n return ret", "title": "" }, { "docid": "59773abdb4fadcc8568503769c63b4f5", "score": "0.53607273", "text": "def _read_csv(csv_ffn, header_line):\n df = pandas.read_csv(csv_ffn, header=header_line)\n as_dict = df.to_dict(orient='list')\n return as_dict", "title": "" }, { "docid": "2a98cb30f0bcd9188daeb0e519b1376a", "score": "0.53544474", "text": "def _extract_fields(self, line):\n acc = {}\n field_tokens = line.split(\"\\t\")\n for field_token in field_tokens:\n kv_tokens = field_token.split('::')\n if len(kv_tokens) == 2:\n (key, value) = kv_tokens\n acc[key] = value\n\n return acc", "title": "" }, { "docid": "2fefa2a84cd375622ca95eefbfd99db9", "score": "0.5346579", "text": "def parse_method(self, string_input):\n # Strip out carriage return, newline and quote characters.\n values = re.split(\",\", re.sub('\\r\\n', '', re.sub('\"', '',\n string_input)))\n row = dict(\n zip(('name', 'surname', 'age'),\n values))\n return row", "title": "" }, { "docid": "06afedfa95a9cafd87bc373a15bfe2fb", "score": "0.5330888", "text": "def parse_csv_line(line):\n\n if line == '':\n # return np.empty(0)\n return []\n\n values = List()\n value = ''\n quotes = False\n\n for character in line:\n if character == '\\n':\n break\n elif character == '\"':\n quotes = not quotes\n elif not quotes and character == ',':\n values.append(value.strip())\n value = ''\n else:\n value += character\n\n values.append(value)\n return values.to_array()", "title": "" }, { "docid": "c483df4761e85d8d4635469a946990c6", "score": "0.5324691", "text": "def getParams(self, line):\n \n line = line.strip()\n line = line.strip('{}')\n \n str1, str2 = line.split(',')\n \n str1, str_D = str1.split(':')\n str_D = str_D.strip(' ')\n str_D = str_D.strip('\\\"')\n D = int(str_D)\n\n str2, str_T = str2.split(':')\n str_T = str_T.strip(' ')\n str_T = str_T.strip('\\\"')\n T = int(str_T)\n\n return D,T", "title": "" }, { "docid": "4a46ed15f24eb524a57368da6204ea0a", "score": "0.5286351", "text": "def _input_line(line):\n l = 
line.split()\n r = {\n \"year\": int(l[0]),\n \"month\": int(l[1]),\n \"day\": int(l[2]),\n \"hour\": int(l[3]),\n \"minute\": int(l[4]),\n \"second\": float(l[5]),\n \"mag\": float(l[9]),\n \"lat\": float(l[6]),\n \"lon\": float(l[7]),\n \"dep\": float(l[8]),\n #\"evid\": int(l[10]),\n }\n r['timestamp'] = _ts(datetime.datetime(r['year'], r['month'], r['day'],\n r['hour'], r['minute'], 0)) + r['second']\n return r", "title": "" }, { "docid": "d73a2999781178f95c28a620f7b3e86b", "score": "0.5284623", "text": "def csv_to_dict(self, source_path, type_='stream_gauges'):\n print 'parsing to dict'\n delimiter = ','\n working_dict = {}\n result = {}\n with open(source_path, 'r') as data_file:\n data = csv.reader(data_file, delimiter=delimiter)\n headers = next(data)[1:]\n for row in data:\n temp_dict = {}\n id = row[0]\n values = []\n if type_ == 'stream_gauges':\n name = '{} {}'.format(row[0], row[3])\n for x in row[1:]:\n # print row\n try:\n values.append(str(x))\n # print str(x)\n except ValueError:\n try:\n values.append(int(x))\n # print int(x)\n except ValueError:\n try:\n values.append(float(x))\n # print float(x)\n except ValueError:\n print(\"Skipping value '{}' that cannot be converted \" +\n \"to a number - see following row: {}\"\n .format(x, delimiter.join(row)))\n values.append(0)\n elif type_ == 'floods':\n name = 'Rank {}'.format(row[0])\n print 'name: {}'.format(name)\n for i, x in enumerate(row[1:]):\n if i == 0:\n values.append(datetime.strptime(x, '%m/%d/%Y'))\n else:\n values.append(int(x))\n\n for i in range(len(values)):\n if values[i]:\n temp_dict[headers[i]] = values[i]\n\n result[name] = temp_dict\n\n if type_ == 'stream_gauges':\n temp_dict['Dataframe'] = None\n temp_dict['Rating'] = None\n temp_dict['ID'] = id\n elif type_ == 'floods':\n pass\n working_dict.update({name: temp_dict})\n return working_dict", "title": "" }, { "docid": "94a293fa04f134e4213de59aab272122", "score": "0.5274642", "text": "def __parse_line(self, line: str):\n exploded_line: List[str] = line.split(self.__column_delimiter)\n\n country_raw = exploded_line[self.__column_country]\n region_raw = exploded_line[self.__column_region]\n city_raw = exploded_line[self.__column_city]\n population_raw = int(exploded_line[self.__column_population]) if exploded_line[self.__column_population] else 0\n long_raw = float(exploded_line[self.__column_longitude]) if exploded_line[self.__column_longitude] else None\n lat_raw = float(exploded_line[self.__column_latitude]) if exploded_line[self.__column_latitude] else None\n\n if population_raw < self.minimum_population:\n return self\n\n if long_raw is None or lat_raw is None:\n return self\n\n country: Country\n if country_raw in self.__database.countries:\n country = self.__database.countries[country_raw]\n else:\n country = Country(country_raw)\n self.__database.countries[country_raw] = country\n\n region: Region\n if f\"{country_raw}-{region_raw}\" in self.__database.countries:\n region = self.__database[f\"{country_raw}-{region_raw}\"]\n else:\n region = Region(region_raw, country)\n country.add_region(region)\n self.__database.regions[f\"{country_raw}-{region_raw}\"] = region\n\n city: City\n if f\"{country_raw}-{region_raw}-{city_raw}\" in self.__database.cities:\n city = self.__database.cities[f\"{country_raw}-{region_raw}-{city_raw}\"]\n else:\n city = City(city_raw, region, country)\n city.set_population(population_raw)\n city.set_coordinates(longitude=long_raw, latitude=lat_raw)\n country.add_city(city)\n region.add_city(city)\n 
self.__database.cities[f\"{country_raw}-{region_raw}-{city_raw}\"] = city\n\n return self", "title": "" }, { "docid": "db3b644f938ea728d805d29ee87d2167", "score": "0.5270934", "text": "def parse_csv_table(readable, separator = \",\"):\n data = defaultdict(dict)\n \n # First line is the columns\n columns = filter(None, readable.readline().strip().split(separator))\n\n for line in readable:\n row, rest = line.strip().split(separator, 1)\n for value, col in zip(rest.split(separator), columns):\n data[row][col] = float(value)\n\n return data", "title": "" }, { "docid": "ae82105ad16744eac636ce7e436c89a6", "score": "0.52584505", "text": "def _convert_dict(self, value):\n dict_value = value.strip(\"{\")\n result = {}\n while True:\n (key, found, rest) = dict_value.partition(\"=\")\n if not found:\n break\n\n key = key.strip('\"')\n\n item = None\n new_dict_value = \"\"\n\n if rest[0] == '\"':\n start = 1\n while True:\n pos = rest.find('\"', start)\n if pos == len(rest):\n # Error, we did not find the end of the string\n raise Exception(\"Wrong format %s\" % value)\n\n if rest[pos - 1] != \"\\\\\":\n # found a non escaped \"\n item = rest[1:pos]\n new_dict_value = rest[pos + 1 :].strip(\", \")\n break\n\n start = pos + 1\n else:\n (item, comma, new_dict_value) = rest.partition(\", \")\n if not comma:\n (item, curly, new_dict_value) = rest.partition(\"}\")\n if not curly:\n raise Exception(\"Wrong format %s\" % value)\n\n result[key] = self._convert_single_value(item)\n dict_value = new_dict_value\n\n return result", "title": "" }, { "docid": "0064f7ad2b08f33759feecb8b2a1e802", "score": "0.5256032", "text": "def loc_split(self,raw_loc):\n return {raw_loc.split(\",\")[0].strip('\"'):raw_loc.split(\",\")[1].strip('\"')}", "title": "" }, { "docid": "a9b6af951d6666f5cc5abf676d67d6e7", "score": "0.5246892", "text": "def turn_csv_into_map(local_csv_fields, dropdown_map):\n fields = []\n for line in local_csv_fields:\n fields.append(turn_line_into_map(line, \n CSV_COLUMN_TO_ENUM, dropdown_map))\n\n return fields", "title": "" }, { "docid": "f79df0dbc12be829f7bd989d2c473089", "score": "0.52441436", "text": "def parse_bel_resource(lines: Iterable[str]) -> Dict:\n lines = list(lines)\n\n value_line = 1 + max(\n index\n for index, line in enumerate(lines)\n if '[Values]' == line.strip()\n )\n\n metadata_config = ConfigParser(strict=False)\n metadata_config.optionxform = lambda option: option\n metadata_config.read_file(lines[:value_line])\n\n delimiter = metadata_config['Processing']['DelimiterString']\n\n value_dict = dict(\n _get_bel_resource_kvp(line, delimiter)\n for line in lines[value_line:]\n )\n\n res = {\n key: dict(values)\n for key, values in metadata_config.items()\n }\n res['Values'] = value_dict\n\n return res", "title": "" }, { "docid": "45db0418eb0244995c742504c23f623c", "score": "0.5238875", "text": "def _parse_values(data: str):\n res = {}\n for l in filter(lambda s: s != '', data.splitlines()):\n try:\n k, v = l.strip().split('=')\n except ValueError:\n raise serial.SerialException(\"Couldn't parse data {!r}\".format(l))\n else:\n res[k] = v\n return res", "title": "" }, { "docid": "ffce026b36ac238ff2530e525b5602d2", "score": "0.523299", "text": "def parse_to_dict(self,fsf_lines):\n fsf_dict=dict()\n fsf_line_key=list()\n for line in fsf_lines:\n line=line.strip()\n if len(line) > 0 and line[0] != \"#\":\n set_line=re.search(\"set [^ ]* [^s]*\",line.strip())\n if set_line:\n split_line=line.split(\" \")\n if len(split_line) > 3:\n #combine and strip mri values that have spaces in them\n 
temp=list()\n for i in range (2,len(split_line)):\n temp.append(split_line[i])\n cleaned=' '.join(temp)\n else:\n cleaned=split_line[2]#.strip('\\\"')\n fsf_dict[split_line[1]]=cleaned\n fsf_line_key.append(split_line[1])\n return fsf_dict,fsf_line_key", "title": "" }, { "docid": "7ba3574ed02c09c112515c11c493df96", "score": "0.52149475", "text": "def create_dictionary(log_data):\n dict_cvar = {}\n split_log_data = log_data.split('\\n')\n for line in split_log_data:\n if 'cvar' in line:\n parse_string = line[19:].rstrip(')').split(',')\n dict_cvar[parse_string[0]] = parse_string[1]\n return dict_cvar", "title": "" }, { "docid": "895edae8d241d967919182115796c937", "score": "0.5214545", "text": "def morph2dict(line):\n d = {}\n _ = line.strip().split('\\t')\n try:\n _meta = _[1].split(',')\n d[\"surface\"] = _[0]\n d[\"pos\"] = _meta[0]\n d[\"pos1\"] = _meta[1]\n d[\"base\"] = _meta[6]\n return d\n except IndexError:\n return None", "title": "" }, { "docid": "871c65da3772d56c8cba8f1fe67c7c65", "score": "0.52137345", "text": "def map1(row):\n rr = row.split(\",\")\n return [(rr[1],(rr[0],float(rr[2])))]", "title": "" }, { "docid": "dcef199456de8a31c7822bb61ff1120c", "score": "0.52110595", "text": "def _atq_line(self, line):\n split = str(line).split()\n return dict(id = int(split[0]), \n when = datetime.datetime.strptime(\n (' ').join(split[1:6]),\n AT_OUTPUT_DATETIME_FORMAT),\n queue = split[6],\n who = split[7])", "title": "" }, { "docid": "57e9fc69de0df307b2bd7a2cfed62880", "score": "0.51856875", "text": "def dict_creator(file):\n large_lst = []\n final_dict = {}\n f = open(file, 'r')\n for i in f.readlines():\n stats = i.split(',')\n large_lst.append(stats)\n for i in large_lst:\n i[-1] = i[-1][:-1]\n final_dict[i[0]] = i[1:]\n return final_dict", "title": "" }, { "docid": "9d7bf9311ad1c644390075de41c125f3", "score": "0.5176835", "text": "def _parse_attributes(line: str) -> dict:\n attr_def = map(\n lambda s: re.sub(r\"[\\\"'](.*)[\\\"']\", r\"\\1\", s),\n line.split()\n )\n\n return {\n attr: val\n for attr, val in map(lambda s: s.split('='), attr_def)\n }", "title": "" }, { "docid": "88ce50a240a4242aafe227d0d97da78a", "score": "0.5175366", "text": "def parse(data):\n\t\tresult = {}\n\t\tdata_array = data.strip().split('; ')\n\t\tfor datum in data_array:\n\t\t\tkey, value = datum.split(': ')\n\t\t\tkey = key.lower()\n\t\t\tif key == 'washer_states':\n\t\t\t\tresult[key] = [int(x) for x in value.split(', ')]\n\t\t\telse:\n\t\t\t\tresult[key] = value\n\t\treturn result", "title": "" }, { "docid": "ff79eade237d29024cf1d5af73f55b78", "score": "0.5172581", "text": "def proccessed(self, column):\n r = {}\n for c in column.split(\",\"):\n # print(\"Next column: %s\" % c)\n moss, idss = self.decode(c.strip())\n ids = set(moss.values_list(\"id\", flat=True))\n for i_set in idss:\n ids = ids.intersection(i_set)\n r[c.strip()] = ids.copy()\n return r", "title": "" }, { "docid": "eca7e4b38dc94546e3c84b2c7715f6e5", "score": "0.5163745", "text": "def process_line(line: str):\n line = line.rstrip(\"\\n\") # del \\n\n line = line.split(\"\\t\")[1] # split input line and get key value\n # get size, mean var\n chunk_size, mean, var = tuple(map(float, line.split(\", \")))\n return chunk_size, mean, var", "title": "" }, { "docid": "6e1cf0a3d55567746de961f36844cc6d", "score": "0.515424", "text": "def conver_csv_into_dict(filename):\n with open(filename) as File:\n\n if \"images\" in filename:\n csv_dict_reader = list(DictReader(File,fieldnames = [\"ID\", \"IMAGE_NAME\", \"Latitude\", \"Longitude\", 
\"Date\"]))\n elif \"sites\" in filename:\n csv_dict_reader = list(DictReader(File,fieldnames = [\"ID\", \"SITE_NAME\", \"Latitude\", \"Longitude\", \"Date\"]))\n return csv_dict_reader", "title": "" }, { "docid": "b4022cc8e91188a0ed179ca216a5b3ee", "score": "0.51479685", "text": "def parse(lines):\n data = {}\n for line in lines:\n line = line.strip()\n name, job = line.split(\": \")\n try:\n name1, operation, name2 = job.split(\" \")\n job = (operation, name1, name2)\n except ValueError:\n operation = \"yell\"\n val = int(job)\n job = (operation, val)\n data[name] = job\n return data", "title": "" }, { "docid": "bc705cf3d505b32d641ea30e91b2c17e", "score": "0.5146445", "text": "def normalize_line(line):\n inside_str = False\n str = ''\n for i in line:\n if i == '\"':\n inside_str = not inside_str\n if not inside_str and i == ',':\n str += '#'\n else:\n str += i \n str = str.replace('\\'', '`')\n return str", "title": "" }, { "docid": "b435bf71ffd7c6a9ddbf772a851eb641", "score": "0.5137678", "text": "def load_csv_as_dict(Path, start_row=1, skip_rows=None, conv_numstrings=True, \\\n empty_entries=True):\n import csv\n csv_read = csv.DictReader(open(Path))\n i = 1 \n # Shift skip_rows back 1 to correspond with correct csv_read entry indices\n if skip_rows is not None:\n skip_rows = [skip_rows[k] - 1 for k in range(len(skip_rows))]\n \n\n Dict = {} # Empty dictionary to return\n for row in csv_read:\n if start_row == 1: # Build dictionary without skipping rows\n if skip_rows is not None:\n if i in skip_rows: # Skip rows specified in input list\n i += 1 \n continue\n \n for key, value in row.items():\n Dict.setdefault(key, []).append(value)\n \n i += 1 \n continue\n \n if i < start_row - 1: # Skip row\n i += 1 \n continue\n \n if i == start_row - 1: # Redefine row keys\n keys = []\n for column, value in row.iteritems():\n keys.append(value)\n i += 1 \n continue \n \n if i > start_row - 1: # Build new dictionary from column entries\n if skip_rows is not None:\n if i in skip_rows: # Skip rows specified in input list\n i += 1 \n continue\n \n for key, (column, value) in zip(keys, row.iteritems()):\n Dict.setdefault(key, []).append(value)\n \n i += 1 \n \n # Convert number strings to floats\n if conv_numstrings: dict_conv_numstrings(Dict)\n \n # Remove empty entires\n if empty_entries:\n for key, value in Dict.items(): # Filter out empty '' values\n if not value.__class__ == float:\n Dict[key] = list(filter(lambda a: a != '', value))\n \n return Dict", "title": "" }, { "docid": "d30d503ff18794cd2f00144ca1c79e59", "score": "0.5134373", "text": "def parse_line(self, line):\n def parse_arg(a):\n assert a is not None\n if a == 'nil' or a == 'null':\n return None\n elif a == '{}':\n return {}\n elif a == '[]':\n return []\n elif a.isdigit():\n return int(a)\n elif a.startswith('$'):\n return resolve_var(a[1:], self.vars)\n elif a and a[0] == \"[\" and a[-1] == \"]\":\n return [parse_arg(x) for x in a[1:-1].split(',')]\n elif a and a[0] == \"{\" and a[-1] == \"}\":\n return list_or_dict([parse_arg(x) for x in a[1:-1].split(',')])\n elif '=' in a:\n k, v = a.split('=', 1)\n return {k.strip(): v.strip()}\n elif '://' in a: # special case for protocols\n return a\n elif ':' in a:\n k, v = a.split(':', 1)\n if ',' in v:\n if v[0] == '{' and v[-1] == '}': # unnecesary, but may help readability\n v = v[1:-1]\n vv = list_or_dict([parse_arg(x.strip())\n for x in v.split(',')])\n return {k.strip(): vv}\n\n return {k.strip(): parse_arg(v.strip())}\n # literal\n return a\n\n def list_or_dict(ld):\n if not ld:\n 
return []\n if all([type(x) == dict for x in ld]):\n r = {}\n for d in ld:\n r.update(d)\n return r\n return ld\n\n def resolve_var(expr, vars):\n if not type(vars) in (dict, list):\n return '%s.%s' % (str(vars), expr)\n if '.' in expr:\n (pre, post) = expr.split('.', 1)\n if pre.isdigit():\n pre = int(pre)\n return resolve_var(post, vars[pre])\n if expr.isdigit():\n expr = int(expr)\n return vars[expr]\n\n def parse_params(line, sep):\n if not sep in line:\n return []\n line = line[line.index(sep):].strip()\n if line == '{}':\n return {}\n if line == '[]':\n return []\n if line.startswith('{'):\n return json.loads(line)\n\n params = shlex.split(line)\n return list_or_dict([parse_arg(x) for x in params])\n\n\n if line.startswith('#'):\n return None\n cmd = shlex.split(line)\n if not cmd:\n return None\n if len(cmd) >= 3 and cmd[1] == '=':\n params = parse_params(line, '=')\n return {\n 'method': parse_arg(cmd[2]),\n 'params': params,\n 'assign_to': cmd[0]\n }\n\n return {\n 'method': parse_arg(cmd[0]),\n 'params': parse_params(line, ' ')\n }", "title": "" }, { "docid": "d24d92de04ba25793c06d0cf2a52e871", "score": "0.5130751", "text": "def get_from_csv2(self, line):\n print(len(line))\n (self.name, self.webpage, self.institution,\n self.country, self.address, self.lat,\n self.long, self.exp_theor, self.fields,\n self.people, self.atom, self.comment) = line\n self.lat = float(self.lat)\n self.long = float(self.long)", "title": "" }, { "docid": "82fed3c9634c05eb18ea258ade17de2e", "score": "0.5123542", "text": "def csv2dict(csvname, delimiter=','):\n lines = open(csvname, 'r').readlines()\n header = lines[0].rstrip().split(delimiter)\n lines = lines[1:]\n nlines = len(lines)\n\n dict_list = [{} for _ in range(nlines)]\n for i, line in enumerate(lines):\n line2 = convert_symbol(line.rstrip(), delimiter, '|')\n items = line2.split(delimiter)\n items2 = [s.replace('|', delimiter) for s in items]\n dict_list[i] = {k:items2[j] for j,k in enumerate(header)}\n\n return dict_list", "title": "" }, { "docid": "7af55d2906d7249f7da0771801c0f22b", "score": "0.51226807", "text": "def parse_param_file(param_file):\n with gfile.GFile(param_file, 'r') as fh:\n lines = fh.readlines()\n d = {}\n for l in lines:\n l = l.rstrip('\\n')\n splits = l.split(':')\n key = splits[0]\n val_ = splits[1].strip()\n if not val_:\n val = ''\n else:\n try:\n val = ast.literal_eval(val_)\n except (ValueError, SyntaxError):\n val = str(val_)\n d[key] = val\n return d", "title": "" }, { "docid": "42a175bad53d70487f25534bb8691c0b", "score": "0.5103795", "text": "def _get_values(line):\n return line[line.find(\"` VALUES \") + 9 :]", "title": "" }, { "docid": "ad1ebac9e2cefed0374c631858d4b1c4", "score": "0.5103102", "text": "def process_record(raw_record):\n\n record = {}\n \n raw_record = raw_record.replace(os.linesep, ' ')\n field_record = raw_record.split(' ')\n for fld in field_record:\n fld = fld.split(':')\n try:\n record[fld[0]] = fld[1]\n except IndexError:\n continue\n return record", "title": "" }, { "docid": "0818f8ec5f65d0b8f22cf151b6f763f4", "score": "0.5093003", "text": "def populate_csv_dict(self):\n with open(self.source_csv_path) as f:\n reader = csv.reader(f)\n first = True\n self.csv_dict = {}\n header_index_map = {}\n for row in reader:\n for i in range(len(row)): # be explicit about reading the indices of the row\n if first:\n self.csv_dict[row[i]] = {}\n header_index_map[i] = row[i]\n else:\n key = header_index_map[i]\n try:\n self.csv_dict[key][int(row[0])] = self._tokenise(row[i])\n except ValueError as ve:\n 
print 'One of the id-s doesn\\'t seem to be an integer:', row[0]\n print\n raise\n if first: \n first = False", "title": "" }, { "docid": "a5400da42673cdfd4d7b921d4199522e", "score": "0.5092853", "text": "def state_numeric_codes(inFile):\n with open(inFile, \"r\") as csvfile: \n state_code_dict = {}\n descr = csvfile.readline()\n\n while descr != \"\":\n split_descr = descr.split(\",\")\n\n if len(split_descr) >= 3:\n state_name = split_descr[3]\n state_code = split_descr[1]\n\n new_name = remove_quotes(state_name)\n code = remove_quotes(state_code)\n new_code = remove_lead0s(code)\n\n state = state_name.split()\n if len(state) == 1:\n state_code_dict[new_name] = int(new_code)\n descr = csvfile.readline()\n \n return state_code_dict", "title": "" }, { "docid": "6c0b19d5bacb6e1dc6bddffe700e6020", "score": "0.5089802", "text": "def parse_line(line_str):\n field_list = line_str.strip().split(', ')\n name_str = field_list[1] + ' ' + field_list[0]\n score_list = []\n for element in field_list[2:]:\n score_list.append(int(element))\n return name_str, score_list # a tuple is returned", "title": "" }, { "docid": "7d087e208d20bc99ae9254cb8335b309", "score": "0.50845295", "text": "def _parse_str_to_dict(self, args):\n arg_dict = dict()\n args = args.split()\n nargs = len(args)\n arg_dict['analysis_id'] = args[0]\n for i in range(1, nargs, 2):\n arg_key = args[i]\n arg_arg = args[i + 1]\n if arg_key in self._valid_settings:\n arg_dict[arg_key] = arg_arg\n else:\n warnings.warn(\n \"ignoring invalid argument key \" + arg_key + \" for analysis \" + self.analysis_key)\n return arg_dict", "title": "" }, { "docid": "8150670710202a6a5078d03388ac63d0", "score": "0.5080663", "text": "def _extract_and_parse_csv(input_csv, key_header_label, values_header_label):\n freeform_labels = {}\n\n with open(input_csv, 'rb') as csvfile:\n reader = csv.reader(csvfile)\n\n # Parse the header row.\n header = reader.next()\n key_idx = (None if key_header_label is None\n else header.index(key_header_label))\n val_idx = header.index(values_header_label)\n\n # Parse the data rows.\n row_idx = 2\n for row in reader:\n text = row[val_idx].decode('utf-8').lower()\n if len(text) == 0:\n continue\n\n # Only one type of delimiter is expected in the free-form text field.\n if text.find(';') != -1 and text.find('\\n') != -1:\n raise ValueError('Free-form text field cannot contain both '';'' and '\n 'newline delimiters.')\n\n # Parse if necessary.\n if text.find(';') != -1:\n text = text.split(';')\n elif text.find('\\n') != -1:\n text = text.splitlines()\n else:\n text = [text]\n\n freeform_labels[row_idx if key_idx is None else row[key_idx]] = text\n row_idx += 1\n\n return freeform_labels", "title": "" }, { "docid": "1d425be8f579ee0712c97909f31aba3a", "score": "0.50765157", "text": "def parse_csv(self):\n self.data = {}\n csv_lines = self.csv.split(\"\\n\")\n\n for csv_line in csv_lines:\n if csv_line.startswith('#'):\n continue\n if csv_line.startswith('pxname'):\n continue\n\n csv_fields = csv_line.split(',')\n\n if len(csv_fields) < len(self.status_fields):\n continue\n\n service_field = self.status_fields['pxname']\n server_field = self.status_fields['svname']\n\n for status_field_name in self.status_fields:\n status_field = self.status_fields[status_field_name]\n status_field_number = status_field['number']\n\n service_field_value = csv_fields[service_field['number']]\n server_field_value = csv_fields[server_field['number']]\n\n if service_field_value not in self.data:\n self.data[service_field_value] = {}\n\n if 
server_field_value not in self.data[service_field_value]:\n self.data[service_field_value][server_field_value] = {}\n\n csv_field_value = csv_fields[status_field_number]\n\n server = self.data[service_field_value][server_field_value]\n server[status_field_name] = csv_field_value", "title": "" }, { "docid": "4814a84016878fda5533ffbc40ee2426", "score": "0.5052376", "text": "def coordinates(line):\n contents = line.split(\",\")\n lng, lat = map(float, contents[3:5])\n return lng, lat", "title": "" }, { "docid": "b8e0b131f9c1b30cadb5ed703c0dc185", "score": "0.5050806", "text": "def import_from_file(input_file):\n list_of_dicts = []\n floats = ('Longitude', 'Latitude')\n tmp_dict = {}\n try:\n csv_file = open(input_file, 'rU')\n except IOError:\n print \"Error:\", sys.exc_info()[0]\n sys.exit()\n else:\n reader = csv.reader(csv_file)\n keys = reader.next()\n for values in reader:\n tuples = zip(keys, values)\n for key, value in tuples:\n if key in floats:\n try:\n tmp_dict[key] = float(value)\n except ValueError:\n print \"Error:\", sys.exc_info()[0]\n tmp_dict[key] = value\n else:\n tmp_dict[key] = value\n list_of_dicts.append(tmp_dict)\n csv_file.close()\n return list_of_dicts", "title": "" }, { "docid": "a6b145aa1dea2f5edc2d5d446d777c4b", "score": "0.50342846", "text": "def _get_args(line: str) -> Dict[str, str]:\n line = f' {line}'\n args_dict: Dict[str, Any] = {}\n args = line.split(\" --\")[1:]\n\n for arg in args:\n arg_tuple = arg.split(\" \")\n args_dict[arg_tuple[0]] = \" \".join(arg_tuple[1:])\n\n return args_dict", "title": "" }, { "docid": "78fe8d4d74b4afbab6de7af652d31902", "score": "0.5033904", "text": "def deserialize(self, data):\n lst = data.split(',')\n return self.deserializeHelper(lst)", "title": "" }, { "docid": "5fbdad267118fd1e2458c0ebe88cc23b", "score": "0.5032945", "text": "def deserialize(self, data):\n data = data[1:-1]\n if len(data)==0: return None\n input = re.split(',',data)\n return self.build(input,0,0)", "title": "" }, { "docid": "049be26d77c1c767b5c6bbb78a415e72", "score": "0.5029139", "text": "def load_data(path: str) -> Dict[int, str]:\n data = {}\n with open(path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n row_data = literal_eval(line)\n data[row_data[QUESTION_CONTENT_KEY]] = row_data[QUESTION_ID_KEY]\n\n return data", "title": "" }, { "docid": "8f8be612c88c867c8cc5f1adef803cc1", "score": "0.5020023", "text": "def get_tag_dict():\n tag_source_file = open(\"../tags_config.txt\", 'r')\n tag_dict = {}\n for line in tag_source_file.readlines():\n if line[0] == \"#\" or len(line) < 5:\n continue\n target_line = line.replace(\"\\n\", \"\")\n target_line = target_line.replace(\" \", \"\")\n try:\n tags, category = target_line.split(\"->\")\n except:\n print(\"The config file may be formatted incorrectly, the following line could not be parsed: \\n{}\\n \"\n \"please make sure each line is in the format a, b, c ... -> d\".format(line))\n exit(0)\n for tag in tags.split(\",\"):\n tag_dict[tag] = category\n tag_source_file.close()\n return tag_dict", "title": "" }, { "docid": "72d0e2d63d6dddd6669804fcd818b34f", "score": "0.50171804", "text": "def parse_csv(rows_string_tensor):\n\n # Takes a rank-1 tensor and converts it into rank-2 tensor\n # Example if the data is ['csv,line,1', 'csv,line,2', ..] 
to\n # [['csv,line,1'], ['csv,line,2']] which after parsing will result in a\n # tuple of tensors: [['csv'], ['csv']], [['line'], ['line']], [[1], [2]]\n row_columns = tf.expand_dims(rows_string_tensor, -1)\n columns = tf.decode_csv(row_columns, record_defaults=CSV_COLUMN_DEFAULTS)\n features = dict(zip(CSV_COLUMNS, columns))\n\n # Remove unused columns\n for col in UNUSED_COLUMNS:\n features.pop(col)\n return features", "title": "" }, { "docid": "228d117c5211df533001478cb385dd9a", "score": "0.5015171", "text": "def __extract_key_value(self, line):\n values = line.split('=')\n return values[0].strip(),values[1].strip()", "title": "" }, { "docid": "70f896f99cd939bd5bbf2377e7223b16", "score": "0.5013919", "text": "def get_csv_as_dict_list():\n dict_list = []\n with open('../../data/EHRdataSample.csv') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=',')\n for line in csv_reader:\n dict_list.append(line)\n return dict_list", "title": "" }, { "docid": "663242c3c86bf8c1ff04c61f398410d6", "score": "0.5012478", "text": "def tsv_to_dict(self):\n tsv_rows = []\n for line in self.tsv_string.split(\"\\n\"):\n #header lines\n if line.startswith(\"##\"):\n self.header.append(line)\n #column header line.\n elif line.startswith(\"#\"):\n self.column_header = line[1:].split(\"\\t\")\n elif line:\n tsv_rows.append(line)\n for x in range(len(tsv_rows)):\n #seperates each row element seperated by tabs into a dictionary\n inner_dict = dict(zip(self.column_header, tsv_rows[x].split(\"\\t\")))\n #if not unique key is provided the outer dictionary is simply saved by a number.\n if self.key_name:\n self.tsv_dict[inner_dict[self.key_name]] = inner_dict\n else:\n self.tsv_dict[str(x)] = inner_dict", "title": "" }, { "docid": "4f915c8697e81b68656ee9646a62f6a7", "score": "0.50063443", "text": "def parse_line(self, line: str):\n pass", "title": "" }, { "docid": "097ad3977eeacac389fabcaf03e856eb", "score": "0.5004433", "text": "def parse_content(self, content):\n _lines = get_active_lines(content, comment_char=\"#\")\n self.data = {}\n current_key = \"\"\n for line in _lines:\n if \"=\" in line:\n k, v = line.split('=')\n current_key = k.strip()\n self.data[current_key] = v.strip(\"\\\"' \")\n else:\n self.data[current_key] = self.data[current_key] + line\n # if the value like \"[ , ,]\", convert to list\n if self.data[current_key].endswith(\"]\"):\n self.data[current_key] = json.loads(self.data[current_key])", "title": "" }, { "docid": "c8161b8825e009fe9b9914cc35e14bfc", "score": "0.49966922", "text": "def _parse_group_vars_line(self, line):\n tokens = shlex.split(line.strip())\n key_values = self._parse_vars(tokens)\n return key_values", "title": "" }, { "docid": "d7dd1d952013d90cc57e78491b609a98", "score": "0.49955785", "text": "def _convert_variant_file_to_dict(varfile):\n patient_variant_map = {}\n # line_num = 0 note this is builtin to the reader as reader.line_num\n reader = csv.reader(varfile, delimiter=\"\\t\")\n col = [\n 'patient', 'family', 'chromosome', 'build', 'position', 'reference_allele',\n 'variant_allele', 'parent_of_origin', 'allele_type', 'mutation_type',\n 'gene_symbol', 'transcript', 'reference_aa', 'variant_aa, aa_change',\n 'segregates_with', 'locus', 'exon', 'inheritance_model', 'zygosity',\n 'dbsnp_id', 'frequency', 'num_of_alleles'\n ]\n # row = next(reader) # there is no header\n # if len(row) != len(col):\n # LOG.error('Got:\\n\\t%s\\nExpected:\\n\\t%s\\n', row, col)\n # raise TypeError('header does not match expected format')\n for row in reader:\n\n patient = 
row[col.index('patient')]\n # family,\n chromosome = row[col.index('chromosome')]\n build = row[col.index('build')]\n position = row[col.index('position')]\n reference_allele = row[col.index('reference_allele')]\n variant_allele = row[col.index('variant_allele')]\n # parent_of_origin,\n # allele_type,\n mutation_type = row[col.index('mutation_type')]\n gene_symbol = row[col.index('gene_symbol')]\n # transcript,\n # reference_aa,\n # variant_aa,\n # aa_change,\n # segregates_with,\n # locus,\n # exon,\n # inheritance_model,\n # zygosity,\n dbsnp_id = row[col.index('dbsnp_id')]\n # frequency,\n # num_of_alleles\n\n if patient not in patient_variant_map:\n patient_variant_map[patient] = {}\n\n formatted_chr = re.sub(r'^CHR', 'chr', chromosome, flags=re.I)\n\n if re.fullmatch(r'[XY]|[0-9]{1,2}', chromosome, flags=re.I):\n formatted_chr = \"chr{0}\".format(chromosome.upper())\n\n formatted_build = re.sub(r'^HG', 'hg', build, flags=re.I)\n ref_base = reference_allele.upper()\n var_base = variant_allele.upper()\n rs_id = ''\n\n # Catch misformatted data\n if re.search(r'LEFT FLANK|NM_|EXON', ref_base):\n ref_base = ''\n\n if re.search(r'LEFT FLANK|NM_|EXON', var_base):\n var_base = ''\n\n if dbsnp_id != '':\n match = re.fullmatch(r'^(rs\\d+).*', dbsnp_id)\n if match:\n rs_id = match.group(1)\n\n # Format variant object\n variant_info = [formatted_chr, formatted_build, position,\n ref_base, var_base]\n\n if '' in variant_info:\n filt_list = [info for info in variant_info if info != '']\n variant_id = str(reader.line_num) + '-' + '-'.join(filt_list)\n else:\n variant_id = '-'.join(variant_info)\n\n if variant_id in patient_variant_map[patient]:\n patient_variant_map[patient][variant_id]['genes_of_interest'].append(\n gene_symbol)\n else:\n patient_variant_map[patient][variant_id] = {\n 'build': formatted_build,\n 'position': position,\n 'chromosome': formatted_chr,\n 'reference_allele': ref_base,\n 'variant_allele': var_base,\n 'type': mutation_type,\n 'rs_id': ''\n }\n if rs_id:\n patient_variant_map[patient][variant_id]['rs_id'] = rs_id\n\n patient_variant_map[patient][variant_id]['genes_of_interest']\\\n = [gene_symbol]\n\n return patient_variant_map", "title": "" }, { "docid": "bd07986733e7eb236a09d82bef825d01", "score": "0.49793538", "text": "def consume_line(self, line):\n data = RE_VALUE_KEY.split(line.strip(), 1)\n if len(data) == 1:\n return float(data[0]), None\n else:\n return float(data[0]), data[1].strip()", "title": "" }, { "docid": "6c7d00eb920000871cb4ddd7750935b1", "score": "0.49764368", "text": "def parse_dict(text):\n\n substitutions = {\"{}\": {}, \"[]\": [], \"True\": True, \"False\": False}\n\n def parse_value(some_val):\n some_val = substitutions.get(some_val, some_val)\n\n try:\n int_val = int(some_val)\n if str(int_val) == some_val:\n return int_val\n except ValueError:\n pass\n\n try:\n float_val = float(some_val)\n if str(float_val) == some_val:\n return float_val\n except ValueError:\n pass\n\n return some_val\n\n def chomp_key_list(out_dict, keys, value):\n \"\"\"\n turn keys like ['a', 'b', 'c', 'd'] and a value into\n out_dict['a']['b']['c']['d'] = value\n \"\"\"\n dict_ptr = out_dict\n last_ptr = out_dict\n for i, key in enumerate(keys):\n last_ptr = dict_ptr\n if not key in dict_ptr:\n dict_ptr[key] = {}\n else:\n if type(dict_ptr[key]) is not dict:\n raise BadRequest(\"Building a dict in %s field, but it exists as %s already\" %\n (key, type(dict_ptr[key])))\n dict_ptr = dict_ptr[key]\n last_ptr[keys[-1]] = value\n\n out = {}\n if text is None:\n return out\n\n 
pairs = text.split(',') # pairs separated by commas\n for pair in pairs:\n if pair.count(':') == 0:\n continue\n fields = pair.split(':', 1) # pair separated by first colon\n key = fields[0].strip()\n value = fields[1].strip()\n\n keyparts = key.split(\".\")\n chomp_key_list(out, keyparts, parse_value(value))\n\n return out", "title": "" }, { "docid": "c1d432ee9552e21862e03201c19d5015", "score": "0.4974465", "text": "def dict_from_path(self, path):\n return dict(re.split(\"\\s+\", line) for line in self.get_param_lines(path))", "title": "" }, { "docid": "1beffeb4fefe1bac76abd78830d48ced", "score": "0.49729833", "text": "def clean_line(self, line, joint_names):\n def try_float(x):\n try:\n return float(x)\n except ValueError:\n return None\n #convert the line of strings to a float or None\n line = [try_float(x) for x in line.rstrip().split(',')]\n #zip the values with the joint names\n combined = zip(joint_names[1:], line[1:])\n #take out any tuples that have a none value\n cleaned = [x for x in combined if x[1] is not None]\n #convert it to a dictionary with only valid commands\n command = dict(cleaned)\n return (command, line,)", "title": "" }, { "docid": "0f87c32bbc889f291d782f825389cd91", "score": "0.49717426", "text": "def load_expression(fn=None, sep=\"\\t\"):\n with gzip.open(fn, \"rt\") as f:\n reader = csv.DictReader(f, delimiter=sep)\n return {row[\"Gene\"]: float(row[\"Expression\"]) for row in reader}", "title": "" }, { "docid": "9f09ba7c58cf44448e81a9ff7ad46d68", "score": "0.49692282", "text": "def parse_line(line, custom_columns):\n results = {}\n if line.startswith('\\t'):\n applog = line.strip() # remove leading tab and trailing newline.\n results['applog'] = applog\n # Some applogs have severity; some are continuations and do not.\n if (len(applog) > 2 and applog[1] == ':'\n and applog[0] >= '0' and applog[0] <= '9'):\n results['applog_severity'] = applog[0]\n for column, regexp in custom_columns.iteritems():\n match = re.search(regexp, applog)\n if match:\n # TODO: Consider allowing named groups.\n results[column] = match.group(1)\n return results\n\n matches = LINE_RE_COMPILED.match(line)\n if not matches:\n raise LineParsingFailure('Fail. %s', line)\n\n results['remotehost'] = matches.group(1)\n results['user'] = matches.group(2)\n results['request_time_str'] = matches.group(3)\n results['request_line'] = matches.group(4)\n # Submatch = matches.group(5)\n results['status'] = matches.group(6)\n results['bytes'] = matches.group(7)\n results['referer'] = matches.group(8)\n # Submatch = matches.group(9)\n results['useragent'] = matches.group(10)\n\n more_values = line[len(matches.group(0)):].strip().split(' ')\n if more_values:\n results['host'] = more_values.pop(0)\n for pair in more_values:\n key, value = pair.split('=')\n results[key] = value\n if more_values and 'loading_request' not in results:\n # The user requested --include_all, so if this were a loading request\n # it would be in the line. But it's not in the line, so it's not a loading\n # request. 
Otherwise, we want it to be NULL for unknown.\n results['loading_request'] = '0'\n\n return results", "title": "" }, { "docid": "d9bfebb31a2b327226359a4029d55445", "score": "0.49632904", "text": "def process_csv(csv_file):\n csv_f = open(csv_file)\n csv_extract = csv.DictReader(csv_f, delimiter=\"\\t\") # dialect='excel'\n csv_list = [] \n for row in csv_extract:\n u_row = {}\n for k,v in row.iteritems():\n if type(v) == str:\n u_row[k] = v.decode(\"utf-8\")\n else:\n u_row[k] = v\n csv_list.append(u_row)\n return csv_list", "title": "" }, { "docid": "6ddb58c3b67085a2ba881a16dbe43155", "score": "0.496286", "text": "def convert_tsv_line(cls, mapping, tsv_header):\n custom_field_infos = tsv_header.custom_field_infos\n table = {\n \"integer\": int,\n \"number\": float,\n \"boolean\": lambda x: BOOL_VALUES.get(x, False),\n }\n for key, value in mapping.items():\n if value is not None and key in custom_field_infos:\n mapping[key] = table.get(custom_field_infos[key].field_type, lambda x: x)(value)\n return mapping", "title": "" }, { "docid": "643e91813ee39ce764ce3cb90cb8ffe8", "score": "0.49558353", "text": "def parseCSVRow(self, row):\n if row[0] == 'id':\n self.dataHeaders = row\n else:\n self.genDataInput(row)\n self.setDataObjectToDict()", "title": "" }, { "docid": "7b78dc127644dc995f4502ef722af8d6", "score": "0.49547195", "text": "def format_line(line):\n result = {'type': 'gauge', 'severity': 'warning'}\n parts = line.split('|')\n for part in parts:\n if '=' in part:\n index = part.index('=')\n result[part[:index]] = part[index+1:]\n return result", "title": "" }, { "docid": "a024d46690571ceb9b3a9b57fb849243", "score": "0.49533784", "text": "def _parse(\n self,\n record: Mapping[str,\n tf.Tensor]) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:\n x, y = super()._parse(record)\n feature_name_mapping = {\n 'input_ids': 'input_word_ids',\n 'input_mask': 'input_mask',\n 'segment_ids': 'input_type_ids'\n }\n converted_x = {\n feature_name_mapping.get(name, name): tensor\n for name, tensor in x.items()}\n return converted_x, y", "title": "" }, { "docid": "a7f6154f791bf6b3374fed927b07828a", "score": "0.4949671", "text": "def read_csv():\n yelp_ids_hours_file = open('insta9')\n yelp_ids_hours = csv.reader(yelp_ids_hours_file)\n\n yelp_ids = {}\n\n for row in yelp_ids_hours:\n infos = row[0].split('|')\n yelp_ids[infos[0]] = { \\\n 'Monday': infos[1], \\\n 'Tuesday': infos[2], \\\n 'Wednesday': infos[3], \\\n 'Thursday': infos[4], \\\n 'Friday': infos[5], \\\n 'Saturday': infos[6], \\\n 'Sunday': infos[7] }\n\n # print infos[0], yelp_ids[infos[0]]['Monday']\n\n return yelp_ids", "title": "" }, { "docid": "9a252e6b39b088627844d5c27ede1cb3", "score": "0.49438286", "text": "def parse_csv_line_to_resource(line):\n if not line:\n return None\n # Capping splits prevents commas in URL from being caught.\n (\n url_string,\n etag_string,\n size_string,\n storage_class_string,\n atime_string,\n mtime_string,\n uid_string,\n gid_string,\n mode_base_eight_string,\n crc32c_string,\n md5_string,\n ) = line.rstrip().rsplit(',', _CSV_COLUMNS_COUNT)\n\n url_object = storage_url.storage_url_from_string(url_string)\n if isinstance(url_object, storage_url.FileUrl):\n return resource_reference.FileObjectResource(url_object)\n cloud_object = resource_reference.ObjectResource(\n url_object,\n etag=etag_string if etag_string else None,\n size=int(size_string) if size_string else None,\n storage_class=storage_class_string if storage_class_string else None,\n crc32c_hash=crc32c_string if crc32c_string else None,\n 
md5_hash=md5_string if md5_string else None,\n custom_fields={},\n )\n posix_util.update_custom_metadata_dict_with_posix_attributes(\n cloud_object.custom_fields,\n posix_util.PosixAttributes(\n atime=int(atime_string) if atime_string else None,\n mtime=int(mtime_string) if mtime_string else None,\n uid=int(uid_string) if uid_string else None,\n gid=int(gid_string) if gid_string else None,\n mode=posix_util.PosixMode.from_base_eight_str(mode_base_eight_string)\n if mode_base_eight_string\n else None,\n ),\n )\n return cloud_object", "title": "" }, { "docid": "ab948b704445615643142c0986918f03", "score": "0.49387118", "text": "def line_to_dict(self, line, protocol):\n d = {}\n connection_states = {\n '01':'ESTABLISHED',\n '02':'SYN_SENT',\n '03':'SYN_RECV',\n '04':'FIN_WAIT1',\n '05':'FIN_WAIT2',\n '06':'TIME_WAIT',\n '07':'CLOSE',\n '08':'CLOSE_WAIT',\n '09':'LAST_ACK',\n '0A':'LISTEN',\n '0B':'CLOSING' }\n line_array = self._remove_empty(line.split(' '))\n d['protocol'] = protocol\n d['local_ip'], d['local_port'] = self._convert_ip_port(line_array[1])\n d['remote_ip'], d['remote_port'] = self._convert_ip_port(line_array[2])\n if 'tcp' in protocol:\n d['state'] = connection_states[line_array[3]]\n else:\n d['state'] = ''\n d['pid'] = self.pid_of_inode(line_array[9])\n d['program'] = self.name_of_pid(d['pid'])\n return d", "title": "" }, { "docid": "2d9ae3e4e2762e0382d327ee5a073a76", "score": "0.49384794", "text": "def decode(line):\n try:\n return json.loads(line)\n except:\n fake_record = {}\n fake_record['fip'] = 0\n fake_record['user_id'] = 0\n fake_record['text'] = ''\n fake_record['filtered_text'] = ''\n fake_record['url_count'] = 0\n fake_record['emoji_count'] = 0\n fake_record['education_level_1'] = 0\n fake_record['education_level_2'] = 0\n fake_record['education_level_3'] = 0\n fake_record['education_level_4'] = 0\n return fake_record", "title": "" } ]