id (string, 3-8 chars) | content (string, 100-981k chars)
---|---
2338
|
import asyncio
import unittest
from .helpers import async_test
class AsyncTestCase(unittest.TestCase):
''' AsyncTestCase allows testing of asynchronous functions.
The usage is the same as :code:`unittest.TestCase`. It works with other test frameworks
and runners (e.g. `pytest`, `nose`) as well.
AsyncTestCase can run:
- test of synchronous code (:code:`unittest.TestCase`)
- test of asynchronous code, supports syntax with
:code:`async`/:code:`await` (Python 3.5+) and
:code:`asyncio.coroutine`/:code:`yield from` (Python 3.4)
Code to test:
.. code-block:: python
import asyncio
async def async_add(x, y, delay=0.1):
await asyncio.sleep(delay)
return x + y
async def async_one():
await async_nested_exc()
async def async_nested_exc():
await asyncio.sleep(0.1)
raise Exception('Test')
Tests:
.. code-block:: python
import aiounittest
class MyTest(aiounittest.AsyncTestCase):
async def test_await_async_add(self):
ret = await async_add(1, 5)
self.assertEqual(ret, 6)
async def test_await_async_fail(self):
with self.assertRaises(Exception) as e:
await async_one()
'''
def get_event_loop(self):
''' Method provides an event loop for the test.
It is called before each test. By default :code:`aiounittest.AsyncTestCase` creates a brand new event
loop every time. After completion, the loop is closed and then recreated and set as default,
leaving asyncio clean.
.. note::
In the most common cases you don't have to bother about this method; the default implementation is the recommended one.
But if, for some reason, you want to provide your own event loop, just override it. Note that :code:`AsyncTestCase` won't close such a loop.
.. code-block:: python
class MyTest(aiounittest.AsyncTestCase):
def get_event_loop(self):
self.my_loop = asyncio.get_event_loop()
return self.my_loop
'''
return None
def __getattribute__(self, name):
attr = super().__getattribute__(name)
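# Coroutine test methods (named "test_*") are wrapped on attribute access, so a
# standard synchronous test runner can call them like regular test methods.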
if name.startswith('test_') and asyncio.iscoroutinefunction(attr):
return async_test(attr, loop=self.get_event_loop())
else:
return attr
|
2374
|
import numpy as np
from util import *
def naiveDistanceProfile(tsA, idx, m, tsB = None):
"""Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.
>>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
distanceProfile = []
n = len(tsB)
for i in range(n - m + 1):
distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
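# For a self-join, exclude the "trivial match" zone around the query's own
# position by overwriting those distances with infinity.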
if selfJoin:
trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = [np.inf] * (trivialMatchRange[1] - trivialMatchRange[0])
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
def stampDistanceProfile(tsA, idx, m, tsB = None):
"""
>>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
n = len(tsB)
distanceProfile = mass(query, tsB)
if selfJoin:
trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
if __name__ == "__main__":
import doctest
doctest.testmod()
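# Sketch (not part of the original module): a full matrix profile could be
# assembled from these distance profiles by taking the element-wise minimum
# over all query indices, e.g.
#
#     mp = np.full(len(tsA) - m + 1, np.inf)
#     for i in range(len(tsA) - m + 1):
#         dp, _ = stampDistanceProfile(tsA, i, m)
#         mp = np.minimum(mp, dp)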
|
2406
|
import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor]
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
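# STARTED/COMPLETED fire once per run, EPOCH_* events once per epoch, and the
# iteration/get-batch events once per iteration, hence the different tensor sizes.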
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we can not use BasicTimeProfiler in this case."
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
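# Each profiled event gets a pair of callbacks: an "_as_first_*" handler that
# resets the timer before the user handlers run, and an "_as_last_*" handler
# that records the elapsed time once they have finished.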
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
# compute on non-zero data:
data = data[data > 0]
out = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
if len(data) > 1:
out += [
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
return OrderedDict(out)
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
) # type: Union[int, torch.Tensor]
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)] # type: ignore[list-item]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler.
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
EVENT_FILTER_THESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = [] # type: List[float]
self.processing_times = [] # type: List[float]
self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]]
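# Unlike BasicTimeProfiler, times are recorded per handler (not just per event):
# each registered handler is wrapped in a timing decorator when the run starts.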
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered" # type: Union[str, float]
min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
mean = "None" # type: Union[str, float]
std = "None" # type: Union[str, float]
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
# adapted implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
|
2409
|
from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy
import numpy as np
nan = numpy.nan
def setup_multivariate_gaussian():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
mu, cov = [2, 2, 2], numpy.eye(3)
d2 = MultivariateGaussianDistribution(mu, cov)
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[-1.2, -1.8, -1.5],
[-1.8, 0.3, 0.5],
[ 0.7, -1.3, -0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[-1.2, -1.8, -1.5],
[ nan, 0.3, 0.5],
[ nan, -1.3, nan]])
def setup_multivariate_mixed():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
d21 = ExponentialDistribution(5)
d22 = LogNormalDistribution(0.2, 0.8)
d23 = PoissonDistribution(3)
d2 = IndependentComponentsDistribution([d21, d22, d23])
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[ 1.2, 1.8, 1.5],
[ 1.8, 0.3, 0.5],
[ 0.7, 1.3, 0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[ 1.2, 1.8, 1.5],
[ nan, 0.3, 0.5],
[ nan, 1.3, nan]])
def setup_hmm():
global model
global hmm1
global hmm2
global hmm3
rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )
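# hmm1 always emits from the rigged coin, hmm2 always from the fair coin,
# and hmm3 switches freely between the two states with equal probability.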
hmm1 = HiddenMarkovModel()
hmm1.start = rigged
hmm1.add_transition(rigged, rigged, 1)
hmm1.bake()
hmm2 = HiddenMarkovModel()
hmm2.start = unrigged
hmm2.add_transition(unrigged, unrigged, 1)
hmm2.bake()
hmm3 = HiddenMarkovModel()
hmm3.add_transition(hmm3.start, unrigged, 0.5)
hmm3.add_transition(hmm3.start, rigged, 0.5)
hmm3.add_transition(rigged, rigged, 0.5)
hmm3.add_transition(rigged, unrigged, 0.5)
hmm3.add_transition(unrigged, rigged, 0.5)
hmm3.add_transition(unrigged, unrigged, 0.5)
hmm3.bake()
model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
pass
def teardown():
pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 9.60834277e-01, 3.91657228e-02],
[ 3.10025519e-01, 6.89974481e-01],
[ 1.79862100e-02, 9.82013790e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 5.54485247e-05, 9.99944551e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 1.67981615e-01, 8.32018385e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.16827304e-01, 8.31726965e-02],
[ 9.90048198e-01, 9.95180187e-03]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 6.99086440e-01, 3.00913560e-01],
[ 2.98988163e-01, 7.01011837e-01],
[ 7.84134838e-01, 2.15865162e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 8.98102888e-05, 9.99910190e-01],
[ 9.99370443e-01, 6.29556825e-04],
[ 2.68992964e-01, 7.31007036e-01],
[ 7.69692511e-01, 2.30307489e-01],
[ 7.94751748e-01, 2.05248252e-01],
[ 3.09439547e-01, 6.90560453e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
y_hat = model.predict(X)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
y_hat = model.predict(X)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.28333333, 0.21666666]
cov1_t = [[1.3088888, 0.9272222, 0.6227777],
[0.9272222, 2.2513888, 1.3402777],
[0.6227777, 1.3402777, 0.9547222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687499, 0.23687499, 0.4793750],
[0.23687499, 0.40187499, 0.5318749],
[0.47937500, 0.53187499, 0.7868750]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [1.033333, 1.3166667, 0.75]
cov1_t = [[0.242222, 0.0594444, 0.178333],
[0.059444, 0.5980555, 0.414166],
[0.178333, 0.4141666, 0.439166]]
d21 = model.distributions[1].distributions[0]
d22 = model.distributions[1].distributions[1]
d23 = model.distributions[1].distributions[2]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(d21.parameters, [0.34188034])
assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.2833333, 0.21666666]
cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
[0.927222222, 2.251388888, 1.340277777],
[0.622777777, 1.340277777, 0.9547222222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687500, 0.23687499, 0.47937500],
[0.23687499, 0.40187499, 0.53187499],
[0.47937500, 0.53187499, 0.78687500]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(logs[0][0], -0.89097292388986515)
assert_almost_equal(logs[0][1], -1.3609765531356006)
assert_almost_equal(logs[0][2], -1.0986122886681096)
assert_almost_equal(logs[1][0], -0.93570553121744293)
assert_almost_equal(logs[1][1], -1.429425687080494)
assert_almost_equal(logs[1][2], -0.9990078376167526)
assert_almost_equal(logs[2][0], -3.9007882563128864)
assert_almost_equal(logs[2][1], -0.23562532881626597)
assert_almost_equal(logs[2][2], -1.6623251045711958)
assert_almost_equal(logs[3][0], -3.1703366478831185)
assert_almost_equal(logs[3][1], -0.49261403211260379)
assert_almost_equal(logs[3][2], -1.058478108940049)
assert_almost_equal(logs[4][0], -1.3058441172130273)
assert_almost_equal(logs[4][1], -1.4007102236822906)
assert_almost_equal(logs[4][2], -0.7284958836972919)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(probs[0][0], 0.41025641025641024)
assert_almost_equal(probs[0][1], 0.25641025641025639)
assert_almost_equal(probs[0][2], 0.33333333333333331)
assert_almost_equal(probs[1][0], 0.39230898163446098)
assert_almost_equal(probs[1][1], 0.23944639992337707)
assert_almost_equal(probs[1][2], 0.36824461844216183)
assert_almost_equal(probs[2][0], 0.020225961918306088)
assert_almost_equal(probs[2][1], 0.79007663743383105)
assert_almost_equal(probs[2][2], 0.18969740064786292)
assert_almost_equal(probs[3][0], 0.041989459861032523)
assert_almost_equal(probs[3][1], 0.61102706038265642)
assert_almost_equal(probs[3][2], 0.346983479756311)
assert_almost_equal(probs[4][0], 0.27094373022369794)
assert_almost_equal(probs[4][1], 0.24642188711704707)
assert_almost_equal(probs[4][2], 0.48263438265925512)
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_equal(predicts[0], 0)
assert_equal(predicts[1], 0)
assert_equal(predicts[2], 1)
assert_equal(predicts[3], 1)
assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
logp1 = model.log_probability(X)
logp2 = model.log_probability(X2)
logp3 = model.log_probability(X3)
assert_array_almost_equal(logp1, logp2)
assert_array_almost_equal(logp1, logp3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict(X)
y_hat2 = model.predict(X2)
y_hat3 = model.predict(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_proba(X)
y_hat2 = model.predict_proba(X2)
y_hat3 = model.predict_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_log_proba(X)
y_hat2 = model.predict_log_proba(X2)
y_hat3 = model.predict_log_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
|
2416
|
from typing import Dict, List, cast
from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType
class HavingParameters:
def __init__(self):
self._parameters: Dict[str, Parameter] = {}
super().__init__()
def has_parameter(self, name: str) -> bool:
return name in self._parameters
def add_parameter(self,
name: str,
value: ParameterValueType,
param_type: str,
value_range: ParameterRangeType):
if name in self._parameters:
raise Exception('parameter named ' + name + ' already added to this object')
parameter = Parameter(name, value, param_type, value_range)
self._parameters[name] = parameter
def add_parameter_object(self, parameter: Parameter) -> None:
self._parameters[parameter.name] = parameter
def get_parameter(self, name: str) -> Parameter:
for parameter in self.parameters:
if parameter.name == name:
return parameter
list_of_names: List[str] = [p.name for p in self.parameters]
# noinspection PyTypeChecker
available_names: List[str] = cast(List[str], list_of_names)
raise Exception('parameter named ' + name + ' not found. Available: ' + ', '.join(available_names))
def get_parameter_value(self, name: str) -> ParameterValueType:
param = self.get_parameter(name)
return param.value
def get_float_parameter_value(self, name: str) -> float:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_FLOAT:
raise ValueError(f"parameter {name} was expected to be float (error: f009d0ef)")
value = self.get_parameter_value(name)
cast_value = cast(float, value)
return cast_value
def get_enum_parameter_value(self, name: str) -> str:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_ENUM:
raise ValueError(f"parameter {name} was expected to be enum (error: 80a1d180)")
value = self.get_parameter_value(name)
cast_value = cast(str, value)
return cast_value
def set_parameter_value(self, name: str, value: ParameterValueType):
param = self.get_parameter(name)
param.value = value
@property
def parameters(self) -> List[Parameter]:
return list(self._parameters.values())
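# Minimal usage sketch (hypothetical subclass and values, not part of the library):
#
#     class Synth(HavingParameters):
#         pass
#
#     s = Synth()
#     s.add_parameter('cutoff', 0.5, Parameter.TYPE_FLOAT, (0.0, 1.0))
#     s.get_float_parameter_value('cutoff')  # -> 0.5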
|
2426
|
import os, sys, cdms2, vcs, vcs.testing.regression as regression
dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data = dataset("clt")
canvas = regression.init()
isoline = canvas.createisoline()
isoline.label="y"
texts=[]
colors = []
for i in range(10):
text = canvas.createtext()
text.color = 50 + 12 * i
text.height = 12
colors.append(100 + 12 * i)
if i%2 == 0:
texts.append(text.name)
else:
texts.append(text)
isoline.text = texts
# First test using isoline.text[...].color
canvas.plot(data, isoline, bg=1)
baseline = os.path.splitext(sys.argv[1])
baselineImage = "%s%s"%baseline
ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage)
# Now set isoline.linecolors and test again.
canvas.clear()
isoline.linecolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels2.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
# Now set isoline.textcolors and test again.
canvas.clear()
isoline.textcolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels3.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
sys.exit(ret)
|
2476
|
import sqlite3
import subprocess, datetime
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
from tquery import get_latest_record
from config import *
app = Flask(__name__)
app.config.from_object(__name__)
# DB helper functions
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
"""Initializes the sqlite3 database. This function must be imported and
executed from the Python interpreter before the application is first run."""
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
# Auto-open and close DB when serving requests
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/', methods=['GET', 'POST'])
def welcome_page():
if 'username' in session and session['username']:
return redirect(url_for('submit_page'))
error = None
if request.method == 'POST': # someone's logging in
if not request.form['username'] in app.config['USERNAMES']:
error = 'username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'password'
else: # successful login
session['username'] = request.form['username']
flash('Hi ' + session['username'] + '!')
return redirect(url_for('submit_page'))
return render_template('welcome_page.html', commands=command_history(),
error=error, last_record=last_record())
@app.route('/submit', methods=['GET', 'POST'])
def submit_page():
error = None
if not session.get('username'):
abort(401)
if request.method == 'POST': # command is being issued to AC
user_mode = request.form['mode']
user_temperature = request.form['temperature']
validation_codes = validate_AC_command(user_mode, user_temperature)
if (validation_codes['mode_error'] or
validation_codes['temperature_error']):
error=validation_codes
else:
subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac',
validation_codes['command']])
g.db.execute('insert into commands (command, ts, user) values (?, ?, ?)',
[validation_codes['command'],
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
session['username']])
g.db.commit()
flash('Command submitted')
return render_template('submit_page.html', commands=command_history(),
error=error, last_record=last_record())
@app.route('/logout')
def logout():
session.pop('username', None)
flash('You were logged out')
return redirect(url_for('welcome_page'))
def validate_AC_command(user_mode, user_temperature):
"""Validates and sanitizes user-input command; translates command
into irsend call."""
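# The resulting 'command' value is the irsend key name: 'off' for off mode,
# 'heat' + temperature for heat mode, or the bare temperature for cooling.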
codes = dict()
if user_mode not in app.config['ACMODES']:
codes['mode_error'] = True
else:
codes['mode_error'] = False
if user_mode != 'off' and user_temperature not in app.config['ACTEMPERATURES']:
codes['temperature_error'] = True
else:
codes['temperature_error'] = False
if not codes['mode_error'] and not codes['temperature_error']:
codes['mode'] = user_mode
codes['temperature'] = user_temperature
if codes['mode'] == 'off':
command_postfix = 'off'
elif codes['mode'] == 'heat':
command_postfix = 'heat' + codes['temperature']
else:
command_postfix = codes['temperature']
codes['command'] = command_postfix
return codes
def command_history():
"""Returns a list of dictionaries, each containing a command issued
to the AC previously. The list is ordered chronologically, from newest
to oldest."""
cur = g.db.execute('select command, ts, user from commands order by id desc')
command_history = []
for row in cur.fetchall():
if row[0][0] == 'h':
cmd = 'heat to ' + row[0][4:]
elif row[0] == 'off':
cmd = 'off'
else:
cmd = 'cool to ' + row[0]
command_history.append(dict(command=cmd, ts=row[1], user=row[2]))
return command_history
def last_record():
"""Returns the last temperature and humidity record data.
The returned object is a dict with keys ts, fahrenheit, celsius and
humidity.
"""
db_record = get_latest_record()
out_record = dict()
out_record['date'] = db_record[0].strftime("%Y-%m-%d")
out_record['time'] = db_record[0].strftime("%H:%M")
out_record['celsius'] = db_record[1]
out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32))
out_record['humidity'] = int(round(db_record[2]))
return out_record
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
2492
|
import unittest
import tests.settings_mock as settings_mock
from tests.activity.classes_mock import FakeLogger
from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission
class TestWorkflowIngestAcceptedSubmission(unittest.TestCase):
def setUp(self):
self.workflow = workflow_IngestAcceptedSubmission(
settings_mock, FakeLogger(), None, None, None, None
)
def test_init(self):
self.assertEqual(self.workflow.name, "IngestAcceptedSubmission")
2513
from dataset.baseset import BaseSet
import random, cv2
import numpy as np
class iNaturalist(BaseSet):
def __init__(self, mode='train', cfg=None, transform=None):
super(iNaturalist, self).__init__(mode, cfg, transform)
random.seed(0)
self.class_dict = self._get_class_dict()
def __getitem__(self, index):
if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and self.mode == 'train':
assert self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE in ["balance", 'square', 'progressive']
if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "balance":
sample_class = random.randint(0, self.num_classes - 1)
elif self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "square":
sample_class = np.random.choice(np.arange(self.num_classes), p=self.square_p)
else:
sample_class = np.random.choice(np.arange(self.num_classes), p=self.progress_p)
sample_indexes = self.class_dict[sample_class]
index = random.choice(sample_indexes)
now_info = self.data[index]
img = self._get_image(now_info)
image = self.transform(img)
meta = dict()
image_label = now_info['category_id'] # 0-index
return image, image_label, meta
2620
from glue.core.data_factories.helpers import has_extension
from glue.config import data_factory
__all__ = ['tabular_data']
@data_factory(label="ASCII Table",
identifier=has_extension('csv txt tsv tbl dat '
'csv.gz txt.gz tbl.bz '
'dat.gz'),
priority=1)
def tabular_data(path, **kwargs):
from glue.core.data_factories.astropy_table import astropy_tabular_data
from glue.core.data_factories.pandas import pandas_read_table
for fac in [astropy_tabular_data, pandas_read_table]:
try:
return fac(path, **kwargs)
except Exception:
pass
else:
raise IOError("Could not parse file: %s" % path)
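# Usage sketch (illustrative; 'mytable.csv' is a hypothetical file -- glue normally
# invokes this factory itself when a file with a matching extension is loaded):
#
#     data = tabular_data('mytable.csv')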
2623
import math
import pytorch_lightning as pl
class LearningRateDecayCallback(pl.Callback):
def __init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True):
super().__init__()
self.learning_rate = learning_rate
self.tokens = 0
self.final_tokens = final_tokens
self.lr_decay = lr_decay
self.warmup_tokens = warmup_tokens
def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
optimizer = trainer.optimizers[0]
_, y = batch
if self.lr_decay:
self.tokens += (y >= 0).sum() # number of tokens processed this step (i.e. label is not -100)
if self.tokens < self.warmup_tokens:
# linear warmup
lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens))
else:
# cosine learning rate decay
progress = float(self.tokens - self.warmup_tokens) / float(
max(1, self.final_tokens - self.warmup_tokens))
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
lr = self.learning_rate * lr_mult
for param_group in optimizer.param_groups:
param_group['lr'] = lr
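# Usage sketch (illustrative; the redacted defaults above are placeholders, so the token
# budgets below are hypothetical and should be sized to the training set):
#
#     lr_callback = LearningRateDecayCallback(learning_rate=6e-4,
#                                             warmup_tokens=375e6,
#                                             final_tokens=260e9)
#     trainer = pl.Trainer(callbacks=[lr_callback])
#     trainer.fit(model, train_dataloader)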
2626
from setuptools import setup
setup(
name="nmn-iwp",
version="0.1",
keywords="",
packages=["vr", "vr.models"]
)
2659
def test_xrange(judge_command):
judge_command(
"XRANGE somestream - +",
{"command": "XRANGE", "key": "somestream", "stream_id": ["-", "+"]},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069"],
},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069-10",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069-10"],
},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069-10 count 10",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069-10"],
"count_const": "count",
"count": "10",
},
)
def test_xgroup_create(judge_command):
judge_command(
"XGROUP CREATE mykey mygroup 123",
{
"command": "XGROUP",
"stream_create": "CREATE",
"key": "mykey",
"group": "mygroup",
"stream_id": "123",
},
)
judge_command(
"XGROUP CREATE mykey mygroup $",
{
"command": "XGROUP",
"stream_create": "CREATE",
"key": "mykey",
"group": "mygroup",
"stream_id": "$",
},
)
# short of a parameter
judge_command("XGROUP CREATE mykey mygroup", None)
judge_command("XGROUP CREATE mykey", None)
def test_xgroup_setid(judge_command):
judge_command(
"XGROUP SETID mykey mygroup 123",
{
"command": "XGROUP",
"stream_setid": "SETID",
"key": "mykey",
"group": "mygroup",
"stream_id": "123",
},
)
judge_command(
"XGROUP SETID mykey mygroup $",
{
"command": "XGROUP",
"stream_setid": "SETID",
"key": "mykey",
"group": "mygroup",
"stream_id": "$",
},
)
# two subcommand together shouldn't match
judge_command("XGROUP CREATE mykey mygroup 123 SETID mykey mygroup $", None)
def test_xgroup_destroy(judge_command):
judge_command(
"XGROUP destroy mykey mygroup",
{
"command": "XGROUP",
"stream_destroy": "destroy",
"key": "mykey",
"group": "mygroup",
},
)
judge_command("XGROUP destroy mykey", None)
judge_command("XGROUP DESTROY mykey mygroup $", None)
def test_xgroup_delconsumer(judge_command):
judge_command(
"XGROUP delconsumer mykey mygroup myconsumer",
{
"command": "XGROUP",
"stream_delconsumer": "delconsumer",
"key": "mykey",
"group": "mygroup",
"consumer": "myconsumer",
},
)
judge_command(
"XGROUP delconsumer mykey mygroup $",
{
"command": "XGROUP",
"stream_delconsumer": "delconsumer",
"key": "mykey",
"group": "mygroup",
"consumer": "$",
},
)
judge_command("XGROUP delconsumer mykey mygroup", None)
def test_xgroup_stream(judge_command):
judge_command(
"XACK mystream group1 123123",
{
"command": "XACK",
"key": "mystream",
"group": "group1",
"stream_id": "123123",
},
)
judge_command(
"XACK mystream group1 123123 111",
{"command": "XACK", "key": "mystream", "group": "group1", "stream_id": "111"},
)
def test_xinfo(judge_command):
judge_command(
"XINFO consumers mystream mygroup",
{
"command": "XINFO",
"stream_consumers": "consumers",
"key": "mystream",
"group": "mygroup",
},
)
judge_command(
"XINFO GROUPS mystream",
{"command": "XINFO", "stream_groups": "GROUPS", "key": "mystream"},
)
judge_command(
"XINFO STREAM mystream",
{"command": "XINFO", "stream": "STREAM", "key": "mystream"},
)
judge_command("XINFO HELP", {"command": "XINFO", "help": "HELP"})
judge_command("XINFO consumers mystream mygroup GROUPS mystream", None)
judge_command("XINFO groups mystream mygroup", None)
def test_xinfo_with_full(judge_command):
judge_command(
"XINFO STREAM mystream FULL",
{
"command": "XINFO",
"stream": "STREAM",
"key": "mystream",
"full_const": "FULL",
},
)
judge_command(
"XINFO STREAM mystream FULL count 10",
{
"command": "XINFO",
"stream": "STREAM",
"key": "mystream",
"full_const": "FULL",
"count_const": "count",
"count": "10",
},
)
def test_xpending(judge_command):
judge_command(
"XPENDING mystream group55",
{"command": "XPENDING", "key": "mystream", "group": "group55"},
)
judge_command(
"XPENDING mystream group55 myconsumer",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"consumer": "myconsumer",
},
)
judge_command(
"XPENDING mystream group55 - + 10",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"stream_id": ["-", "+"],
"count": "10",
},
)
judge_command(
"XPENDING mystream group55 - + 10 myconsumer",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"stream_id": ["-", "+"],
"count": "10",
"consumer": "myconsumer",
},
)
judge_command("XPENDING mystream group55 - + ", None)
def test_xadd(judge_command):
judge_command(
"xadd mystream MAXLEN ~ 1000 * key value",
{
"command": "xadd",
"key": "mystream",
"maxlen": "MAXLEN",
"approximately": "~",
"count": "1000",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
# test for MAXLEN option
judge_command(
"xadd mystream MAXLEN 1000 * key value",
{
"command": "xadd",
"key": "mystream",
"maxlen": "MAXLEN",
"count": "1000",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
judge_command(
"xadd mystream * key value",
{
"command": "xadd",
"key": "mystream",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
    # specify stream id
judge_command(
"xadd mystream 123-123 key value",
{
"command": "xadd",
"key": "mystream",
"sfield": "key",
"svalue": "value",
"stream_id": "123-123",
},
)
judge_command(
"xadd mystream 123-123 key value foo bar hello world",
{
"command": "xadd",
"key": "mystream",
"sfield": "hello",
"svalue": "world",
"stream_id": "123-123",
},
)
def test_xtrim(judge_command):
judge_command(
" XTRIM mystream MAXLEN 2",
{"command": "XTRIM", "key": "mystream", "maxlen": "MAXLEN", "count": "2"},
)
judge_command(
" XTRIM mystream MAXLEN ~ 2",
{
"command": "XTRIM",
"key": "mystream",
"maxlen": "MAXLEN",
"count": "2",
"approximately": "~",
},
)
judge_command(" XTRIM mystream", None)
def test_xdel(judge_command):
judge_command(
"XDEL mystream 1581165000000 1549611229000 1581060831000",
{"command": "XDEL", "key": "mystream", "stream_id": "1581060831000"},
)
judge_command(
"XDEL mystream 1581165000000",
{"command": "XDEL", "key": "mystream", "stream_id": "1581165000000"},
)
def test_xclaim(judge_command):
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456 789",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "789",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": ["3600000", "300"],
"stream_id": "1526569498055-0",
"idel": "IDEL",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"retrycount": "retrycount",
"count": "7",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"time": "TIME",
"timestamp": "123456789",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"force": "FORCE",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"justid": "JUSTID",
},
)
def test_xread(judge_command):
judge_command(
"XREAD COUNT 2 STREAMS mystream writers 0-0 0-0",
{
"command": "XREAD",
"count_const": "COUNT",
"count": "2",
"streams": "STREAMS",
# FIXME current grammar can't support multiple tokens
            # so the ids will be recognized as keys.
"keys": "mystream writers 0-0",
"stream_id": "0-0",
},
)
judge_command(
"XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0 0-0",
{
"command": "XREAD",
"count_const": "COUNT",
"count": "2",
"streams": "STREAMS",
"keys": "mystream writers 0-0",
"block": "BLOCK",
"millisecond": "1000",
"stream_id": "0-0",
},
)
def test_xreadgroup(judge_command):
judge_command(
"XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS key1 1 key2 2",
{
"command": "XREADGROUP",
"stream_group": "GROUP",
"group": "mygroup1",
"consumer": "Bob",
"count_const": "COUNT",
"count": "1",
"block": "BLOCK",
"millisecond": "100",
"noack": "NOACK",
"streams": "STREAMS",
"keys": "key1 1 key2",
"stream_id": "2",
},
)
judge_command(
"XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2 2",
{
"command": "XREADGROUP",
"stream_group": "GROUP",
"group": "mygroup1",
"consumer": "Bob",
"streams": "STREAMS",
"keys": "key1 1 key2",
"stream_id": "2",
},
)
judge_command("XREADGROUP GROUP group consumer", None)
2671
import logging
import os
import pickle
import sys
import threading
import time
from typing import List
from Giveme5W1H.extractor.root import path
from Giveme5W1H.extractor.tools.util import bytes_2_human_readable
class KeyValueCache(object):
def __init__(self, cache_path):
"""
:param cache_path: path to cache, must be relative to the root.py file
"""
self.log = logging.getLogger('GiveMe5W')
# resolve path relative to the path file
self._cache_path = path(cache_path)
        # add a meaningful extension
self._cache_path = self._cache_path + '.prickle'
self._cache = {}
if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0:
            # reload cache object from disk, if any
with open(self._cache_path, 'rb') as ff:
self._cache = pickle.load(ff)
self.log.debug('KeyValueCache: ' + self._cache_path + ' restored')
self.log_stats()
else:
self._cache = {}
self._lock = threading.Lock()
def log_stats(self):
        # size does not account for nested objects
self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable(
sys.getsizeof(self._cache)))
def persist(self):
with open(self._cache_path, 'wb') as f:
pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL)
def cache(self, key: str, value: object):
"""
        None values are considered invalid results (ToughRequest produces None for exceptions);
        set -1 if you want to store "no distance".
:param key:
:param value:
:return:
"""
self._lock.acquire()
if value is not None:
            self._cache[key] = self._pack(value)
self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': ' + str(value))
self.persist()
self._lock.release()
def get(self, key):
"""
Read cache entries
:param key:
:return:
"""
self._lock.acquire()
result = None
value = self._cache.get(key)
if value is not None:
self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': ' + str(value))
result = self._unpack(value)
self._lock.release()
return result
def get_complex(self, list_of_keys: List[str]):
"""
Read complex cache entries
"""
return self.get(self._get_id(list_of_keys))
def cache_complex(self, list_of_keys: List[str], value):
"""
        Helper to cache values under multiple (string) keys.
        Keys are sorted before concatenation, so the resulting id is order-independent.
"""
self.cache(self._get_id(list_of_keys), value)
def _get_id(self, list_of_keys: List[str]):
"""
sorts list_of_keys, concatenates with # for readability
:param list_of_keys:
:return:
"""
        return "#".join(sorted(list_of_keys))
def _pack(self, value):
"""
cache tracks the age of an entry, may be helpful in the future
:param value:
:return:
"""
return [value, str(time.time())]
def _unpack(self, value):
"""
removes the timestamp around the cached value, if any
:param value:
:return:
"""
# there are some old entries without timestamp
if isinstance(value, str) or isinstance(value, int):
return value
return value[0]
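# Usage sketch (illustrative; the cache path is relative to root.py as documented above):
#
#     cache = KeyValueCache('caches/distances')
#     cache.cache_complex(['Berlin', 'Hamburg'], 255)
#     cache.get_complex(['Hamburg', 'Berlin'])  # -> 255; key order does not matter
#     cache.log_stats()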
2699
import os
import sys
import torch
import yaml
from functools import partial
sys.path.append('../../../../')
from trainers import trainer, frn_train
from datasets import dataloaders
from models.FRN import FRN
args = trainer.train_parser()
with open('../../../../config.yml', 'r') as f:
temp = yaml.safe_load(f)
data_path = os.path.abspath(temp['data_path'])
fewshot_path = os.path.join(data_path,'CUB_fewshot_raw')
pm = trainer.Path_Manager(fewshot_path=fewshot_path,args=args)
train_way = args.train_way
shots = [args.train_shot, args.train_query_shot]
train_loader = dataloaders.meta_train_dataloader(data_path=pm.train,
way=train_way,
shots=shots,
transform_type=args.train_transform_type)
model = FRN(way=train_way,
shots=[args.train_shot, args.train_query_shot],
resnet=args.resnet)
train_func = partial(frn_train.default_train,train_loader=train_loader)
tm = trainer.Train_Manager(args,path_manager=pm,train_func=train_func)
tm.train(model)
tm.evaluate(model)
2797
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
import pybullet as p
from . import kuka
import random
import pybullet_data
from pkg_resources import parse_version
maxSteps = 1000
RENDER_HEIGHT = 720
RENDER_WIDTH = 960
class KukaCamGymEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self,
urdfRoot=pybullet_data.getDataPath(),
actionRepeat=1,
isEnableSelfCollision=True,
renders=False,
isDiscrete=False):
self._timeStep = 1./240.
self._urdfRoot = urdfRoot
self._actionRepeat = actionRepeat
self._isEnableSelfCollision = isEnableSelfCollision
self._observation = []
self._envStepCounter = 0
self._renders = renders
self._width = 341
self._height = 256
self._isDiscrete=isDiscrete
self.terminated = 0
self._p = p
if self._renders:
cid = p.connect(p.SHARED_MEMORY)
if (cid<0):
p.connect(p.GUI)
p.resetDebugVisualizerCamera(1.3,180,-41,[0.52,-0.2,-0.33])
else:
p.connect(p.DIRECT)
#timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
self._seed()
self.reset()
observationDim = len(self.getExtendedObservation())
#print("observationDim")
#print(observationDim)
observation_high = np.array([np.finfo(np.float32).max] * observationDim)
if (self._isDiscrete):
self.action_space = spaces.Discrete(7)
else:
action_dim = 3
self._action_bound = 1
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(low=0, high=255, shape=(self._height, self._width, 4))
self.viewer = None
def _reset(self):
self.terminated = 0
p.resetSimulation()
p.setPhysicsEngineParameter(numSolverIterations=150)
p.setTimeStep(self._timeStep)
p.loadURDF(os.path.join(self._urdfRoot,"plane.urdf"),[0,0,-1])
p.loadURDF(os.path.join(self._urdfRoot,"table/table.urdf"), 0.5000000,0.00000,-.820000,0.000000,0.000000,0.0,1.0)
xpos = 0.5 +0.2*random.random()
ypos = 0 +0.25*random.random()
ang = 3.1415925438*random.random()
orn = p.getQuaternionFromEuler([0,0,ang])
self.blockUid =p.loadURDF(os.path.join(self._urdfRoot,"block.urdf"), xpos,ypos,-0.1,orn[0],orn[1],orn[2],orn[3])
p.setGravity(0,0,-10)
self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
self._envStepCounter = 0
p.stepSimulation()
self._observation = self.getExtendedObservation()
return np.array(self._observation)
def __del__(self):
p.disconnect()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def getExtendedObservation(self):
#camEyePos = [0.03,0.236,0.54]
#distance = 1.06
#pitch=-56
#yaw = 258
#roll=0
#upAxisIndex = 2
#camInfo = p.getDebugVisualizerCamera()
#print("width,height")
#print(camInfo[0])
#print(camInfo[1])
#print("viewMatrix")
#print(camInfo[2])
#print("projectionMatrix")
#print(camInfo[3])
#viewMat = camInfo[2]
#viewMat = p.computeViewMatrixFromYawPitchRoll(camEyePos,distance,yaw, pitch,roll,upAxisIndex)
viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0, -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0, 0.0, 0.5504802465438843, 0.8348482847213745, 0.0, 0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
#projMatrix = camInfo[3]#[0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
projMatrix = [0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
img_arr = p.getCameraImage(width=self._width,height=self._height,viewMatrix=viewMat,projectionMatrix=projMatrix)
rgb=img_arr[2]
np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
self._observation = np_img_arr
return self._observation
def _step(self, action):
if (self._isDiscrete):
dv = 0.01
dx = [0,-dv,dv,0,0,0,0][action]
dy = [0,0,0,-dv,dv,0,0][action]
da = [0,0,0,0,0,-0.1,0.1][action]
f = 0.3
realAction = [dx,dy,-0.002,da,f]
else:
dv = 0.01
dx = action[0] * dv
dy = action[1] * dv
da = action[2] * 0.1
f = 0.3
realAction = [dx,dy,-0.002,da,f]
return self.step2( realAction)
def step2(self, action):
for i in range(self._actionRepeat):
self._kuka.applyAction(action)
p.stepSimulation()
if self._termination():
break
#self._observation = self.getExtendedObservation()
self._envStepCounter += 1
self._observation = self.getExtendedObservation()
if self._renders:
time.sleep(self._timeStep)
#print("self._envStepCounter")
#print(self._envStepCounter)
done = self._termination()
reward = self._reward()
#print("len=%r" % len(self._observation))
return np.array(self._observation), reward, done, {}
def _render(self, mode='human', close=False):
if mode != "rgb_array":
return np.array([])
    # this env has a kuka arm rather than a racecar; target the arm base and reuse
    # the same camera settings as the debug visualizer configured in __init__
    base_pos, orn = self._p.getBasePositionAndOrientation(self._kuka.kukaUid)
    view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=1.3,
        yaw=180,
        pitch=-41,
        roll=0,
        upAxisIndex=2)
proj_matrix = self._p.computeProjectionMatrixFOV(
fov=60, aspect=float(RENDER_WIDTH)/RENDER_HEIGHT,
nearVal=0.1, farVal=100.0)
(_, _, px, _, _) = self._p.getCameraImage(
width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px)
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _termination(self):
#print (self._kuka.endEffectorPos[2])
state = p.getLinkState(self._kuka.kukaUid,self._kuka.kukaEndEffectorIndex)
actualEndEffectorPos = state[0]
#print("self._envStepCounter")
#print(self._envStepCounter)
if (self.terminated or self._envStepCounter>maxSteps):
self._observation = self.getExtendedObservation()
return True
maxDist = 0.005
closestPoints = p.getClosestPoints(self._kuka.trayUid, self._kuka.kukaUid,maxDist)
if (len(closestPoints)):#(actualEndEffectorPos[2] <= -0.43):
self.terminated = 1
#print("closing gripper, attempting grasp")
#start grasp and terminate
fingerAngle = 0.3
for i in range (100):
graspAction = [0,0,0.0001,0,fingerAngle]
self._kuka.applyAction(graspAction)
p.stepSimulation()
fingerAngle = fingerAngle-(0.3/100.)
if (fingerAngle<0):
fingerAngle=0
for i in range (1000):
graspAction = [0,0,0.001,0,fingerAngle]
self._kuka.applyAction(graspAction)
p.stepSimulation()
blockPos,blockOrn=p.getBasePositionAndOrientation(self.blockUid)
if (blockPos[2] > 0.23):
#print("BLOCKPOS!")
#print(blockPos[2])
break
state = p.getLinkState(self._kuka.kukaUid,self._kuka.kukaEndEffectorIndex)
actualEndEffectorPos = state[0]
if (actualEndEffectorPos[2]>0.5):
break
self._observation = self.getExtendedObservation()
return True
return False
def _reward(self):
#rewards is height of target object
blockPos,blockOrn=p.getBasePositionAndOrientation(self.blockUid)
closestPoints = p.getClosestPoints(self.blockUid,self._kuka.kukaUid,1000, -1, self._kuka.kukaEndEffectorIndex)
reward = -1000
numPt = len(closestPoints)
#print(numPt)
if (numPt>0):
#print("reward:")
reward = -closestPoints[0][8]*10
if (blockPos[2] >0.2):
#print("grasped a block!!!")
#print("self._envStepCounter")
#print(self._envStepCounter)
reward = reward+1000
#print("reward")
#print(reward)
return reward
if parse_version(gym.__version__)>=parse_version('0.9.6'):
render = _render
reset = _reset
seed = _seed
step = _step
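# Usage sketch (illustrative; requires pybullet and its bundled URDF data):
#
#     env = KukaCamGymEnv(renders=False, isDiscrete=False)
#     obs = env.reset()
#     obs, reward, done, info = env.step(env.action_space.sample())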
2820
from __future__ import division, print_function
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http://www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
## Web server components
import dash_core_components as dcc
import dash_html_components as html
import base64
import os
## Date/time components
import pandas as pd
import datetime
from datetime import timedelta
from collections import OrderedDict
from pandas.tseries.offsets import *
from tcapy.vis.layoutdash import LayoutDash
########################################################################################################################
class LayoutDashImplGen(LayoutDash):
"""This implements the LayoutDash abstract class, to create the web based GUI for the tcapy application. It creates two
web pages
- detailed_page - for doing detailed tcapy analysis for a specific currency pair
- aggregated_page - for more aggregated style analysis across multiple currency pairs and over multiple time periods
"""
def __init__(self, app=None, constants=None, url_prefix=''):
super(LayoutDashImplGen, self).__init__(app=app, constants=constants, url_prefix=url_prefix)
available_dates = pd.date_range(
datetime.datetime.today().date() - timedelta(days=self._constants.gui_lookback_window),
datetime.datetime.today().date(), freq=BDay())
times = pd.date_range("0:00", "23:59", freq="15min")
### create the possible values for drop down boxes on both pages
# Reverse date list (for both detailed and aggregated pages)
self.available_dates = [x.date() for x in available_dates[::-1]]
# For detailed page only
self.available_times = [t.strftime("%H:%M") for t in times]
self.available_tickers = self._constants.available_tickers_dictionary['All']
self.available_venues = self._constants.available_venues_dictionary['All']
self.available_brokers = self._constants.available_brokers_dictionary['All']
self.available_algos = self._constants.available_algos_dictionary['All']
self.available_market_data = self._constants.available_market_data
self.available_order_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'arrival', 'twap', 'vwap',
'buy trade', 'sell trade']
self.available_execution_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'buy trade', 'sell trade']
self.available_slippage_bounds = ['0.25', '0.5', '1.0', '1.25', '1.5', '2.0', 'bid/ask']
# For aggregated page only
self.available_grouped_tickers = self._flatten_dictionary(self._constants.available_tickers_dictionary)
self.available_grouped_venues = self._flatten_dictionary(self._constants.available_venues_dictionary)
self.available_grouped_brokers = self._flatten_dictionary(self._constants.available_brokers_dictionary)
self.available_grouped_algos = self._flatten_dictionary(self._constants.available_algos_dictionary)
self.available_event_types = self._constants.available_event_types
self.available_metrics = self._constants.available_metrics
self.available_reload = ['no', 'yes']
self.available_visualization = ['yes', 'no']
self.construct_layout()
def _flatten_dictionary(self, dictionary):
available = dictionary['All']
available_groups = self._util_func.dict_key_list(dictionary.keys())
return self.flatten_list_of_strings([available_groups, available])
def construct_layout(self):
self.page_content = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
link_bar_dict = {'Detailed' : 'detailed',
'Aggregated' : 'aggregated',
'Compliance' : 'compliance'}
trade_outliers_cols = ['Date', 'ticker', 'side', 'notional cur', 'benchmark', 'exec not',
'exec not in rep cur', 'slippage']
broker_cols = ['Date', 'by broker notional (rep cur)']
# Main page for detailed analysing of (eg. over the course of a few days)
self.pages['detailed'] = html.Div([
self._sc.header_bar('FX: Detailed - Trader Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='detailed-status'), margin_left=5),
self._sc.horizontal_bar(),
# Dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id={'start-date-val' : self.available_dates,
'start-time-val' : self.available_times},
prefix_id='detailed'),
self._sc.drop_down(caption='Finish Date', id=OrderedDict([('finish-date-val', self.available_dates),
('finish-time-val', self.available_times)]),
prefix_id='detailed'),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='detailed',
drop_down_values=self.available_tickers),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='detailed',
drop_down_values=self.available_grouped_brokers),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='detailed',
drop_down_values=self.available_grouped_algos),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='detailed',
drop_down_values=self.available_grouped_venues),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='detailed',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='detailed',
drop_down_values=self.available_metrics)
]),
self._sc.horizontal_bar(),
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='detailed'),
# self.button(caption = 'Print PDF', id = 'detailed-print-pdf-button', className = 'no-print'),
# Orders
self._sc.horizontal_bar(),
self._sc.plot(caption='Orders: Timeline', id='order-candle-timeline-plot', prefix_id='detailed',
element_add=self._sc.timeline_dropdown('detailed-order-candle-timeline-plot',
self.available_order_plot_lines),
downloadplot_caption='Download CSV',
downloadplot_tag='order-candle-timeline-download-link',
download_file='download_order_candle_timeline', height=500),
self._sc.plot(caption='Orders: Markout', id='order-markout-plot', prefix_id='detailed', height=500),
self._sc.plot(caption='Orders: Histogram vs PDF fit', id='order-dist-plot', prefix_id='detailed', height=500),
# Execution trades
self._sc.horizontal_bar(),
self._sc.plot(caption='Executions: Timeline', id='execution-candle-timeline-plot', prefix_id='detailed',
element_add=self._sc.timeline_dropdown('detailed-execution-candle-timeline-plot',
self.available_execution_plot_lines),
downloadplot_caption='Download CSV',
downloadplot_tag='execution-candle-timeline-download-link',
download_file='download_execution_candle_timeline.csv', height=500),
self._sc.plot(caption='Executions: Markout', id='execution-markout-plot', prefix_id='detailed', height=500),
self._sc.plot(caption='Executions: Histogram vs PDF fit', id='execution-dist-plot', prefix_id='detailed', height=500),
# Detailed tcapy markout table for executions
html.Div([
html.H3('Executions: Markout Table'),
html.Div(id='detailed-execution-table')
],
style={'width': '1000px', 'display': 'inline-block', 'marginBottom': 5, 'marginTop': 5, 'marginLeft': 5,
'marginRight': 5}),
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
################################################################################################################
# Secondary page for analysing aggregated statistics over long periods of time, eg. who is the best broker?
self.pages['aggregated'] = html.Div([
self._sc.header_bar('FX: Aggregated - Trader Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='aggregated-status'), margin_left=5),
self._sc.horizontal_bar(),
# dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='aggregated',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='aggregated',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_tickers, multiselect=True),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_brokers, multiselect=True),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_algos, multiselect=True),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_venues, multiselect=True),
self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='aggregated',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='aggregated',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Event Type', id='event-type-val', prefix_id='aggregated',
drop_down_values=self.available_event_types),
self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='aggregated',
drop_down_values=self.available_metrics),
]),
self._sc.horizontal_bar(),
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='aggregated'),
# , msg_id='aggregated-status'),
self._sc.horizontal_bar(),
# self.date_picker_range(caption='Start/Finish Dates', id='aggregated-date-val', offset=[-7,-1]),
self._sc.plot(caption='Aggregated Trader: Summary',
id=['execution-by-ticker-bar-plot', 'execution-by-venue-bar-plot'], prefix_id='aggregated', height=500),
self._sc.horizontal_bar(),
self._sc.plot(caption='Aggregated Trader: Timeline', id='execution-by-ticker-timeline-plot',
prefix_id='aggregated', height=500),
self._sc.horizontal_bar(),
self._sc.plot(caption='Aggregated Trader: PDF fit (' + self._constants.reporting_currency + ' notional)', id=['execution-by-ticker-dist-plot',
'execution-by-venue-dist-plot'],
prefix_id='aggregated', height=500),
self._sc.horizontal_bar()
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
################################################################################################################
self.pages['compliance'] = html.Div([
self._sc.header_bar('FX: Compliance Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='compliance-status'), margin_left=5),
self._sc.horizontal_bar(),
# Dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='compliance',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='compliance',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='compliance',
drop_down_values=self.available_grouped_tickers, multiselect=True),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='compliance',
drop_down_values=self.available_grouped_brokers, multiselect=True),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='compliance',
drop_down_values=self.available_grouped_algos, multiselect=True),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='compliance',
drop_down_values=self.available_grouped_venues, multiselect=True),
self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='compliance',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='compliance',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Filter by Time', id='filter-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Start Time of Day', id='start-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_times),
self._sc.drop_down(caption='Finish Time of Day', id='finish-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_times),
self._sc.drop_down(caption='Slippage to Mid (bp)', id='slippage-bounds-val', prefix_id='compliance',
drop_down_values=self.available_slippage_bounds),
self._sc.drop_down(caption='Visualization', id='visualization-val', prefix_id='compliance',
drop_down_values=self.available_visualization)
]),
self._sc.horizontal_bar(),
html.Div([
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='compliance'),
# self.date_picker(caption='Start Date', id='start-date-dtpicker', prefix_id='compliance'),
# self.date_picker(caption='Finish Date', id='finish-date-dtpicker', prefix_id='compliance'),
]),
self._sc.horizontal_bar(),
self._sc.table(caption='Compliance: Trade Outliers', id='execution-by-anomalous-table', prefix_id='compliance',
columns=trade_outliers_cols,
downloadplot_caption='Trade outliers CSV',
downloadplot_tag='execution-by-anomalous-download-link',
download_file='download_execution_by_anomalous.csv'),
self._sc.table(caption='Compliance: Totals by Broker', id='summary-by-broker-table', prefix_id='compliance',
columns=broker_cols,
downloadplot_caption='Download broker CSV',
downloadplot_tag='summary-by-broker-download-link',
download_file='download_broker.csv'
),
self._sc.horizontal_bar()
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
# ID flags
self.id_flags = {
# Detailed trader page
# 'timeline_trade_orders' : {'client-orders': 'order', 'executions': 'trade'},
# 'markout_trade_orders' : {'client-orders': 'order_df', 'executions': 'trade_df'},
'detailed_candle_timeline_trade_order': {'execution': 'sparse_market_trade_df',
'order': 'sparse_market_order_df'},
'detailed_markout_trade_order': {'execution': 'trade_df', 'order': 'order_df'},
'detailed_table_trade_order': {'execution': 'table_trade_df_markout_by_all'},
'detailed_dist_trade_order': {'execution': 'dist_trade_df_by/pdf/side', 'order': 'dist_order_df_by/pdf/side'},
'detailed_download_link_trade_order': {'execution-candle-timeline': 'sparse_market_trade_df',
'order-candle-timeline': 'sparse_market_order_df'},
# Aggregated trader page
'aggregated_bar_trade_order': {'execution-by-ticker': 'bar_trade_df_by/mean/ticker',
'execution-by-venue': 'bar_trade_df_by/mean/venue'},
'aggregated_timeline_trade_order': {'execution-by-ticker': 'timeline_trade_df_by/mean_date/ticker',
'execution-by-venue': 'timeline_trade_df_by/mean_date/venue'},
'aggregated_dist_trade_order': {'execution-by-ticker': 'dist_trade_df_by/pdf/ticker',
'execution-by-venue': 'dist_trade_df_by/pdf/venue'},
# Compliance page
'compliance_metric_table_trade_order':
{'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all',
'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'},
'compliance_download_link_trade_order':
{'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all',
'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'},
}
2821
import pytest
import stk
from ...case_data import CaseData
@pytest.fixture(
scope='session',
params=(
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicKagome(
building_blocks=(
stk.BuildingBlock(
smiles='BrC1=C(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles=(
'Br[C+]1C2(Br)[C+]=N[C+]2[C+](Br)[C+]('
'Br)[C+2]1'
),
functional_groups=[stk.BromoFactory()],
),
),
lattice_size=(2, 2, 1),
),
),
smiles=(
'[C+]1=NC2=C1[C+]1[C+]3[C+2][C+]4C5=C(N=[C+]5)C56[C+]='
'N[C+]5[C+]5C7=C([C+]=N7)[C+]7[C+]8[C+2][C+]9C%10=C(N='
'[C+]%10)[C+]%10[C+2][C+]%11C%12=C([C+]=N%12)[C+]%12[C'
'+]%13[C+2][C+]%14C%15=C(N=[C+]%15)C%15%16[C+]=N[C+]%1'
'5[C+]%15C%17=C([C+]=N%17)[C+]%17[C+]%18[C+2][C+]%19C%'
'20=C(N=[C+]%20)[C+]%20[C+2][C+]2[C+]2C%21=C([C+]=N%21'
')[C+]%21[C+]([C+2][C+](C%22=C(N=[C+]%22)[C+]%16[C+2]['
'C+]%15C%15=C([C+]=N%15)[C+]%15[C+]([C+2][C+](C%16=C(N'
'=[C+]%16)C%10%16[C+]=N[C+]%16[C+]%11C%10=C([C+]=N%10)'
'[C+]%10[C+]([C+2][C+](C%11=C(N=[C+]%11)[C+]6[C+2][C+]'
'5C5=C([C+]=N5)[C+]5[C+]([C+2][C+](C6=C(N=[C+]6)C%206['
'C+]=N[C+]26)C2([C+]=N[C+]52)C2=C%18N=[C+]2)C2=C(N=[C+'
']2)C92[C+]=N[C+]72)C2([C+]=N[C+]%102)C2=C%13[C+]=N2)C'
'2=C([C+]=N2)C42[C+]=N[C+]12)C1([C+]=N[C+]%151)C1=C8N='
'[C+]1)C1=C(N=[C+]1)C%191[C+]=N[C+]%171)C1([C+]=N[C+]%'
'211)C1=C3[C+]=N1)C1=C([C+]=N1)C%141[C+]=N[C+]%121'
),
name=name,
),
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicKagome(
building_blocks=(
stk.BuildingBlock(
smiles='BrC1=C(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles=(
'Br[C+]1C2(Br)[C+]=N[C+]2[C+](Br)[C+]('
'Br)[C+2]1'
),
functional_groups=[stk.BromoFactory()],
),
),
lattice_size=(2, 2, 1),
optimizer=stk.PeriodicCollapser(),
),
),
smiles=(
'[C+]1=NC2=C1[C+]1[C+]3[C+2][C+]4C5=C(N=[C+]5)C56[C+]='
'N[C+]5[C+]5C7=C([C+]=N7)[C+]7[C+]8[C+2][C+]9C%10=C(N='
'[C+]%10)[C+]%10[C+2][C+]%11C%12=C([C+]=N%12)[C+]%12[C'
'+]%13[C+2][C+]%14C%15=C(N=[C+]%15)C%15%16[C+]=N[C+]%1'
'5[C+]%15C%17=C([C+]=N%17)[C+]%17[C+]%18[C+2][C+]%19C%'
'20=C(N=[C+]%20)[C+]%20[C+2][C+]2[C+]2C%21=C([C+]=N%21'
')[C+]%21[C+]([C+2][C+](C%22=C(N=[C+]%22)[C+]%16[C+2]['
'C+]%15C%15=C([C+]=N%15)[C+]%15[C+]([C+2][C+](C%16=C(N'
'=[C+]%16)C%10%16[C+]=N[C+]%16[C+]%11C%10=C([C+]=N%10)'
'[C+]%10[C+]([C+2][C+](C%11=C(N=[C+]%11)[C+]6[C+2][C+]'
'5C5=C([C+]=N5)[C+]5[C+]([C+2][C+](C6=C(N=[C+]6)C%206['
'C+]=N[C+]26)C2([C+]=N[C+]52)C2=C%18N=[C+]2)C2=C(N=[C+'
']2)C92[C+]=N[C+]72)C2([C+]=N[C+]%102)C2=C%13[C+]=N2)C'
'2=C([C+]=N2)C42[C+]=N[C+]12)C1([C+]=N[C+]%151)C1=C8N='
'[C+]1)C1=C(N=[C+]1)C%191[C+]=N[C+]%171)C1([C+]=N[C+]%'
'211)C1=C3[C+]=N1)C1=C([C+]=N1)C%141[C+]=N[C+]%121'
),
name=name,
),
),
)
def cof_periodic_kagome(request) -> CaseData:
return request.param(
f'{request.fixturename}{request.param_index}',
)
2841
import torch.utils.data as data
import numpy as np
from imageio import imread
from path import Path
import pdb
def crawl_folders(folders_list):
imgs = []
depth = []
for folder in folders_list:
current_imgs = sorted(folder.files('*.jpg'))
current_depth = []
for img in current_imgs:
d = img.dirname()/(img.name[:-4] + '.npy')
assert(d.isfile()), "depth file {} not found".format(str(d))
            current_depth.append(d)
imgs.extend(current_imgs)
depth.extend(current_depth)
return imgs, depth
def load_as_float(path):
return imread(path).astype(np.float32)
class ValidationSet(data.Dataset):
"""A sequence data loader where the files are arranged in this way:
root/scene_1/0000000.jpg
root/scene_1/0000000.npy
root/scene_1/0000001.jpg
root/scene_1/0000001.npy
..
root/scene_2/0000000.jpg
root/scene_2/0000000.npy
.
transform functions must take in a list a images and a numpy array which can be None
"""
def __init__(self, root, transform=None):
self.root = Path(root)
scene_list_path = self.root/'val.txt'
self.scenes = [self.root/folder[:-1] for folder in open(scene_list_path)]
self.imgs, self.depth = crawl_folders(self.scenes)
self.transform = transform
def __getitem__(self, index):
img = load_as_float(self.imgs[index])
depth = np.load(self.depth[index]).astype(np.float32) #;pdb.set_trace()
if self.transform is not None:
            img, _, _ = self.transform([img], depth, None)  # depth only fills the shared compose transform; its output is not needed
img = img[0]
return img, depth
def __len__(self):
return len(self.imgs)
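# Usage sketch (illustrative; '/data/scenes' is a hypothetical root containing a val.txt
# that lists scene folders with paired .jpg/.npy files as described in the docstring):
#
#     val_set = ValidationSet('/data/scenes', transform=None)
#     img, depth = val_set[0]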
2848
from flask import Flask
from flask_cors import CORS
from flask_graphql import GraphQLView
from schema import Schema
def create_app(**kwargs):
app = Flask(__name__)
app.debug = True
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view('graphql', schema=Schema, **kwargs)
)
return app
if __name__ == '__main__':
app = create_app(graphiql=True)
CORS(app, resources={r'/graphql': {'origins': '*'}})
app.run()
2863
from __future__ import print_function
try:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except ImportError:
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
import hou
from hammer_tools.utils import createAction
def isRevertToDefaultEvent(event):
return event.modifiers() == Qt.ControlModifier and event.button() == Qt.MiddleButton
class Slider(QSlider):
def __init__(self, orientation=Qt.Horizontal, parent=None):
super(Slider, self).__init__(orientation, parent)
self.defaultValue = 0
self.valueLadderMode = False
def revertToDefault(self):
self.setValue(self.defaultValue)
def setDefaultValue(self, value, reset=True):
self.defaultValue = value
if reset:
self.revertToDefault()
def mousePressEvent(self, event):
if False: # Type hint
event = QMouseEvent
if event.button() == Qt.MiddleButton:
return
elif event.button() == Qt.LeftButton:
event = QMouseEvent(QEvent.MouseButtonPress, event.pos(),
Qt.MiddleButton, Qt.MiddleButton, Qt.NoModifier)
super(Slider, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
if False: # Type hint
event = QMouseEvent
if not self.valueLadderMode and event.buttons() == Qt.MiddleButton:
try:
hou.ui.openValueLadder(self.value(), self.setValue, data_type=hou.valueLadderDataType.Int)
except hou.OperationFailed:
return
else:
self.valueLadderMode = True
elif self.valueLadderMode:
hou.ui.updateValueLadder(event.globalX(), event.globalY(),
bool(event.modifiers() & Qt.AltModifier),
bool(event.modifiers() & Qt.ShiftModifier))
else:
super(Slider, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
if False: # Type hint
event = QMouseEvent
if self.valueLadderMode and event.button() == Qt.MiddleButton:
hou.ui.closeValueLadder()
self.valueLadderMode = False
elif isRevertToDefaultEvent(event):
self.revertToDefault()
else:
super(Slider, self).mouseReleaseEvent(event)
class SearchField(QComboBox):
def __init__(self, parent=None):
super(SearchField, self).__init__(parent)
self.setEditable(True)
edit = self.lineEdit()
edit.setPlaceholderText('Search...')
edit.installEventFilter(self)
edit.setFont(QFont('Segoe UI'))
self.setFixedHeight(26)
comp = self.completer()
comp.setCompletionMode(QCompleter.PopupCompletion)
comp.setFilterMode(Qt.MatchContains)
comp.setModelSorting(QCompleter.CaseInsensitivelySortedModel)
comp.setMaxVisibleItems(5)
popup = comp.popup()
popup.setStyleSheet(hou.qt.styleSheet())
def mouseReleaseEvent(self, event):
if False: # Type hint
event = QMouseEvent
if isRevertToDefaultEvent(event):
self.clearEditText()
def eventFilter(self, watched, event):
if False: # Type hint
watched = QObject
event = QEvent
if watched == self.lineEdit():
if event.type() == QEvent.MouseButtonRelease and isRevertToDefaultEvent(event):
self.clearEditText()
event.accept()
return True
return False
def keyPressEvent(self, event):
if False: # Type hint
event = QKeyEvent
key = event.key()
mod = event.modifiers()
if mod == Qt.NoModifier and key == Qt.Key_Escape:
self.clearEditText()
else:
super(SearchField, self).keyPressEvent(event)
def hidePopup(self):
super(SearchField, self).hidePopup()
self.lineEdit().setFocus()
link_or_state_icon = 'BUTTONS_link'
embedded_icon = 'BUTTONS_pinned'
class BrowserMode(QStandardItemModel):
def __init__(self):
super(BrowserMode, self).__init__()
class BrowserTreeView(QTreeView):
def __init__(self, parent=None):
super(BrowserTreeView, self).__init__(parent)
self.setAlternatingRowColors(True)
class BrowserTableView(QListView):
def __init__(self, parent=None):
super(BrowserTableView, self).__init__(parent)
self.setViewMode(QListView.IconMode)
self.setResizeMode(QListView.Adjust)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)
self.setIconSize(QSize(120, 90))
self.setUniformItemSizes(True)
self.setContextMenuPolicy(Qt.CustomContextMenu)
class ContentBrowser(QWidget):
def __init__(self, parent=None):
super(ContentBrowser, self).__init__(parent)
self.setWindowTitle('Content Browser')
self.setProperty('houdiniStyle', True)
topLayout = QHBoxLayout()
topLayout.setContentsMargins(4, 4, 4, 2)
topLayout.setSpacing(2)
self.refreshButton = QPushButton()
self.refreshButton.setFixedSize(26, 26)
self.refreshButton.setToolTip('Update\tF5')
self.refreshButton.setIcon(hou.qt.Icon('BUTTONS_reload', 18, 18))
self.refreshButton.setIconSize(QSize(18, 18))
topLayout.addWidget(self.refreshButton)
sep = hou.qt.Separator()
if False: # Type hint
sep = QFrame
sep.setFixedWidth(2)
sep.setFrameShape(QFrame.VLine)
topLayout.addWidget(sep)
viewModeButtonGroup = QButtonGroup(self)
viewModeButtonGroup.setExclusive(True)
self.treeViewButton = QPushButton()
self.treeViewButton.setFixedSize(26, 26)
self.treeViewButton.setToolTip('Tree View\t\tCtrl+1')
self.treeViewButton.setIcon(hou.qt.Icon('BUTTONS_tree', 18, 18))
self.treeViewButton.setIconSize(QSize(18, 18))
self.treeViewButton.setCheckable(True)
viewModeButtonGroup.addButton(self.treeViewButton)
topLayout.addWidget(self.treeViewButton)
self.tableViewButton = QPushButton()
self.tableViewButton.setFixedSize(26, 26)
self.tableViewButton.setToolTip('Table View\tCtrl+2')
self.tableViewButton.setIcon(hou.qt.Icon('NETVIEW_shape_palette', 18, 18))
self.tableViewButton.setIconSize(QSize(18, 18))
self.tableViewButton.setCheckable(True)
self.tableViewButton.toggle()
viewModeButtonGroup.addButton(self.tableViewButton)
topLayout.addWidget(self.tableViewButton)
topLayout.addWidget(sep)
self.searchField = SearchField()
self.searchField.setToolTip('Search\tCtrl+F, F3')
topLayout.addWidget(self.searchField)
searchModeButtonGroup = QButtonGroup(self)
searchModeButtonGroup.setExclusive(True)
self.wholeSearchButton = QPushButton()
self.wholeSearchButton.setFixedSize(26, 26)
self.wholeSearchButton.setCheckable(True)
self.wholeSearchButton.setToolTip('Whole word search')
self.wholeSearchButton.setIcon(hou.qt.Icon('VOP_titlecase', 18, 18))
self.wholeSearchButton.setIconSize(QSize(18, 18))
searchModeButtonGroup.addButton(self.wholeSearchButton)
topLayout.addWidget(self.wholeSearchButton)
self.fuzzySearchButton = QPushButton()
self.fuzzySearchButton.setFixedSize(26, 26)
self.fuzzySearchButton.setCheckable(True)
self.fuzzySearchButton.toggle()
self.fuzzySearchButton.setToolTip('Fuzzy search')
self.fuzzySearchButton.setIcon(hou.qt.Icon('VOP_endswith', 18, 18))
self.fuzzySearchButton.setIconSize(QSize(18, 18))
searchModeButtonGroup.addButton(self.fuzzySearchButton)
topLayout.addWidget(self.fuzzySearchButton)
self.patternSearchButton = QPushButton()
self.patternSearchButton.setFixedSize(26, 26)
self.patternSearchButton.setCheckable(True)
self.patternSearchButton.setToolTip('Search by Pattern')
self.patternSearchButton.setIcon(hou.qt.Icon('VOP_isalpha', 18, 18))
self.patternSearchButton.setIconSize(QSize(18, 18))
searchModeButtonGroup.addButton(self.patternSearchButton)
topLayout.addWidget(self.patternSearchButton)
self.regexSearchButton = QPushButton()
self.regexSearchButton.setFixedSize(26, 26)
self.regexSearchButton.setCheckable(True)
self.regexSearchButton.setToolTip('Search by Regular Expression')
self.regexSearchButton.setIcon(hou.qt.Icon('VOP_regex_match', 18, 18))
self.regexSearchButton.setIconSize(QSize(18, 18))
searchModeButtonGroup.addButton(self.regexSearchButton)
topLayout.addWidget(self.regexSearchButton)
topLayout.addWidget(sep)
topLayout.addWidget(hou.qt.HelpButton('/hammer/content_browser', 'Show Help\tF1'))
middleLayout = QHBoxLayout()
middleLayout.setContentsMargins(4, 0, 0, 4)
middleLayout.setSpacing(4)
self.viewLayout = QStackedLayout(middleLayout)
model = QFileSystemModel()
model.setRootPath('C:/')
treeView = BrowserTreeView()
treeView.setModel(model)
treeView.setRootIndex(model.index('C:/'))
self.viewLayout.addWidget(treeView)
tableView = BrowserTableView()
tableView.setModel(model)
tableView.setRootIndex(model.index('C:/'))
tableView.setSelectionModel(treeView.selectionModel())
self.viewLayout.addWidget(tableView)
self.viewLayout.setCurrentIndex(1)
self.treeViewButton.clicked.connect(self.switchToTreeView)
self.addAction(createAction(self, 'Tree View', self.switchToTreeView, shortcut='Ctrl+1'))
self.tableViewButton.clicked.connect(self.switchToTableView)
self.addAction(createAction(self, 'Table View', self.switchToTableView, shortcut='Ctrl+2'))
bottomLayout = QHBoxLayout()
bottomLayout.setContentsMargins(4, 0, 4, 4)
bottomLayout.setSpacing(2)
settingsButton = QPushButton()
settingsButton.setFixedSize(26, 26)
settingsButton.setToolTip('Settings')
settingsButton.setIcon(hou.qt.Icon('BUTTONS_gear_mini', 18, 18))
settingsButton.setIconSize(QSize(18, 18))
bottomLayout.addWidget(settingsButton)
spacer = QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Ignored)
bottomLayout.addSpacerItem(spacer)
self.scaleSlider = Slider()
self.scaleSlider.setDefaultValue(50)
self.scaleSlider.setFixedWidth(120)
self.scaleSlider.valueChanged.connect(lambda v: tableView.setIconSize(QSize(120, 90) * v / 100))
bottomLayout.addWidget(self.scaleSlider)
mainLayout = QVBoxLayout(self)
mainLayout.setContentsMargins(0, 0, 0, 0)
mainLayout.setSpacing(4)
mainLayout.addLayout(topLayout)
mainLayout.addLayout(middleLayout)
mainLayout.addLayout(bottomLayout)
def switchToTreeView(self):
self.viewLayout.setCurrentIndex(0)
self.scaleSlider.hide()
self.treeViewButton.setChecked(True)
def switchToTableView(self):
self.viewLayout.setCurrentIndex(1)
self.scaleSlider.show()
self.tableViewButton.setChecked(True)
def keyPressEvent(self, event):
if False: # Type hint
event = QKeyEvent
key = event.key()
mod = event.modifiers()
if mod == Qt.NoModifier and key == Qt.Key_F5:
pass
elif mod == Qt.ControlModifier and key == Qt.Key_F:
self.searchField.setFocus()
elif mod == Qt.NoModifier and key == Qt.Key_F3:
self.searchField.setFocus()
elif mod == Qt.ControlModifier and key == Qt.Key_Equal:
pass
elif mod == Qt.ControlModifier and key == Qt.Key_Minus:
pass
elif mod == Qt.ControlModifier and key == Qt.Key_1:
pass
elif mod == Qt.ControlModifier and key == Qt.Key_2:
pass
elif mod == Qt.NoModifier and key == Qt.Key_F1:
pass
else:
super(ContentBrowser, self).keyPressEvent(event)
if __name__ == '__main__':
app = QApplication([])
window = ContentBrowser()
window.show()
app.exec_()
2881
import json
import multiprocessing as mp
import re
from argparse import ArgumentParser
from enum import Enum, auto
import javalang
from functools import partial
PRED_TOKEN = 'PRED'
modifiers = ['public', 'private', 'protected', 'static']
class TargetType(Enum):
seq = auto()
tree = auto()
@staticmethod
def from_string(s):
try:
return TargetType[s]
except KeyError:
raise ValueError()
target_type = TargetType.seq
RE_WORDS = re.compile(r'''
# Find words in a string. Order matters!
[A-Z]+(?=[A-Z][a-z]) | # All upper case before a capitalized word
[A-Z]?[a-z]+ | # Capitalized words / all lower case
[A-Z]+ | # All upper case
\d+ | # Numbers
_ |
\" |
.+
''', re.VERBOSE)
TREE_SPLIT = re.compile(r'([(),])')
def split_subtokens(s):
    return [subtok for subtok in RE_WORDS.findall(s) if subtok != '_']
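# Illustrative example of the subtoken split (expected output of the regex above,
# modulo edge cases):
#
#     split_subtokens('getHTMLParser2')  # -> ['get', 'HTML', 'Parser', '2']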
def subtokenize(s):
failed = False
try:
tokens = list(javalang.tokenizer.tokenize(s))
    except Exception:
try:
tokens = list(javalang.tokenizer.tokenize(s + '()'))[:-2]
        except Exception:
try:
tokens = list(javalang.tokenizer.tokenize('(' + s + ')'))[1:-1]
            except Exception:
tokens = s.split()
failed = True
if failed:
return [' _ '.join(split_subtokens(i)) for i in tokens if not i in modifiers]
else:
return [' _ '.join(split_subtokens(i.value)) for i in tokens if not i.value in modifiers]
def subtokenize_tree(s):
return ' '.join([sub for sub in re.split(TREE_SPLIT, s) if len(sub) > 0])
def process_line(target_type, max_targets, max_nodes, line):
obj = json.loads(line)
left_context = obj['left_context']
right_context = obj['right_context']
target_seq = obj['target_seq']
num_targets = obj['num_targets']
num_nodes = obj['num_nodes']
if max_targets is not None and num_targets > max_targets:
return None, None
if max_nodes is not None and num_nodes > max_nodes:
return None, None
if target_type is TargetType.seq:
target_pred = ' '.join(subtokenize(target_seq)).lower()
elif target_type is TargetType.tree:
target_pred = subtokenize_tree(obj['linearized_tree'])
source = '{} {} {}'.format(' '.join(subtokenize(left_context)[-200:]).lower(), PRED_TOKEN, ' '.join(subtokenize(right_context)[:200]).lower())
return source, target_pred
def process_file(file_path, data_file_role, dataset_name, target_type, max_targets, max_nodes):
total_examples = 0
source_output_path = '{}.{}.{}.source.txt'.format(dataset_name, target_type, data_file_role)
target_output_path = '{}.{}.{}.target.txt'.format(dataset_name, target_type, data_file_role)
with open(source_output_path, 'w') as source_output_file:
with open(target_output_path, 'w') as target_output_file:
with open(file_path, 'r') as file:
subtokenize_line = partial(process_line, target_type, max_targets, max_nodes)
with mp.Pool(64) as pool:
if data_file_role in ['test', 'val']:
examples = [process_line(target_type, max_targets, max_nodes, line) for line in file]
else:
examples = pool.imap_unordered(subtokenize_line, file, chunksize=100)
#examples = [process_line(target_type, max_targets, max_nodes, line) for line in file]
for source_seq, target_seq in examples:
if source_seq is None or target_seq is None:
continue
source_output_file.write(source_seq + '\n')
target_output_file.write(target_seq + '\n')
total_examples += 1
#print(source_seq, target_seq)
print('File: ' + file_path)
print('Total examples: ' + str(total_examples))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("-trd", "--train_data", dest="train_data_path",
help="path to training data file", required=True)
parser.add_argument("-ted", "--test_data", dest="test_data_path",
help="path to test data file", required=True)
parser.add_argument("-vd", "--val_data", dest="val_data_path",
help="path to validation data file", required=True)
parser.add_argument("-o", "--output_name", dest="output_name",
help="output name - the base name for the created dataset", metavar="FILE", required=True,
default='data')
parser.add_argument("--target_type", dest="target_type", type=TargetType.from_string, choices=list(TargetType), required=True)
parser.add_argument("--max_targets", dest="max_targets", type=int, required=False, default=40)
parser.add_argument("--max_nodes", dest="max_nodes", type=int, required=False, default=None)
parser.add_argument('--local', action='store_true')
args = parser.parse_args()
train_data_path = args.train_data_path
test_data_path = args.test_data_path
val_data_path = args.val_data_path
for data_file_path, data_role in zip([train_data_path, test_data_path, val_data_path], ['train', 'test', 'val']):
process_file(file_path=data_file_path, data_file_role=data_role, dataset_name=args.output_name,
target_type=args.target_type, max_targets=args.max_targets, max_nodes=args.max_nodes)
|
2887
|
import typing
from bot.constants import BOT_REPO_URL
from discord import Embed
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from . import _issues, _profile, _source
class Github(commands.Cog):
"""
Github Category cog, which contains commands related to github.
Commands:
├ profile Fetches a user's GitHub information.
├ issue Command to retrieve issue(s) from a GitHub repository.
└ source Displays information about the bot's source code.
"""
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
@commands.group(name="github", aliases=("gh",), invoke_without_command=True)
async def github_group(self, ctx: commands.Context) -> None:
"""Commands for Github."""
await ctx.send_help(ctx.command)
@github_group.command(name="profile")
@commands.cooldown(1, 10, BucketType.user)
async def profile(self, ctx: commands.Context, username: str) -> None:
"""
Fetches a user's GitHub information.
        Username is required; if it is not provided, a MissingRequiredArgument error is raised.
"""
github_profile = _profile.GithubInfo(self.bot.http_session)
embed = await github_profile.get_github_info(username)
await ctx.send(embed=embed)
@github_group.command(name="issue", aliases=("pr",))
async def issue(
self,
ctx: commands.Context,
numbers: commands.Greedy[int],
repository: typing.Optional[str] = None,
) -> None:
"""Command to retrieve issue(s) from a GitHub repository."""
github_issue = _issues.Issues(self.bot.http_session)
if not numbers:
raise commands.MissingRequiredArgument(ctx.command.clean_params["numbers"])
if repository is None:
user = "gurkult"
else:
user, _, repository = repository.rpartition("/")
if user == "":
user = "gurkult"
embed = await github_issue.issue(ctx.message.channel, numbers, repository, user)
await ctx.send(embed=embed)
@github_group.command(name="source", aliases=("src", "inspect"))
async def source_command(
self, ctx: commands.Context, *, source_item: typing.Optional[str] = None
) -> None:
"""Displays information about the bot's source code."""
if source_item is None:
embed = Embed(title="Gurkbot's GitHub Repository")
embed.add_field(name="Repository", value=f"[Go to GitHub]({BOT_REPO_URL})")
embed.set_thumbnail(url=self.bot.user.avatar_url)
await ctx.send(embed=embed)
return
elif not ctx.bot.get_command(source_item):
raise commands.BadArgument(
f"Unable to convert `{source_item}` to valid command or Cog."
)
github_source = _source.Source(self.bot.http_session, self.bot.user.avatar_url)
embed = await github_source.inspect(cmd=ctx.bot.get_command(source_item))
await ctx.send(embed=embed)
def setup(bot: commands.Bot) -> None:
"""Load the Github cog."""
bot.add_cog(Github(bot))
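# Hedged usage note (not part of the original cog): once the extension is loaded via setup(),
# the commands are invoked with the bot's prefix. The prefix, username and repository below are
# made-up examples.
#
#   !github profile octocat
#   !gh issue 42 gurkult/gurkbot
#   !gh source profile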
|
2889
|
from typing import Any, Dict, List, Tuple
from pytezos.michelson.forge import forge_array, forge_base58, optimize_timestamp
def bump_fitness(fitness: Tuple[str, str]) -> Tuple[str, str]:
if len(fitness) == 0:
major = 0
minor = 1
else:
major = int.from_bytes(bytes.fromhex(fitness[0]), 'big')
minor = int.from_bytes(bytes.fromhex(fitness[1]), 'big') + 1
return major.to_bytes(1, 'big').hex(), minor.to_bytes(8, 'big').hex()
def forge_int_fixed(value: int, length: int) -> bytes:
return value.to_bytes(length, 'big')
def forge_command(command: str) -> bytes:
if command == 'activate':
return b'\x00'
raise NotImplementedError(command)
def forge_fitness(fitness: List[str]) -> bytes:
return forge_array(b''.join(map(lambda x: forge_array(bytes.fromhex(x)), fitness)))
def forge_priority(priority: int) -> bytes:
return priority.to_bytes(2, 'big')
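# A small illustrative sketch (not from the original module): fixed-width integers are encoded
# big-endian, the 'activate' command is a single zero byte, and forge_fitness length-prefixes
# each hex element via forge_array. The values below are made up for demonstration.
def _forge_examples():
    assert forge_int_fixed(1, 4) == b'\x00\x00\x00\x01'
    assert forge_priority(0) == b'\x00\x00'
    assert forge_command('activate') == b'\x00'
    assert bump_fitness(()) == ('00', '0000000000000001')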
def forge_content(content: Dict[str, Any]) -> bytes:
res = b''
res += forge_command(content['command'])
res += forge_base58(content['hash'])
res += forge_fitness(content['fitness'])
res += bytes.fromhex(content['protocol_parameters'])
return res
def forge_protocol_data(protocol_data: Dict[str, Any]) -> bytes:
res = b''
if protocol_data.get('content'):
res += forge_content(protocol_data['content'])
else:
res += forge_priority(protocol_data['priority'])
res += bytes.fromhex(protocol_data['proof_of_work_nonce'])
if protocol_data.get('seed_nonce_hash'):
res += b'\xFF'
res += forge_base58(protocol_data['seed_nonce_hash'])
else:
res += b'\x00'
res += b'\xFF' if protocol_data['liquidity_baking_escape_vote'] else b'\x00'
return res
def forge_block_header(shell_header: Dict[str, Any]) -> bytes:
res = forge_int_fixed(shell_header['level'], 4)
res += forge_int_fixed(shell_header['proto'], 1)
res += forge_base58(shell_header['predecessor'])
res += forge_int_fixed(optimize_timestamp(shell_header['timestamp']), 8)
res += forge_int_fixed(shell_header['validation_pass'], 1)
res += forge_base58(shell_header['operations_hash'])
res += forge_fitness(shell_header['fitness'])
res += forge_base58(shell_header['context'])
res += bytes.fromhex(shell_header['protocol_data'])
return res
|
2919
|
from __future__ import absolute_import
from redis import Redis
from rq.decorators import job
from kaneda.utils import get_backend
backend = get_backend()
@job(queue='kaneda', connection=Redis())
def report(name, metric, value, tags, id_):
"""
RQ job to report metrics to the configured backend in kanedasettings.py
To run the worker execute this command:
rqworker [queue]
"""
return backend.report(name, metric, value, tags, id_)
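# Hedged usage sketch (not part of the original module): RQ's @job decorator attaches a
# .delay() helper, so with a worker listening on the 'kaneda' queue the metric can be reported
# asynchronously. The metric name and values below are hypothetical.
#
#   report.delay('response_time', 'timing', 120, tags={'host': 'web-1'}, id_=None)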
|
2939
|
dataset_type = 'STVQADATASET'
data_root = '/home/datasets/mix_data/iMIX/'
feature_path = 'data/datasets/stvqa/defaults/features/'
ocr_feature_path = 'data/datasets/stvqa/defaults/ocr_features/'
annotation_path = 'data/datasets/stvqa/defaults/annotations/'
vocab_path = 'data/datasets/stvqa/defaults/extras/vocabs/'
train_datasets = ['train']
test_datasets = ['val']
reader_train_cfg = dict(
type='STVQAREADER',
card='default',
mix_features=dict(
train=data_root + feature_path + 'detectron.lmdb',
val=data_root + feature_path + 'detectron.lmdb',
test=data_root + feature_path + 'detectron.lmdb',
),
mix_ocr_features=dict(
train=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
val=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
test=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
),
mix_annotations=dict(
train=data_root + annotation_path + 'imdb_subtrain.npy',
val=data_root + annotation_path + 'imdb_subval.npy',
test=data_root + annotation_path + 'imdb_test_task3.npy',
),
datasets=train_datasets)
reader_test_cfg = dict(
type='STVQAREADER',
card='default',
mix_features=dict(
train=data_root + feature_path + 'detectron.lmdb',
val=data_root + feature_path + 'detectron.lmdb',
test=data_root + feature_path + 'detectron.lmdb',
),
mix_ocr_features=dict(
train=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
val=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
test=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
),
mix_annotations=dict(
train=data_root + annotation_path + 'imdb_subtrain.npy',
val=data_root + annotation_path + 'imdb_subval.npy',
test=data_root + annotation_path + 'imdb_test_task3.npy',
),
    datasets=test_datasets)
info_cpler_cfg = dict(
type='STVQAInfoCpler',
glove_weights=dict(
glove6b50d=data_root + 'glove/glove.6B.50d.txt.pt',
glove6b100d=data_root + 'glove/glove.6B.100d.txt.pt',
glove6b200d=data_root + 'glove/glove.6B.200d.txt.pt',
glove6b300d=data_root + 'glove/glove.6B.300d.txt.pt',
),
fasttext_weights=dict(
wiki300d1m=data_root + 'fasttext/wiki-news-300d-1M.vec',
wiki300d1msub=data_root + 'fasttext/wiki-news-300d-1M-subword.vec',
wiki_bin=data_root + 'fasttext/wiki.en.bin',
),
tokenizer='/home/datasets/VQA/bert/' + 'bert-base-uncased-vocab.txt',
mix_vocab=dict(
answers_st_5k=data_root + vocab_path + 'fixed_answer_vocab_stvqa_5k.txt',
vocabulary_100k=data_root + vocab_path + 'vocabulary_100k.txt',
),
max_seg_lenth=20,
max_ocr_lenth=10,
word_mask_ratio=0.0,
vocab_name='vocabulary_100k',
vocab_answer_name='answers_st_5k',
glove_name='glove6b300d',
fasttext_name='wiki_bin',
if_bert=True,
)
train_data = dict(
samples_per_gpu=16,
workers_per_gpu=1,
data=dict(type=dataset_type, reader=reader_train_cfg, info_cpler=info_cpler_cfg, limit_nums=800))
test_data = dict(
samples_per_gpu=16,
workers_per_gpu=1,
data=dict(type=dataset_type, reader=reader_test_cfg, info_cpler=info_cpler_cfg),
)
|
2943
|
import numpy as np
import copy
import combo.misc
import cPickle as pickle
from results import history
from .. import utility
from ...variable import variable
from ..call_simulator import call_simulator
from ... import predictor
from ...gp import predictor as gp_predictor
from ...blm import predictor as blm_predictor
import combo.search.score
MAX_SEACH = int(20000)
class policy:
def __init__(self, test_X, config=None):
self.predictor = None
self.training = variable()
self.test = self._set_test(test_X)
self.actions = np.arange(0, self.test.X.shape[0])
self.history = history()
self.config = self._set_config(config)
def set_seed(self, seed):
self.seed = seed
np.random.seed(self.seed)
def delete_actions(self, index, actions=None):
actions = self._set_unchosed_actions(actions)
return np.delete(actions, index)
def write(self, action, t, X=None):
if X is None:
X = self.test.X[action, :]
Z = self.test.Z[action, :] if self.test.Z is not None else None
else:
Z = self.predictor.get_basis(X) \
if self.predictor is not None else None
self.new_data = variable(X, t, Z)
self.history.write(t, action)
self.training.add(X=X, t=t, Z=Z)
def random_search(self, max_num_probes, num_search_each_probe=1,
simulator=None, is_disp=True):
N = int(num_search_each_probe)
if int(max_num_probes) * N > len(self.actions):
raise ValueError('max_num_probes * num_search_each_probe must \
be smaller than the length of candidates')
if is_disp:
utility.show_interactive_mode(simulator, self.history)
for n in xrange(0, max_num_probes):
if is_disp and N > 1:
utility.show_start_message_multi_search(self.history.num_runs)
action = self.get_random_action(N)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def bayes_search(self, training=None, max_num_probes=None,
num_search_each_probe=1,
predictor=None, is_disp=True,
simulator=None, score='TS', interval=0,
num_rand_basis=0):
if max_num_probes is None:
max_num_probes = 1
simulator = None
is_rand_expans = False if num_rand_basis == 0 else True
self.training = self._set_training(training)
if predictor is None:
self.predictor = self._init_predictor(is_rand_expans)
else:
self.predictor = predictor
N = int(num_search_each_probe)
for n in xrange(max_num_probes):
if utility.is_learning(n, interval):
self.predictor.fit(self.training, num_rand_basis)
self.test.Z = self.predictor.get_basis(self.test.X)
self.training.Z = self.predictor.get_basis(self.training.X)
self.predictor.prepare(self.training)
else:
try:
self.predictor.update(self.training, self.new_data)
except:
self.predictor.prepare(self.training)
if num_search_each_probe != 1:
utility.show_start_message_multi_search(self.history.num_runs,
score)
K = self.config.search.multi_probe_num_sampling
alpha = self.config.search.alpha
action = self.get_actions(score, N, K, alpha)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def get_score(self, mode, predictor=None, training=None, alpha=1):
self._set_training(training)
self._set_predictor(predictor)
actions = self.actions
test = self.test.get_subset(actions)
if mode == 'EI':
f = combo.search.score.EI(predictor, training, test)
elif mode == 'PI':
f = combo.search.score.PI(predictor, training, test)
elif mode == 'TS':
f = combo.search.score.TS(predictor, training, test, alpha)
else:
raise NotImplementedError('mode must be EI, PI or TS.')
return f
def get_marginal_score(self, mode, chosed_actions, N, alpha):
f = np.zeros((N, len(self.actions)))
new_test = self.test.get_subset(chosed_actions)
virtual_t \
= self.predictor.get_predict_samples(self.training, new_test, N)
for n in xrange(N):
predictor = copy.deepcopy(self.predictor)
train = copy.deepcopy(self.training)
virtual_train = new_test
virtual_train.t = virtual_t[n, :]
if virtual_train.Z is None:
train.add(virtual_train.X, virtual_train.t)
else:
train.add(virtual_train.X, virtual_train.t, virtual_train.Z)
try:
predictor.update(train, virtual_train)
except:
predictor.prepare(train)
f[n, :] = self.get_score(mode, predictor, train)
return f
def get_actions(self, mode, N, K, alpha):
f = self.get_score(mode, self.predictor, self.training, alpha)
temp = np.argmax(f)
action = self.actions[temp]
self.actions = self.delete_actions(temp)
chosed_actions = np.zeros(N, dtype=int)
chosed_actions[0] = action
for n in xrange(1, N):
f = self.get_marginal_score(mode, chosed_actions[0:n], K, alpha)
temp = np.argmax(np.mean(f, 0))
chosed_actions[n] = self.actions[temp]
self.actions = self.delete_actions(temp)
return chosed_actions
def get_random_action(self, N):
random_index = np.random.permutation(xrange(self.actions.shape[0]))
index = random_index[0:N]
action = self.actions[index]
self.actions = self.delete_actions(index)
return action
def load(self, file_history, file_training=None, file_predictor=None):
self.history.load(file_history)
if file_training is None:
N = self.history.total_num_search
X = self.test.X[self.history.chosed_actions[0:N], :]
t = self.history.fx[0:N]
self.training = variable(X=X, t=t)
else:
self.training = variable()
self.training.load(file_training)
if file_predictor is not None:
with open(file_predictor) as f:
self.predictor = pickle.load(f)
def export_predictor(self):
return self.predictor
def export_training(self):
return self.training
def export_history(self):
return self.history
def _set_predictor(self, predictor=None):
if predictor is None:
predictor = self.predictor
return predictor
def _init_predictor(self, is_rand_expans, predictor=None):
self.predictor = self._set_predictor(predictor)
if self.predictor is None:
if is_rand_expans:
self.predictor = blm_predictor(self.config)
else:
self.predictor = gp_predictor(self.config)
return self.predictor
def _set_training(self, training=None):
if training is None:
training = self.training
return training
def _set_unchosed_actions(self, actions=None):
if actions is None:
actions = self.actions
return actions
def _set_test(self, test_X):
if isinstance(test_X, np.ndarray):
test = variable(X=test_X)
elif isinstance(test_X, variable):
test = test_X
else:
            raise TypeError('test_X must be either numpy.ndarray or combo.variable')
return test
def _set_config(self, config=None):
if config is None:
config = combo.misc.set_config()
return config
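# Hedged usage sketch (not part of the original module): a typical workflow with this policy
# class is a short random exploration followed by Bayesian search. The candidate matrix and
# simulator callable below are placeholders.
#
#   test_X = np.random.rand(1000, 3)
#   p = policy(test_X)
#   p.set_seed(0)
#   p.random_search(max_num_probes=20, simulator=my_simulator)
#   p.bayes_search(max_num_probes=80, simulator=my_simulator, score='TS', interval=5)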
|
2949
|
from setuptools import setup, find_packages
setup(
name='Pokedex',
version='0.1',
zip_safe=False,
packages=find_packages(),
package_data={
'pokedex': ['data/csv/*.csv']
},
install_requires=[
'SQLAlchemy>=1.0,<2.0',
'whoosh>=2.5,<2.7',
'markdown==2.4.1',
'construct==2.5.3',
'six>=1.9.0',
],
entry_points={
'console_scripts': [
'pokedex = pokedex.main:setuptools_entry',
],
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.7",
]
)
|
2956
|
import numpy as np
import pickle
from os.path import exists, realpath
import sys
import math
from topple_data_loader import ToppleData, ToppleDataLoader
import transforms3d
class ToppleNormalizationInfo():
'''
Structure to hold all the normalization information for a dataset.
'''
def __init__(self):
# max element of any linear vel vector
self.max_lin_vel = None
# max element of any angular vel vector
self.max_ang_vel = None
# max distance between positions in two contiguous timesteps
self.max_pos = None
# max change in rotation around any axis between two contiguous timesteps (for euler rot)
self.max_rot = None
# max angle of rotation between two steps for axis-angle representation
self.max_delta_rot = None
# max 2-norm of applied impulse vector
self.force_vec_max = None
# max 2-norm of a point in an object point cloud (used for point cloud and force pos)
self.pc_max = None
# normalization values for shape-related stuff
self.density_offset = None
self.density_max = None
self.mass_offset = None
self.mass_max = None
self.inertia_offset = None
self.inertia_max = None
self.friction_offset = None
self.friction_max = None
def print_out(self):
print({'max_lin_vel' : self.max_lin_vel, 'max_ang_vel' : self.max_ang_vel, 'max_pos' : self.max_pos, \
'max_rot' : self.max_rot, 'max_delta_rot' : self.max_delta_rot, 'force_vec_max' : self.force_vec_max, 'pc_max' : self.pc_max, \
'density_off' : self.density_offset, 'density_max' : self.density_max, 'mass_off' : self.mass_offset, \
'mass_max' : self.mass_max, 'inertia_off' : self.inertia_offset, 'inertia_max' : self.inertia_max, \
'friction_off' : self.friction_offset, 'friction_max' : self.friction_max
})
def save(self, pkl_file):
''' Saves normalization info object to a specified .pkl file. '''
with open(pkl_file, 'wb') as f:
pickle.dump(self, f)
def load_from(self, pkl_file):
''' Load normalization info into this object from a specified .pkl file. '''
with open(pkl_file, 'rb') as f:
norm_info = pickle.load(f)
self.copy_from(norm_info)
def copy_from(self, norm_info):
'''
Takes values from the given normalization info object and copies them to this one
'''
self.max_lin_vel = norm_info.max_lin_vel
self.max_ang_vel = norm_info.max_ang_vel
self.max_pos = norm_info.max_pos
self.max_rot = norm_info.max_rot
try:
self.max_delta_rot = norm_info.max_delta_rot
except:
            # older versions of the data don't have max delta rot
pass
self.force_vec_max = norm_info.force_vec_max
self.pc_max = norm_info.pc_max
self.density_offset = norm_info.density_offset
self.density_max = norm_info.density_max
self.mass_offset = norm_info.mass_offset
self.mass_max = norm_info.mass_max
self.inertia_offset = norm_info.inertia_offset
self.inertia_max = norm_info.inertia_max
try:
self.friction_offset = norm_info.friction_offset
self.friction_max = norm_info.friction_max
except:
# old version doesn't have this
pass
class ToppleBatch(object):
'''
Structure to hold a single batch of data.
'''
def __init__(self, size, seq_len, num_pts):
self.size = size
self.num_steps = seq_len
self.num_pts = num_pts
self.point_cloud = np.zeros((self.size, self.num_pts, 3))
self.lin_vel = np.zeros((self.size, self.num_steps, 3))
self.ang_vel = np.zeros((self.size, self.num_steps, 3))
self.pos = np.zeros((self.size, self.num_steps, 3))
# cummulative euler angles
self.rot = np.zeros((self.size, self.num_steps, 3))
# change in rotation in quaternion rep (w, x, y, z)
self.delta_quat = np.zeros((self.size, self.num_steps, 4))
# change in rotation between steps in axis-angle rep (scaled 3 vec)
self.delta_rot = np.zeros((self.size, self.num_steps, 3))
# change in rotation between steps in split axis-angle rep (4-vec)
self.delta_rot_split = np.zeros((self.size, self.num_steps, 4))
# 0 if before topple idx, 1 if after
self.topple_label = np.zeros((self.size, self.num_steps), dtype=int)
# other meta-data not directly used in network
self.toppled = []
self.shape_name = []
self.body_friction = np.zeros((self.size))
self.mass = np.zeros((self.size))
self.scale = np.zeros((self.size, 3))
self.rot_euler = np.zeros((self.size, self.num_steps, 3))
class ToppleDataset(object):
'''
Loads toppling data and provides batches for training and model evaluation.
'''
def __init__(self, roots, norm_info_file, batch_size=32, num_steps=15, shuffle=False, num_pts=None, perturb_pts=0.0):
'''
- roots : list of directories containing data to load for this dataset
- norm_info_file : .pkl file containing normalization information
- batch_size : number of sequences to return in each batch
- num_steps : number of timesteps to return in each sequence
- shuffle : randomly shuffles the returned sequence ordering
- num_pts : the number of points to use in the returned point cloud. If None uses all points in the data.
- perturb_pts : the stdev to randomly perturb point clouds with. If None no perturbation is performed.
        '''
# settings
self.batch_size = batch_size
self.steps_per_seq = num_steps
self.shuffle = shuffle
self.perturb_std = perturb_pts
self.num_pts = num_pts
# load in data
for root in roots:
if not exists(root):
print('Could not find dataset at ' + root)
return
data_loader = ToppleDataLoader()
self.data = data_loader.load_data(roots)
if num_pts is None:
# use all the points in the point cloud
self.num_pts = self.data.point_cloud.shape[1]
# load in normalization info
if not exists(norm_info_file):
print('Could not find normalization info at ' + norm_info_file)
return
self.norm_info = ToppleNormalizationInfo()
self.norm_info.load_from(norm_info_file)
print('Loaded normalization info!')
# see if we have axis-angle info (for backwards compat)
self.use_aa = False
self.use_aa_split = False
self.use_topple_idx = False
        self.use_delta_quat = False
        self.use_body_friction = False
if len(self.data.delta_rot) > 0:
self.use_aa = True
if len(self.data.delta_rot_split) > 0:
self.use_aa_split = True
if len(self.data.topple_idx) > 0:
self.use_topple_idx = True
if len(self.data.body_friction) > 0:
self.use_body_friction = True
if len(self.data.delta_quat) > 0:
self.use_delta_quat = True
# normalize the data
print('Normalizing data...')
self.normalize_data(self.data, self.norm_info)
print('Finished normalizing!')
# order to iterate through data when returning batches (in order by default)
        self.iter_inds = list(range(0, self.data.size))  # list so it can be shuffled in place
# prepare to iterate through
self.reset()
def normalize_data(self, data, norm_info):
'''
Normalizes (in place) the given ToppleData using the ToppleNormalizationInfo.
'''
# point clouds -> [-1, 1]
data.point_cloud /= norm_info.pc_max
# force pos -> [-1, 1]
data.force_pos /= norm_info.pc_max
# force vec -> [-1, 1]
data.force_vec /= norm_info.force_vec_max
# density -> [0, 1]
data.density = (data.density - norm_info.density_offset) / norm_info.density_max
# mass -> [0, 1]
data.mass = (data.mass - norm_info.mass_offset) / norm_info.mass_max
# inertia -> [0, 1]
data.inertia = (data.inertia - norm_info.inertia_offset) / norm_info.inertia_max
# friction -> [0, 1]
if norm_info.friction_offset is not None:
data.body_friction = (data.body_friction - norm_info.friction_offset) / norm_info.friction_max
# now time sequence data
# velocities -> [-1, 1]
for i, lin_vel_steps in enumerate(data.lin_vel):
data.lin_vel[i] = [(x / norm_info.max_lin_vel) for x in lin_vel_steps]
for i, ang_vel_steps in enumerate(data.ang_vel):
data.ang_vel[i] = [(x / norm_info.max_ang_vel) for x in ang_vel_steps]
# delta position -> [-1, 1]
for i, pos_steps in enumerate(data.pos):
data.pos[i] = [(x / norm_info.max_pos) for x in pos_steps]
# delta rotation -> [-1, 1]
for i, rot_steps in enumerate(data.total_rot):
data.total_rot[i] = [(x / norm_info.max_rot) for x in rot_steps]
# delta rot axis-angle -> [-1, 1] norm
if self.use_aa:
for i, delta_rot_steps in enumerate(data.delta_rot):
data.delta_rot[i] = [(x / norm_info.max_delta_rot) for x in delta_rot_steps]
        # make axes unit length and normalize angle -> [-1, 1]
if self.use_aa_split:
for i, delta_rot_split_steps in enumerate(data.delta_rot_split):
data.delta_rot_split[i] = [np.append(x[:3] / np.linalg.norm(x[:3]), x[3] / norm_info.max_delta_rot) for x in delta_rot_split_steps]
def reset(self):
'''
Prepares to iterate through dataset.
'''
if self.shuffle:
np.random.shuffle(self.iter_inds)
# we consider an epoch as returning one sequence from every single simulation
        # (though if the sequence length is shorter than the sim length, the number of unique
        # sequences contained in the dataset is much larger than one epoch)
self.num_batches = (self.data.size + self.batch_size - 1) // self.batch_size
self.batch_idx = 0
def has_next_batch(self):
'''
Returns false if done with the current "epoch" (seen each sim once).
'''
return self.batch_idx < self.num_batches
def next_batch(self, random_window=True, focus_toppling=False):
'''
Returns the next batch of data. if random_window=True will get a random sequence of correct length (otherwise
starts at 0). If focus_toppling=True, will make sure this sequence includes the part of the sequence where toppling occurs.
'''
# size is either batch_size, or shorter if we're at the end of the data
start_idx = self.batch_idx * self.batch_size
end_idx = min((self.batch_idx + 1) * self.batch_size, self.data.size)
batch_size = end_idx - start_idx
# get batch data
batch = ToppleBatch(self.batch_size, self.steps_per_seq, self.num_pts)
for i in range(batch_size):
pc, lin_vel, ang_vel, pos, rot, delta_quat, delta_rot, delta_rot_split, topple_label, meta_info = \
self.get_seq(self.iter_inds[start_idx + i], self.steps_per_seq, random_window, focus_toppling)
batch.point_cloud[i] = pc
batch.lin_vel[i] = lin_vel
batch.ang_vel[i] = ang_vel
batch.pos[i] = pos
batch.rot[i] = rot
if self.use_delta_quat:
batch.delta_quat[i] = delta_quat
if self.use_aa:
batch.delta_rot[i] = delta_rot
if self.use_aa_split:
batch.delta_rot_split[i] = delta_rot_split
if self.use_topple_idx:
batch.topple_label[i] = topple_label
batch.toppled.append(meta_info[0])
batch.shape_name.append(meta_info[1])
batch.scale[i] = meta_info[2]
batch.rot_euler[i] = meta_info[3]
if self.use_body_friction:
batch.body_friction[i] = meta_info[4]
batch.mass[i] = meta_info[5]
if batch_size != self.batch_size:
# need to pad the end with repeat of data
for i in range(self.batch_size - batch_size):
batch.point_cloud[batch_size + i] = batch.point_cloud[i]
batch.lin_vel[batch_size + i] = batch.lin_vel[i]
batch.ang_vel[batch_size + i] = batch.ang_vel[i]
batch.pos[batch_size + i] = batch.pos[i]
batch.rot[batch_size + i] = batch.rot[i]
if self.use_delta_quat:
batch.delta_quat[batch_size + i] = batch.delta_quat[i]
batch.toppled.append(batch.toppled[i])
batch.shape_name.append(batch.shape_name[i])
batch.scale[batch_size + i] = batch.scale[i]
batch.rot_euler[batch_size + i] = batch.rot_euler[i]
batch.mass[batch_size + i] = batch.mass[i]
if self.use_aa:
batch.delta_rot[batch_size + i] = batch.delta_rot[i]
if self.use_aa_split:
batch.delta_rot_split[batch_size + i] = batch.delta_rot_split[i]
if self.use_topple_idx:
batch.topple_label[batch_size + i] = batch.topple_label[i]
if self.use_body_friction:
batch.body_friction[batch_size + i] = batch.body_friction[i]
self.batch_idx += 1
return batch
def get_seq(self, idx, num_steps, random_window=True, focus_toppling=False):
'''
Returns a random contiguous sequence from the simulation at the given idx and length num_steps.
        If num_steps > sim_length, the final (num_steps - sim_length) steps are padded with the
        value at the last simulated step.
'''
# get the normalized canonical point cloud for this simulation
pc = np.copy(self.data.point_cloud[self.data.shape_idx[idx]])
scale = self.data.scale[idx]
# scale accordingly
pc *= np.reshape(scale, (1, -1))
# randomly perturb point cloud
pc += np.random.normal(0.0, self.perturb_std, pc.shape)
# randomly draw a subset of points if desired
if self.num_pts < pc.shape[0]:
pc_inds = np.random.choice(pc.shape[0], self.num_pts, replace=False)
pc = pc[pc_inds, :]
# randomly choose a size num_steps sequence from the simulation to return time-series data
total_steps = len(self.data.lin_vel[idx])
max_start_step = total_steps - num_steps
start_step = 0
if max_start_step < 0:
# simulation is shorter than desired sequence length
pad_len = abs(max_start_step)
lin_vel_list = self.data.lin_vel[idx]
lin_vel_out = np.array(lin_vel_list + [lin_vel_list[-1]]*pad_len)
ang_vel_list = self.data.ang_vel[idx]
ang_vel_out = np.array(ang_vel_list + [ang_vel_list[-1]]*pad_len)
pos_list = self.data.pos[idx]
pos_out = np.array(pos_list + [pos_list[-1]]*pad_len)
rot_list = self.data.total_rot[idx]
rot_out = np.array(rot_list + [rot_list[-1]]*pad_len)
if self.use_delta_quat:
delta_quat_list = self.data.delta_quat[idx]
delta_quat_out = np.array(delta_quat_list + [delta_quat_list[-1]]*pad_len)
euler_rot_list = self.data.rot_euler[idx]
euler_rot_out = np.array(euler_rot_list + [euler_rot_list[-1]]*pad_len)
if self.use_aa:
delta_rot_list = self.data.delta_rot[idx]
delta_rot_out = np.array(delta_rot_list + [delta_rot_list[-1]]*pad_len)
if self.use_aa_split:
delta_rot_split_list = self.data.delta_rot_split[idx]
delta_rot_split_out = np.array(delta_rot_split_list + [delta_rot_split_list[-1]]*pad_len)
if self.use_topple_idx:
topple_label_out = np.zeros((total_steps + pad_len), dtype=int)
seq_topple_idx = self.data.topple_idx[idx]
if seq_topple_idx > 0:
topple_label_out[seq_topple_idx:] = 1
else:
start_step = 0
if random_window:
if focus_toppling and self.data.toppled[idx]:
# choose window around the index where it topples
topple_idx = self.data.topple_idx[idx]
min_idx = max([topple_idx - num_steps + 1, 0])
if min_idx >= max_start_step:
# just pick the max index
start_step = max_start_step
else:
# our window is guaranteed to see some part of toppling
start_step = np.random.randint(min_idx, max_start_step+1)
else:
start_step = np.random.randint(0, max_start_step+1)
end_step = start_step + num_steps
# print('Range: %d, %d' % (start_step, end_step))
lin_vel_out = np.array(self.data.lin_vel[idx][start_step:end_step])
ang_vel_out = np.array(self.data.ang_vel[idx][start_step:end_step])
pos_out = np.array(self.data.pos[idx][start_step:end_step])
rot_out = np.array(self.data.total_rot[idx][start_step:end_step])
if self.use_delta_quat:
delta_quat_out = np.array(self.data.delta_quat[idx][start_step:end_step])
euler_rot_out = np.array(self.data.rot_euler[idx][start_step:end_step])
if self.use_aa:
delta_rot_out = np.array(self.data.delta_rot[idx][start_step:end_step])
if self.use_aa_split:
delta_rot_split_out = np.array(self.data.delta_rot_split[idx][start_step:end_step])
if self.use_topple_idx:
topple_label_out = np.zeros((num_steps), dtype=int)
seq_topple_idx = self.data.topple_idx[idx]
if seq_topple_idx > 0:
if seq_topple_idx <= start_step:
topple_label_out[:] = 1
elif seq_topple_idx < end_step:
topple_label_out[seq_topple_idx-start_step:] = 1
# rotate point cloud to align with first frame of sequence
init_rot = self.data.rot_euler[idx][start_step]
xrot, yrot, zrot = np.radians(init_rot)
R = transforms3d.euler.euler2mat(zrot, xrot, yrot, axes='szxy') # unity applies euler angles in z, x, y ordering
pc = np.dot(pc, R.T)
toppled = self.data.toppled[idx]
shape_name = self.data.shape_name[idx]
mass = self.data.mass[idx]
body_fric = -1.0
if self.use_body_friction:
body_fric = self.data.body_friction[idx]
meta_info = (toppled, shape_name, scale, euler_rot_out, body_fric, mass)
if not self.use_aa:
delta_rot_out = None
if not self.use_aa_split:
delta_rot_split_out = None
if not self.use_topple_idx:
topple_label_out = None
if not self.use_delta_quat:
delta_quat_out = None
return pc, lin_vel_out, ang_vel_out, pos_out, rot_out, delta_quat_out, delta_rot_out, delta_rot_split_out, topple_label_out, meta_info
def get_norm_info(self):
return self.norm_info
if __name__=='__main__':
# norm_info = ToppleNormalizationInfo()
# norm_info.load_from('../../data/sim/normalization_info/cube_train.pkl')
# norm_info.print_out()
topple_data = ToppleDataset(roots=['./data/sim/Cube/Cube30k_ObjSplit/Cube30kVal'], norm_info_file='./data/sim/normalization_info/cube_30k.pkl', \
batch_size=5, num_steps=10, shuffle=True, num_pts=None, perturb_pts=0.01)
count = 0
while topple_data.has_next_batch():
batch = topple_data.next_batch(random_window=True, focus_toppling=False)
count += 1
# print(batch.lin_vel[0])
# print(batch.toppled[0])
# print(batch.delta_rot_split[0])
# print(batch.delta_rot[0])
# print(batch.topple_label[0])
# print(batch.pos)
# print(batch.body_friction)
# print(batch.delta_quat[0])
# print(np.degrees(2*np.arccos(batch.delta_quat[0, :, 0])))
print('Total num batches: ' + str(count))
topple_data.reset()
count = 0
while topple_data.has_next_batch():
batch = topple_data.next_batch()
count += 1
print(batch.size)
print('Total num batches: ' + str(count))
|
2968
|
from bc4py_extension import PyAddress
import hashlib
def is_address(ck: PyAddress, hrp, ver):
"""check bech32 format and version"""
try:
if ck.hrp != hrp:
return False
if ck.version != ver:
return False
except ValueError:
return False
return True
def get_address(pk, hrp, ver) -> PyAddress:
"""get address from public key"""
identifier = hashlib.new('ripemd160', hashlib.sha256(pk).digest()).digest()
return PyAddress.from_param(hrp, ver, identifier)
def convert_address(ck: PyAddress, hrp, ver) -> PyAddress:
"""convert address's version"""
return PyAddress.from_param(hrp, ver, ck.identifier())
def dummy_address(dummy_identifier) -> PyAddress:
assert len(dummy_identifier) == 20
return PyAddress.from_param('dummy', 0, dummy_identifier)
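# Minimal usage sketch (not part of the original module; it assumes the bc4py_extension package
# and OpenSSL ripemd160 support are available): get_address hashes the public key with SHA256
# followed by RIPEMD-160 and wraps the 20-byte identifier in a bech32 address. The key bytes and
# hrp values below are placeholders.
def _example_address_roundtrip():
    pk = bytes(33)  # hypothetical 33-byte public-key placeholder
    ck = get_address(pk, hrp='test', ver=0)
    assert is_address(ck, 'test', 0)
    assert convert_address(ck, 'other', 0).identifier() == ck.identifier()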
__all__ = [
"is_address",
"get_address",
"convert_address",
"dummy_address",
]
|
3000
|
import modutil
mod, __getattr__ = modutil.lazy_import(__name__,
['tests.test_data.A', '.B', '.C as still_C'])
def trigger_A():
return mod.A
def trigger_B():
return mod.B
def trigger_C():
return mod.still_C
def trigger_failure():
return mod.does_not_exist
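# Hedged note (not part of the original test module): modutil.lazy_import defers the real
# imports until the first attribute access on `mod`, so calling trigger_A() is what actually
# imports tests.test_data.A, and trigger_failure() is expected to raise because
# 'does_not_exist' was never registered with lazy_import.
#
#   trigger_A()         # performs the deferred import on first access
#   trigger_failure()   # raises, since the name was not declared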
|
3019
|
from marshmallow import Schema, fields
from marshmallow.validate import Range, Length
from sqlalchemy import Column, Integer, Boolean, DateTime
from ..db import Base
from ..shared.models import StringTypes
# ---- Error-report
class ErrorReport(Base):
__tablename__ = 'error_report'
id = Column(Integer, primary_key=True)
description = Column(StringTypes.LONG_STRING, nullable=False)
time_stamp = Column(DateTime)
status_code = Column(Integer)
endpoint = Column(StringTypes.MEDIUM_STRING)
solved = Column(Boolean, default=False)
def __repr__(self):
return f"<Error-report(id={self.id})>"
class ErrorReportSchema(Schema):
id = fields.Integer(dump_only=True, required=True, validate=Range(min=1))
description = fields.String(required=True, validate=Length(min=1))
time_stamp = fields.DateTime()
status_code = fields.Integer()
endpoint = fields.String()
solved = fields.Boolean()
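# Minimal usage sketch (not part of the original module): serializing an ErrorReport instance
# with the schema. The description, status code and endpoint below are hypothetical.
def _example_error_report_dump():
    report = ErrorReport(description='NullPointer in item lookup', status_code=500,
                         endpoint='/api/items')
    return ErrorReportSchema().dump(report)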
|
3052
|
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_str
def validate_email_with_name(value):
"""
Validate email address.
Both "<NAME> <<EMAIL>>" and "<EMAIL>" are valid.
"""
value = force_str(value)
recipient = value
if '<' in value and '>' in value:
start = value.find('<') + 1
end = value.find('>')
if start < end:
recipient = value[start:end]
validate_email(recipient)
def validate_comma_separated_emails(value):
"""
Validate every email address in a comma separated list of emails.
"""
if not isinstance(value, (tuple, list)):
raise ValidationError('Email list must be a list/tuple.')
for email in value:
try:
validate_email_with_name(email)
except ValidationError:
raise ValidationError('Invalid email: %s' % email, code='invalid')
def validate_template_syntax(source):
"""
    Basic Django Template syntax validation. This allows for more robust template
authoring.
"""
try:
Template(source)
except (TemplateSyntaxError, TemplateDoesNotExist) as err:
raise ValidationError(str(err))
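# Hedged usage sketch (not part of the original module): each validator raises ValidationError
# on bad input and returns None otherwise. The addresses below are placeholders.
def _example_email_validation():
    validate_email_with_name('Jane Doe <jane@example.com>')                   # passes
    validate_comma_separated_emails(['jane@example.com', 'ops@example.com'])  # passes
    try:
        validate_comma_separated_emails('jane@example.com')  # not a list/tuple
    except ValidationError:
        pass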
|
3056
|
import os
import sys
import logging
import time
import argparse
import numpy as np
from collections import OrderedDict
import scripts.options as option
import utils.util as util
from data.util import bgr2ycbcr
from data import create_dataset, create_dataloader
from models import create_model
# options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=False)
util.mkdirs((path for key, path in opt['path'].items() if not key == 'pretrain_model_G'))
opt = option.dict_to_nonedict(opt)
util.setup_logger(None, opt['path']['log'], 'test.log', level=logging.INFO, screen=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
# Create test dataset and dataloader
test_loaders = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
test_set = create_dataset(dataset_opt)
test_loader = create_dataloader(test_set, dataset_opt)
logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
test_loaders.append(test_loader)
# Create model
model = create_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt['name']
logger.info('\nTesting [{:s}]...'.format(test_set_name))
test_start_time = time.time()
dataset_dir = os.path.join(opt['path']['results_root'], test_set_name)
util.mkdir(dataset_dir)
test_results = OrderedDict()
test_results['psnr'] = []
test_results['ssim'] = []
test_results['psnr_y'] = []
test_results['ssim_y'] = []
for data in test_loader:
need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True
# need_GT = True
model.feed_data_specular(data, need_GT=need_GT)
if opt["image_type"] == "exr":
y = data["x_offset"]
x = data["y_offset"]
img_path = data['NOISY_path'][0]
img_name = os.path.splitext(os.path.basename(img_path))[0]
start = time.time()
model.test() # test
end = time.time()
print("Time elapsed... %f "%(end - start))
visuals = model.get_current_visuals(need_GT=need_GT)
denoised_img = util.tensor2img(visuals['DENOISED']) # uint8
noisy_img = util.tensor2img(visuals['NOISY'])
gt_img = util.tensor2img(visuals['GT']) # uint8
# save images
suffix = opt['suffix']
        if suffix is None:
            suffix = ""
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.png')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.png')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.png')
# calculate PSNR and SSIM
if need_GT:
# gt_img = util.tensor2img(visuals['GT'])
gt_img = gt_img / 255.
denoised_img = denoised_img / 255.
crop_border = test_loader.dataset.opt['scale']
cropped_denoised_img = denoised_img#[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_gt_img = gt_img#[crop_border:-crop_border, crop_border:-crop_border, :]
psnr = util.calculate_psnr(cropped_denoised_img * 255, cropped_gt_img * 255)
ssim = util.calculate_ssim(cropped_denoised_img * 255, cropped_gt_img * 255)
test_results['psnr'].append(psnr)
test_results['ssim'].append(ssim)
if gt_img.shape[2] == 3: # RGB image
denoised_img_y = bgr2ycbcr(denoised_img, only_y=True)
gt_img_y = bgr2ycbcr(gt_img, only_y=True)
cropped_denoised_img_y = denoised_img_y[crop_border:-crop_border, crop_border:-crop_border]
cropped_gt_img_y = gt_img_y[crop_border:-crop_border, crop_border:-crop_border]
psnr_y = util.calculate_psnr(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
ssim_y = util.calculate_ssim(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
test_results['psnr_y'].append(psnr_y)
test_results['ssim_y'].append(ssim_y)
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.'\
.format(img_name, psnr, ssim, psnr_y, ssim_y))
else:
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(img_name, psnr, ssim))
else:
logger.info(img_name)
if opt["image_type"] == "exr":
denoised_exr = util.tensor2exr(visuals['DENOISED']) # uint8
noisy_exr = util.tensor2exr(visuals['NOISY'])
gt_exr = util.tensor2exr(visuals['GT']) # uint8
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.exr')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.exr')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.exr')
util.saveEXRfromMatrix(save_DENOISED_img_path, denoised_exr, (x, y))
util.saveEXRfromMatrix(save_NOISY_img_path, noisy_exr, (x, y))
util.saveEXRfromMatrix(save_GT_img_path, gt_exr, (x, y))
if need_GT: # metrics
# Average PSNR/SSIM results
ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
logger.info('----Average PSNR/SSIM results for {}----\n\tPSNR: {:.6f} dB; SSIM: {:.6f}\n'\
.format(test_set_name, ave_psnr, ave_ssim))
# if test_results['psnr_y'] and test_results['ssim_y']:
# ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
# ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
# logger.info('----Y channel, average PSNR/SSIM----\n\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}\n'\
# .format(ave_psnr_y, ave_ssim_y))
|
3065
|
from setuptools import setup
setup(
name="greek-utils",
version="0.2",
description="various utilities for processing Ancient Greek",
license="MIT",
url="http://github.com/jtauber/greek-utils",
author="<NAME>",
author_email="<EMAIL>",
packages=["greekutils"],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Text Processing",
"Topic :: Text Processing :: Linguistic",
"Topic :: Utilities",
],
)
|
3073
|
from pyLMS7002M import *
print("Searching for QSpark...")
try:
QSpark = QSpark()
except:
print("QSpark not found")
exit(1)
print("\QSpark info:")
QSpark.printInfo() # print the QSpark board info
# QSpark.LMS7002_Reset() # reset the LMS7002M
lms7002 = QSpark.getLMS7002() # get the LMS7002M object
ver, rev, mask = lms7002.chipInfo # get the chip info
print("\nLMS7002M info:")
print("VER : "+str(ver))
print("REV : "+str(rev))
print("MASK : "+str(mask))
|
3074
|
from django.conf import settings
from netaddr import mac_unix, mac_eui48
import importlib
import warnings
class mac_linux(mac_unix):
"""MAC format with zero-padded all upper-case hex and colon separated"""
word_fmt = '%.2X'
def default_dialect(eui_obj=None):
# Check to see if a default dialect class has been specified in settings,
# using 'module.dialect_cls' string and use importlib and getattr to retrieve dialect class. 'module' is the module and
# 'dialect_cls' is the class name of the custom dialect. The dialect must either be defined or imported by the module's
# __init__.py if the module is a package.
from .fields import MACAddressField # Remove import at v1.4
if hasattr(settings, 'MACADDRESS_DEFAULT_DIALECT') and not MACAddressField.dialect:
module, dialect_cls = settings.MACADDRESS_DEFAULT_DIALECT.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls, mac_linux)
return dialect
else:
if MACAddressField.dialect: # Remove this "if" statement at v1.4
warnings.warn(
"The set_dialect class method on MACAddressField has been deprecated, in favor of the default_dialect "
"utility function and settings.MACADDRESS_DEFAULT_DIALECT. See macaddress.__init__.py source or the "
"project README for more information.",
DeprecationWarning,
)
return MACAddressField.dialect
if eui_obj:
return eui_obj.dialect
else:
return mac_linux
def format_mac(eui_obj, dialect):
    # Format an EUI instance as a string using the supplied dialect class, allowing custom dialect classes to be
    # passed directly or as a string, a la 'module.dialect_cls', where 'module' is the module and 'dialect_cls'
# is the class name of the custom dialect. The dialect must either be defined or imported by the module's __init__.py if
# the module is a package.
if not isinstance(dialect, mac_eui48):
if isinstance(dialect, str):
module, dialect_cls = dialect.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls)
eui_obj.dialect = dialect
return str(eui_obj)
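# Minimal usage sketch (not part of the original package): formatting a netaddr EUI with the
# zero-padded, colon-separated dialect defined above. The MAC value is arbitrary.
def _example_format_mac():
    from netaddr import EUI
    eui = EUI('00-1b-77-49-54-fd')
    return format_mac(eui, mac_linux)  # expected to give '00:1B:77:49:54:FD'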
from pkg_resources import get_distribution, DistributionNotFound
import os.path
try:
_dist = get_distribution('django-macaddress')
except DistributionNotFound:
__version__ = 'Please install this project with setup.py'
else:
__version__ = _dist.version
VERSION = __version__ # synonym
|
3084
|
import contextlib
import logging
import typing
from typing import Any, Dict, Tuple
import attr
from dbnd._core.configuration import get_dbnd_project_config
from dbnd._core.constants import (
RESULT_PARAM,
DbndTargetOperationStatus,
DbndTargetOperationType,
TaskRunState,
)
from dbnd._core.current import (
current_task_run,
get_databand_run,
is_verbose,
try_get_current_task,
)
from dbnd._core.errors.errors_utils import log_exception
from dbnd._core.log.external_exception_logging import log_exception_to_server
from dbnd._core.parameter.parameter_definition import ParameterDefinition
from dbnd._core.parameter.parameter_value import ParameterFilters
from dbnd._core.settings import TrackingConfig
from dbnd._core.task.tracking_task import TrackingTask
from dbnd._core.task_build.task_context import try_get_current_task
from dbnd._core.task_build.task_definition import TaskDefinition
from dbnd._core.task_build.task_results import FuncResultParameter
from dbnd._core.task_run.task_run import TaskRun
from dbnd._core.task_run.task_run_error import TaskRunError
from dbnd._core.utils.callable_spec import args_to_kwargs
from dbnd._core.utils.timezone import utcnow
from targets import InMemoryTarget, Target
from targets.value_meta import ValueMetaConf
from targets.values import get_value_type_of_obj
if typing.TYPE_CHECKING:
from dbnd._core.task_build.task_decorator import TaskDecorator
logger = logging.getLogger(__name__)
@attr.s
class TrackedFuncCallWithResult(object):
call_args = attr.ib() # type: Tuple[Any]
call_kwargs = attr.ib() # type: Dict[str,Any]
callable = attr.ib()
result = attr.ib(default=None)
def set_result(self, value):
self.result = value
return value
def invoke(self):
func = self.callable
return func(*self.call_args, **self.call_kwargs)
class CallableTrackingManager(object):
def __init__(self, task_decorator):
# type: (CallableTrackingManager, TaskDecorator) -> None
self.task_decorator = task_decorator
self._tracking_task_definition = None
self._call_count = 0
self._call_as_func = False
self._max_call_count = get_dbnd_project_config().max_calls_per_run
@property
def callable(self):
return self.task_decorator.class_or_func
def get_tracking_task_definition(self):
if not self._tracking_task_definition:
self._tracking_task_definition = self._build_tracking_task_definition()
return self._tracking_task_definition
def _build_tracking_task_definition(self):
return TaskDefinition.from_task_decorator(task_decorator=self.task_decorator)
def _call_count_limit_exceeded(self):
if not self._call_as_func:
self._call_count += 1
if self._call_count > self._max_call_count:
logger.info(
"Reached maximum tracking limit of {} tasks. Running function regularly.".format(
self._max_call_count
)
)
self._call_as_func = True
return self._call_as_func
@contextlib.contextmanager
def tracking_context(self, call_args, call_kwargs):
        user_code_called = False  # whether we reached execution of the user code
        user_code_finished = False  # whether execution of the user code completed
func_call = None
try:
# 1. check that we don't have too many calls
if self._call_count_limit_exceeded():
yield _do_nothing_decorator
return
# 2. Start or reuse existing "main tracking task" that is root for tracked tasks
if not try_get_current_task():
"""
try to get existing task, and if not exists - try to get/create inplace_task_run
"""
from dbnd._core.tracking.script_tracking_manager import (
try_get_inplace_tracking_task_run,
)
                inplace_tracking_task = try_get_inplace_tracking_task_run()
                if not inplace_tracking_task:
# we didn't manage to start inplace tracking task run, we will not be able to track
yield _do_nothing_decorator
return
tracking_task_definition = self.get_tracking_task_definition()
callable_spec = tracking_task_definition.task_decorator.get_callable_spec()
func_call = TrackedFuncCallWithResult(
callable=self.callable,
call_args=tuple(call_args), # prevent original call_args modification
call_kwargs=dict(call_kwargs), # prevent original kwargs modification
)
# replace any position argument with kwarg if it possible
args, kwargs = args_to_kwargs(
callable_spec.args, func_call.call_args, func_call.call_kwargs,
)
# instantiate inline task
task = TrackingTask.for_func(tracking_task_definition, args, kwargs)
# update upstream/downstream relations - needed for correct tracking
            # we can have the task as upstream, as it was executed already
parent_task = current_task_run().task
if not parent_task.task_dag.has_upstream(task):
parent_task.set_upstream(task)
# checking if any of the inputs are the outputs of previous task.
# we can add that task as upstream.
dbnd_run = get_databand_run()
call_kwargs_as_targets = dbnd_run.target_origin.get_for_map(kwargs)
for value_origin in call_kwargs_as_targets.values():
up_task = value_origin.origin_target.task
task.set_upstream(up_task)
# creating task_run as a task we found mid-run
task_run = dbnd_run.create_task_run_at_execution_time(
task, task_engine=current_task_run().task_engine
)
should_capture_log = TrackingConfig.current().capture_tracking_log
with task_run.runner.task_run_execution_context(
handle_sigterm=True, capture_log=should_capture_log
):
task_run.set_task_run_state(state=TaskRunState.RUNNING)
_log_inputs(task_run)
# if we reached this line, then all tracking initialization is
# finished successfully, and we're going to execute user code
user_code_called = True
try:
# tracking_context is context manager - user code will run on yield
yield func_call.set_result
# if we reached this line, this means that user code finished
# successfully without any exceptions
user_code_finished = True
except Exception as ex:
task_run.finished_time = utcnow()
error = TaskRunError.build_from_ex(ex, task_run)
task_run.set_task_run_state(TaskRunState.FAILED, error=error)
raise
else:
task_run.finished_time = utcnow()
# func_call.result should contain result, log it
_log_result(task_run, func_call.result)
task_run.set_task_run_state(TaskRunState.SUCCESS)
except Exception:
if user_code_called and not user_code_finished:
                # if we started calling the user code and did not get to the user_code_finished
                # line - it means there was a user code exception - so just re-raise it
raise
            # otherwise we either didn't reach the user code call or already passed it,
            # so it's some dbnd tracking error - just log it
if func_call:
_handle_tracking_error("tracking-init", func_call)
else:
log_exception_to_server()
        # if we didn't reach the user_code_called=True line - there was an error during
# dbnd tracking initialization, so nothing is done - user function wasn't called yet
if not user_code_called:
# tracking_context is context manager - user code will run on yield
yield _do_nothing_decorator
return
def _handle_tracking_error(msg, func_call=None):
log_exception_to_server()
location = " for %s" % func_call.callable if func_call else ""
msg = "Failed during dbnd %s for %s, ignoring, and continue without tracking" % (
msg,
location,
)
if is_verbose():
logger.warning(
msg, exc_info=True,
)
else:
logger.info(msg)
def _do_nothing_decorator(f):
return f
def _log_inputs(task_run):
"""
For tracking mode. Logs InMemoryTarget inputs.
"""
try:
params = task_run.task._params
for param_value in params.get_param_values(ParameterFilters.INPUTS):
param, value = param_value.parameter, param_value.value
if isinstance(param_value, InMemoryTarget):
try:
param = param.modify(
value_meta_conf=ValueMetaConf(
log_preview=True, log_schema=True,
)
)
task_run.tracker.log_parameter_data(
parameter=param,
target=param_value,
value=value,
operation_type=DbndTargetOperationType.read,
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log input param to tracking store.",
ex=ex,
non_critical=True,
)
except Exception as ex:
log_exception(
"Failed to log input params to tracking store.", ex=ex, non_critical=True
)
def _log_result(task_run, result):
# type: (TaskRun, Any) -> None
"""
For tracking mode. Logs the task result and adds it to the target_origin map to support relationships between
dynamic tasks.
"""
try:
result_param = task_run.task.task_params.get_param_value(RESULT_PARAM)
if not result_param:
logger.debug(
"No result params to log for task {}".format(task_run.task_af_id)
)
return
        # we know the parameter value is a target because this is an output param
# the target is created in the task creation
result_param_def, result_target = result_param.parameter, result_param.value
# spread result into relevant fields.
if isinstance(result_param_def, FuncResultParameter):
# assign all returned values to relevant band Outputs
if result is None:
return
for result_name, value in result_param_def.named_results(result):
                # we know the parameter value is a target because this is an output param
# the target is created in the task creation
parameter_value = task_run.task.task_params.get_param_value(result_name)
_log_parameter_value(
task_run,
parameter_definition=parameter_value.parameter,
target=parameter_value.value,
value=value,
)
else:
_log_parameter_value(
task_run,
parameter_definition=result_param_def,
target=result_target,
value=result,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
def _log_parameter_value(task_run, parameter_definition, target, value):
# type: (TaskRun, ParameterDefinition, Target, Any) -> None
# make sure it will be logged correctly
parameter_definition = parameter_definition.modify(
value_meta_conf=ValueMetaConf(log_preview=True, log_schema=True)
)
try:
        # handle the case where the result is a Proxy object
value_type = get_value_type_of_obj(value, parameter_definition.value_type)
task_run.run.target_origin.add(target, value, value_type)
except Exception as ex:
log_exception(
"Failed to register result to target tracking.", ex=ex, non_critical=True
)
try:
task_run.tracker.log_parameter_data(
parameter=parameter_definition, # was: task_run.task.task_definition.task_class.result,
target=target,
value=value,
operation_type=DbndTargetOperationType.write, # is it write? (or log?)
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
|
3087
|
import nose
import angr
import logging
l = logging.getLogger("angr.tests.test_bindiff")
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
# todo make a better test
def test_bindiff_x86_64():
binary_path_1 = os.path.join(test_location, 'x86_64', 'bindiff_a')
binary_path_2 = os.path.join(test_location, 'x86_64', 'bindiff_b')
b = angr.Project(binary_path_1, load_options={"auto_load_libs": False})
b2 = angr.Project(binary_path_2, load_options={"auto_load_libs": False})
bindiff = b.analyses.BinDiff(b2)
identical_functions = bindiff.identical_functions
differing_functions = bindiff.differing_functions
unmatched_functions = bindiff.unmatched_functions
# check identical functions
nose.tools.assert_in((0x40064c, 0x40066a), identical_functions)
# check differing functions
nose.tools.assert_in((0x400616, 0x400616), differing_functions)
# check unmatched functions
nose.tools.assert_less_equal(len(unmatched_functions[0]), 1)
nose.tools.assert_less_equal(len(unmatched_functions[1]), 2)
# check for no major regressions
nose.tools.assert_greater(len(identical_functions), len(differing_functions))
nose.tools.assert_less(len(differing_functions), 4)
# check a function diff
fdiff = bindiff.get_function_diff(0x400616, 0x400616)
block_matches = { (a.addr, b.addr) for a, b in fdiff.block_matches }
nose.tools.assert_in((0x40064a, 0x400668), block_matches)
nose.tools.assert_in((0x400616, 0x400616), block_matches)
nose.tools.assert_in((0x40061e, 0x40061e), block_matches)
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.analyses.bindiff").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
3125
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
clf = ARDRegression(fit_intercept=False, n_iter=1000)
clf.fit(X, y)
ols = LinearRegression(fit_intercept=False)
ols.fit(X, y)
from copy import deepcopy
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision
from sds.distributions.gamma import Gamma
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1, )),
betas=1e-6 * np.ones((1, )))
parameter_precision_prior = Gamma(dim=n_features, alphas=np.ones((n_features, )),
betas=1e-6 * np.ones((n_features, )))
likelihood_precision_posterior = deepcopy(likelihood_precision_prior)
parameter_precision_posterior = deepcopy(parameter_precision_prior)
parameter_posterior = None
for i in range(100):
# parameter posterior
alphas = parameter_precision_posterior.mean()
parameter_prior = GaussianWithPrecision(dim=n_features,
mu=np.zeros((n_features, )),
lmbda=np.diag(alphas))
parameter_posterior = deepcopy(parameter_prior)
beta = likelihood_precision_posterior.mean()
likelihood_known_precision = SingleOutputLinearGaussianWithKnownPrecision(column_dim=n_features,
lmbda=beta,
affine=False)
stats = likelihood_known_precision.statistics(X, y)
parameter_posterior.nat_param = parameter_prior.nat_param + stats
# likelihood precision posterior
param = parameter_posterior.mean()
likelihood_known_mean = SingleOutputLinearGaussianWithKnownMean(column_dim=n_features,
W=param, affine=False)
stats = likelihood_known_mean.statistics(X, y)
likelihood_precision_posterior.nat_param = likelihood_precision_prior.nat_param + stats
# parameter precision posterior
parameter_likelihood = GaussianWithKnownMeanAndDiagonalPrecision(dim=n_features)
param = parameter_posterior.mean()
stats = parameter_likelihood.statistics(param)
parameter_precision_posterior.nat_param = parameter_precision_prior.nat_param + stats
our_ard = parameter_posterior.mode()
from sds.distributions.composite import MatrixNormalGamma
from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision
M = np.zeros((1, n_features))
K = 1e-16 * np.eye(n_features)
alphas = 1e-16 * np.ones((1, ))
betas = 1e-16 * np.ones((1, ))
prior = MatrixNormalGamma(column_dim=n_features, row_dim=1,
M=M, K=K, alphas=alphas, betas=betas)
posterior = deepcopy(prior)
likelihood = LinearGaussianWithDiagonalPrecision(column_dim=n_features,
row_dim=1,
affine=False)
stats = likelihood.statistics(X, np.atleast_2d(y).T)
posterior.nat_param = prior.nat_param + stats
our_ols = posterior.mode()[0]
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label="Sklearn ARD")
plt.plot(our_ard, color='red', linestyle='-', linewidth=2, label="Our ARD")
# plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2, label="Sklearn OLS")
# plt.plot(our_ols.flatten(), color='cyan', linestyle='-', linewidth=2, label="Our OLS")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.show()
|
3128
|
import datetime as dt
import logging
from babel import Locale, UnknownLocaleError
from babel.dates import format_datetime, format_time, format_date
import pytz
from tzlocal import get_localzone
from . import settings
logger = logging.getLogger(__name__)
class LocaleHelper:
"""Helpers for converting date & time according to current locale and timezone"""
def __init__(
self,
my_locale: Locale = None,
my_tz: pytz.BaseTzInfo = None,
author_info: dict = None,
) -> None:
"""
Args:
- my_locale: Primary locale to use
- my_tz: Primary timezone to use
- author_info: locale and timezone to use from this Slack response
if my_locale and/or my_tz are not given
"""
self._locale = self._determine_locale(my_locale, author_info)
self._timezone = self._determine_timezone(my_tz, author_info)
@staticmethod
def _determine_locale(my_locale: Locale = None, author_info: dict = None) -> Locale:
if my_locale:
if not isinstance(my_locale, Locale):
raise TypeError("my_locale must be a babel Locale object")
else:
if author_info:
try:
my_locale = Locale.parse(author_info["locale"], sep="-")
except UnknownLocaleError:
logger.warning("Could not use locale info from Slack")
my_locale = Locale.default()
else:
my_locale = Locale.default()
if not my_locale:
my_locale = Locale.parse(settings.FALLBACK_LOCALE)
return my_locale
@staticmethod
def _determine_timezone(
my_tz: pytz.BaseTzInfo = None, author_info: dict = None
) -> pytz.BaseTzInfo:
if my_tz:
if not isinstance(my_tz, pytz.BaseTzInfo):
raise TypeError("my_tz must be of type pytz")
else:
if author_info:
try:
my_tz = pytz.timezone(author_info["tz"])
except pytz.exceptions.UnknownTimeZoneError:
logger.warning("Could not use timezone info from Slack")
my_tz = get_localzone()
else:
my_tz = get_localzone()
if not my_tz:
my_tz = pytz.UTC
return my_tz
@property
def locale(self) -> Locale:
return self._locale
@property
def timezone(self) -> pytz.BaseTzInfo:
return self._timezone
def format_date_full_str(self, my_datetime: dt.datetime) -> str:
return format_date(my_datetime, format="full", locale=self.locale)
def format_datetime_str(self, my_datetime: dt.datetime) -> str:
"""returns formated datetime string for given dt using locale"""
return format_datetime(my_datetime, format="short", locale=self.locale)
def get_datetime_formatted_str(self, ts: int) -> str:
"""return given timestamp as formated datetime string using locale"""
my_datetime = self.get_datetime_from_ts(ts)
return format_datetime(my_datetime, format="short", locale=self.locale)
def get_time_formatted_str(self, ts: int) -> str:
"""return given timestamp as formated datetime string using locale"""
my_datetime = self.get_datetime_from_ts(ts)
return format_time(my_datetime, format="short", locale=self.locale)
def get_datetime_from_ts(self, ts: int) -> dt.datetime:
"""returns datetime object of a unix timestamp with local timezone"""
my_datetime = dt.datetime.fromtimestamp(float(ts), pytz.UTC)
return my_datetime.astimezone(self.timezone)
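# Hedged usage sketch (not part of the original module): it only exercises the public
# API defined above and assumes babel, pytz and tzlocal are installed. Because of the
# relative import of ``settings``, run it via ``python -m <package>.<module>``.
if __name__ == "__main__":
    import pytz as _pytz
    from babel import Locale as _Locale

    helper = LocaleHelper(
        my_locale=_Locale.parse("de-DE", sep="-"),
        my_tz=_pytz.timezone("Europe/Berlin"),
    )
    # Format a fixed unix timestamp with the configured locale and timezone.
    print(helper.get_datetime_formatted_str(1_600_000_000))
    print(helper.get_time_formatted_str(1_600_000_000))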
|
3192
|
from ._ffi.base import string_types
from ._ffi.object import register_object, Object
from ._ffi.node import register_node, NodeBase
from ._ffi.node import convert_to_node as _convert_to_node
from ._ffi.node_generic import _scalar_type_inference
from ._ffi.function import Function
from ._ffi.function import _init_api, register_func, get_global_func, extract_ext_funcs
from ._ffi.function import convert_to_tvm_func as _convert_tvm_func
from ._ffi.runtime_ctypes import TVMType
from . import _api_internal
from . import make as _make
from . import expr as _expr
from . import tensor as _tensor
from . import schedule as _schedule
from . import container as _container
from . import tag as _tag
int8 = "int8"
int32 = "int32"
float32 = "float32"
handle = "handle"
def min_value(dtype):
return _api_internal._min_value(dtype)
|
3202
|
import numpy as np
from unittest import TestCase
import numpy.testing as npt
from distancematrix.util import diag_indices_of
from distancematrix.consumer.distance_matrix import DistanceMatrix
class TestContextualMatrixProfile(TestCase):
def setUp(self):
self.dist_matrix = np.array([
[8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09],
[4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19],
[0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94],
[0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15],
[9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38],
[7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67],
[2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64],
[6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.],
[4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92],
[1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 7.29, 6.26]])
def mock_initialise(self, dm):
dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1])
def test_process_diagonal(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_diagonal_partial_calculation(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for diag in range(-8, self.dist_matrix.shape[1], 3):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
correct[diag_ind] = self.dist_matrix[diag_ind]
npt.assert_equal(dm.distance_matrix, correct)
def test_process_column(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
for column in range(0, self.dist_matrix.shape[1]):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_column_partial_calculation(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for column in [2, 3, 4, 5, 10, 11, 12]:
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
correct[:, column] = self.dist_matrix[:, column]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_column(self):
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0]))
dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1]))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[:2, 1] = self.dist_matrix[:2, 1]
npt.assert_equal(dm.distance_matrix, expected)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(1)
dm.shift_series(3)
correct = np.full((5, 5), np.nan)
correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5]
npt.assert_equal(dm.distance_matrix, correct)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8])
dm.shift_query(2)
dm.shift_series(1)
dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8]))
correct = np.full((5, 5), np.nan)
correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8]
correct[:, 4] = self.dist_matrix[3:8, 8]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_diagonal(self):
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0]))
diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1)
dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind])))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[0, 1] = self.dist_matrix[0, 1]
expected[1, 2] = self.dist_matrix[1, 2]
npt.assert_equal(dm.distance_matrix, expected)
for diag in range(-4,5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(2)
dm.shift_series(1)
expected = self.dist_matrix[2:7, 1:6].copy()
expected[-2:, :] = np.nan
expected[:, -1:] = np.nan
npt.assert_equal(dm.distance_matrix, expected)
for diag in range(-4,5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
|
3205
|
import torch
import torch.nn as nn
import os
import torch.nn.functional as F
class LDS(nn.Module):
def __init__(self,):
super(LDS, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
def forward(self, x):
x_pool1 = self.pool1(x)
x_pool2 = self.pool2(x_pool1)
x_pool3 = self.pool3(x_pool2)
return x_pool3
class ConvBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(ConvBlock, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=False) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class LSN_init(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_init, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1),
ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class LSN_later(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_later, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class IBN(nn.Module):
def __init__(self, out_planes, bn=True):
super(IBN, self).__init__()
self.out_channels = out_planes
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
def forward(self, x):
if self.bn is not None:
x = self.bn(x)
return x
class One_Three_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(One_Three_Conv, self).__init__()
self.out_channels = out_planes
inter_planes = in_planes // 4
self.single_branch = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class Relu_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Relu_Conv, self).__init__()
self.out_channels = out_planes
self.relu = nn.ReLU(inplace=False)
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
def forward(self, x):
x = self.relu(x)
out = self.single_branch(x)
return out
class Ds_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)):
super(Ds_Conv, self).__init__()
self.out_channels = out_planes
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class LRFNet(nn.Module):
"""LRFNet for object detection
The network is based on the SSD architecture.
Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(LRFNet, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.size = size
# vgg network
self.base = nn.ModuleList(base)
self.lds = LDS()
# convs for merging the lsn and ssd features
self.Norm1 = Relu_Conv(512, 512, stride=1)
self.Norm2 = Relu_Conv(1024, 1024, stride=1)
self.Norm3 = Relu_Conv(512, 512, stride=1)
self.Norm4 = Relu_Conv(256, 256, stride=1)
# convs for generate the lsn features
self.icn1 = LSN_init(3, 512, stride=1)
self.icn2 = LSN_later(128, 1024, stride=2)
self.icn3 = LSN_later(256, 512, stride=2)
# convs with s=2 to downsample the features
self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1))
self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1))
self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1))
# convs to reduce the feature dimensions of current level
self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1)
self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1)
self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1)
self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1)
self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.convert1 = ConvBlock(384, 256, kernel_size=1)
self.convert2 = ConvBlock(256, 512, kernel_size=1)
self.convert3 = ConvBlock(128, 256, kernel_size=1)
# convs to merge the features of the current and higher level features
self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1)
self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.ibn1 = IBN(512, bn=True)
self.ibn2 = IBN(1024, bn=True)
self.relu = nn.ReLU(inplace=False)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if self.phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
new_sources = list()
# apply lds to the initial image
x_pool = self.lds(x)
# apply vgg up to conv4_3
for k in range(22):
x = self.base[k](x)
conv4_3_bn = self.ibn1(x)
x_pool1_skip, x_pool1_icn = self.icn1(x_pool)
s = self.Norm1(conv4_3_bn * x_pool1_icn)
# apply vgg up to fc7
for k in range(22, 34):
x = self.base[k](x)
conv7_bn = self.ibn2(x)
x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)
p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)
x = self.base[34](x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = v(x)
if k == 0:
x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)
w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)
elif k == 2:
q = self.Norm4(self.dsc3(w) + x)
sources.append(q)
elif k == 5 or k == 7:
sources.append(x)
else:
pass
# project the forward features into lower dimension.
tmp1 = self.proj1(p)
tmp2 = self.proj2(w)
tmp3 = self.proj3(q)
# The conv4_3 level
proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear')
proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear')
proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear')
proj = torch.cat([proj1, proj2, proj3], dim=1)
agent1 = self.agent1(s)
convert1 = self.convert1(proj)
pred1 = torch.cat([agent1, convert1], dim=1)
pred1 = self.merge1(pred1)
new_sources.append(pred1)
# The fc_7 level
proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear')
proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear')
proj = torch.cat([proj2, proj3], dim=1)
agent2 = self.agent2(p)
convert2 = self.convert2(proj)
pred2 = torch.cat([agent2, convert2], dim=1)
pred2 = self.merge2(pred2)
new_sources.append(pred2)
# The conv8 level
proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear')
proj = proj3
agent3 = self.agent3(w)
convert3 = self.convert3(proj)
pred3 = torch.cat([agent3, convert3], dim=1)
pred3 = self.merge3(pred3)
new_sources.append(pred3)
for prediction in sources:
new_sources.append(prediction)
# apply multibox head to source layers
for (x, l, c) in zip(new_sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
        if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512]}
def add_extras(size, cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
if in_channels == 256 and size == 512:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
else:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
in_channels = v
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
return layers
extras = {
'300': [1024, 'S', 512, 'S', 256]}
def multibox(size, vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [1, -2]
for k, v in enumerate(vgg_source):
if k == 0:
loc_layers += [nn.Conv2d(512,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers +=[nn.Conv2d(512,
cfg[k] * num_classes, kernel_size=3, padding=1)]
else:
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
i = 2
indicator = 3
for k, v in enumerate(extra_layers):
if (k < indicator+1 and k % 2 == 0) or (k > indicator+1 and k % 2 != 0):
loc_layers += [nn.Conv2d(v.out_channels, cfg[i]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[i]
* num_classes, kernel_size=3, padding=1)]
i += 1
return vgg, extra_layers, (loc_layers, conf_layers)
mbox = {
'300': [6, 6, 6, 6, 4, 4]}
def build_net(phase, size=300, num_classes=81):
if size != 300:
print("Error: The input image size is not supported!")
return
return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3),
add_extras(size, extras[str(size)], 1024),
mbox[str(size)], num_classes), num_classes)
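# Hedged usage sketch (not from the original source): builds the 300x300 variant and
# pushes one dummy batch through it to show the output shapes in "train" phase. The
# batch size of 1 and the 81 classes are illustrative values, not requirements.
if __name__ == "__main__":
    net = build_net("train", size=300, num_classes=81)
    dummy = torch.randn(1, 3, 300, 300)
    loc_preds, conf_preds = net(dummy)
    # Expect (1, P, 4) localization offsets and (1, P, 81) class scores for P priors.
    print(loc_preds.shape, conf_preds.shape)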
|
3234
|
import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class CxxOptsConan(ConanFile):
name = "cxxopts"
homepage = "https://github.com/jarro2783/cxxopts"
url = "https://github.com/conan-io/conan-center-index"
description = "Lightweight C++ option parser library, supporting the standard GNU style syntax for options."
license = "MIT"
topics = ("conan", "option-parser", "positional-arguments ", "header-only")
settings = "compiler"
options = { "unicode": [True, False] }
default_options = { "unicode": False }
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _minimum_cpp_standard(self):
return 11
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"gcc": "5",
"clang": "3.9",
"apple-clang": "8",
}
def configure(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, self._minimum_cpp_standard)
min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
else:
if tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(
self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))
def requirements(self):
if self.options.unicode:
self.requires("icu/64.2")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("{}.hpp".format(self.name), dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_id(self):
self.info.header_only()
def package_info(self):
if self.options.unicode:
self.cpp_info.defines = ["CXXOPTS_USE_UNICODE"]
|
3273
|
from typing import *
import attr
from dlms_cosem.hdlc import validators
@attr.s(auto_attribs=True)
class HdlcAddress:
"""
A client address shall always be expressed on one byte.
To enable addressing more than one logical device within a single physical device
    and to support the multi-drop configuration, the server address may be divided
    into two parts:
The logical address to address a logical device (separate addressable entity
within a physical device) makes up the upper HDLC address
The logical address must always be present.
    The physical address is used to address a physical device (a physical device on
    a multi-drop).
    The physical address can be omitted if not used.
"""
logical_address: int = attr.ib(validator=[validators.validate_hdlc_address])
physical_address: Optional[int] = attr.ib(
default=None, validator=[validators.validate_hdlc_address]
)
address_type: str = attr.ib(
default="client", validator=[validators.validate_hdlc_address_type]
)
@property
def length(self):
"""
The number of bytes the address makes up.
:return:
"""
return len(self.to_bytes())
def to_bytes(self):
out: List[Optional[int]] = list()
if self.address_type == "client":
# shift left 1 bit and set the lsb to mark end of address.
out.append(((self.logical_address << 1) | 0b00000001))
else:
# server address type
logical_higher, logical_lower = self._split_address(self.logical_address)
if self.physical_address:
physical_higher, physical_lower = self._split_address(
self.physical_address
)
# mark physical lower as end
physical_lower = physical_lower | 0b00000001
out.extend(
[logical_higher, logical_lower, physical_higher, physical_lower]
)
else:
                # no physical address so mark the logical as end.
logical_lower = logical_lower | 0b00000001
out.extend([logical_higher, logical_lower])
out_bytes = list()
for address in out:
            if address is not None:
out_bytes.append(address.to_bytes(1, "big"))
return b"".join(out_bytes)
@staticmethod
def _split_address(address: int) -> Tuple[Optional[int], int]:
higher: Optional[int]
lower: int
if address > 0b01111111:
lower = (address & 0b0000000001111111) << 1
higher = (address & 0b0011111110000000) >> 6
else:
lower = address << 1
higher = None
return higher, lower
@staticmethod
def _address_to_byte(address: int) -> bytes:
return address.to_bytes(1, "big")
@classmethod
def destination_from_bytes(cls, frame_bytes: bytes, address_type: str):
destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes(
frame_bytes
)
(
destination_logical,
destination_physical,
destination_length,
) = destination_address_data
return cls(destination_logical, destination_physical, address_type)
@classmethod
def source_from_bytes(cls, frame_bytes: bytes, address_type: str):
_, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes)
source_logical, source_physical, source_length = source_address_data
return cls(source_logical, source_physical, address_type)
@staticmethod
def find_address_in_frame_bytes(
hdlc_frame_bytes: bytes,
) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]:
"""
        The address can be 1, 2 or 4 bytes long. The end byte is indicated by
        the LSB of the last byte being 1.
        The first address is the destination address and the second is the
        source address.
        :param hdlc_frame_bytes:
:return:
"""
# Find destination address.
destination_length: int = 1
destination_logical: int = 0
destination_physical: Optional[int] = 0
destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2), (6, 4)]
address_bytes: bytes
for pos, _length in destination_positions_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
destination_length = _length
break
continue
if destination_length == 1:
address_bytes = hdlc_frame_bytes[3].to_bytes(1, "big")
destination_logical = address_bytes[0] >> 1
destination_physical = None
elif destination_length == 2:
address_bytes = hdlc_frame_bytes[3:5]
destination_logical = address_bytes[0] >> 1
destination_physical = address_bytes[1] >> 1
elif destination_length == 4:
address_bytes = hdlc_frame_bytes[3:7]
destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
# Find source address
source_length: int = 1
source_logical: int = 0
source_physical: Optional[int] = 0
source_position_list: List[Tuple[int, int]] = [
(item[0] + destination_length, item[1])
for item in destination_positions_list
]
for pos, _length in source_position_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
source_length = _length
break
continue
if source_length == 1:
address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, "big")
source_logical = address_bytes[0] >> 1
source_physical = None
elif source_length == 2:
            address_bytes = hdlc_frame_bytes[3 + destination_length : 3 + destination_length + source_length]
source_logical = address_bytes[0] >> 1
source_physical = address_bytes[1] >> 1
        elif source_length == 4:
            address_bytes = hdlc_frame_bytes[3 + destination_length : 3 + destination_length + source_length]
            source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            source_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
return (
(destination_logical, destination_physical, destination_length),
(source_logical, source_physical, source_length),
)
@staticmethod
def parse_two_byte_address(address_bytes: bytes):
        if len(address_bytes) != 2:
            raise ValueError("Can only parse 2 bytes for address")
upper = address_bytes[0] >> 1
lower = address_bytes[1] >> 1
return lower + (upper << 7)
|
3321
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import CustomUser
admin.site.register(CustomUser, UserAdmin)
|
3370
|
from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
from functools import wraps
_COMPLEX_PLOTTING_ERROR_MSG = """
Complex fields cannot be plotted. Use operators to get the amplitude
or the result at a defined sweeping phase before plotting.
"""
_FIELD_CONTAINER_PLOTTING_MSG = """"
This fields_container contains multiple fields. Only one time-step
result can be plotted at a time. Extract a field with
``fields_container[index]``.
"""
class DpfVersionNotSupported(RuntimeError):
"""Error raised when the dpf-core/grpc-dpf python features are not
supported by the DPF gRPC server version."""
def __init__(self, version, msg=None):
if msg is None:
msg = "Feature not supported. Upgrade the server to "
msg += str(version)
msg += " version (or above)."
RuntimeError.__init__(self, msg)
class DpfValueError(ValueError):
"""Error raised when a specific DPF error value must be defined."""
def __init__(
self, msg="A value that has been set leads to incorrect DPF behavior."
):
ValueError.__init__(self, msg)
class InvalidTypeError(ValueError):
"""Error raised when a parameter has the wrong type."""
def __init__(self, data_type, parameter_name):
msg = (
"A "
+ data_type
+ " must be used for the following parameter: "
+ parameter_name
+ "."
)
ValueError.__init__(self, msg)
class LocationError(ValueError):
"""Error raised when using an invalid location."""
def __init__(self, msg="Invalid location"):
ValueError.__init__(self, msg)
class ComplexPlottingError(ValueError):
"""Error raised when attempting to plot a field with complex data."""
def __init__(self, msg=_COMPLEX_PLOTTING_ERROR_MSG):
ValueError.__init__(self, msg)
class FieldContainerPlottingError(ValueError):
"""Error raised when attempting to plot a fields_container containing
multiple fields."""
def __init__(self, msg=_FIELD_CONTAINER_PLOTTING_MSG):
ValueError.__init__(self, msg)
class InvalidANSYSVersionError(RuntimeError):
"""Error raised when the Ansys verion is invalid."""
def __init__(self, msg=""):
RuntimeError.__init__(self, msg)
class DPFServerException(Exception):
"""Error raised when the DPF server has encountered an error."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class DPFServerNullObject(Exception):
"""Error raised when the DPF server cannot find an object."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class InvalidPortError(OSError):
"""Error raised when used an invalid port when starting DPF."""
def __init__(self, msg=""):
OSError.__init__(self, msg)
def protect_grpc(func):
"""Capture gRPC exceptions and return a more succinct error message."""
@wraps(func)
def wrapper(*args, **kwargs):
"""Capture gRPC exceptions."""
# Capture gRPC exceptions
try:
out = func(*args, **kwargs)
except (_InactiveRpcError, _MultiThreadedRendezvous) as error:
details = error.details()
if "object is null in the dataBase" in details:
raise DPFServerNullObject(details) from None
raise DPFServerException(details) from None
return out
return wrapper
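# Hedged usage sketch (illustrative, not part of the module): any function that calls
# into the DPF gRPC server can be wrapped with ``protect_grpc`` so that low-level gRPC
# errors are re-raised as the DPF exceptions defined above. The wrapped function below
# does no real gRPC work; it only shows that successful calls pass through unchanged.
if __name__ == "__main__":
    @protect_grpc
    def _echo(value):
        """Stand-in for a gRPC-backed call."""
        return value

    print(_echo("ok"))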
|
3372
|
from dataclasses import dataclass
from dataclasses import field
from time import time
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
@dataclass
class NewUser:
"""Deals with the commands the user is currently sending"""
user_id: int
chat_id: int
command: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=}"
@dataclass
class UserCommand:
"""Stores the latest command sent by the user"""
user_id: int
command: str
    insert_time: int = field(default_factory=lambda: int(time()))  # for garbage collection
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=} {self.insert_time=}"
@dataclass
class MessageInfo:
"""Important things in the message"""
user_id: int
chat_id: int
message_id: int
text: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.chat_id=} {self.message_id=} {self.text=}"
@dataclass
class UserDBInfo:
"""Info about the user from the DB"""
feed: bool # if false, the bot will not send any news feeds on a daily basis
user_id: int
db_id: int
topics: List[str] = field(default_factory=lambda: [])
def __repr__(self) -> str:
return f"{self.user_id=} {self.feed=} {self.db_id=} {self.topics=}"
@dataclass
class StagedFunction:
"""For FunctionStagingArea"""
fn: Callable[..., Any]
args: Optional[Tuple[Any, ...]] = None
kwargs: Optional[Dict[str, Any]] = None
|
3381
|
from __future__ import absolute_import
import abc
import os
import json
import glob
import shutil
from tensorflow.python.estimator import gc
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter
def _verify_compare_fn_args(compare_fn):
"""Verifies compare_fn arguments."""
args = set(util.fn_args(compare_fn))
if 'best_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include best_eval_result argument.' % compare_fn)
if 'current_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include current_eval_result argument.' %
compare_fn)
non_valid_args = list(args - set(['best_eval_result', 'current_eval_result']))
if non_valid_args:
raise ValueError('compare_fn (%s) has following not expected args: %s' %
(compare_fn, non_valid_args))
def _loss_smaller(best_eval_result, current_eval_result):
"""Compares two evaluation results and returns true if the 2nd one is smaller.
Both evaluation results should have the values for MetricKeys.LOSS, which are
used for comparison.
Args:
best_eval_result: best eval metrics.
current_eval_result: current eval metrics.
Returns:
True if the loss of current_eval_result is smaller; otherwise, False.
Raises:
ValueError: If input eval result is None or no loss is available.
"""
default_key = metric_keys.MetricKeys.LOSS
if not best_eval_result or default_key not in best_eval_result:
raise ValueError(
'best_eval_result cannot be empty or no loss is found in it.')
if not current_eval_result or default_key not in current_eval_result:
raise ValueError(
'current_eval_result cannot be empty or no loss is found in it.')
return best_eval_result[default_key] > current_eval_result[default_key]
class BestExporter(Exporter):
"""This class exports the serving graph and checkpoints of the best models.
  This class performs a model export every time the new model is better
  than any existing model.
"""
def __init__(self,
name='best_exporter',
serving_input_receiver_fn=None,
event_file_pattern='eval/*.tfevents.*',
compare_fn=_loss_smaller,
assets_extra=None,
as_text=False,
exports_to_keep=5):
"""Create an `Exporter` to use with `tf.estimator.EvalSpec`.
    Example of creating a BestExporter for training and evaluation:
```python
def make_train_and_eval_fn():
# Set up feature columns.
categorial_feature_a = (
tf.feature_column.categorical_column_with_hash_bucket(...))
categorial_feature_a_emb = embedding_column(
categorical_column=categorial_feature_a, ...)
... # other feature columns
estimator = tf.estimator.DNNClassifier(
config=tf.estimator.RunConfig(
model_dir='/my_model', save_summary_steps=100),
feature_columns=[categorial_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
serving_feature_spec = tf.feature_column.make_parse_example_spec(
categorial_feature_a_emb)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
serving_feature_spec))
exporter = tf.estimator.BestExporter(
name="best_exporter",
serving_input_receiver_fn=serving_input_receiver_fn,
exports_to_keep=5)
train_spec = tf.estimator.TrainSpec(...)
eval_spec = [tf.estimator.EvalSpec(
input_fn=eval_input_fn,
steps=100,
exporters=exporter,
start_delay_secs=0,
throttle_secs=5)]
return tf.estimator.DistributedTrainingSpec(estimator, train_spec,
eval_spec)
```
Args:
name: unique name of this `Exporter` that is going to be used in the
export path.
serving_input_receiver_fn: a function that takes no arguments and returns
a `ServingInputReceiver`.
event_file_pattern: event file name pattern relative to model_dir. If
        None, however, the exporter would not be preemption-safe. To be
preemption-safe, event_file_pattern should be specified.
compare_fn: a function that compares two evaluation results and returns
true if current evaluation result is better. Follows the signature:
* Args:
* `best_eval_result`: This is the evaluation result of the best model.
* `current_eval_result`: This is the evaluation result of current
candidate model.
* Returns:
True if current evaluation result is better; otherwise, False.
assets_extra: An optional dict specifying how to populate the assets.extra
directory within the exported SavedModel. Each key should give the
destination path (including the filename) relative to the assets.extra
directory. The corresponding value gives the full path of the source
file to be copied. For example, the simple case of copying a single
file without renaming it is specified as `{'my_asset_file.txt':
'/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format. Defaults to
`False`.
exports_to_keep: Number of exports to keep. Older exports will be
garbage-collected. Defaults to 5. Set to `None` to disable garbage
collection.
Raises:
ValueError: if any arguments is invalid.
"""
self._compare_fn = compare_fn
if self._compare_fn is None:
raise ValueError('`compare_fn` must not be None.')
_verify_compare_fn_args(self._compare_fn)
self._saved_model_exporter = _SavedModelExporter(
name, serving_input_receiver_fn, assets_extra, as_text)
self._event_file_pattern = event_file_pattern
self._model_dir = None
self._best_eval_result = None
self._exports_to_keep = exports_to_keep
self._log = {}
if exports_to_keep is not None and exports_to_keep <= 0:
raise ValueError(
'`exports_to_keep`, if provided, must be positive number')
@property
def name(self):
return self._saved_model_exporter.name
def export(self, estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
export_result = None
if self._model_dir != estimator.model_dir and self._event_file_pattern:
# Loads best metric from event files.
tf_logging.info('Loading best metric from event files.')
self._model_dir = estimator.model_dir
full_event_file_pattern = os.path.join(self._model_dir,
self._event_file_pattern)
self._best_eval_result = self._get_best_eval_result(
full_event_file_pattern)
if os.path.isfile(os.path.join(export_path, 'export.log')):
self._log = {}
try:
self._log = json.load(open(os.path.join(export_path, 'export.log'), 'r'))
except json.JSONDecodeError:
pass
if len(self._log) == 0:
self._best_eval_result = None
if self._best_eval_result is None or self._compare_fn(
best_eval_result=self._best_eval_result,
current_eval_result=eval_result):
tf_logging.info('Performing best model export.')
self._best_eval_result = eval_result
export_result = self._saved_model_exporter.export(
estimator, export_path, checkpoint_path, eval_result,
is_the_final_export)
export_result_path = export_result.decode("utf-8")
self._log[export_result_path] = {k: float(v) for k, v in eval_result.items()}
self._copy_checkpoint(checkpoint_path, export_result_path, eval_result["global_step"])
self._garbage_collect_exports(export_path)
with open(os.path.join(export_path, 'export.log'), 'w') as fp:
json.dump(self._log, fp)
return export_result
def _copy_checkpoint(self, checkpoint_pattern, dest_path, step):
for file in glob.glob(checkpoint_pattern + '*'):
shutil.copy(file, dest_path)
with open(os.path.join(dest_path, 'checkpoint'), 'w') as fp:
text = 'model_checkpoint_path: "model.ckpt-number"\n'.replace('number', str(step))
fp.write(text)
fp.close()
def _garbage_collect_exports(self, export_dir_base):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
"""
if self._exports_to_keep is None:
return
def _export_version_parser(path):
# create a simple parser that pulls the export_version from the directory.
filename = os.path.basename(path.path)
if not (len(filename) == 10 and filename.isdigit()):
return None
return path._replace(export_version=int(filename))
# pylint: disable=protected-access
keep_filter = gc._largest_export_versions(self._exports_to_keep)
delete_filter = gc._negation(keep_filter)
for p in delete_filter(
gc._get_paths(export_dir_base, parser=_export_version_parser)):
try:
del self._log[p.path]
gfile.DeleteRecursively(p.path)
except errors_impl.NotFoundError as e:
tf_logging.warn('Can not delete %s recursively: %s', p.path, e)
# pylint: enable=protected-access
def _get_best_eval_result(self, event_files):
"""Get the best eval result from event files.
Args:
event_files: Absolute pattern of event files.
Returns:
The best eval result.
"""
if not event_files:
return None
event_count = 0
best_eval_result = None
for event_file in gfile.Glob(os.path.join(event_files)):
for event in summary_iterator.summary_iterator(event_file):
if event.HasField('summary'):
event_eval_result = {}
for value in event.summary.value:
if value.HasField('simple_value'):
event_eval_result[value.tag] = value.simple_value
if event_eval_result:
if best_eval_result is None or self._compare_fn(
best_eval_result, event_eval_result):
event_count += 1
best_eval_result = event_eval_result
if event_count < 2:
return None
return best_eval_result
|
3386
|
from __future__ import annotations
import typing
from ctc import spec
from . import timestamp_crud
from . import metric_crud
from . import analytics_spec
async def async_create_payload(
*,
blocks: typing.Sequence[spec.BlockNumberReference] | None = None,
timestamps: typing.Sequence[int] | None = None,
timescale: analytics_spec.TimescaleSpec | None = None,
end_time: analytics_spec.Timestamp | None = None,
window_size: str | None = None,
interval_size: str | None = None,
provider: spec.ProviderSpec = None,
) -> analytics_spec.AnalyticsPayload:
"""create data payload from scratch"""
time_data = await timestamp_crud.async_get_time_data(
blocks=blocks,
timestamps=timestamps,
timescale=timescale,
end_time=end_time,
window_size=window_size,
interval_size=interval_size,
provider=provider,
)
# get data
data = await metric_crud.async_get_metrics(
blocks=time_data['block_numbers']
)
return {
'version': '0.1.0',
#
# time data
'n_samples': time_data['n_samples'],
'window_size': time_data['window_size'],
'interval_size': time_data['interval_size'],
'timestamps': time_data['timestamps'],
'block_numbers': time_data['block_numbers'],
'created_at_timestamp': time_data['created_at_timestamp'],
#
# metric data
'data': data,
}
# def update_payload(
# timescale: analytics_spec.Timescale,
# old_payload: analytics_spec.AnalyticsPayload,
# ) -> analytics_spec.AnalyticsPayload:
# new_timestamps = get_new_timestamps(
# timescale=timescale,
# old_payload=old_payload,
# )
# new_blocks = get_new_blocks(
# new_timestamps=new_timestamps,
# old_payload=old_payload,
# )
# new_metrics = get_metrics(blocks=new_blocks)
# return combine_new_data(
# old_payload=old_payload,
# new_metrics=new_metrics,
# )
|
3426
|
import asyncio
import uuid
import pytest
from aiomisc_pytest.pytest_plugin import TCPProxy
import aiormq
async def test_simple(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"foo",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b"foo"
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
async def test_blank_body(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b""
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
@pytest.mark.no_catch_loop_exceptions
async def test_bad_consumer(amqp_channel: aiormq.Channel, loop):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare()
future = loop.create_future()
await channel.basic_publish(b"urgent", routing_key=declare_ok.queue)
consumer_tag = loop.create_future()
async def bad_consumer(message):
await channel.basic_cancel(await consumer_tag)
future.set_result(message)
raise Exception
consume_ok = await channel.basic_consume(
declare_ok.queue, bad_consumer, no_ack=False,
)
consumer_tag.set_result(consume_ok.consumer_tag)
message = await future
await channel.basic_reject(message.delivery.delivery_tag, requeue=True)
assert message.body == b"urgent"
future = loop.create_future()
await channel.basic_consume(
declare_ok.queue, future.set_result, no_ack=True,
)
message = await future
assert message.body == b"urgent"
async def test_ack_nack_reject(amqp_channel: aiormq.Channel):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare(auto_delete=True)
queue = asyncio.Queue()
await channel.basic_consume(declare_ok.queue, queue.put, no_ack=False)
await channel.basic_publish(b"rejected", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"rejected"
await channel.basic_reject(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"nacked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"nacked"
await channel.basic_nack(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"acked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"acked"
await channel.basic_ack(message.delivery.delivery_tag)
async def test_confirm_multiple(amqp_channel: aiormq.Channel):
"""
    RabbitMQ has been observed to send confirmations in a strange pattern
    when messages are published simultaneously and only some of them are
    delivered to a queue. It sends acks like this: 1 2 4 5 (multiple, confirming also 3).
This test is probably inconsequential without publisher_confirms
This is a regression for https://github.com/mosquito/aiormq/issues/10
"""
channel = amqp_channel # type: aiormq.Channel
exchange = uuid.uuid4().hex
await channel.exchange_declare(exchange, exchange_type="topic")
try:
declare_ok = await channel.queue_declare(exclusive=True)
await channel.queue_bind(
declare_ok.queue, exchange, routing_key="test.5",
)
for i in range(10):
messages = [
asyncio.ensure_future(channel.basic_publish(
b"test", exchange=exchange, routing_key="test.{}".format(i),
))
for i in range(10)
]
_, pending = await asyncio.wait(messages, timeout=0.2)
assert not pending, "not all publishes were completed (confirmed)"
await asyncio.sleep(0.05)
finally:
await channel.exchange_delete(exchange)
async def test_exclusive_queue_locked(amqp_connection):
channel0 = await amqp_connection.channel()
channel1 = await amqp_connection.channel()
qname = str(uuid.uuid4())
await channel0.queue_declare(qname, exclusive=True)
try:
await channel0.basic_consume(qname, print, exclusive=True)
with pytest.raises(aiormq.exceptions.ChannelLockedResource):
await channel1.queue_declare(qname)
await channel1.basic_consume(qname, print, exclusive=True)
finally:
await channel0.queue_delete(qname)
async def test_remove_writer_when_closed(amqp_channel: aiormq.Channel):
with pytest.raises(aiormq.exceptions.ChannelClosed):
await amqp_channel.queue_declare(
"amq.forbidden_queue_name", auto_delete=True,
)
with pytest.raises(aiormq.exceptions.ChannelInvalidStateError):
await amqp_channel.queue_delete("amq.forbidden_queue_name")
async def test_proxy_connection(proxy_connection, proxy: TCPProxy):
channel = await proxy_connection.channel() # type: aiormq.Channel
await channel.queue_declare(auto_delete=True)
async def test_declare_queue_timeout(proxy_connection, proxy: TCPProxy):
for _ in range(3):
channel = await proxy_connection.channel() # type: aiormq.Channel
qname = str(uuid.uuid4())
with proxy.slowdown(read_delay=5, write_delay=0):
with pytest.raises(asyncio.TimeoutError):
await channel.queue_declare(
qname, auto_delete=True, timeout=0.5
)
|
3465
|
from prompt_toolkit.key_binding.bindings.named_commands import (accept_line,
self_insert, backward_delete_char, beginning_of_line)
from prompt_toolkit.key_binding.bindings.basic import if_no_repeat
from prompt_toolkit.key_binding.bindings.basic import load_basic_bindings
from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_search_bindings
from prompt_toolkit.key_binding.bindings.mouse import load_mouse_bindings
from prompt_toolkit.key_binding.bindings.cpr import load_cpr_bindings
from prompt_toolkit.key_binding.bindings.page_navigation import load_emacs_page_navigation_bindings
from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings
from prompt_toolkit.keys import Keys, ALL_KEYS
from prompt_toolkit.filters import Condition, HasSelection, is_searching
from prompt_toolkit.selection import SelectionState
from prompt_toolkit.clipboard import ClipboardData
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
from prompt_toolkit.application.current import get_app
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit import __version__ as prompt_toolkit_version
from .multiline import (auto_newline, tab_should_insert_whitespace,
document_is_multiline_python)
from .tokenize import inside_string, matching_parens
from .theme import emoji, emoji_pudb
from .processors import get_pyflakes_warnings
import re
import subprocess
import sys
import textwrap
import platform
def get_key_bindings():
# Based on prompt_toolkit.key_binding.defaults.load_key_bindings()
return merge_key_bindings([
load_basic_bindings(),
load_emacs_bindings(),
load_emacs_search_bindings(),
load_emacs_page_navigation_bindings(),
load_mouse_bindings(),
load_cpr_bindings(),
custom_key_bindings,
])
r = custom_key_bindings = KeyBindings()
def warning_positions(event):
document = event.current_buffer.document
warnings = get_pyflakes_warnings(document.text, frozenset(event.current_buffer.session._locals))
positions = []
for (row, col, msg, m) in warnings:
# Handle SyntaxErrorMessage which is the same warning for the whole
# line.
if m.col != col:
continue
pos = document.translate_row_col_to_index(row, col)
positions.append(pos)
return positions
@r.add_binding(Keys.Escape, 'p')
def previous_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[0] >= buffer.cursor_position:
return
p = positions[0]
for pos in positions:
if pos >= buffer.cursor_position:
break
p = pos
event.current_buffer._show_syntax_warning = True
event.current_buffer.cursor_position = p
@r.add_binding(Keys.Escape, 'n')
def next_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[-1] <= buffer.cursor_position:
return
p = positions[-1]
for pos in reversed(positions):
if pos <= buffer.cursor_position:
break
p = pos
event.current_buffer.cursor_position = p
# This can be removed once
# https://github.com/prompt-toolkit/python-prompt-toolkit/pull/857 is in a
# released version of prompt-toolkit.
ANSI_SEQUENCES['\x1b[1;9A'] = (Keys.Escape, Keys.Up)
ANSI_SEQUENCES['\x1b[1;9B'] = (Keys.Escape, Keys.Down)
@r.add_binding(Keys.Escape, Keys.Up)
def previous_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_backward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, 'P')
@r.add_binding(Keys.Escape, Keys.Down)
def forward_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_forward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, '<')
def beginning(event):
"""
Move to the beginning
"""
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, '>')
def end(event):
"""
Move to the end
"""
event.current_buffer.cursor_position = len(event.current_buffer.text)
# Document.start_of_paragraph/end_of_paragraph don't treat multiple blank
# lines correctly.
# Gives the positions right before one or more blank lines
BLANK_LINES = re.compile(r'\S *(\n *\n)')
@r.add_binding(Keys.Escape, '}')
def forward_paragraph(event):
"""
Move forward one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text):
if m.start(0) > cursor_position:
event.current_buffer.cursor_position = m.start(1)+1
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, '{')
def backward_paragraph(event):
"""
Move back one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text[::-1]):
if m.start(0) > len(text) - cursor_position:
event.current_buffer.cursor_position = len(text) - m.end(1) + 1
return
event.current_buffer.cursor_position = 0
WORD = re.compile(r'([a-z0-9]+|[A-Z]{2,}|[a-zA-Z0-9][a-z0-9]*)')
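# For illustration, this pattern splits identifiers on camelCase, ALLCAPS and digit
# boundaries, e.g. WORD.findall('fooBarBAZ_1') -> ['foo', 'Bar', 'BAZ', '1']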
@r.add_binding(Keys.Escape, 'f')
@r.add_binding(Keys.Escape, Keys.Right)
def forward_word(event):
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
event.current_buffer.cursor_position = m.end(0)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'b')
@r.add_binding(Keys.Escape, Keys.Left)
def backward_word(event):
"""
    Move back one word
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
event.current_buffer.cursor_position = m.start(0)
return
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, 'd')
def kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
pos = None
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
pos = m.end(0) - cursor_position
break
if pos:
deleted = buffer.delete(count=pos)
event.app.clipboard.set_text(deleted)
@r.add_binding(Keys.Escape, Keys.Backspace)
def backward_kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
pos = cursor_position - m.start(0)
break
else:
pos = buffer.cursor_position
if pos:
deleted = buffer.delete_before_cursor(count=pos)
event.app.clipboard.set_text(deleted)
def insert_text_ovewrite(buffer, data, move_cursor=True):
"""
    Insert characters at the cursor position, overwriting the text that is
    currently under them.
    :param move_cursor: When True, move the cursor past the inserted text.
"""
# Original text & cursor position.
otext = buffer.text
ocpos = buffer.cursor_position
# Don't overwrite the newline itself. Just before the line ending,
# it should act like insert mode.
overwritten_text = otext[ocpos:ocpos + len(data)]
buffer.text = otext[:ocpos] + data + otext[ocpos + len(overwritten_text):]
if move_cursor:
buffer.cursor_position += len(data)
@r.add_binding(Keys.Escape, 'l')
def downcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
insert_text_ovewrite(buffer, word.lower())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'u')
def upcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
insert_text_ovewrite(buffer, word.upper())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'c')
def capitalize_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
# Don't use word.capitalize() because the first character could be
# - or _
for i, c in enumerate(word):
if c.isalnum():
word = word[:i] + c.capitalize() + word[i+1:].lower()
break
insert_text_ovewrite(buffer, word)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, Keys.ControlF)
def forward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if opening.start == (row, col):
new_pos = document.translate_row_col_to_index(closing.end[0]-1, closing.end[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Escape, Keys.ControlB)
def backward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if closing.end == (row, col):
new_pos = document.translate_row_col_to_index(opening.start[0]-1, opening.start[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Left)
def left_multiline(event):
"""
Left that wraps around in multiline.
"""
if event.current_buffer.cursor_position - event.arg >= 0:
event.current_buffer.cursor_position -= event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.Right)
def right_multiline(event):
"""
Right that wraps around in multiline.
"""
if event.current_buffer.cursor_position + event.arg <= len(event.current_buffer.text):
event.current_buffer.cursor_position += event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.ControlD)
def exit(event):
event.app.exit(exception=EOFError, style='class:exiting')
@r.add_binding(Keys.ControlC, filter=~is_searching)
def keyboard_interrupt(event):
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
is_returnable = Condition(
lambda: get_app().current_buffer.is_returnable)
@r.add_binding(Keys.Enter, filter=is_returnable)
def multiline_enter(event):
"""
When not in multiline, execute. When in multiline, try to
intelligently add a newline or execute.
"""
buffer = event.current_buffer
document = buffer.document
multiline = document_is_multiline_python(document)
text_after_cursor = document.text_after_cursor
text_before_cursor = document.text_before_cursor
text = buffer.text
    # ''.isspace() is False (isspace doesn't respect vacuous truth), so an empty
    # text_after_cursor must be checked separately
if (not text_after_cursor or text_after_cursor.isspace()) and text_before_cursor.replace(' ', '').endswith('\n'):
# If we are at the end of the buffer, accept unless we are in a
# docstring
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if multiline and inside_string(text, row, col):
# We are inside a docstring
auto_newline(event.current_buffer)
else:
accept_line(event)
elif not multiline:
# Always accept a single valid line. Also occurs for unclosed single
# quoted strings (which will give a syntax error)
accept_line(event)
else:
auto_newline(event.current_buffer)
# Always accept the line if the previous key was Up
# Requires https://github.com/jonathanslenders/python-prompt-toolkit/pull/492.
# We don't need a parallel for down because down is already at the end of the
# prompt.
@r.add_binding(Keys.Enter, filter=is_returnable)
def accept_after_history_backward(event):
pks = event.previous_key_sequence
if pks and getattr(pks[-1], 'accept_next', False) and ((len(pks) == 1 and
pks[0].key == "up") or (len(pks) == 2 and pks[0].key == "escape"
and isinstance(pks[1].key, str) and pks[1].key in ['p', 'P', 'up',
'down'])):
accept_line(event)
else:
multiline_enter(event)
@r.add_binding(Keys.Escape, Keys.Enter)
@r.add_binding(Keys.Escape, Keys.ControlJ)
def insert_newline(event):
auto_newline(event.current_buffer)
@r.add_binding(Keys.ControlO)
def open_line(event):
event.current_buffer.newline(copy_margin=False)
event.current_buffer.cursor_left()
# M-[ a g is set to S-Enter in iTerm2 settings
Keys.ShiftEnter = "<Shift-Enter>"
ALL_KEYS.append('<Shift-Enter>')
ANSI_SEQUENCES['\x1b[ag'] = Keys.ShiftEnter
ANSI_SEQUENCES['\x1bOM'] = Keys.ShiftEnter
if prompt_toolkit_version[0] != '3':
r.add_binding(Keys.ShiftEnter)(accept_line)
@r.add_binding(Keys.Tab, filter=tab_should_insert_whitespace)
def indent(event):
"""
When tab should insert whitespace, do that instead of completion.
"""
# Text before cursor on the line must be whitespace because of the
# TabShouldInsertWhitespaceFilter.
before_cursor = event.app.current_buffer.document.current_line_before_cursor
event.app.current_buffer.insert_text(' '*(4 - len(before_cursor)%4))
LEADING_WHITESPACE = re.compile(r'( *)[^ ]?')
@r.add_binding(Keys.Escape, 'm')
def back_to_indentation(event):
"""
Move back to the beginning of the line, ignoring whitespace.
"""
current_line = event.app.current_buffer.document.current_line
before_cursor = event.app.current_buffer.document.current_line_before_cursor
indent = LEADING_WHITESPACE.search(current_line)
if indent:
event.app.current_buffer.cursor_position -= len(before_cursor) - indent.end(1)
@r.add_binding(Keys.Backspace, save_before=if_no_repeat)
def delete_char_or_unindent(event):
buffer = event.app.current_buffer
if buffer.document.current_line_before_cursor.isspace():
spaces = len(buffer.document.current_line_before_cursor)
        # Delete back to the previous tab stop: spaces % -4 is in {-3, -2, -1, 0},
        # so between 1 and 4 spaces are removed
buffer.delete_before_cursor(count=4 + spaces%-4)
else:
backward_delete_char(event)
# Reset the history search text
buffer.history_search_text = None
@r.add_binding(Keys.Escape, ' ')
def cycle_spacing(event):
"""
Based on emacs's cycle-spacing
On first call, remove all whitespace (if any) from around the cursor and
replace it with a single space.
On second call, remove all whitespace.
On third call, restore the original whitespace and cursor position.
"""
buffer = event.app.current_buffer
# Avoid issues when text grows or shrinks below, keeping the cursor
# position out of sync
cursor_position = buffer.cursor_position
buffer.cursor_position = 0
buffer.text, buffer.cursor_position = do_cycle_spacing(buffer.text, cursor_position)
def do_cycle_spacing(text, cursor_position, state=[]):
rstripped = text[:cursor_position].rstrip()
lstripped = text[cursor_position:].lstrip()
text_before_cursor = text[:cursor_position]
# The first element of state is the original text. The last element is the
# buffer text and cursor position as we last left them. If either of those
# have changed, reset. The state here is global, but that's fine, because
    # we consider any change to be enough to clear the state. The worst that
# happens here is that we resume when we shouldn't if things look exactly
# as they did where we left off.
# TODO: Use event.previous_key_sequence instead.
if state and state[-1] != (text, cursor_position):
state.clear()
if len(state) == 0:
# Replace all whitespace at the cursor (if any) with a single space.
state.append((text, cursor_position))
cursor_position -= len(text_before_cursor) - len(rstripped) -1
text = rstripped + ' ' + lstripped
state.append((text, cursor_position))
elif len(state) == 2:
# Exactly one space at the cursor. Remove it.
cursor_position -= 1
text = rstripped + lstripped
state.append((text, cursor_position))
elif len(state) == 3:
# Restore original text and cursor position
text, cursor_position = state[0]
state.clear()
if cursor_position < 0:
cursor_position = 0
if cursor_position > len(text):
cursor_position = len(text)
return text, cursor_position
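# Rough illustration of the three-state cycle (hypothetical inputs, state passed explicitly):
#     state = []
#     do_cycle_spacing('a    b', 3, state)   # -> ('a b', 2)     collapse to one space
#     do_cycle_spacing('a b', 2, state)      # -> ('ab', 1)      remove the space
#     do_cycle_spacing('ab', 1, state)       # -> ('a    b', 3)  restore the original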
@r.add_binding(Keys.ControlX, Keys.ControlO)
def delete_blank_lines(event):
"""
On blank line, delete all surrounding blank lines, leaving just one.
On isolated blank line, delete that one.
On nonblank line, delete any immediately following blank lines.
"""
buffer = event.app.current_buffer
document = buffer.document
lines_up_to_current = document.lines[:document.cursor_position_row+1]
lines_after_current = document.lines[document.cursor_position_row+1:]
blank_lines_before = 0
for line in lines_up_to_current[::-1]:
if not line.strip():
blank_lines_before += 1
else:
break
blank_lines_after = 0
for line in lines_after_current:
if not line.strip():
blank_lines_after += 1
else:
break
if not blank_lines_before:
stripped_before = lines_up_to_current
else:
stripped_before = lines_up_to_current[:-blank_lines_before]
stripped_after = lines_after_current[blank_lines_after:]
# XXX: Emacs always keeps a newline at the end of the file, but I don't
# think it matters here.
if (not blank_lines_before and blank_lines_after) or blank_lines_before + blank_lines_after == 1:
new_text = '\n'.join(stripped_before + stripped_after)
elif blank_lines_before + blank_lines_after == 0:
return
else:
buffer.cursor_up(max(blank_lines_before-1, 0))
new_text = '\n'.join(stripped_before + [''] + stripped_after)
# Even though we do auto_up, it can be out of bounds from trailing
# whitespace
buffer.cursor_position = min(buffer.cursor_position, len(new_text))
buffer.text = new_text
@r.add_binding(Keys.ControlX, Keys.ControlT)
def transpose_lines(event):
buffer = event.current_buffer
document = buffer.document
row = document.cursor_position_row
new_lines = document.lines[:]
if len(new_lines) == 1:
new_lines.append('')
if row == 0:
buffer.cursor_down()
row += 1
if row == len(new_lines) - 1:
new_lines.append('')
new_lines[row], new_lines[row-1] = new_lines[row-1], new_lines[row]
buffer.text = '\n'.join(new_lines)
buffer.cursor_down()
beginning_of_line(event)
# Selection stuff
@r.add_binding(Keys.ShiftLeft)
def select_left(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position -= event.arg
@r.add_binding(Keys.ShiftRight)
def select_right(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position += event.arg
@r.add_binding(Keys.Up)
def auto_up(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row > 0:
buffer.cursor_up(count=count)
elif not buffer.selection_state:
event.key_sequence[-1].accept_next = True
buffer.history_backward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.Down)
def auto_down(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row < buffer.document.line_count - 1:
buffer.cursor_down(count=count)
elif not buffer.selection_state:
buffer.history_forward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.ShiftUp)
def select_line_up(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
up_position = buffer.document.get_cursor_up_position()
buffer.cursor_position += up_position
if not up_position:
buffer.cursor_position = 0
@r.add_binding(Keys.ShiftDown)
def select_line_down(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
down_position = buffer.document.get_cursor_down_position()
buffer.cursor_position += down_position
if not down_position:
buffer.cursor_position = len(buffer.document.text)
# The default doesn't toggle correctly
@r.add_binding(Keys.ControlSpace)
def toggle_selection(event):
buffer = event.current_buffer
if buffer.selection_state:
buffer.selection_state = None
else:
buffer.start_selection()
@r.add_binding(Keys.ControlX, 'h')
def select_all(event):
buffer = event.current_buffer
buffer.selection_state = SelectionState(len(buffer.document.text))
buffer.cursor_position = 0
@r.add_binding(Keys.Delete, filter=HasSelection())
@r.add_binding(Keys.Backspace, filter=HasSelection())
def delete_selection(event):
event.current_buffer.cut_selection()
@r.add_binding(Keys.Any, filter=HasSelection())
def self_insert_and_clear_selection(event):
event.current_buffer.cut_selection()
self_insert(event)
@r.add_binding(Keys.ControlK, filter=HasSelection())
@r.add_binding(Keys.ControlU, filter=HasSelection())
def kill_selection(event):
data = event.current_buffer.cut_selection()
event.app.clipboard.set_data(data)
def system_copy(text):
if "Linux" in platform.platform():
copy_command = ['xclip', '-selection', 'c']
else:
copy_command = ['pbcopy']
try:
# In Python 3.6 we can do this:
# run(copy_command, input=text, encoding='utf-8', check=True)
subprocess.run(copy_command, input=text.encode('utf-8'), check=True)
except FileNotFoundError:
print("Error: could not find", copy_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(copy_command[0], "error:", e, file=sys.stderr)
def system_paste():
if "Linux" in platform.platform():
paste_command = ['xsel', '-b']
else:
paste_command = ['pbpaste']
try:
# In Python 3.6 we can do this:
        # p = run(paste_command, stdout=subprocess.PIPE, encoding='utf-8', check=True)
p = subprocess.run(paste_command, stdout=subprocess.PIPE, check=True)
except FileNotFoundError:
print("Error: could not find", paste_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(paste_command[0], "error:", e, file=sys.stderr)
return p.stdout.decode('utf-8')
@r.add_binding(Keys.ControlX, Keys.ControlW)
def copy_to_clipboard(event):
if event.current_buffer.document.selection:
from_, to = event.current_buffer.document.selection_range()
run_in_terminal(lambda:system_copy(event.current_buffer.document.text[from_:to + 1]))
@r.add_binding(Keys.ControlX, Keys.ControlY)
def paste_from_clipboard(event):
paste_text_future = run_in_terminal(system_paste)
event.current_buffer.cut_selection()
paste_text_future.add_done_callback(lambda future:\
event.current_buffer.paste_clipboard_data(ClipboardData(future.result())))
# M-[ a b is set to C-S-/ (C-?) in iTerm2 settings
Keys.ControlQuestionmark = "<C-?>"
ALL_KEYS.append("<C-?>")
ANSI_SEQUENCES['\x1b[ab'] = Keys.ControlQuestionmark
Keys.ControlSlash = "<C-/>"
ALL_KEYS.append("<C-/>")
ANSI_SEQUENCES['\x1b"5/'] = Keys.ControlSlash
# This won't work until
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/484 is
# merged.
if prompt_toolkit_version[0] != '3':
@r.add_binding(Keys.ControlQuestionmark, save_before=lambda e: False)
def redo(event):
event.current_buffer.redo()
@r.add_binding(Keys.ControlSlash, save_before=lambda e: False)
def undo(event):
event.current_buffer.undo()
# Need to escape all spaces here because of verbose (x) option below
ps1_prompts = [r'>>>\ '] + [re.escape(i) + r'\[\d+\]:\ ' for i, j in emoji + [emoji_pudb]] + [r'In\ \[\d+\]:\ ']
ps2_prompts = [r'\ *\.\.\.:\ ?', r'\.\.\.\ ?', '\N{CLAPPING HANDS SIGN}+\\ ?⎢\\ ?']
PS1_PROMPTS_RE = re.compile('|'.join(ps1_prompts))
PS2_PROMPTS_RE = re.compile('|'.join(ps2_prompts))
PROMPTED_TEXT_RE = re.compile(r'''(?x) # Multiline and verbose
(?P<prompt>
(?P<ps1prompt>{PS1_PROMPTS_RE.pattern}) # Match prompts at the front
| (?P<ps2prompt>{PS2_PROMPTS_RE.pattern}))? # of the line.
(?P<noprompt>(?(prompt)\r|))? # If the prompt is not
# matched, this is a special
# marker group that will match
# the empty string.
# Otherwise it will not
# match (because all \r's
# have been stripped from
# the string).
(?P<line>.*)\n # The actual line.
'''.format(PS1_PROMPTS_RE=PS1_PROMPTS_RE, PS2_PROMPTS_RE=PS2_PROMPTS_RE))
def prompt_repl(match):
r"""
repl function for re.sub for clearing prompts
Replaces PS1 prompts with \r and removes PS2 prompts.
"""
# TODO: Remove the lines with no prompt
if match.group('ps1prompt') is not None:
return '\r' + match.group('line') + '\n'
elif match.group('ps2prompt') is not None:
return match.group('line') + '\n'
return ''
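# For example (illustrative only), PROMPTED_TEXT_RE.sub(prompt_repl, '>>> a = 1\n')
# yields '\ra = 1\n'; split_prompts() below then uses the carriage returns as
# separators between inputs.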
def split_prompts(text, indent=''):
r"""
Takes text copied from mypython, Python, or IPython session and returns a
list of inputs
Outputs are stripped. If no prompts are found the text is left alone.
The resulting text is indented by indent, except for the first line.
It is assumed that the text contains no carriage returns (\r).
    Trailing whitespace and newlines are stripped from the outputs.
Example:
>>> split_prompts('''
... In [1]: a = 1
...
... In [2]: a
... Out[2]: 1
...
... In [3]: def test():
... ...: pass
... ...:
... ''')
['a = 1', 'a', 'def test():\n pass']
"""
from .mypython import validate_text
text = textwrap.dedent(text).strip() + '\n'
text = textwrap.dedent(PROMPTED_TEXT_RE.sub(prompt_repl, text)).lstrip()
lines = text.split('\r')
# Make sure multilines end in two newlines
for i, line in enumerate(lines):
try:
validate_text(line)
except SyntaxError:
# If there is a syntax error, we can't use the CMD_QUEUE (it
# breaks things).
lines = ['\n'.join(lines)]
break
if '\n' in line.rstrip():
lines[i] += '\n'
lines[0] = textwrap.indent(lines[0], indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))
for i in range(1, len(lines)):
lines[i] = textwrap.indent(lines[i], indent)
# Extraneous newlines at the end will be stripped by the prompt anyway.
# This just makes this function easier to test.
lines = [i.rstrip() for i in lines]
return lines
@r.add_binding(Keys.BracketedPaste)
def bracketed_paste(event):
from .mypython import CMD_QUEUE
data = event.data
buffer = event.current_buffer
# Be sure to use \n as line ending.
# This part is the same as the default binding
# Some terminals (Like iTerm2) seem to paste \r\n line endings in a
# bracketed paste. See: https://github.com/ipython/ipython/issues/9737
data = data.replace('\r\n', '\n')
data = data.replace('\r', '\n')
# Replace tabs with four spaces (C-x C-y will still paste the text exactly)
data = data.replace('\t', ' ')
# Strip prompts off pasted text
document = buffer.document
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if not inside_string(event.current_buffer.text, row, col):
indent = LEADING_WHITESPACE.match(document.current_line_before_cursor)
current_line_indent = indent.group(1) if indent else ''
if PS1_PROMPTS_RE.match(data.strip()) or PS2_PROMPTS_RE.match(data.strip()):
lines = split_prompts(data, current_line_indent)
else:
lines = [textwrap.indent(data, current_line_indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))]
else:
lines = [data]
event.current_buffer.insert_text(lines[0])
for text in lines[1:]:
# TODO: Send last chunk as bracketed paste, so it can be edited
CMD_QUEUE.append(text)
if CMD_QUEUE:
accept_line(event)
@r.add_binding(Keys.Escape, ';')
def comment(event):
buffer = event.current_buffer
document = buffer.document
cursor_line, cursor_col = document.translate_index_to_position(document.cursor_position)
if document.selection:
from_, to = document.selection_range()
start_line, start_col = document.translate_index_to_position(from_)
end_line, end_col = document.translate_index_to_position(to - 1)
end_line += 1
else:
start_line = cursor_line
end_line = start_line + 1
# Get the indentation for the comment delimiters
min_indent = float('inf')
for line in document.lines[start_line:end_line]:
if not line.strip():
continue
indent = LEADING_WHITESPACE.search(line)
if indent:
min_indent = min(min_indent, len(indent.group(1)))
else:
min_indent = 0
if min_indent == 0:
break
if min_indent == float('inf'):
min_indent = 0
uncomment = (all(not line.strip() or line[min_indent] == '#' for line in
document.lines[start_line:end_line])
and ''.join(document.lines[start_line:end_line]).strip())
lines = []
for i, line in enumerate(document.lines):
if start_line <= i < end_line:
if uncomment:
lines.append(line[:min_indent] + line[min_indent+2:])
else:
lines.append(line[:min_indent] + '# ' + line[min_indent:])
else:
lines.append(line)
new_text = '\n'.join(lines)
# TODO: Set the cursor position correctly
n_changed = 2*(cursor_line - start_line + 1)
if cursor_line >= end_line - 1:
n_changed -= 2
if uncomment:
buffer.cursor_position -= n_changed
buffer.text = new_text
else:
buffer.text = new_text
buffer.cursor_position += n_changed
@r.add_binding(Keys.ControlX, Keys.ControlE)
def open_in_editor(event):
event.current_buffer.open_in_editor(event.app)
@r.add_binding(Keys.ControlX, Keys.ControlS)
@r.add_binding(Keys.ControlX, Keys.ControlC)
def noop(event):
pass
3466
from biogeme import *
from headers import *
from loglikelihood import *
from statistics import *
from nested import *
#import random
cons_work= Beta('cons for work', 0,-10,10,0)
cons_edu = Beta('cons for education',0,-50,10,0)
cons_shopping = Beta('cons for shopping',0,-10,10,0)
cons_other = Beta('cons for other',0,-10,10,0)
cons_Q = Beta('cons for quit',0,-10,10,1)
first_stop_inbound= Beta('dummy for first stop of inbound half tour', 0,-10,10,1)
second_stop_inbound= Beta('dummy for second stop of inbound half tour',0,-10,10,0)
threeplus_stop_inbound=Beta('dummy for 3+ stop of inbound half tour',0,-10,10,0)
first_stop_outbound= Beta('dummy for first stop of outbound half tour', 0,-10,10,0)
second_stop_outbound= Beta('dummy for second stop of outbound half tour',0,-10,10,0)
threeplus_stop_outbound=Beta('dummy for 3+ stop of outbound half tour',0,-10,10,0)
work_tour_dummy_Q=Beta('work tour dummy in quit',0,-10,10,1)
edu_tour_dummy_Q=Beta('edu tour dummy in quit',0,-10,10,1)
shopping_tour_dummy_Q=Beta('shopping tour dummy in quit',0,-10,10,1)
other_tour_dummy_Q=Beta('other tour dummy in quit',0,-10,10,1)
first_tour_dummy_Q=Beta('first tour dummy in quit',0,-10,10,0)
sub_tour_dummy_Q=Beta('has subtour dummy in quit',0,-10,10,0)
zero_tour_remain_Q=Beta('zero tour remain dummy',0,-10,10,1)
one_tour_remain_Q=Beta('one tour remain dummy',0,-10,10,0)
twoplus_tour_remain_Q=Beta('2+ tour remain dummy',0,-10,10,1)
work_tour_dummy_W=Beta('work tour dummy in work',0,-10,10,1)
edu_tour_dummy_W=Beta('edu tour dummy in work',0,-10,10,1)
shopping_tour_dummy_W=Beta('shopping tour dummy in work',0,-10,10,1)
other_tour_dummy_W=Beta('other tour dummy in work',0,-10,10,1)
female_dummy_W=Beta('female dummy in work',0,-10,10,0)
student_dummy_W=Beta('student dummy in work',0,-10,10,1)
worker_dummy_W=Beta('worker dummy in work',0,-10,10,1)
driver_dummy_W=Beta('driver dummy in work',0,-10,10,0)
passenger_dummy_W=Beta('passenger dummy in work',0,-10,10,0)
public_dummy_W=Beta('PT dummy in work',0,-10,10,0)
work_tour_dummy_E=Beta('work tour dummy in edu',0,-10,10,1)
edu_tour_dummy_E=Beta('edu tour dummy in edu',0,-10,10,1)
shopping_tour_dummy_E=Beta('shopping tour dummy in edu',0,-10,10,1)
other_tour_dummy_E=Beta('other tour dummy in edu',0,-10,10,1)
female_dummy_E=Beta('female dummy in edu',0,-10,10,0)
student_dummy_E=Beta('student dummy in edu',0,-10,10,1)
worker_dummy_E=Beta('worker dummy in edu',0,-10,10,1)
driver_dummy_E=Beta('driver dummy in edu',0,-10,10,0)
passenger_dummy_E=Beta('passenger dummy in edu',0,-10,10,0)
public_dummy_E=Beta('PT dummy in edu',0,-10,10,0)
work_tour_dummy_S=Beta('work tour dummy in shopping',0,-10,10,1)
edu_tour_dummy_S=Beta('edu tour dummy in shopping',0,-10,10,1)
shopping_tour_dummy_S=Beta('shopping tour dummy in shopping',0,-10,10,1)
other_tour_dummy_S=Beta('other tour dummy in shopping',0,-10,10,0)
female_dummy_S=Beta('female dummy in shopping',0,-10,10,0)
student_dummy_S=Beta('student dummy in shopping',0,-10,10,1)
worker_dummy_S=Beta('worker dummy in shopping',0,-10,10,0)
driver_dummy_S=Beta('driver dummy in shopping',0,-10,10,0)
passenger_dummy_S=Beta('passenger dummy in shopping',0,-10,10,0)
public_dummy_S=Beta('PT dummy in shopping',0,-10,10,0)
work_tour_dummy_O=Beta('work tour dummy in other',0,-10,10,0)
edu_tour_dummy_O=Beta('edu tour dummy in other',0,-10,10,0)
shopping_tour_dummy_O=Beta('shopping tour dummy in other',0,-10,10,0)
other_tour_dummy_O=Beta('other tour dummy in other',0,-10,10,1)
female_dummy_O=Beta('female dummy in other',0,-10,10,0)
student_dummy_O=Beta('student dummy in other',0,-10,10,0)
worker_dummy_O=Beta('worker dummy in other',0,-10,10,0)
driver_dummy_O=Beta('driver dummy in other',0,-10,10,0)
passenger_dummy_O=Beta('passenger dummy in other',0,-10,10,0)
public_dummy_O=Beta('PT dummy in other',0,-10,10,0)
work_logsum=Beta('work logsum in work',0,-10,10,1)
edu_logsum=Beta('edu logsum in edu',0,-10,10,1)
shop_logsum=Beta('shop logsum in shop',0,-10,10,1)
other_logsum=Beta('other logsum in other',0,-10,10,1)
time_window_work=Beta('time available in work',0,-10,10,1)
time_window_edu= Beta('time available in edu',0,-10,10,1)
time_window_shopping= Beta('time available in shopping',0,-10,10,1)
time_window_other= Beta('time available in other',0,-10,10,1)
tour_distance_work= Beta('log tour distance in work',0,-10,10,0)
tour_distance_edu= Beta('log tour distance in edu',0,-10,10,0)
tour_distance_shopping= Beta('log tour distance in shopping',0,-10,10,0)
tour_distance_other=Beta('log tour distance in other',0,-10,10,0)
a700_a930_work= Beta('period 7am to 9:30am in work',0,-10,10,0)
a930_a1200_work=Beta('period 9:30am to 12pm in work',0,-10,10,0)
p300_p530_work=Beta('period 3pm to 5:30pm in work',0,-10,10,0)
p530_p730_work=Beta('period 5:30pm to 7:30 pm in work',0,-10,10,0)
p730_p1000_work=Beta('period 7:30pm to 10pm in work',0,-10,10,0)
p1000_a700_work=Beta('period 10pm to 7am in work',0,-10,10,0)
a700_a930_edu= Beta('period 7am to 9:30am in edu',0,-10,10,0)
a930_a1200_edu=Beta('period 9:30am to 12pm in edu',0,-10,10,0)
p300_p530_edu=Beta('period 3pm to 5:30pm in edu',0,-10,10,0)
p530_p730_edu=Beta('period 5:30pm to 7:30 pm in edu',0,-10,10,0)
p730_p1000_edu=Beta('period 7:30pm to 10pm in edu',0,-10,10,0)
p1000_a700_edu=Beta('period 10pm to 7am in edu',0,-10,10,0)
a700_a930_shopping= Beta('period 7am to 9:30am in shopping',0,-10,10,0)
a930_a1200_shopping=Beta('period 9:30am to 12pm in shopping',0,-10,10,0)
p300_p530_shopping=Beta('period 3pm to 5:30pm in shopping',0,-10,10,0)
p530_p730_shopping=Beta('period 5:30pm to 7:30 pm in shopping',0,-10,10,0)
p730_p1000_shopping=Beta('period 7:30pm to 10pm in shopping',0,-10,10,0)
p1000_a700_shopping=Beta('period 10pm to 7am in shopping',0,-10,10,0)
a700_a930_other= Beta('period 7am to 9:30am in other',0,-10,10,0)
a930_a1200_other=Beta('period 9:30am to 12pm in other',0,-10,10,0)
p300_p530_other=Beta('period 3pm to 5:30pm in other',0,-10,10,0)
p530_p730_other=Beta('period 5:30pm to 7:30 pm in other',0,-10,10,0)
p730_p1000_other=Beta('period 7:30pm to 10pm in other',0,-10,10,0)
p1000_a700_other=Beta('period 10pm to 7am in other',0,-10,10,0)
MU1 = Beta('MU for quit',1,0,100,1)
MU2 = Beta('MU for non-quit', 1.0,0,100,1)
#V for work
V_work= cons_work+\
work_tour_dummy_W*1*(tour_type==1)+\
edu_tour_dummy_W*1*(tour_type==2)+\
shopping_tour_dummy_W*1*(tour_type==3)+\
other_tour_dummy_W*1*(tour_type==4)+\
female_dummy_W*female_dummy+\
student_dummy_W*student_dummy+\
worker_dummy_W*worker_dummy+\
driver_dummy_W*driver_dummy+\
passenger_dummy_W*passenger_dummy+\
public_dummy_W*public_dummy+\
work_logsum * worklogsum+\
time_window_work*time_window_h+\
tour_distance_work*log(1+distance)+\
a700_a930_work*p_700a_930a+\
a930_a1200_work*p_930a_1200a+\
p300_p530_work*p_300p_530p+\
p530_p730_work*p_530p_730p+\
p730_p1000_work*p_730p_1000p+\
p1000_a700_work*p_1000p_700a
#V for education
V_edu = cons_edu+\
work_tour_dummy_E*1*(tour_type==1)+\
edu_tour_dummy_E*1*(tour_type==2)+\
shopping_tour_dummy_E*1*(tour_type==3)+\
other_tour_dummy_E*1*(tour_type==4)+\
female_dummy_E*female_dummy+\
student_dummy_E*student_dummy+\
worker_dummy_E*worker_dummy+\
driver_dummy_E*driver_dummy+\
passenger_dummy_E*passenger_dummy+\
public_dummy_E*public_dummy+\
edu_logsum * edulogsum+\
time_window_edu*time_window_h+\
tour_distance_edu*log(1+distance)+\
a700_a930_edu*p_700a_930a+\
a930_a1200_edu*p_930a_1200a+\
p300_p530_edu*p_300p_530p+\
p530_p730_edu*p_530p_730p+\
p730_p1000_edu*p_730p_1000p+\
p1000_a700_edu*p_1000p_700a
#V for shopping
V_shopping = cons_shopping+\
work_tour_dummy_S*1*(tour_type==1)+\
edu_tour_dummy_S*1*(tour_type==2)+\
shopping_tour_dummy_S*1*(tour_type==3)+\
other_tour_dummy_S*1*(tour_type==4)+\
female_dummy_S*female_dummy+\
student_dummy_S*student_dummy+\
worker_dummy_S*worker_dummy+\
driver_dummy_S*driver_dummy+\
passenger_dummy_S*passenger_dummy+\
public_dummy_S*public_dummy+\
shop_logsum * shoplogsum+\
time_window_shopping*time_window_h+\
tour_distance_shopping*log(1+distance)+\
a700_a930_shopping*p_700a_930a+\
a930_a1200_shopping*p_930a_1200a+\
p300_p530_shopping*p_300p_530p+\
p530_p730_shopping*p_530p_730p+\
p730_p1000_shopping*p_730p_1000p+\
p1000_a700_shopping*p_1000p_700a
#V for other
V_other=cons_other+\
work_tour_dummy_O*1*(tour_type==1)+\
edu_tour_dummy_O*1*(tour_type==2)+\
shopping_tour_dummy_O*1*(tour_type==3)+\
other_tour_dummy_O*1*(tour_type==4)+\
female_dummy_O*female_dummy+\
student_dummy_O*student_dummy+\
worker_dummy_O*worker_dummy+\
driver_dummy_O*driver_dummy+\
passenger_dummy_O*passenger_dummy+\
public_dummy_O*public_dummy+\
other_logsum * otherlogsum+\
time_window_other*time_window_h+\
tour_distance_other*log(1+distance)+\
a700_a930_other*p_700a_930a+\
a930_a1200_other*p_930a_1200a+\
p300_p530_other*p_300p_530p+\
p530_p730_other*p_530p_730p+\
p730_p1000_other*p_730p_1000p+\
p1000_a700_other*p_1000p_700a
#V for quit
V_quit= cons_Q+first_stop_inbound*first_stop*first_bound+\
second_stop_inbound*second_stop*first_bound+\
threeplus_stop_inbound*three_plus_stop*first_bound+\
first_stop_outbound*first_stop*second_bound+\
second_stop_outbound*second_stop*second_bound+\
threeplus_stop_outbound*three_plus_stop*second_bound+\
work_tour_dummy_Q*1*(tour_type==1)+\
edu_tour_dummy_Q*1*(tour_type==2)+\
shopping_tour_dummy_Q*1*(tour_type==3)+\
other_tour_dummy_Q*1*(tour_type==4)+\
first_tour_dummy_Q*first_tour_dummy+\
sub_tour_dummy_Q*has_subtour+zero_tour_remain_Q*1*(tour_remain==0)+\
one_tour_remain_Q*1*(tour_remain==1)+twoplus_tour_remain_Q*1*(tour_remain>=2)
V = {0:V_quit,1: V_work,2:V_edu,3:V_shopping,4:V_other}
av= {0:avail_quit,1:avail_workstop,2:avail_edustop,3:avail_shopstop,4:avail_otherstop}
nest_quit = MU1 , [0]
nest_nonquit = MU2 , [1,2,3,4]
nests=nest_quit,nest_nonquit
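# Nest structure: alternative 0 (quit) sits alone in a nest with scale MU1, while
# alternatives 1-4 (work, edu, shopping, other) share the second nest with scale MU2.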
prob = nested(V,av,nests,stop_type)
#prob = bioLogit(V,av,stop_type)
rowIterator('obsIter')
BIOGEME_OBJECT.ESTIMATE = Sum(log(prob),'obsIter')
exclude = ((avail_violation==1)+(origin_mtz==0)+(destination_mtz==0)+(time_window_h>=10)) > 0
BIOGEME_OBJECT.EXCLUDE = exclude
nullLoglikelihood(av,'obsIter')
choiceSet = [0,1,2,3,4]
cteLoglikelihood(choiceSet,stop_type,'obsIter')
availabilityStatistics(av,'obsIter')
BIOGEME_OBJECT.PARAMETERS['optimizationAlgorithm'] = "CFSQP"
BIOGEME_OBJECT.PARAMETERS['checkDerivatives'] = "1"
BIOGEME_OBJECT.PARAMETERS['numberOfThreads'] = "6"
3473
from spherical_distortion.util import *
sample_order = 9 # Input resolution to examine
def ang_fov(s):
print('Spherical Resolution:', s)
for b in range(s):
dim = tangent_image_dim(b, s) # Pixel dimension of tangent image
corners = tangent_image_corners(b, s) # Corners of each tangent image
fov_x, fov_y = compute_tangent_image_angular_resolution(corners)
print(' At base level', b)
print(' FOV (x) =', fov_x)
print(' FOV (y) =', fov_y)
print(' deg/pix (x) =', fov_x/dim)
print(' deg/pix (y) =', fov_y/dim)
ang_fov(sample_order)
3494
import mock
import pytest
import py_zipkin.storage
@pytest.fixture(autouse=True, scope="module")
def create_zipkin_attrs():
# The following tests all expect _thread_local.zipkin_attrs to exist: if it
# doesn't, mock.patch will fail.
py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_returns_none_if_no_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().get()
assert not py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_none_if_no_zipkin_attrs():
with mock.patch.object(py_zipkin.storage.log, "warning", autospec=True) as log:
assert not py_zipkin.storage.Stack([]).get()
assert log.call_count == 1
def test_storage_stack_still_works_if_you_dont_pass_in_storage():
# Let's make sure this still works if we don't pass in a custom storage.
assert not py_zipkin.storage.Stack().get()
def test_get_zipkin_attrs_returns_the_last_of_the_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_the_last_of_the_list():
assert "foo" == py_zipkin.storage.Stack(["bar", "foo"]).get()
def test_pop_zipkin_attrs_does_nothing_if_no_requests():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().pop()
def test_pop_zipkin_attrs_with_context_does_nothing_if_no_requests():
assert not py_zipkin.storage.Stack([]).pop()
def test_pop_zipkin_attrs_removes_the_last_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo", "bar"]):
assert "bar" == py_zipkin.storage.ThreadLocalStack().pop()
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_pop_zipkin_attrs_with_context_removes_the_last_zipkin_attrs():
context_stack = py_zipkin.storage.Stack(["foo", "bar"])
assert "bar" == context_stack.pop()
assert "foo" == context_stack.get()
def test_push_zipkin_attrs_adds_new_zipkin_attrs_to_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
py_zipkin.storage.ThreadLocalStack().push("bar")
assert "bar" == py_zipkin.storage.ThreadLocalStack().get()
def test_push_zipkin_attrs_with_context_adds_new_zipkin_attrs_to_list():
stack = py_zipkin.storage.Stack(["foo"])
assert "foo" == stack.get()
stack.push("bar")
assert "bar" == stack.get()
def test_stack_copy():
stack = py_zipkin.storage.Stack()
stack.push("a")
stack.push("b")
the_copy = stack.copy()
the_copy.push("c")
stack.push("d")
assert ["a", "b", "c"] == the_copy._storage
assert ["a", "b", "d"] == stack._storage
3518
import argparse
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--mnist', action='store_true', default=False,
help='open mnist result')
args = parser.parse_args()
def subplot(subplot, data_first, data_second, title):
plt.subplot(subplot)
if args.mnist:
x = np.arange(0,100)
else:
x = np.arange(0,500)
y_first = np.mean(data_first, axis=0)
y_second = np.mean(data_second, axis=0)
y_first_err = np.std(data_first, axis=0) / 2.
y_second_err = np.std(data_second, axis=0) / 2.
plt.fill_between(x, y_first - y_first_err, y_first + y_first_err, color='m', alpha=0.3)
plt.fill_between(x, y_second - y_second_err, y_second + y_second_err, color='c', alpha=0.3)
plt.plot(x, y_first, color='r', label='Task A')
plt.plot(x, y_second, color='g', label='Task B (transfer learning)')
plt.legend(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)
axes = plt.gca()
if args.mnist:
axes.set_xlim([0, 100])
axes.set_ylim([0, 1.2])
else:
axes.set_xlim([0, 500])
axes.set_ylim([0, 0.6])
plt.title(title, fontsize=20, y = 0.9)
plt.ylabel('Accuracy',fontsize=15)
plt.xlabel('Generations',fontsize=15)
plt.grid(True)
try:
if args.mnist:
        f = open(os.path.join('./result/result_mnist.pickle'), 'rb')
result = pickle.load(f)
f.close()
pathnet_first = []
pathnet_second = []
for res in result:
pathnet_first.append(res[2])
pathnet_second.append(res[3])
subplot('111', pathnet_first, pathnet_second,'MNIST')
plt.show()
else:
        f = open(os.path.join('./result/result_cifar_svhn.pickle'), 'rb')
result = pickle.load(f)
f.close()
cifar_first = []
cifar_second = []
svhn_first = []
svhn_second = []
for res in result:
if res[0] == 'pathnet_cifar_first':
cifar_first.append(res[2])
svhn_second.append(res[3])
else:
svhn_first.append(res[2])
cifar_second.append(res[3])
subplot('211', cifar_first, cifar_second,'CIFAR-10')
subplot('212', svhn_first, svhn_second,'cSVHN')
plt.show()
except IOError:
print("Result file does not exist")
3547
from controller.invoker import (
invoker_cmd_branch_checkout,
invoker_cmd_branch_commit,
invoker_cmd_branch_create,
invoker_cmd_branch_delete,
invoker_cmd_branch_list,
invoker_cmd_evaluate,
invoker_cmd_filter,
invoker_cmd_gpu_info,
invoker_cmd_inference,
invoker_cmd_init,
invoker_cmd_label_add,
invoker_cmd_label_get,
invoker_cmd_log,
invoker_cmd_merge,
invoker_cmd_pull_image,
invoker_cmd_repo_check,
invoker_cmd_repo_clear,
invoker_cmd_sampling,
invoker_cmd_terminate,
invoker_cmd_user_create,
invoker_task_factory,
)
from proto import backend_pb2
RequestTypeToInvoker = {
backend_pb2.CMD_BRANCH_CHECKOUT: invoker_cmd_branch_checkout.BranchCheckoutInvoker,
backend_pb2.CMD_BRANCH_CREATE: invoker_cmd_branch_create.BranchCreateInvoker,
backend_pb2.CMD_BRANCH_DEL: invoker_cmd_branch_delete.BranchDeleteInvoker,
backend_pb2.CMD_BRANCH_LIST: invoker_cmd_branch_list.BranchListInvoker,
backend_pb2.CMD_COMMIT: invoker_cmd_branch_commit.BranchCommitInvoker,
backend_pb2.CMD_EVALUATE: invoker_cmd_evaluate.EvaluateInvoker,
backend_pb2.CMD_FILTER: invoker_cmd_filter.FilterBranchInvoker,
backend_pb2.CMD_GPU_INFO_GET: invoker_cmd_gpu_info.GPUInfoInvoker,
backend_pb2.CMD_INFERENCE: invoker_cmd_inference.InferenceCMDInvoker,
backend_pb2.CMD_INIT: invoker_cmd_init.InitInvoker,
backend_pb2.CMD_LABEL_ADD: invoker_cmd_label_add.LabelAddInvoker,
backend_pb2.CMD_LABEL_GET: invoker_cmd_label_get.LabelGetInvoker,
backend_pb2.CMD_LOG: invoker_cmd_log.LogInvoker,
backend_pb2.CMD_MERGE: invoker_cmd_merge.MergeInvoker,
backend_pb2.CMD_PULL_IMAGE: invoker_cmd_pull_image.ImageHandler,
backend_pb2.CMD_TERMINATE: invoker_cmd_terminate.CMDTerminateInvoker,
backend_pb2.CMD_REPO_CHECK: invoker_cmd_repo_check.RepoCheckInvoker,
backend_pb2.CMD_REPO_CLEAR: invoker_cmd_repo_clear.RepoClearInvoker,
backend_pb2.REPO_CREATE: invoker_cmd_init.InitInvoker,
backend_pb2.TASK_CREATE: invoker_task_factory.CreateTaskInvokerFactory,
backend_pb2.USER_CREATE: invoker_cmd_user_create.UserCreateInvoker,
backend_pb2.CMD_SAMPLING: invoker_cmd_sampling.SamplingInvoker,
}
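# Sketch of a dispatch step using this table (names other than RequestTypeToInvoker
# are placeholders, not taken from this repository):
#     invoker_cls = RequestTypeToInvoker[request.req_type]
#     invoker = invoker_cls(...)  # constructed with whatever arguments the invokers expect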
3564
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from ...models import Request
DURATION_OPTIONS = {
'hours': lambda amount: timezone.now() - timedelta(hours=amount),
'days': lambda amount: timezone.now() - timedelta(days=amount),
'weeks': lambda amount: timezone.now() - timedelta(weeks=amount),
'months': lambda amount: timezone.now() + relativedelta(months=-amount),
'years': lambda amount: timezone.now() + relativedelta(years=-amount),
}
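# For example, DURATION_OPTIONS['days'](7) evaluates to timezone.now() - timedelta(days=7),
# i.e. the cutoff before which requests will be purged.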
try:
# to keep backward Python 2 compatibility
input = raw_input
except NameError:
pass
class Command(BaseCommand):
help = 'Purge old requests.'
def add_arguments(self, parser):
parser.add_argument(
'amount',
type=int,
)
parser.add_argument('duration')
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Tells Django to NOT prompt the user for input of any kind.'
)
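    # Illustrative invocation (the command name comes from this module's file name,
    # assumed here to be purgerequests.py):
    #     python manage.py purgerequests 30 days --noinput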
def handle(self, *args, **options):
amount = options['amount']
duration = options['duration']
# Check we have the correct values
        if duration[-1] != 's':  # If it's not plural, make it plural
duration_plural = '{0}s'.format(duration)
else:
duration_plural = duration
if duration_plural not in DURATION_OPTIONS:
            raise CommandError('Duration must be one of: {0}'.format(', '.join(DURATION_OPTIONS)))
qs = Request.objects.filter(time__lte=DURATION_OPTIONS[duration_plural](amount))
count = qs.count()
if count == 0:
print('There are no requests to delete.')
return
if options.get('interactive'):
confirm = input('''
You have requested to purge old requests.
This will IRREVERSIBLY DESTROY any
requests created before {0} {1} ago.
That is a total of {2} requests.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel:'''.format(amount, duration, count))
else:
confirm = 'yes'
if confirm == 'yes':
qs.delete()
else:
print('Purge cancelled')
3567
from pathlib import Path
from testaid.pathlist import PathList
def test_testaid_unit_pathlist_roles_blacklist(testvars_roles_blacklist):
assert testvars_roles_blacklist is not None
def test_testaid_unit_pathlist_roles_whitelist(testvars_roles_whitelist):
assert testvars_roles_whitelist is not None
def test_testaid_unit_pathlist_get(tmp_path):
msd = tmp_path / 'molecule_scenario_directory'
dir1 = msd / 'dir1'
dir1.mkdir(parents=True)
dir2 = tmp_path / 'dir2'
dir2.mkdir()
file1 = dir1 / 'file1.yml'
file1.touch()
file2 = dir1 / 'file2.yml'
file2.touch()
file3 = dir2 / 'file3.yml'
file3.touch()
my_pathlist = [Path(file3), Path(file1), Path(file2)]
my_pathstring = 'dir1:../dir2/file3.yml'
pathlist = PathList(my_pathstring, msd)
assert pathlist.get() == my_pathlist
3569
def getRoot(config):
if not config.parent:
return config
return getRoot(config.parent)
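# getRoot walks up the chain of parent configs to the top-level lit configuration,
# whose host_os setting is checked below.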
root = getRoot(config)
# We only run a small set of tests on Windows for now.
# Override the parent directory's "unsupported" decision until we can handle
# all of its tests.
if root.host_os in ['Windows']:
config.unsupported = False
else:
config.unsupported = True
3607
import pyredner
import numpy as np
import torch
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to > 0
resolution = (256, 256),
fisheye = False)
mat_grey = pyredner.Material(\
diffuse_reflectance = \
torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]
shape_triangle = pyredner.Shape(\
vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shape_light = pyredner.Shape(\
vertices = torch.tensor([[-1.0, -1.0, -7.0],
[ 1.0, -1.0, -7.0],
[-1.0, 1.0, -7.0],
[ 1.0, 1.0, -7.0]], device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
dtype = torch.int32, device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1,
intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
3617
from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import *
class BBox(GraphicsObject):
# Internal base class for objects represented by bounding box
# (opposite corners) Line segment is a degenerate case.
resizing_objects = []
def __init__(self, p1, p2, bounds=None, fill=None, outline=None, outline_width=None, cursor="arrow", layer=0,
tag=None):
self.p1 = p1
self.p2 = p2
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
GraphicsObject.__init__(self, options=(), cursor=cursor, layer=layer, bounds=bounds, tag=tag)
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
self.min_width = None
self.min_height = None
self.max_width = None
self.max_height = None
self.resizing_bounds = {}
self.is_resizing = {}
self.bounds_thickness = 0
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object, not {fill}")
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object, not {outline}")
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
def __repr__(self):
return "_BBox"
def _set_resizable(self, resizables, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None,
thickness=10):
"""Override in subclasses"""
pass
def _move(self, dx, dy):
self.p1[0] += dx
self.p1[1] += dy
self.p2[0] += dx
self.p2[1] += dy
self.anchor[0] += dx
self.anchor[1] += dy
def is_clicked(self, mouse_pos):
if self.bounds is None:
if mouse_pos is None:
return False
else:
if (self.p1[0] < mouse_pos[0] < self.p2[0] or self.p1[0] > mouse_pos[0] > self.p2[0]) and \
(self.p1[1] < mouse_pos[1] < self.p2[1] or self.p1[1] > mouse_pos[1] > self.p2[1]):
return True
else:
return False
else:
return self.bounds.is_clicked(mouse_pos)
def get_p1(self):
return self.p1.copy()
def get_p2(self):
return self.p2.copy()
    def get_top_right(self):
        return [self.p2[0], self.p1[1]]
    def get_top_left(self):
        return self.p1.copy()
def get_bottom_left(self):
return [self.p1[0], self.p2[1]]
def get_bottom_right(self):
return self.p2.copy()
def get_top(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p1[1]]
def get_bottom(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p2[1]]
def get_left(self):
return [self.p1[0], (self.p1[1] + self.p2[1]) / 2]
def get_right(self):
return [self.p2[0], (self.p1[1] + self.p2[1]) / 2]
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_fill(self):
return self.fill
def get_outline(self):
return self.outline
def get_outline_width(self):
return self.outline_width
def get_anchor(self):
return self.anchor
def set_dimensions(self, width, height, horizontal_align="center", vertical_align="center"):
self.set_width(width, horizontal_align)
self.set_height(height, vertical_align)
return self
def set_resizable(self, top=False, left=False, bottom=False, right=False, min_width=40, min_height=40,
bounds_width=10, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None):
if min_width < 1 or min_height < 1:
raise GraphicsError(f"\n\nGraphicsError: Minimum height and width of resizable object must be greater than "
f"or equal to 1. Right now, min_width={min_width} & min_height={min_height}")
self.min_width = min_width
self.min_height = min_height
self.is_resizing = {"top": top, "left": left, "bottom": bottom, "right": right}
self._set_resizable([top, bottom, left, right], top_bounds=top_bounds, bottom_bounds=bottom_bounds,
left_bounds=left_bounds, right_bounds=right_bounds, thickness=bounds_width)
if top is False and bottom is False and left is False and right is False:
if self in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.remove(self)
elif self not in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.add(self)
self.bounds_thickness = bounds_width
return self
def set_coords(self, p1, p2):
self.p1 = p1.copy()
self.p2 = p2.copy()
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
self._update_layer()
return self
def set_width(self, width, center="center"):
if center not in {"center", "right", "left"}:
            raise GraphicsError(
                "\n\nThe center argument for resizing the object (set_width) needs to be one of "
                f'{["center", "right", "left"]}')
if center == "left":
self.set_coords(self.p1, self.p2.add_x(width - self.width))
elif center == "right":
self.set_coords(self.p1.add_x(-(width - self.width)), self.p2)
else:
self.set_coords(self.p1.add_x(-(width / 2 - self.width)), self.p2.add_x(width / 2 - self.width))
return self
def set_height(self, height, center="center"):
if center not in {"center", "top", "bottom"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_height) needs to be one of "
f'{["center", "top", "bottom"]}')
if center == "top":
self.set_coords(self.p1, self.p2.add_y(height - self.height))
elif center == "bottom":
self.set_coords(self.p1.add_y(-(height - self.height)), self.p2)
else:
self.set_coords(self.p1.add_y(-(height / 2 - self.height)), self.p2.add_y(height / 2 - self.height))
return self
def set_fill(self, fill):
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object, not {fill}")
self._update_layer()
return self
def set_outline(self, outline):
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object, not {outline}")
self._update_layer()
return self
def set_outline_width(self, outline_width):
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(
f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
self._update_layer()
return self
3630
from pylab import *
from numpy import *
from numpy.linalg import solve
from scipy.integrate import odeint
from scipy.stats import norm, uniform, beta
from scipy.special import jacobi
a = 0.0
b = 3.0
theta=1.0
sigma=sqrt(theta/(2*(a+b+2)))
tscale = 0.05
invariant_distribution = poly1d( [-1 for x in range(int(a))], True)*poly1d( [1 for x in range(int(b))], True)
def eigenvalue(n):
return theta*n*(n+a+b+1)/(a+b+2)
gaussian_var = norm()
def dW(dt):
return norm.rvs() / sqrt(dt)
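# The walk below is an Euler-Maruyama discretisation of the Jacobi diffusion
#     dy = -theta*(y - (a-b)/(a+b+2)) dt + sqrt(2*theta*(1-y*y)/(a+b+2)) dW,
# with y clamped back into [-1, 1] after each step.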
def random_walk(y0, tmax, dt, times = None):
dt = dt * tscale
def rhs(y,t):
return -theta*(y-(a-b)/(a+b+2)) + sqrt(2*theta*(1-y*y)/(a+b+2))*dW(dt/tscale)
if (times is None):
times = arange(0,tmax,dt)
y = zeros(shape=times.shape, dtype=float)
y[0] = y0
for i in range(1,y.shape[0]):
y[i] = y[i-1] + rhs(y[i-1], times[i])*dt
if abs(y[i]) > 1:
y[i] = y[i] / abs(y[i])
return (times, y)
def beta_prior(s, f):
return poly1d(ones(shape=(s,)), True)*poly1d(-1*ones(shape=(f,)), True)
def poly_to_jacobi(x):
"""x is a poly1d object"""
xc = x.coeffs
N = x.order+1
matrix = zeros(shape=(N,N), dtype=float)
for i in range(N):
matrix[N-i-1:N, i] = jacobi(i,a,b).coeffs
return solve(matrix, xc)
def jacobi_to_poly(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + (jacobi(i,a,b)*invariant_distribution)*x[i]
return result
def jacobi_to_poly_no_invariant(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + jacobi(i,a,b)*x[i]
return result
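# The Jacobi polynomials are eigenfunctions of this process, so propagating a distribution in time
# reduces to exponentially damping each coefficient: c_n(t) = exp(-lambda_n * t) * c_n(0), with
# lambda_n = theta*n*(n+a+b+1)/(a+b+2) as in eigenvalue() above; propagate_jacobi applies exactly that.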
def propagate_jacobi(pc, t):
"""Takes jacobi coefficients and propagates them"""
n = arange(pc.shape[0], dtype=float)
l = theta*n*(n+a+b+1.0)/(a+b+2.0)*tscale
return exp(-l*t)*pc
def truncate_unnecessary_jacobi(p):
p_normalized = p / (abs(p).sum())
cs = cumsum(abs(p_normalized[::-1]))[::-1]
return p_normalized[where(abs(cs) > 1e-4)]
def pde_solve(prior, t):
result = zeros(shape=(t.shape[0], prior.shape[0]), dtype=float)
result[0,:] = prior
for i in range(1,t.shape[0]):
result[i,:] = propagate_jacobi(result[i-1,:], t[i]-t[i-1])
return result
def transform_to_x(pdf, x):
result = zeros(shape=(pdf.shape[0], x.shape[0]), dtype=float)
for i in range(0, pdf.shape[0]):
p = jacobi_to_poly(pdf[i,:])
result[i,:] = p(x)
result[i,:] /= result[i,:].sum()
return result
tmax = 4
prior = beta_prior(40, 20)
prior_in_jacobi = poly_to_jacobi(prior)
dt = 0.1
times = arange(0,tmax,dt)
x = arange(-1,1,0.01)
rw_dt = 0.01
t, y = random_walk(0.35*2-1, tmax, rw_dt)
solution_as_x = zeros(shape=(times.size, x.size), dtype=float)
solution_as_jacobi = None
empirical_ctr = zeros(shape=(4,), dtype=float)
for i in range(0,4):
nt = int(1.0/dt)
prior = prior_in_jacobi
rnd = uniform(0,1)
if (i > 0):
nsamples = 40
r = rnd.rvs(nsamples)
        ctr = (y[int(i / rw_dt)] + 1) / 2.0
        print("CTR: " + str(ctr))
        success = (r < ctr).sum()
        print("Empirical: " + str(success / float(nsamples)))
evidence = beta_prior( nsamples - success, success)
prior = None
j = truncate_unnecessary_jacobi(solution_as_jacobi[int(1/dt)-1])
prior = poly_to_jacobi(evidence * jacobi_to_poly_no_invariant(j))
empirical_ctr[i] = success / float(nsamples)
solution_as_jacobi = pde_solve(prior, times[i*nt:(i+1)*nt])
solution_as_x[i*nt:(i+1)*nt] = transform_to_x(solution_as_jacobi, x)
plot(arange(0,4), empirical_ctr, 'go')
plot(t, (y+1)/2.0, 'k')
imshow(solution_as_x.transpose(), origin='lower', extent=[0,tmax,0,1])
xlabel("time")
ylabel("CTR")
title("Bayesian Estimate of CTR")
colorbar()
show()
|
3639
|
from abc import ABC, abstractmethod
from contextlib import contextmanager
from uuid import uuid4
import pytest
from sqlalchemy import (
delete,
select,
UniqueConstraint,
)
class AbstractBaseTest(ABC):
@pytest.fixture
def cls_(self):
"""
Return class under test.
Assumptions: if the class under test is Foo, then the class grouping
the tests should be a subclass of BaseTest, named TestFoo.
"""
prefix = len("Test")
class_name = self.__class__.__name__[prefix:]
return getattr(self.get_model(), class_name)
@abstractmethod
def get_model(self):
pass
def dbcleanup_wrapper(session, obj, where_clause=None):
with dbcleanup(session, obj, where_clause):
yield obj
@contextmanager
def dbcleanup(session, obj, where_clause=None):
"""
Use the session to store obj in database; delete from database on exit, bypassing the session.
If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct
a custom select statement.
"""
return_id = where_clause is None
try:
obj_id = persist(session, obj, return_id)
yield obj_id
finally:
table = obj.__table__
if where_clause is None:
where_clause = _get_default_where_clause(type(obj), obj_id)
stmt = delete(table).where(where_clause)
session.execute(stmt)
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id
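# Hypothetical usage sketch (assumes a SQLAlchemy `session` fixture and a mapped model `Foo`
# with an integer `id` primary key; the names are illustrative only):
#
#     def test_foo_roundtrip(session):
#         foo = Foo(name=get_unique_value())
#         with dbcleanup(session, foo) as foo_id:
#             stored = get_stored_obj(session, Foo, foo_id)
#             assert stored.name == foo.name
#     # on exit the row is removed with a direct DELETE, bypassing the session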
def delete_from_database(session, objects):
"""
Delete each object in objects from database.
May be called at the end of a test if use of a context manager is impractical.
(Assume all objects have the id field as their primary key.)
"""
# Ensure we have a list of objects (check for list explicitly: a model can be iterable)
if not isinstance(objects, list):
objects = [objects]
for obj in objects:
table = obj.__table__
stmt = delete(table).where(table.c.id == obj.id)
session.execute(stmt)
def get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False):
# Either obj_id or where_clause must be provided, but not both
assert bool(obj_id) ^ (where_clause is not None)
if where_clause is None:
where_clause = _get_default_where_clause(cls, obj_id)
stmt = select(cls).where(where_clause)
result = session.execute(stmt)
# unique() is required if result contains joint eager loads against collections
# https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253
if unique:
result = result.unique()
return result.scalar_one()
def has_unique_constraint(table, fields):
for constraint in table.constraints:
if isinstance(constraint, UniqueConstraint):
col_names = {c.name for c in constraint.columns}
if set(fields) == col_names:
return True
def has_index(table, fields):
for index in table.indexes:
col_names = {c.name for c in index.columns}
if set(fields) == col_names:
return True
def collection_consists_of_objects(collection, *objects):
"""
Returns True iff list(collection) == list(objects), where object equality is determined
by primary key equality: object1.id == object2.id.
"""
if len(collection) != len(objects): # False if lengths are different
return False
if not collection: # True if both are empty
return True
# Sort, then compare each member by its 'id' attribute, which must be its primary key.
collection.sort(key=lambda item: item.id)
objects_l = list(objects)
objects_l.sort(key=lambda item: item.id)
for item1, item2 in zip(collection, objects_l):
if item1.id is None or item2.id is None or item1.id != item2.id:
return False
return True
def get_unique_value():
"""Generate unique values to accommodate unique constraints."""
return uuid4().hex
def _get_default_where_clause(cls, obj_id):
where_clause = cls.__table__.c.id == obj_id
return where_clause
|
3663
|
import pytest
from copy import deepcopy
import numpy as np
from gefest.core.structure.point import Point
from gefest.core.structure.polygon import Polygon
from gefest.core.structure.structure import Structure
from gefest.core.algs.postproc.resolve_errors import *
from gefest.core.algs.geom.validation import *
# marking length and width for testing polygon
poly_width = 10
poly_length = 20
# creating a testing polygons via corner points
rectangle_points = [(-1, 40), (-1, poly_length+40), (-poly_width-10, poly_length+40), (-poly_width-10, 40)]
out_bounds_rectangle_poly = Polygon('rectangle', points=[Point(*coords) for coords in rectangle_points])
triangle_points = [(1, 1), (poly_width, poly_length), (1, poly_length)]
unclosed_triangle_poly = Polygon('triangle', points=[Point(*coords) for coords in triangle_points])
incorrect_points = [(5, 5), (5, poly_length), (8, poly_length), (5, 5), (5, 30)]
incorrect_poly = Polygon('incorrect_poly', points=[Point(*coords) for coords in incorrect_points])
domain = Domain()
def test_unclosed_poly():
input_structure = Structure([unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert unclosed_poly(input_structure, domain)
assert not unclosed_poly(observed_structure, domain)
def test_self_intersection():
input_structure = Structure([incorrect_poly])
observed_structure = postprocess(input_structure, domain)
assert self_intersection(input_structure)
assert not self_intersection(observed_structure)
def test_out_of_bound():
input_structure = Structure([out_bounds_rectangle_poly])
observed_structure = postprocess(input_structure, domain)
assert out_of_bound(input_structure, domain)
assert not out_of_bound(observed_structure, domain)
def test_fixed_polys():
domain = Domain(fixed_points=[[[15, 30],
[40, 30],
[15, 40]]])
poly_like_fixed = Polygon('like_fixed', points=[Point(15, 30), Point(40, 30), Point(15, 40)])
input_structure = Structure([poly_like_fixed, unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert all([np.isclose(len(observed_structure.polygons), 2),
'like_fixed' not in [poly.id for poly in observed_structure.polygons],
'fixed' in [poly.id for poly in observed_structure.polygons]])
def test_too_close():
same_poly = deepcopy(unclosed_triangle_poly)
same_poly.id = 'same_triangle'
input_structure = Structure([unclosed_triangle_poly, same_poly])
observed_structure = postprocess(input_structure, domain)
print(observed_structure.polygons)
assert np.isclose(len(observed_structure.polygons), 1)
|
3669
|
from typing import Tuple
import torch
from torch.autograd import Function
import torch.nn as nn
from metrics.pointops import pointops_cuda
import numpy as np
class FurthestSampling(Function):
@staticmethod
def forward(ctx, xyz, m):
"""
input: xyz: (b, n, 3) and n > m, m: int32
output: idx: (b, m)
"""
assert xyz.is_contiguous()
b, n, _ = xyz.size()
idx = torch.cuda.IntTensor(b, m)
temp = torch.cuda.FloatTensor(b, n).fill_(1e10)
pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx)
return idx
@staticmethod
    def backward(ctx, a=None):
return None, None
furthestsampling = FurthestSampling.apply
class Gathering(Function):
@staticmethod
def forward(ctx, features, idx):
"""
input: features: (b, c, n), idx : (b, m) tensor
output: (b, c, m)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
m = idx.size(1)
output = torch.cuda.FloatTensor(b, c, m)
pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output)
ctx.for_backwards = (idx, c, n)
return output
@staticmethod
def backward(ctx, grad_out):
idx, c, n = ctx.for_backwards
b, m = idx.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data)
return grad_features, None
gathering = Gathering.apply
class NearestNeighbor(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
input: unknown: (b, n, 3), known: (b, m, 3)
output: dist2: (b, n, 3) l2 distance to the three nearest neighbors
idx: (b, n, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
b, n, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(b, n, 3)
idx = torch.cuda.IntTensor(b, n, 3)
pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
nearestneighbor = NearestNeighbor.apply
class Interpolation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
        Performs weighted linear interpolation on 3 features
input: features: (b, c, m) features descriptors to be interpolated from
idx: (b, n, 3) three nearest neighbors of the target features in features
weight: (b, n, 3) weights
output: (b, c, n) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
b, c, m = features.size()
n = idx.size(1)
ctx.interpolation_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(b, c, n)
pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, n)
output: grad_features: (b, c, m), None, None
"""
idx, weight, m = ctx.interpolation_for_backward
b, c, n = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, m).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
interpolation = Interpolation.apply
class Grouping(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.FloatTensor(b, c, m, nsample)
pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output)
ctx.for_backwards = (idx, n)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, m, nsample)
output: (b, c, n), None
"""
idx, n = ctx.for_backwards
b, c, m, nsample = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping = Grouping.apply
class GroupingInt(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.LongTensor(b, c, m, nsample)
pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output)
return output
@staticmethod
def backward(ctx, a=None):
return None, None
grouping_int = GroupingInt.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
input: radius: float, radius of the balls
nsample: int, maximum number of features in the balls
xyz: torch.Tensor, (b, n, 3) xyz coordinates of the features
new_xyz: torch.Tensor, (b, m, 3) centers of the ball query
        output: (b, m, nsample) tensor with the indices of the features that form the query balls
"""
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, n, _ = xyz.size()
m = new_xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ballquery = BallQuery.apply
class FeatureDistribute(Function):
@staticmethod
def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param max_xyz: (b, n, 3)
:param xyz: (b, m, 3)
:return: distribute_idx: (b, m)
"""
assert max_xyz.is_contiguous()
assert xyz.is_contiguous()
b, n, _ = max_xyz.size()
m = xyz.size(1)
distribute_idx = torch.cuda.IntTensor(b, m).zero_()
pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx)
return distribute_idx
@staticmethod
def backward(ctx, a=None):
return None, None
featuredistribute = FeatureDistribute.apply
class FeatureGather(Function):
@staticmethod
def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param max_feature: (b, c, n)
:param distribute_idx: (b, m)
:return: distribute_feature: (b, c, m)
'''
assert max_feature.is_contiguous()
assert distribute_idx.is_contiguous()
b, c, n = max_feature.size()
m = distribute_idx.size(1)
distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_()
pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature)
ctx.for_backwards = (distribute_idx, n)
return distribute_feature
@staticmethod
def backward(ctx, grad_distribute_feature: torch.Tensor):
'''
:param ctx:
:param grad_distribute_feature: (b, c, m)
:return: grad_max_feature: (b, c, n), None
'''
distribute_idx, n = ctx.for_backwards
b, c, m = grad_distribute_feature.size()
grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_()
grad_distribute_feature_data = grad_distribute_feature.data.contiguous()
pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data)
return grad_max_feature, None
featuregather = FeatureGather.apply
class LabelStatBallRange(Function):
@staticmethod
def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param radius:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
labelstat_ballrange = LabelStatBallRange.apply
class LabelStatIdx(Function):
@staticmethod
def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param nsample:
:param label_stat: (b, n, nclass)
:param idx: (b, m, nsample)
:return: new_label_stat: (b, m, nclass)
'''
assert label_stat.is_contiguous()
assert idx.is_contiguous()
b, n, nclass = label_stat.size()
m = idx.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None
labelstat_idx = LabelStatIdx.apply
class LabelStatAndBallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor):
'''
:param ctx:
:param radius:
:param nsample:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass) idx: (b, m, nsample)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat)
return new_label_stat, idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None, None, None
labelstat_and_ballquery = LabelStatAndBallQuery.apply
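# pairwise_distances below uses the expansion ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2*x_i.y_j,
# so a single matrix multiply covers all pairs; the final clamp guards against small negative values
# caused by floating-point cancellation.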
def pairwise_distances(x, y=None):
'''
Input: x is a Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    return torch.clamp(dist, 0.0, np.inf)
class KNNQueryNaive(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 0:nsample].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_naive = KNNQueryNaive.apply
class KNNQuery(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
( dist2: (b, m, nsample) )
"""
if new_xyz is None:
new_xyz = xyz
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, m, _ = new_xyz.size()
n = xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
dist2 = torch.cuda.FloatTensor(b, m, nsample).zero_()
pointops_cuda.knnquery_cuda(b, n, m, nsample, xyz, new_xyz, idx, dist2)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None
knnquery = KNNQuery.apply
class KNNQueryExclude(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
        output: idx: (b, m, nsample) indices of the nsample nearest neighbors, excluding the closest match
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 1:nsample+1].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_exclude = KNNQueryExclude.apply
class Le_QueryAndGroup_SameSize(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_SameSize, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, n, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
        if new_xyz is None:
            new_xyz = xyz
        assert xyz.size() == new_xyz.size()
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return grouped_xyz, new_features
class QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
class QueryAndGroup_Dilate(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup_Dilate, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, 2*self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(2*self.nsample, xyz, new_xyz) # (b, m, nsample)
idx2 = np.array([i for i in range(2*self.nsample)])
np.random.shuffle(idx2)
idx2 = idx2[:self.nsample]
idx = idx[:, :, idx2]
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
class Le_QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return grouped_xyz, new_features
class Gen_QueryAndGroupXYZ(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Gen_QueryAndGroupXYZ, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
#def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
#if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous() # BxNx3 -> Bx3xN
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
return grouped_xyz
class Le_QueryAndGroup_OnlyFeature(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_OnlyFeature, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
#xyz_trans = xyz.transpose(1, 2).contiguous()
#grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
#grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
            # grouped_xyz is not pre-computed in this feature-only variant, so build it here as a fallback
            grouped_xyz = grouping(xyz.transpose(1, 2).contiguous(), idx) - new_xyz.transpose(1, 2).unsqueeze(-1)
            new_features = grouped_xyz
return new_features
class GroupAll(nn.Module):
"""
Groups all features
"""
def __init__(self, use_xyz: bool = True):
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
input: xyz: (b, n, 3) coordinates of the features
new_xyz: ignored torch
features: (b, c, n) descriptors of the features
output: new_features: (b, c+3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, 1, n)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
|
3701
|
from django import forms
from .models import Application
class ApplicationForm(forms.ModelForm):
class Meta:
model = Application
fields = ('resume', 'cover_letter',)
|
3742
|
import sqlite3
con = sqlite3.connect(":memory:")
# enable extension loading
con.enable_load_extension(True)
# Load the fulltext search extension
con.execute("select load_extension('./fts3.so')")
# alternatively you can load the extension using an API call:
# con.load_extension("./fts3.so")
# disable extension loading again
con.enable_load_extension(False)
# example from SQLite wiki
con.execute("create virtual table recipe using fts3(name, ingredients)")
con.executescript("""
insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes');
insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery');
insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour');
insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter');
""")
for row in con.execute("select rowid, name, ingredients from recipe where name match 'pie'"):
print(row)
|
3753
|
class Solution:
"""
@param s: a string
@param t: a string
@return: true if they are both one edit distance apart or false
"""
def isOneEditDistance(self, s, t):
# write your code here
if s == t:
return False
if abs(len(s) - len(t)) > 1:
return False
n, m = len(s), len(t)
f = [[0] * (m + 1) for _ in range(2)]
for j in range(m + 1):
f[0][j] = j
for i in range(1, n + 1):
f[i % 2][0] = i
for j in range(1, m + 1):
if s[i - 1] == t[j - 1]:
f[i % 2][j] = min(f[(i - 1) % 2][j - 1],
f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)
else:
f[i % 2][j] = min(f[(i - 1) % 2][j - 1] + 1,
f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)
return f[n % 2][m] == 1
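if __name__ == "__main__":
    # Minimal self-check sketch; example inputs chosen for illustration only.
    sol = Solution()
    print(sol.isOneEditDistance("aDb", "adb"))   # True: exactly one substitution apart
    print(sol.isOneEditDistance("ab", "ab"))     # False: identical strings
    print(sol.isOneEditDistance("ab", "abcd"))   # False: lengths differ by more than one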
|
3806
|
import bpy
import random as rnd
from collections import Counter
import itertools as iter
feld_von, feld_bis = -4, 4
spielfeld_von, spielfeld_bis = feld_von-6, feld_bis+6
anz = int((feld_bis-feld_von)**3*.3)
spielfeld = {(rnd.randint(feld_von, feld_bis), rnd.randint(
feld_von, feld_bis), rnd.randint(feld_von, feld_bis)) for _ in range(anz)}
animate_frame = 8
def nachbarn(pos):
for x,y,z in iter.product(range(-1,2), repeat = 3):
if z == y == x == 0: continue
yield pos[0]+x, pos[1]+y, pos[2]+z
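# 3D Life rule used below: a dead cell is born when it has exactly 6 live neighbours, and a live cell
# survives with 5-8 live neighbours, counted over the 26-cell Moore neighbourhood yielded by nachbarn().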
def nächsteGeneration(spielfeld):
nachb = Counter([p for pos in spielfeld for p in nachbarn(pos)])
return {pos for pos, anz in nachb.items() if anz == 6 or (anz in (5, 6, 7, 8) and pos in spielfeld)}
def scale_rotate(ob, scale, rot, fr):
ob.scale = (scale, scale, scale)
ob.rotation_euler.rotate_axis("Z", rot)
ob.keyframe_insert(data_path='rotation_euler', frame=fr)
ob.keyframe_insert(data_path='scale', frame=fr)
bpy.ops.mesh.primitive_cube_add(size=0.001, location=(0, 0, 0))
orig_cube = bpy.context.active_object
n = "cube"
m = orig_cube.data.copy()
cubes = {}
for x,y,z in iter.product(range(spielfeld_von,spielfeld_bis), repeat = 3):
o = bpy.data.objects.new(n, m)
o.location = (x, y, z)
cubes[x, y, z] = o
bpy.context.collection.objects.link(o)
o.select_set(False)
for i in range(200):
    print(f'Iteration no. {i}, number of cells = {len(spielfeld)}')
spielfeld2 = nächsteGeneration(spielfeld)
dead = spielfeld - spielfeld2
new = spielfeld2 - spielfeld
spielfeld = spielfeld2
if not new and not dead:
break
for zelle in new | dead:
if zelle not in cubes:
continue
ob = cubes[zelle]
if zelle in new:
scale_rotate(ob, 0.001, -3.141/2, (i-1)*animate_frame)
scale_rotate(ob, 750, 3.141/2, i * animate_frame)
else:
scale_rotate(ob, 750, 3.141/2, (i-1) * animate_frame)
scale_rotate(ob, 0.001, -3.141/2, i * animate_frame)
if not spielfeld:
break
bpy.context.scene.frame_current = 1
|
3817
|
from torch import nn as nn
from .base_model import BaseModel
from ..nn.conv2d import DenseConv2d
from ..nn.linear import DenseLinear
__all__ = ["Conv2", "conv2", "Conv4", "conv4"]
class Conv2(BaseModel):
def __init__(self):
super(Conv2, self).__init__()
self.features = nn.Sequential(DenseConv2d(1, 32, kernel_size=5, padding=2), # 32x28x28
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2), # 32x14x14
DenseConv2d(32, 64, kernel_size=5, padding=2), # 64x14x14
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2)) # 64x7x7
self.classifier = nn.Sequential(DenseLinear(64 * 7 * 7, 2048),
nn.ReLU(inplace=True),
DenseLinear(2048, 62))
self.collect_prunable_layers()
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
class Conv4(BaseModel):
def __init__(self):
super(Conv4, self).__init__()
self.features = nn.Sequential(DenseConv2d(3, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2))
self.classifier = DenseLinear(in_features=32 * 6 * 6, out_features=2)
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def conv2() -> Conv2:
return Conv2()
def conv4() -> Conv4:
return Conv4()
# TODO: define pretrain etc.
|
3820
|
import bpy
from bpy.app.handlers import persistent
bl_info = {
"name": "Playback Once",
"author": "<NAME>",
"version": (1, 0, 0),
"blender": (2, 67, 3),
"location": "",
"description": "Playback once.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Animation"}
@persistent
def stopPlaybackAtEnd(scene):
if scene.frame_current >= scene.frame_end:
bpy.ops.screen.animation_cancel()
def register():
bpy.app.handlers.frame_change_pre.append(stopPlaybackAtEnd)
def unregister():
bpy.app.handlers.frame_change_pre.remove(stopPlaybackAtEnd)
if __name__ == "__main__":
register()
|
3826
|
import json
from washer.worker.actions import AppendStdout, AppendStderr
from washer.worker.actions import CreateNamedLog, AppendToLog
from washer.worker.actions import SetProperty
from washer.worker.commands import washertask
def pipenv_graph2deps(rawgraph):
graph = json.loads(rawgraph)
def build_entry(data):
if 'required_version' in data:
spec = data['key'] + data['required_version']
else:
spec = data['key']
return {'installer': 'pipenv',
'spec': spec,
'source': 'pypi',
'name': data['package_name'],
'version': data['installed_version']}
def extract_dependencies(entries):
for entry in entries:
if 'package' in entry:
package = entry['package']
dependencies = entry.get('dependencies', [])
yield build_entry(package)
yield from extract_dependencies(dependencies)
else:
yield build_entry(entry)
yield from extract_dependencies(graph)
@washertask
def pip_install(repopath, path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install .")
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
@washertask
def requirement_file(repopath, requirement="requirements.txt",
path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install -r %s" % requirement)
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
|
3843
|
from bluesky.plans import scan
from bluesky.simulators import (print_summary, print_summary_wrapper,
summarize_plan,
check_limits,
plot_raster_path)
import pytest
from bluesky.plans import grid_scan
def test_print_summary(hw):
det = hw.det
motor = hw.motor
print_summary(scan([det], motor, -1, 1, 10)) # old name
summarize_plan(scan([det], motor, -1, 1, 10)) # new name
list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))
def test_old_module_name(hw):
det = hw.det
motor = hw.motor
motor1 = hw.motor1
motor2 = hw.motor2
from bluesky.plan_tools import (print_summary, print_summary_wrapper,
plot_raster_path)
with pytest.warns(UserWarning):
print_summary(scan([det], motor, -1, 1, 10))
with pytest.warns(UserWarning):
list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))
with pytest.warns(UserWarning):
plan = grid_scan([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)
plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)
def test_check_limits(RE, hw):
det = hw.det
motor = hw.motor
# The motor object does not currently implement limits.
# Use an assert to help us out if this changes in the future.
assert not hasattr(motor, 'limits')
# # check_limits should warn if it can't find check_value
# TODO: Is there _any_ object to test?
# with pytest.warns(UserWarning):
# check_limits(scan([det], motor, -1, 1, 3))
# monkey-patch some limits
motor.limits = (-2, 2)
# check_limits should do nothing here
check_limits(scan([det], motor, -1, 1, 3))
# check_limits should error if limits are exceeded only if object raises
# this object does not raise
check_limits(scan([det], motor, -3, 3, 3))
# check_limits should raise if limits are equal only if object raises
# this object does not raise
motor.limits = (2, 2)
check_limits(scan([det], motor, -1, 1, 3))
def test_check_limits_needs_RE():
with pytest.raises(RuntimeError) as ctx:
check_limits([])
assert str(ctx.value) == "Bluesky event loop not running"
def test_plot_raster_path(hw):
det = hw.det
motor1 = hw.motor1
motor2 = hw.motor2
plan = grid_scan([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)
plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)
|
3845
|
import warnings
import pytest
from leapp.libraries.actor.systemfacts import get_selinux_status
from leapp.models import SELinuxFacts
no_selinux = False
try:
import selinux
except ImportError:
no_selinux = True
warnings.warn(
'Tests which uses `selinux` will be skipped'
' due to library unavailability.', ImportWarning)
reason_to_skip_msg = "Selinux is not available"
# FIXME: create valid tests...
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_enabled_enforcing(monkeypatch):
"""
Test case SELinux is enabled in enforcing mode
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 1])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': True,
'enabled': True,
'runtime_mode': 'enforcing',
'static_mode': 'enforcing'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_enabled_permissive(monkeypatch):
"""
Test case SELinux is enabled in permissive mode
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': True,
'enabled': True,
'runtime_mode': 'permissive',
'static_mode': 'permissive'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_disabled(monkeypatch):
"""
Test case SELinux is disabled
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': False,
'enabled': False,
'runtime_mode': 'permissive',
'static_mode': 'permissive'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
class MockNoConfigFileOSError(object):
def __init__(self):
raise OSError
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_disabled_no_config_file(monkeypatch):
"""
Test case SELinux is disabled
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', MockNoConfigFileOSError)
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': False,
'enabled': False,
'runtime_mode': 'permissive',
'static_mode': 'disabled'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
|
3872
|
import re
import discord
from redbot.core import commands
class Covfefe(commands.Cog):
"""
Convert almost any word into covfefe
"""
def __init__(self, bot):
self.bot = bot
async def covfefe(self, x, k="aeiouy])"):
"""
https://codegolf.stackexchange.com/a/123697
"""
try:
b, c, v = re.findall(f"(.*?[{k}([^{k}.*?([{k}", x)[0]
return b + c + (("bcdfgkpstvz" + c)["pgtvkgbzdfs".find(c)] + v) * 2
except IndexError:
return None
async def red_delete_data_for_user(self, **kwargs):
"""
Nothing to delete
"""
return
@commands.command()
async def covefy(self, ctx, msg):
"""Convert almost any word into covfefe"""
newword = await self.covfefe(msg)
if newword is not None:
await ctx.send(newword)
else:
await ctx.send("I cannot covfefeify that word")
|
3874
|
import os
import numpy as np
import tensorflow as tf
from models_gqa.model import Model
from models_gqa.config import build_cfg_from_argparse
from util.gqa_train.data_reader import DataReader
import json
# Load config
cfg = build_cfg_from_argparse()
# Start session
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.GPU_ID)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=cfg.GPU_MEM_GROWTH)))
# Data files
imdb_file = cfg.IMDB_FILE % cfg.TEST.SPLIT_VQA
scene_graph_file = cfg.SCENE_GRAPH_FILE % \
cfg.TEST.SPLIT_VQA.replace('_balanced', '').replace('_all', '')
data_reader = DataReader(
imdb_file, shuffle=False, one_pass=True, batch_size=cfg.TEST.BATCH_SIZE,
T_encoder=cfg.T_ENCODER,
vocab_question_file=cfg.VOCAB_QUESTION_FILE,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
feature_type=cfg.FEAT_TYPE,
spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,
objects_feature_dir=cfg.OBJECTS_FEATURE_DIR,
objects_max_num=cfg.W_FEAT,
scene_graph_file=scene_graph_file,
vocab_name_file=cfg.VOCAB_NAME_FILE,
vocab_attr_file=cfg.VOCAB_ATTR_FILE,
spatial_pos_enc_dim=cfg.SPATIAL_POS_ENC_DIM,
bbox_tile_num=cfg.BBOX_TILE_NUM)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
# Inputs and model
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT, cfg.D_FEAT])
image_valid_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT])
model = Model(
input_seq_batch, seq_length_batch, image_feat_batch, image_valid_batch,
num_vocab=num_vocab, num_choices=num_choices, is_training=False)
# Load snapshot
if cfg.TEST.USE_EMA:
ema = tf.train.ExponentialMovingAverage(decay=0.9) # decay doesn't matter
var_names = {
(ema.average_name(v) if v in model.params else v.op.name): v
for v in tf.global_variables()}
else:
var_names = {v.op.name: v for v in tf.global_variables()}
snapshot_file = cfg.TEST.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TEST.ITER)
print('loading model snapshot from %s' % snapshot_file)
snapshot_saver = tf.train.Saver(var_names)
snapshot_saver.restore(sess, snapshot_file)
print('Done')
# Write results
result_dir = cfg.TEST.RESULT_DIR % (cfg.EXP_NAME, cfg.TEST.ITER)
os.makedirs(result_dir, exist_ok=True)
# Run test
answer_correct, num_questions = 0, 0
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions = []
answer_word_list = data_reader.batch_loader.answer_dict.word_list
pred_file = os.path.join(
result_dir, 'gqa_eval_preds_%s_%s_%08d.json' % (
cfg.TEST.SPLIT_VQA, cfg.EXP_NAME, cfg.TEST.ITER))
for n_batch, batch in enumerate(data_reader.batches()):
if 'answer_label_batch' not in batch:
batch['answer_label_batch'] = -np.ones(
len(batch['qid_list']), np.int32)
if num_questions == 0:
print('imdb has no answer labels. Using dummy labels.\n\n'
'**The final accuracy will be zero (no labels provided)**\n')
vqa_scores_value = sess.run(model.vqa_scores, feed_dict={
input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch'],
image_valid_batch: batch['image_valid_batch']})
# compute accuracy
vqa_labels = batch['answer_label_batch']
vqa_predictions = np.argmax(vqa_scores_value, axis=1)
answer_correct += np.sum(vqa_predictions == vqa_labels)
num_questions += len(vqa_labels)
accuracy = answer_correct / num_questions
if n_batch % 20 == 0:
print('exp: %s, iter = %d, accumulated accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions.extend([
{"questionId": qId, "prediction": answer_word_list[p]}
for qId, p in zip(batch['qid_list'], vqa_predictions)])
with open(os.path.join(
result_dir, 'vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA), 'w') as f:
print('\nexp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
print('exp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions), file=f)
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
with open(pred_file, 'w') as f:
json.dump(output_predictions, f, indent=2)
print('prediction file written to %s' % pred_file)
|
3908
|
from cklib.args import get_arg_parser, ArgumentParser
from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin
def test_args():
arg_parser = get_arg_parser()
CleanupAWSLoadbalancersPlugin.add_args(arg_parser)
arg_parser.parse_args()
assert ArgumentParser.args.cleanup_aws_loadbalancers is False
assert ArgumentParser.args.cleanup_aws_loadbalancers_age == "7 days"
|
3915
|
from __future__ import annotations
import json
import logging
from contextlib import contextmanager, ExitStack
from typing import List, Dict
import pandas as pd
from lithops.storage import Storage
from lithops.storage.utils import CloudObject, StorageNoSuchKeyError
from sm.engine.annotation_lithops.build_moldb import (
build_moldb,
InputMolDb,
DbFDRData,
)
from sm.engine.annotation_lithops.calculate_centroids import (
calculate_centroids,
validate_centroids,
)
from sm.engine.annotation_lithops.executor import Executor
from sm.engine.annotation_lithops.io import (
CObj,
save_cobj,
iter_cobjects_with_prefetch,
deserialize,
)
from sm.engine.annotation_lithops.utils import jsonhash
from sm.engine.utils.db_mutex import DBMutex
from sm.engine.ds_config import DSConfig
from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper
logger = logging.getLogger('annotation-pipeline')
class CentroidsCacheEntry:
def __init__(
self, executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb]
):
ds_hash_params = ds_config.copy()
self.ds_config = {
**ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122
# Include the `targeted` value of databases so that a new cache entry is made if
# someone manually changes that field
'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs],
}
# Remove database_ids as it may be in a different order to moldbs
del self.ds_config['database_ids']
self.ds_hash = jsonhash(self.ds_config)
self.executor = executor
self.storage = executor.storage
self.bucket, raw_prefix = sm_storage['centroids']
self.prefix = f"{raw_prefix}/{self.ds_hash}"
self.config_key = f'{self.prefix}/ds_config.json'
self.meta_key = f'{self.prefix}/meta'
@contextmanager
def lock(self):
with DBMutex().lock(self.ds_hash, timeout=3600):
yield
def load(self):
try:
db_data_cobjs, peaks_cobjs = deserialize(
self.storage.get_object(self.bucket, self.meta_key)
)
return db_data_cobjs, peaks_cobjs
except StorageNoSuchKeyError:
return None
def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]):
def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage):
# If Lithops' storage supported Copy Object operations, this could be easily optimized.
# Not sure if it's worth the effort yet
result_cobjs = []
for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)):
dest_key = f'{dest_prefix}/{i:06}'
result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key))
return result_cobjs
dest_bucket = self.bucket
# Copy cobjs to the cache dir
new_db_data_cobjs, new_peaks_cobjs = self.executor.map(
batch_copy,
[(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')],
runtime_memory=1024,
)
# Save config in case it's needed for debugging
self.storage.put_cloudobject(
json.dumps(self.ds_config, indent=4), self.bucket, self.config_key
)
# Save list of cobjects. This list would be easy to reconstruct by listing keys, but
# saving a separate object as the last step of the process is helpful to confirm that
# the cache item is complete, and didn't partially fail to copy.
save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key)
return new_db_data_cobjs, new_peaks_cobjs
def clear(self):
keys = self.storage.list_keys(self.bucket, self.prefix)
if keys:
logger.info(f'Clearing centroids cache {self.prefix}')
self.storage.delete_objects(self.bucket, keys)
def get_moldb_centroids(
executor: Executor,
sm_storage: Dict,
ds_config: DSConfig,
moldbs: List[InputMolDb],
debug_validate=False,
use_cache=True,
use_db_mutex=True,
):
moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs)
with ExitStack() as stack:
if use_db_mutex:
stack.enter_context(moldb_cache.lock())
if use_cache:
cached_val = moldb_cache.load()
else:
cached_val = None
moldb_cache.clear()
if cached_val:
db_data_cobjs, peaks_cobjs = cached_val
logger.info(
f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache'
)
else:
formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs)
isocalc_wrapper = IsocalcWrapper(ds_config)
peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper)
if debug_validate:
validate_centroids(executor, peaks_cobjs)
moldb_cache.save(db_data_cobjs, peaks_cobjs)
logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache')
return db_data_cobjs, peaks_cobjs
|
3920
|
def multiple_replace(text: str, chars_to_mapping: dict):
"""
This function is used to replace a dictionary of characters inside a text string
:param text:
:param chars_to_mapping:
:return:
"""
import re
pattern = "|".join(map(re.escape, chars_to_mapping.keys()))
return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))
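if __name__ == "__main__":
    # Minimal usage sketch; the mapping below is chosen for illustration only.
    chars_to_mapping = {"&": "and", "%": " percent"}
    print(multiple_replace("5% & rising", chars_to_mapping))  # -> "5 percent and rising"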
|
3943
|
from .torch2onnx import torch2onnx
from .onnx2trt import onnx2trt
from .torch2trt import torch2trt
from .base import load, save
|
3965
|
import pytest
from pydantic import ValidationError
from overhave.transport import OverhaveS3ManagerSettings
class TestS3ManagerSettings:
""" Unit tests for :class:`OverhaveS3ManagerSettings`. """
@pytest.mark.parametrize("test_s3_enabled", [False])
def test_disabled(self, test_s3_enabled: bool) -> None:
settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled)
assert not settings.enabled
assert not settings.url
assert not settings.access_key
assert not settings.secret_key
@pytest.mark.parametrize("test_s3_enabled", [True])
def test_empty_enabled(self, test_s3_enabled: bool) -> None:
with pytest.raises(ValidationError):
OverhaveS3ManagerSettings(enabled=test_s3_enabled)
@pytest.mark.parametrize("test_s3_autocreate_buckets", [False, True], indirect=True)
@pytest.mark.parametrize("test_s3_enabled", [True], indirect=True)
def test_correct_enabled(
self,
test_s3_enabled: bool,
test_s3_autocreate_buckets: bool,
test_s3_manager_settings: OverhaveS3ManagerSettings,
) -> None:
assert test_s3_manager_settings.enabled == test_s3_enabled
assert test_s3_manager_settings.url
assert test_s3_manager_settings.access_key
assert test_s3_manager_settings.secret_key
assert test_s3_manager_settings.verify
assert test_s3_manager_settings.autocreate_buckets == test_s3_autocreate_buckets
|
3969
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("LIKELIHOODPDFDBREADER")
# process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")
# process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring()
# )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string('sqlite_file:dummy2.db'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('MuScleFitLikelihoodPdfRcd'),
tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')
))
)
process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(
"LikelihoodPdfDBReader"
)
process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)
|
3978
|
import logging
import random
from datetime import timedelta
from typing import TYPE_CHECKING
from duration import to_iso8601
from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk
from weaver import sort
from weaver.config import WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS, get_weaver_configuration
from weaver.database import get_db
from weaver.datatype import Bill, Quote
from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions
from weaver.formats import OUTPUT_FORMAT_JSON
from weaver.processes.types import PROCESS_APPLICATION, PROCESS_WORKFLOW
from weaver.processes.wps_package import get_package_workflow_steps, get_process_location
from weaver.store.base import StoreBills, StoreQuotes
from weaver.utils import get_settings, get_weaver_url
from weaver.wps_restapi import swagger_definitions as sd
from weaver.wps_restapi.processes.processes import submit_local_job
if TYPE_CHECKING:
from weaver.datatype import Process
from weaver.typedefs import JSON
LOGGER = logging.getLogger(__name__)
def process_quote_estimator(process): # noqa: E811
# type: (Process) -> JSON
"""
Simulate quote parameters for the process execution.
:param process: instance of :class:`weaver.datatype.Process` for which to evaluate the quote.
:return: dict of {price, currency, estimatedTime} values for the process quote.
"""
# TODO: replace by some fancy ml technique or something?
price = random.uniform(0, 10) # nosec
currency = "CAD"
estimated_time = to_iso8601(timedelta(minutes=random.uniform(5, 60))) # nosec
return {"price": price, "currency": currency, "estimatedTime": estimated_time}
@sd.process_quotes_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostProcessQuoteRequestEndpoint(), response_schemas=sd.post_quotes_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def request_quote(request):
"""
Request a quotation for a process.
"""
settings = get_settings(request)
weaver_config = get_weaver_configuration(settings)
if weaver_config not in [WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS]:
raise HTTPBadRequest("Unsupported request for configuration '{}'.".format(weaver_config))
process_id = request.matchdict.get("process_id")
process_store = get_db(request).get_store("processes")
try:
process = process_store.fetch_by_id(process_id)
except ProcessNotFound:
raise HTTPNotFound("Could not find process with specified 'process_id'.")
store = get_db(request).get_store(StoreQuotes)
process_url = get_process_location(process_id, data_source=get_weaver_url(settings))
process_type = process.type
process_params = dict()
for param in ["inputs", "outputs", "mode", "response"]:
if param in request.json:
process_params[param] = request.json.pop(param)
process_quote_info = process_quote_estimator(process)
process_quote_info.update({
"process": process_id,
"processParameters": process_params,
"location": process_url,
"user": str(request.authenticated_userid)
})
# loop workflow sub-process steps to get individual quotes
if process_type == PROCESS_WORKFLOW and weaver_config == WEAVER_CONFIGURATION_EMS:
workflow_quotes = list()
for step in get_package_workflow_steps(process_url):
# retrieve quote from provider ADES
# TODO: data source mapping
process_step_url = get_process_location(step["reference"])
process_quote_url = "{}/quotations".format(process_step_url)
subreq = request.copy()
subreq.path_info = process_quote_url
resp_json = request.invoke_subrequest(subreq).json()
quote_json = resp_json["quote"]
quote = store.save_quote(Quote(**quote_json))
workflow_quotes.append(quote.id)
process_quote_info.update({"steps": workflow_quotes})
quote = store.save_quote(Quote(**process_quote_info))
return HTTPCreated(json={"quote": quote.json()})
# single application quotes (ADES or EMS)
elif process_type == PROCESS_APPLICATION:
quote = store.save_quote(Quote(**process_quote_info))
quote_json = quote.json()
quote_json.pop("steps", None)
return HTTPCreated(json={"quote": quote_json})
# error if not handled up to this point
raise HTTPBadRequest("Unsupported quoting process type '{0}' on '{1}'.".format(process_type, weaver_config))
@sd.process_quotes_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.ProcessQuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@sd.quotes_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.QuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_list(request):
"""
Get list of quotes IDs.
"""
page = int(request.params.get("page", "0"))
limit = int(request.params.get("limit", "10"))
filters = {
"process_id": request.params.get("process", None) or request.matchdict.get("process_id", None),
"page": page,
"limit": limit,
"sort": request.params.get("sort", sort.SORT_CREATED),
}
store = get_db(request).get_store(StoreQuotes)
items, count = store.find_quotes(**filters)
return HTTPOk(json={
"count": count,
"page": page,
"limit": limit,
"quotes": [quote.id for quote in items]
})
@sd.process_quote_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.ProcessQuoteEndpoint(), response_schemas=sd.get_quote_responses)
@sd.quote_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.QuoteEndpoint(), response_schemas=sd.get_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_info(request):
"""
Get quote information.
"""
quote_id = request.matchdict.get("quote_id")
store = get_db(request).get_store(StoreQuotes)
try:
quote = store.fetch_by_id(quote_id)
except QuoteNotFound:
raise HTTPNotFound("Could not find quote with specified 'quote_id'.")
return HTTPOk(json={"quote": quote.json()})
@sd.process_quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostProcessQuote(), response_schemas=sd.post_quote_responses)
@sd.quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostQuote(), response_schemas=sd.post_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def execute_quote(request):
"""
Execute a quoted process.
"""
quote_info = get_quote_info(request).json["quote"]
quote_bill_info = {
"quote": quote_info.get("id"),
"price": quote_info.get("price"),
"currency": quote_info.get("currency")
}
job_resp = submit_local_job(request)
job_json = job_resp.json
job_id = job_json.get("jobID")
user_id = str(request.authenticated_userid)
store = get_db(request).get_store(StoreBills)
bill = store.save_bill(Bill(user=user_id, job=job_id, **quote_bill_info))
job_json.update({"bill": bill.id})
return HTTPCreated(json=job_json)
|
3988
|
import os
def get_root_path():
current_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(current_path)))
)
return os.path.join(root_path, "xbot")
def get_config_path():
config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config"))
return config_path
def get_data_path():
data_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../../data/")
)
return data_path
|
3989
|
from django.utils.html import format_html
from wagtail.wagtailcore import hooks
@hooks.register('insert_editor_js')
def enable_source():
return format_html(
"""
<script>
registerHalloPlugin('hallohtml');
</script>
"""
)
|
3991
|
from buildbot.process.remotecommand import RemoteCommand
from buildbot.interfaces import WorkerTooOldError
import stat
class FileExists(object):
"""I check a file existence on the worker. I return True if the file
with the given name exists, False if the file does not exist or that is
a directory.
Use me with doStepIf to make a build step conditional to existence of some
file. For example
doStepIf=FileExists('build/configure')
"""
def __init__(self, filename):
self.filename = filename
def __call__(self, step):
step.checkWorkerHasCommand('stat')
cmd = RemoteCommand('stat', {'file': self.filename})
d = step.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
return d
def commandComplete(self, cmd):
if cmd.didFail():
return False
s = cmd.updates["stat"][-1]
filemode = s[stat.ST_MODE]
if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode):
# True only if this is a file or a link and not any other file
# system object.
return True
else:
return False
class FileDoesNotExist(object):
"""I check a file existence on the worker. I return False if
the file with the given name exists or that is a directory, True if the
file does not exist.
Use me with doStepIf to make a build step conditional to nonexistence
of some file. For example
doStepIf=FileDoesNotExist('build/configure')
"""
def __init__(self, filename):
self.filename = filename
def __call__(self, step):
step.checkWorkerHasCommand('stat')
cmd = RemoteCommand('stat', {'file': self.filename})
d = step.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
return d
def commandComplete(self, cmd):
# False if any filesystem object with the given name exists.
return cmd.didFail()
|
4001
|
import re
from curtsies.formatstring import fmtstr, FmtStr
from curtsies.termformatconstants import (
FG_COLORS,
BG_COLORS,
colors as CURTSIES_COLORS,
)
from functools import partial
from ..lazyre import LazyReCompile
COLORS = CURTSIES_COLORS + ("default",)
CNAMES = dict(zip("krgybmcwd", COLORS))
# hack for finding the "inverse"
INVERSE_COLORS = {
CURTSIES_COLORS[idx]: CURTSIES_COLORS[
(idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS)
]
for idx in range(len(CURTSIES_COLORS))
}
INVERSE_COLORS["default"] = INVERSE_COLORS[CURTSIES_COLORS[0]]
def func_for_letter(letter_color_code: str, default: str = "k"):
"""Returns FmtStr constructor for a bpython-style color code"""
if letter_color_code == "d":
letter_color_code = default
elif letter_color_code == "D":
letter_color_code = default.upper()
return partial(
fmtstr,
fg=CNAMES[letter_color_code.lower()],
bold=letter_color_code.isupper(),
)
def color_for_letter(letter_color_code: str, default: str = "k"):
if letter_color_code == "d":
letter_color_code = default
return CNAMES[letter_color_code.lower()]
def parse(s):
"""Returns a FmtStr object from a bpython-formatted colored string"""
rest = s
stuff = []
while True:
if not rest:
break
start, rest = peel_off_string(rest)
stuff.append(start)
return (
sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0]))
if len(stuff) > 0
else FmtStr()
)
def fs_from_match(d):
atts = {}
if d["fg"]:
# this isn't according to spec as I understand it
if d["fg"].isupper():
d["bold"] = True
# TODO figure out why boldness isn't based on presence of \x02
color = CNAMES[d["fg"].lower()]
if color != "default":
atts["fg"] = FG_COLORS[color]
if d["bg"]:
if d["bg"] == "I":
# hack for finding the "inverse"
color = INVERSE_COLORS[color]
else:
color = CNAMES[d["bg"].lower()]
if color != "default":
atts["bg"] = BG_COLORS[color]
if d["bold"]:
atts["bold"] = True
return fmtstr(d["string"], **atts)
peel_off_string_re = LazyReCompile(
r"""(?P<colormarker>\x01
(?P<fg>[krgybmcwdKRGYBMCWD]?)
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)
(?P<bold>\x02?)
\x03
(?P<string>[^\x04]*)
\x04
(?P<rest>.*)
""",
re.VERBOSE | re.DOTALL,
)
def peel_off_string(s):
m = peel_off_string_re.match(s)
assert m, repr(s)
d = m.groupdict()
rest = d["rest"]
del d["rest"]
return d, rest
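# Illustrative example of the colour-string format parsed above (not part of the
# original module): "\x01y" selects a yellow foreground, "\x03" and "\x04" delimit
# the text, so the call below yields a FmtStr rendering "hello" in yellow.
# >>> parse("\x01y\x03hello\x04")   # FmtStr with fg=yellow and text "hello"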
|
4003
|
from ..imports import *
from .. import utils as U
from ..core import GenLearner
class NodeClassLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for node classification
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
    train_data (Iterator): an Iterator instance for the training set
    val_data (Iterator): an Iterator instance for the validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
class LinkPredLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for link prediction
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
    train_data (Iterator): an Iterator instance for the training set
    val_data (Iterator): an Iterator instance for the validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
|
4007
|
import torch
from io import BytesIO
from PIL import Image
from torchvision.transforms import ToPILImage, ToTensor
def _jpeg_compression(im):
assert torch.is_tensor(im)
im = ToPILImage()(im)
savepath = BytesIO()
im.save(savepath, 'JPEG', quality=75)
im = Image.open(savepath)
im = ToTensor()(im)
return im
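# Illustrative usage sketch (assumption: _jpeg_compression is applied as a
# torchvision-style transform to a CHW float tensor in [0, 1]); not part of the
# original snippet.
if __name__ == "__main__":
    dummy = torch.rand(3, 64, 64)          # hypothetical RGB image tensor
    compressed = _jpeg_compression(dummy)  # round-trips through an in-memory JPEG at quality 75
    print(compressed.shape)                # torch.Size([3, 64, 64])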
|
4013
|
import torch
import torch.nn as nn
import torch.nn.functional as f
from prettytable import PrettyTable
from c2nl.modules.char_embedding import CharEmbedding
from c2nl.modules.embeddings import Embeddings
from c2nl.modules.highway import Highway
from c2nl.encoders.transformer import TransformerEncoder
from c2nl.decoders.transformer import TransformerDecoder
from c2nl.inputters import constants
from c2nl.modules.global_attention import GlobalAttention
from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion
from c2nl.utils.misc import sequence_mask
class Embedder(nn.Module):
def __init__(self, args):
super(Embedder, self).__init__()
self.enc_input_size = 0
self.dec_input_size = 0
# at least one of word or char embedding options should be True
assert args.use_src_word or args.use_src_char
assert args.use_tgt_word or args.use_tgt_char
self.use_src_word = args.use_src_word
self.use_tgt_word = args.use_tgt_word
if self.use_src_word:
self.src_word_embeddings = Embeddings(args.emsize,
args.src_vocab_size,
constants.PAD)
self.enc_input_size += args.emsize
if self.use_tgt_word:
self.tgt_word_embeddings = Embeddings(args.emsize,
args.tgt_vocab_size,
constants.PAD)
self.dec_input_size += args.emsize
self.use_src_char = args.use_src_char
self.use_tgt_char = args.use_tgt_char
if self.use_src_char:
assert len(args.filter_size) == len(args.nfilters)
self.src_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.enc_input_size += sum(list(map(int, args.nfilters)))
self.src_highway_net = Highway(self.enc_input_size, num_layers=2)
if self.use_tgt_char:
assert len(args.filter_size) == len(args.nfilters)
self.tgt_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.dec_input_size += sum(list(map(int, args.nfilters)))
self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2)
self.use_type = args.use_code_type
if self.use_type:
self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP),
self.enc_input_size)
self.src_pos_emb = args.src_pos_emb
self.tgt_pos_emb = args.tgt_pos_emb
self.no_relative_pos = all(v == 0 for v in args.max_relative_pos)
if self.src_pos_emb and self.no_relative_pos:
self.src_pos_embeddings = nn.Embedding(args.max_src_len,
self.enc_input_size)
if self.tgt_pos_emb:
self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2,
self.dec_input_size)
self.dropout = nn.Dropout(args.dropout_emb)
def forward(self,
sequence,
sequence_char,
sequence_type=None,
mode='encoder',
step=None):
if mode == 'encoder':
word_rep = None
if self.use_src_word:
word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_src_char:
char_rep = self.src_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.src_highway_net(word_rep) # B x P x d+f
if self.use_type:
type_rep = self.type_embeddings(sequence_type)
word_rep = word_rep + type_rep
if self.src_pos_emb and self.no_relative_pos:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.src_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
elif mode == 'decoder':
word_rep = None
if self.use_tgt_word:
word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_tgt_char:
char_rep = self.tgt_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.tgt_highway_net(word_rep) # B x P x d+f
if self.tgt_pos_emb:
if step is None:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
else:
pos_enc = torch.LongTensor([step]) # used in inference time
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.tgt_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
else:
raise ValueError('Unknown embedder mode!')
word_rep = self.dropout(word_rep)
return word_rep
class Encoder(nn.Module):
def __init__(self,
args,
input_size):
super(Encoder, self).__init__()
self.transformer = TransformerEncoder(num_layers=args.nlayers,
d_model=input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop,
max_relative_positions=args.max_relative_pos,
use_neg_dist=args.use_neg_dist)
self.use_all_enc_layers = args.use_all_enc_layers
if self.use_all_enc_layers:
self.layer_weights = nn.Linear(input_size, 1, bias=False)
def count_parameters(self):
return self.transformer.count_parameters()
def forward(self,
input,
input_len):
layer_outputs, _ = self.transformer(input, input_len) # B x seq_len x h
if self.use_all_enc_layers:
output = torch.stack(layer_outputs, dim=2) # B x seq_len x nlayers x h
layer_scores = self.layer_weights(output).squeeze(3)
layer_scores = f.softmax(layer_scores, dim=-1)
memory_bank = torch.matmul(output.transpose(2, 3),
layer_scores.unsqueeze(3)).squeeze(3)
else:
memory_bank = layer_outputs[-1]
return memory_bank, layer_outputs
class Decoder(nn.Module):
def __init__(self, args, input_size):
super(Decoder, self).__init__()
self.input_size = input_size
self.split_decoder = args.split_decoder and args.copy_attn
if self.split_decoder:
# Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder
self.transformer_c = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
self.transformer_d = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop
)
# To accomplish eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf`
self.fusion_sigmoid = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.Sigmoid()
)
self.fusion_gate = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.ReLU()
)
else:
self.transformer = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
if args.reload_decoder_state:
state_dict = torch.load(
args.reload_decoder_state, map_location=lambda storage, loc: storage
)
self.decoder.load_state_dict(state_dict)
def count_parameters(self):
if self.split_decoder:
return self.transformer_c.count_parameters() + self.transformer_d.count_parameters()
else:
return self.transformer.count_parameters()
def init_decoder(self,
src_lens,
max_src_len):
if self.split_decoder:
state_c = self.transformer_c.init_state(src_lens, max_src_len)
state_d = self.transformer_d.init_state(src_lens, max_src_len)
return state_c, state_d
else:
return self.transformer.init_state(src_lens, max_src_len)
def decode(self,
tgt_words,
tgt_emb,
memory_bank,
state,
step=None,
layer_wise_coverage=None):
if self.split_decoder:
copier_out, attns = self.transformer_c(tgt_words,
tgt_emb,
memory_bank,
state[0],
step=step,
layer_wise_coverage=layer_wise_coverage)
dec_out, _ = self.transformer_d(tgt_words,
tgt_emb,
memory_bank,
state[1],
step=step)
f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1))
gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1)
decoder_outputs = self.fusion_gate(gate_input)
else:
decoder_outputs, attns = self.transformer(tgt_words,
tgt_emb,
memory_bank,
state,
step=step,
layer_wise_coverage=layer_wise_coverage)
return decoder_outputs, attns
def forward(self,
memory_bank,
memory_len,
tgt_pad_mask,
tgt_emb):
max_mem_len = memory_bank[0].shape[1] \
if isinstance(memory_bank, list) else memory_bank.shape[1]
state = self.init_decoder(memory_len, max_mem_len)
return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state)
class Transformer(nn.Module):
"""Module that writes an answer for the question given a passage."""
def __init__(self, args, tgt_dict):
""""Constructor of the class."""
super(Transformer, self).__init__()
self.name = 'Transformer'
if len(args.max_relative_pos) != args.nlayers:
assert len(args.max_relative_pos) == 1
args.max_relative_pos = args.max_relative_pos * args.nlayers
self.embedder = Embedder(args)
self.encoder = Encoder(args, self.embedder.enc_input_size)
self.decoder = Decoder(args, self.embedder.dec_input_size)
self.layer_wise_attn = args.layer_wise_attn
self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size)
if args.share_decoder_embeddings:
if self.embedder.use_tgt_word:
assert args.emsize == self.decoder.input_size
self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight
self._copy = args.copy_attn
if self._copy:
self.copy_attn = GlobalAttention(dim=self.decoder.input_size,
attn_type=args.attn_type)
self.copy_generator = CopyGenerator(self.decoder.input_size,
tgt_dict,
self.generator)
self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict),
force_copy=args.force_copy)
else:
self.criterion = nn.CrossEntropyLoss(reduction='none')
def _run_forward_ml(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
batch_size = code_len.size(0)
# embed and encode the source sequence
code_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B x seq_len x h
# embed and encode the target sequence
summ_emb = self.embedder(summ_word_rep,
summ_char_rep,
mode='decoder')
summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1))
enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank
layer_wise_dec_out, attns = self.decoder(enc_outputs,
code_len,
summ_pad_mask,
summ_emb)
decoder_outputs = layer_wise_dec_out[-1]
loss = dict()
target = tgt_seq[:, 1:].contiguous()
if self._copy:
# copy_score: batch_size, tgt_len, src_len
_, copy_score, _ = self.copy_attn(decoder_outputs,
memory_bank,
memory_lengths=code_len,
softmax_weights=False)
# mask copy_attn weights here if needed
if kwargs['code_mask_rep'] is not None:
mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
scores = self.copy_generator(decoder_outputs, attn_copy, src_map)
scores = scores[:, :-1, :].contiguous()
ml_loss = self.criterion(scores,
alignment[:, 1:].contiguous(),
target)
else:
scores = self.generator(decoder_outputs) # `batch x tgt_len x vocab_size`
scores = scores[:, :-1, :].contiguous() # `batch x tgt_len - 1 x vocab_size`
ml_loss = self.criterion(scores.view(-1, scores.size(2)),
target.view(-1))
ml_loss = ml_loss.view(*scores.size()[:-1])
ml_loss = ml_loss.mul(target.ne(constants.PAD).float())
ml_loss = ml_loss.sum(1) * kwargs['example_weights']
loss['ml_loss'] = ml_loss.mean()
loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean()
return loss
def forward(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
"""
Input:
- code_word_rep: ``(batch_size, max_doc_len)``
- code_char_rep: ``(batch_size, max_doc_len, max_word_len)``
- code_len: ``(batch_size)``
- summ_word_rep: ``(batch_size, max_que_len)``
- summ_char_rep: ``(batch_size, max_que_len, max_word_len)``
- summ_len: ``(batch_size)``
- tgt_seq: ``(batch_size, max_len)``
Output:
- ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)``
"""
if self.training:
return self._run_forward_ml(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs)
else:
return self.decode(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs)
def __tens2sent(self,
t,
tgt_dict,
src_vocabs):
words = []
for idx, w in enumerate(t):
widx = w[0].item()
if widx < len(tgt_dict):
words.append(tgt_dict[widx])
else:
widx = widx - len(tgt_dict)
words.append(src_vocabs[idx][widx])
return words
def __generate_sequence(self,
params,
choice='greedy',
tgt_words=None):
batch_size = params['memory_bank'].size(0)
use_cuda = params['memory_bank'].is_cuda
if tgt_words is None:
tgt_words = torch.LongTensor([constants.BOS])
if use_cuda:
tgt_words = tgt_words.cuda()
tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD)
tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0)
tgt_chars = tgt_chars.repeat(batch_size, 1)
tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1)
dec_preds = []
copy_info = []
attentions = []
dec_log_probs = []
acc_dec_outs = []
max_mem_len = params['memory_bank'][0].shape[1] \
if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1]
dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len)
attns = {"coverage": None}
enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \
else params['memory_bank']
# +1 for <EOS> token
for idx in range(params['max_len'] + 1):
tgt = self.embedder(tgt_words,
tgt_chars,
mode='decoder',
step=idx)
tgt_pad_mask = tgt_words.data.eq(constants.PAD)
layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask,
tgt,
enc_outputs,
dec_states,
step=idx,
layer_wise_coverage=attns['coverage'])
decoder_outputs = layer_wise_dec_out[-1]
acc_dec_outs.append(decoder_outputs.squeeze(1))
if self._copy:
_, copy_score, _ = self.copy_attn(decoder_outputs,
params['memory_bank'],
memory_lengths=params['src_len'],
softmax_weights=False)
# mask copy_attn weights here if needed
if params['src_mask'] is not None:
mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
prediction = self.copy_generator(decoder_outputs,
attn_copy,
params['src_map'])
prediction = prediction.squeeze(1)
for b in range(prediction.size(0)):
if params['blank'][b]:
blank_b = torch.LongTensor(params['blank'][b])
fill_b = torch.LongTensor(params['fill'][b])
if use_cuda:
blank_b = blank_b.cuda()
fill_b = fill_b.cuda()
prediction[b].index_add_(0, fill_b,
prediction[b].index_select(0, blank_b))
prediction[b].index_fill_(0, blank_b, 1e-10)
else:
prediction = self.generator(decoder_outputs.squeeze(1))
prediction = f.softmax(prediction, dim=1)
if choice == 'greedy':
tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True)
log_prob = torch.log(tgt_prob + 1e-20)
elif choice == 'sample':
tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1))
else:
assert False
dec_log_probs.append(log_prob.squeeze(1))
dec_preds.append(tgt.squeeze(1).clone())
if "std" in attns:
# std_attn: batch_size x num_heads x 1 x src_len
std_attn = torch.stack(attns["std"], dim=1)
attentions.append(std_attn.squeeze(2))
if self._copy:
mask = tgt.gt(len(params['tgt_dict']) - 1)
copy_info.append(mask.float().squeeze(1))
words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab'])
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words]
tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1)
words = [params['tgt_dict'][w] for w in words]
words = torch.Tensor(words).type_as(tgt)
tgt_words = words.unsqueeze(1)
return dec_preds, attentions, copy_info, dec_log_probs
def decode(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs):
word_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B x seq_len x h
params = dict()
params['memory_bank'] = memory_bank
params['layer_wise_outputs'] = layer_wise_outputs
params['src_len'] = code_len
params['source_vocab'] = kwargs['source_vocab']
params['src_map'] = src_map
params['src_mask'] = kwargs['code_mask_rep']
params['fill'] = kwargs['fill']
params['blank'] = kwargs['blank']
params['src_dict'] = kwargs['src_dict']
params['tgt_dict'] = kwargs['tgt_dict']
params['max_len'] = kwargs['max_len']
params['src_words'] = code_word_rep
dec_preds, attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy')
dec_preds = torch.stack(dec_preds, dim=1)
copy_info = torch.stack(copy_info, dim=1) if copy_info else None
# attentions: batch_size x tgt_len x num_heads x src_len
attentions = torch.stack(attentions, dim=1) if attentions else None
return {
'predictions': dec_preds,
'copy_info': copy_info,
'memory_bank': memory_bank,
'attentions': attentions
}
def count_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def count_encoder_parameters(self):
return self.encoder.count_parameters()
def count_decoder_parameters(self):
return self.decoder.count_parameters()
def layer_wise_parameters(self):
table = PrettyTable()
table.field_names = ["Layer Name", "Output Shape", "Param #"]
table.align["Layer Name"] = "l"
table.align["Output Shape"] = "r"
table.align["Param #"] = "r"
for name, parameters in self.named_parameters():
if parameters.requires_grad:
table.add_row([name, str(list(parameters.shape)), parameters.numel()])
return table
|
4020
|
import csv
import datetime
import random
import os
from parsers.parser_base import ParserBase
FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1)
FILE_TIME_MICROSECOND = 10
def filetime_to_epoch_datetime(file_time):
if isinstance(file_time, int):
microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND
else:
microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND
return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch)
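# Quick sanity check (illustrative, not part of the original parser): the Windows
# FILETIME value 116444736000000000 (100-ns intervals since 1601-01-01) corresponds
# to the Unix epoch, so it converts to 1970-01-01 00:00:00:
# >>> filetime_to_epoch_datetime(116444736000000000)
# datetime.datetime(1970, 1, 1, 0, 0)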
class SrumParser(ParserBase):
CSV_FIELDS = {
"Unknown1.csv": ["TimeStamp", "AppId", "UserId", "EndTime", "DurationMS"],
"Unknown2.csv": [],
"Unknown3.csv": [],
"Unknown4.csv": ["TimeStamp", "AppId", "UserId"],
"SruDbCheckpointTable.csv": [],
"SruDbIdMapTable.csv": [],
"Network Usage.csv": ["TimeStamp", "AppId", "UserId", "InterfaceLuid", "L2ProfileId", "BytesSent",
"BytesRecvd"],
"Network Connections.csv": [],
"Energy Usage.csv": [],
"Energy Usage(Long - Term).csv": [],
"Application Resources.csv": ["TimeStamp", "AppId", "UserId"],
"Application Resource Usage.csv": ["TimeStamp", "AppId", "UserId"]
}
PARSING_TOOL = r"Tools\ese-analyst-master\ese2csv.exe"
PARSE_COMMAND = "{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}"
def __init__(self, temp, config):
super().__init__(config)
self.temp_result_path = temp
def parse(self, args):
srum_db, software_hive = args
output = r"{}\srum_{}".format(self.temp_result_path, random.randint(1, 1000000))
os.mkdir(output)
command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db,
software_hive=software_hive)
self._run_command(command)
for csv_file in os.listdir(output):
srum_records = []
full_path = os.path.join(output, csv_file)
headers = self.CSV_FIELDS.get(csv_file)
if not headers:
continue
if csv_file == "Unknown1.csv":
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
endTime = line.get("EndTime")
duration = line.get("DurationMS")
if endTime and duration:
cur_record["time"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat()
cur_record["EndTime"] = filetime_to_epoch_datetime(endTime).isoformat()
cur_record["DurationMS"] = duration
else:
cur_record["time"] = datetime.datetime(1970, 1, 1).isoformat()
cur_record["AppId"] = line.get("AppId")
cur_record["UserId"] = line.get("UserId")
srum_records.append(cur_record)
else:
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
for header in headers:
if header == "TimeStamp":
cur_record["time"] = line.get("TimeStamp").replace(" ", "T")
line.pop("TimeStamp")
value = line.get(header)
if value:
if isinstance(value, bytes):
cur_record[header.lower().replace(" ", "_")] = value.decode()
elif str.isdigit(value):
cur_record[header.lower().replace(" ", "_")] = int(value)
else:
cur_record[header.lower().replace(" ", "_")] = value
else:
cur_record[header.lower().replace(" ", "_")] = ""
srum_records.append(cur_record)
self._write_results_list([("srum-{}".format(csv_file.split(".")[0].lower().replace(" ", "_")), srum_records)])
|
4038
|
from collections import defaultdict
import json
import re
import time
from urllib.parse import urlparse
import uuid
import boto3
import boto3.exceptions
import botocore.exceptions
import markus
import redis.exceptions
import requests
import requests.exceptions
from sqlalchemy import select
import sqlalchemy.exc
from ichnaea.data import _map_content_enabled
from ichnaea.models import (
ApiKey,
BlueObservation,
BlueReport,
BlueShard,
CellObservation,
CellReport,
CellShard,
DataMap,
ExportConfig,
Report,
WifiObservation,
WifiReport,
WifiShard,
)
from ichnaea.models.content import encode_datamap_grid
from ichnaea import util
WHITESPACE = re.compile(r"\s", flags=re.UNICODE)
METRICS = markus.get_metrics()
class IncomingQueue(object):
"""
The incoming queue contains the data collected in the web application. It
is the single entrypoint from which all other data pipelines get their
data.
    It distributes the data into the configured export queues, checks those
    queues, and if they contain enough data or old enough data, schedules an
    async export task to process the data in each queue.
"""
def __init__(self, task):
self.task = task
def __call__(self, export_task):
redis_client = self.task.redis_client
data_queue = self.task.app.data_queues["update_incoming"]
data = data_queue.dequeue()
grouped = defaultdict(list)
for item in data:
grouped[(item["api_key"], item.get("source", "gnss"))].append(
{"api_key": item["api_key"], "report": item["report"]}
)
with self.task.db_session(commit=False) as session:
export_configs = ExportConfig.all(session)
with self.task.redis_pipeline() as pipe:
for (api_key, source), items in grouped.items():
for config in export_configs:
if config.allowed(api_key, source):
queue_key = config.queue_key(api_key, source)
queue = config.queue(queue_key, redis_client)
queue.enqueue(items, pipe=pipe)
for config in export_configs:
# Check all queues if they now contain enough data or
# old enough data to be ready for processing.
for queue_key in config.partitions(redis_client):
queue = config.queue(queue_key, redis_client)
if queue.ready():
export_task.delay(config.name, queue_key)
if data_queue.ready():
self.task.apply_countdown()
class ReportExporter(object):
_retriable = (IOError,)
_retries = 3
_retry_wait = 1.0
def __init__(self, task, config, queue_key):
self.task = task
self.config = config
self.queue_key = queue_key
self.queue = config.queue(queue_key, task.redis_client)
self.stats_tags = ["key:" + self.config.name]
@staticmethod
def export(task, name, queue_key):
with task.db_session(commit=False) as session:
config = ExportConfig.get(session, name)
exporter_types = {
"dummy": DummyExporter,
"geosubmit": GeosubmitExporter,
"internal": InternalExporter,
"s3": S3Exporter,
}
exporter_type = exporter_types.get(config.schema)
if exporter_type is not None:
exporter_type(task, config, queue_key)()
def __call__(self):
queue_items = self.queue.dequeue()
if not queue_items:
return
success = False
for i in range(self._retries):
try:
with METRICS.timer("data.export.upload.timing", tags=self.stats_tags):
self.send(queue_items)
success = True
except self._retriable:
success = False
time.sleep(self._retry_wait * (i ** 2 + 1))
if success:
METRICS.incr("data.export.batch", tags=self.stats_tags)
break
if success and self.queue.ready():
self.task.apply_countdown(args=[self.config.name, self.queue_key])
def send(self, queue_items):
raise NotImplementedError()
class DummyExporter(ReportExporter):
def send(self, queue_items):
pass
class GeosubmitExporter(ReportExporter):
_retriable = (IOError, requests.exceptions.RequestException)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
headers = {
"Content-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": "ichnaea",
}
response = requests.post(
self.config.url,
data=util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=5
),
headers=headers,
timeout=60.0,
)
# log upload_status and trigger exception for bad responses
# this causes the task to be re-tried
METRICS.incr(
"data.export.upload",
tags=self.stats_tags + ["status:%s" % response.status_code],
)
response.raise_for_status()
class S3Exporter(ReportExporter):
_retriable = (
IOError,
boto3.exceptions.Boto3Error,
botocore.exceptions.BotoCoreError,
)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
_, bucketname, path = urlparse(self.config.url)[:3]
# s3 key names start without a leading slash
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
year, month, day = util.utcnow().timetuple()[:3]
# strip away queue prefix again
parts = self.queue_key.split(":")
source = parts[1]
api_key = parts[2]
obj_name = path.format(
source=source, api_key=api_key, year=year, month=month, day=day
)
obj_name += uuid.uuid1().hex + ".json.gz"
try:
data = util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=7
)
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucketname)
obj = bucket.Object(obj_name)
obj.put(Body=data, ContentEncoding="gzip", ContentType="application/json")
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:success"]
)
except Exception:
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:failure"]
)
raise
class InternalTransform(object):
"""
This maps the geosubmit v2 schema used in view code and external
transfers (backup, forward to partners) to the internal submit v1
schema used in our own database models.
"""
# *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a two-tuple
position_id = ("position", None)
position_map = [
("latitude", "lat"),
("longitude", "lon"),
"accuracy",
"altitude",
("altitudeAccuracy", "altitude_accuracy"),
"heading",
"pressure",
"speed",
"source",
]
blue_id = ("bluetoothBeacons", "blue")
blue_map = [("macAddress", "mac"), "age", ("signalStrength", "signal")]
cell_id = ("cellTowers", "cell")
cell_map = [
("radioType", "radio"),
("mobileCountryCode", "mcc"),
("mobileNetworkCode", "mnc"),
("locationAreaCode", "lac"),
("cellId", "cid"),
"age",
"asu",
("primaryScramblingCode", "psc"),
"serving",
("signalStrength", "signal"),
("timingAdvance", "ta"),
]
wifi_id = ("wifiAccessPoints", "wifi")
wifi_map = [
("macAddress", "mac"),
"age",
"channel",
"frequency",
("radioType", "radio"),
("signalToNoiseRatio", "snr"),
("signalStrength", "signal"),
]
def _map_dict(self, item_source, field_map):
value = {}
for spec in field_map:
if isinstance(spec, tuple):
source, target = spec
else:
source = spec
target = spec
source_value = item_source.get(source)
if source_value is not None:
value[target] = source_value
return value
def _parse_dict(self, item, report, key_map, field_map):
value = {}
item_source = item.get(key_map[0])
if item_source:
value = self._map_dict(item_source, field_map)
if value:
if key_map[1] is None:
report.update(value)
else:
report[key_map[1]] = value
return value
def _parse_list(self, item, report, key_map, field_map):
values = []
for value_item in item.get(key_map[0], ()):
value = self._map_dict(value_item, field_map)
if value:
values.append(value)
if values:
report[key_map[1]] = values
return values
def __call__(self, item):
report = {}
self._parse_dict(item, report, self.position_id, self.position_map)
blues = self._parse_list(item, report, self.blue_id, self.blue_map)
cells = self._parse_list(item, report, self.cell_id, self.cell_map)
wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)
position = item.get("position") or {}
gps_age = position.get("age", 0)
timestamp = item.get("timestamp")
if timestamp:
# turn timestamp into GPS timestamp
report["timestamp"] = timestamp - gps_age
if gps_age:
# Normalize age fields to be relative to GPS time
for type_ in ("blue", "cell", "wifi"):
for record in report.get(type_, ()):
record["age"] = record.get("age", 0) - gps_age
if blues or cells or wifis:
return report
return {}
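# Illustrative input/output for the transform above (not part of the original module):
# a geosubmit v2 item such as
#   {"position": {"latitude": 51.5, "longitude": -0.1, "accuracy": 10.0},
#    "wifiAccessPoints": [{"macAddress": "ab:cd:ef:12:34:56", "signalStrength": -60}]}
# is mapped to the internal submit v1 shape
#   {"lat": 51.5, "lon": -0.1, "accuracy": 10.0,
#    "wifi": [{"mac": "ab:cd:ef:12:34:56", "signal": -60}]}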
class InternalExporter(ReportExporter):
_retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError)
transform = InternalTransform()
def send(self, queue_items):
api_keys = set()
api_keys_known = set()
metrics = {}
items = []
for item in queue_items:
# preprocess items and extract set of API keys
item["report"] = self.transform(item["report"])
if item["report"]:
items.append(item)
api_keys.add(item["api_key"])
for api_key in api_keys:
metrics[api_key] = {}
for type_ in ("report", "blue", "cell", "wifi"):
for action in ("drop", "upload"):
metrics[api_key]["%s_%s" % (type_, action)] = 0
with self.task.db_session(commit=False) as session:
# limit database session to get API keys
keys = [key for key in api_keys if key]
if keys:
columns = ApiKey.__table__.c
rows = session.execute(
select([columns.valid_key]).where(columns.valid_key.in_(keys))
).fetchall()
for row in rows:
api_keys_known.add(row.valid_key)
positions = []
observations = {"blue": [], "cell": [], "wifi": []}
for item in items:
api_key = item["api_key"]
report = item["report"]
obs, malformed_obs = self.process_report(report)
any_data = False
for name in ("blue", "cell", "wifi"):
if obs.get(name):
observations[name].extend(obs[name])
metrics[api_key][name + "_upload"] += len(obs[name])
any_data = True
metrics[api_key][name + "_drop"] += malformed_obs.get(name, 0)
metrics[api_key]["report_upload"] += 1
if any_data:
positions.append((report["lat"], report["lon"]))
else:
metrics[api_key]["report_drop"] += 1
with self.task.redis_pipeline() as pipe:
self.queue_observations(pipe, observations)
if _map_content_enabled and positions:
self.process_datamap(pipe, positions)
self.emit_metrics(api_keys_known, metrics)
def queue_observations(self, pipe, observations):
for datatype, shard_model, shard_key, queue_prefix in (
("blue", BlueShard, "mac", "update_blue_"),
("cell", CellShard, "cellid", "update_cell_"),
("wifi", WifiShard, "mac", "update_wifi_"),
):
queued_obs = defaultdict(list)
for obs in observations[datatype]:
# group by sharded queue
shard_id = shard_model.shard_id(getattr(obs, shard_key))
queue_id = queue_prefix + shard_id
queued_obs[queue_id].append(obs.to_json())
for queue_id, values in queued_obs.items():
# enqueue values for each queue
queue = self.task.app.data_queues[queue_id]
queue.enqueue(values, pipe=pipe)
def emit_metrics(self, api_keys_known, metrics):
for api_key, key_metrics in metrics.items():
api_tag = []
if api_key and api_key in api_keys_known:
api_tag = ["key:%s" % api_key]
for name, count in key_metrics.items():
if not count:
continue
type_, action = name.split("_")
if type_ == "report":
suffix = "report"
tags = api_tag
else:
suffix = "observation"
tags = ["type:%s" % type_] + api_tag
METRICS.incr("data.%s.%s" % (suffix, action), count, tags=tags)
def process_report(self, data):
report = Report.create(**data)
if report is None:
return ({}, {})
malformed = {}
observations = {}
for name, report_cls, obs_cls in (
("blue", BlueReport, BlueObservation),
("cell", CellReport, CellObservation),
("wifi", WifiReport, WifiObservation),
):
malformed[name] = 0
observations[name] = {}
if data.get(name):
for item in data[name]:
# validate the blue/cell/wifi specific fields
item_report = report_cls.create(**item)
if item_report is None:
malformed[name] += 1
continue
# combine general and specific report data into one
item_obs = obs_cls.combine(report, item_report)
item_key = item_obs.unique_key
# if we have better data for the same key, ignore
existing = observations[name].get(item_key)
if existing is not None and existing.better(item_obs):
continue
observations[name][item_key] = item_obs
obs = {
"blue": observations["blue"].values(),
"cell": observations["cell"].values(),
"wifi": observations["wifi"].values(),
}
return (obs, malformed)
def process_datamap(self, pipe, positions):
grids = set()
for lat, lon in positions:
if lat is not None and lon is not None:
grids.add(DataMap.scale(lat, lon))
shards = defaultdict(set)
for lat, lon in grids:
shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon))
for shard_id, values in shards.items():
queue = self.task.app.data_queues["update_datamap_" + shard_id]
queue.enqueue(list(values), pipe=pipe)
|
4079
|
class resampler:
def __init__(self):
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from collections import Counter
import numpy as np
self.bins = 3
self.pd = pd
self.LabelEncoder = LabelEncoder
self.Counter = Counter
self.X = 0
self.Y_classes = 0
self.target = 0
self.np = np
# This function adds classes to each sample and returns the class list as a dataframe/numpy array (as per input)
# It also merges classes as and when required
def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2):
self.bins = bins
tmp = target
# If data is numpy, then convert it into pandas
if type(target) == int:
if target < 0:
target = X.shape[1]+target
tmp = target
self.X = self.pd.DataFrame()
for i in range(X.shape[1]):
if i!=target:
self.X[str(i)] = X[:,i]
self.X["target"] = X[:,target]
target = "target"
else:
self.X = X.copy()
# Use qcut if balanced binning is required
if balanced_binning:
self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0)
else:
self.Y_classes = self.pd.cut(self.X[target], bins=self.bins)
# Pandas outputs ranges after binning. Convert ranges to classes
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
        # Merge classes that have fewer than min_n_samples samples
        # (e.g. fewer samples than a resampler's required number of neighbours)
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
mid_point = len(classes_count)
# Logic for merging
for i in range(len(classes_count)):
if classes_count[i][1] < min_n_samples:
self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0]
if verbose > 0:
print("INFO: Class " + str(classes_count[i][0]) + " has been merged into Class " + str(classes_count[i-1][0]) + " due to low number of samples")
classes_count[i][0] = classes_count[i-1][0]
if verbose > 0:
print()
# Perform label-encoding once again
# Avoids class skipping after merging
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
# Pretty print
if verbose > 1:
print("Class Distribution:\n-------------------")
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
for class_, count in classes_count:
print(str(class_)+": "+str(count))
print()
# Finally concatenate and return as dataframe or numpy
# Based on what type of target was sent
self.X["classes"] = self.Y_classes
if type(tmp) == int:
self.target = tmp
else:
self.target = target
return self.Y_classes
# This function performs the re-sampling
def resample(self, sampler_obj, trainX, trainY):
# If classes haven't yet been created, then run the "fit" function
if type(self.Y_classes) == int:
print("Error! Run fit method first!!")
return None
# Finally, perform the re-sampling
resampled_data, _ = sampler_obj.fit_resample(trainX, trainY)
if type(resampled_data).__module__ == 'numpy':
resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop("classes", axis=1).columns)
# Return the correct X and Y
if type(self.target) == int:
return resampled_data.drop("target", axis=1).values, resampled_data["target"].values
else:
return resampled_data.drop(self.target, axis=1), resampled_data[self.target]
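# A minimal, illustrative usage sketch (not part of the original class). It assumes an
# imbalanced-learn style sampler, i.e. any object exposing fit_resample(X, y), such as
# imblearn.over_sampling.SMOTE; the DataFrame below is synthetic.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    from imblearn.over_sampling import SMOTE  # assumption: imbalanced-learn is installed
    rng = np.random.RandomState(0)
    df = pd.DataFrame({"f1": rng.rand(100), "f2": rng.rand(100), "y": rng.rand(100)})
    rs = resampler()
    classes = rs.fit(df, target="y", bins=3)          # bin the continuous target into classes
    # Pass the full frame (features + target); resample() splits them back out afterwards.
    X_res, y_res = rs.resample(SMOTE(), df, classes)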
|
4082
|
import pytest
from httmock import urlmatch, HTTMock
from util.config import URLSchemeAndHostname
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator
from test.fixtures import *
@pytest.mark.parametrize(
"unvalidated_config",
[
(ValidatorContext({})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_KEY": "foo"}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_SECRET": "foo"}})),
],
)
def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app):
validator = BitbucketTriggerValidator()
with pytest.raises(ConfigValidationException):
validator.validate(unvalidated_config)
def test_validate_bitbucket_trigger(app):
url_hit = [False]
@urlmatch(netloc=r"bitbucket.org")
def handler(url, request):
url_hit[0] = True
return {
"status_code": 200,
"content": "oauth_token=foo&oauth_token_secret=bar",
}
with HTTMock(handler):
validator = BitbucketTriggerValidator()
url_scheme_and_hostname = URLSchemeAndHostname("http", "localhost:5000")
unvalidated_config = ValidatorContext(
{
"BITBUCKET_TRIGGER_CONFIG": {
"CONSUMER_KEY": "foo",
"CONSUMER_SECRET": "bar",
},
},
url_scheme_and_hostname=url_scheme_and_hostname,
)
validator.validate(unvalidated_config)
assert url_hit[0]
|
4089
|
from typing import List, Tuple
from omegaconf import DictConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcycle.common.abstract.loss import Loss
class DQNLoss(Loss):
"""Compute double DQN loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
q_value = network.forward(states).gather(1, actions)
with torch.no_grad():
next_q = torch.max(target_network.forward(next_states), 1)[0].unsqueeze(1)
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_q = rewards + (1 - dones) * n_step_gamma * next_q
element_wise_loss = F.smooth_l1_loss(
q_value, target_q.detach(), reduction="none"
)
return element_wise_loss
class QRLoss(Loss):
"""Compute quantile regression loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * next_z
distance = target_z - z_dists
quantile_huber_loss = (
network.tau - (distance.detach() < 0).float()
).abs() * self.huber_loss(distance)
element_wise_loss = torch.mean(quantile_huber_loss, dim=1, keepdim=True)
return element_wise_loss
@staticmethod
    def huber_loss(x: torch.Tensor, k: float = 1.0) -> torch.Tensor:
return torch.where(x.abs() <= k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
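# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal standalone check of the quantile-Huber weighting used by QRLoss above:
# for TD errors u and quantile midpoints tau, the per-quantile loss is
# |tau - 1{u < 0}| * huber(u). The tensor values below are toy assumptions.
def _quantile_huber_example() -> torch.Tensor:
    num_quantiles = 4
    # Quantile midpoints tau_i = (i + 0.5) / N, as commonly used in QR-DQN.
    tau = (torch.arange(num_quantiles, dtype=torch.float32) + 0.5) / num_quantiles
    u = torch.tensor([[0.3, -1.2, 0.7, -0.1]])  # toy TD errors, shape (1, N)
    huber = torch.where(u.abs() <= 1.0, 0.5 * u.pow(2), u.abs() - 0.5)
    return ((tau - (u < 0).float()).abs() * huber).mean()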
class CategoricalLoss(Loss):
"""Compute C51 loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
batch_size = states.size(0)
offset = (
torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size)
.long()
.unsqueeze(1)
.expand(batch_size, network.num_atoms)
)
if self.use_cuda:
offset = offset.cuda()
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * network.support
target_z = torch.clamp(target_z, min=network.v_min, max=network.v_max)
target_proj = self.dist_projection(network, next_z, target_z, offset)
log_dist = torch.log(z_dists)
element_wise_loss = -(target_proj * log_dist).sum(1)
return element_wise_loss
def dist_projection(
self,
network: nn.Module,
next_z: torch.Tensor,
target_z: torch.Tensor,
offset: torch.Tensor,
) -> torch.Tensor:
b = (target_z - network.v_min) / network.delta_z
lb = b.floor().long()
ub = b.ceil().long()
proj_dist = torch.zeros(next_z.size())
if self.use_cuda:
proj_dist = proj_dist.cuda()
proj_dist.view(-1).index_add_(
0, (lb + offset).view(-1), (next_z * (ub.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)
)
return proj_dist
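# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal example of the categorical projection performed by dist_projection
# above: a clamped target value is split between its two neighbouring support
# atoms in proportion to its distance from each. v_min, v_max and num_atoms
# below are assumed values; the integer-valued-b edge case (lb == ub) is ignored.
def _c51_projection_example() -> torch.Tensor:
    v_min, v_max, num_atoms = -10.0, 10.0, 51
    delta_z = (v_max - v_min) / (num_atoms - 1)
    target_z = torch.tensor(3.3)  # a single clamped target value
    b = (target_z - v_min) / delta_z
    lb, ub = b.floor().long(), b.ceil().long()
    proj = torch.zeros(num_atoms)
    proj[lb] += ub.float() - b  # mass assigned to the lower atom
    proj[ub] += b - lb.float()  # mass assigned to the upper atom
    return proj  # sums to 1 for a unit-mass target value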
|
4111
|
from devito.ir import Call
from devito.passes.iet.definitions import DataManager
from devito.passes.iet.langbase import LangBB
__all__ = ['CBB', 'CDataManager']
class CBB(LangBB):
mapper = {
'aligned': lambda i:
'__attribute__((aligned(%d)))' % i,
'host-alloc': lambda i, j, k:
Call('posix_memalign', (i, j, k)),
'host-free': lambda i:
Call('free', (i,)),
}
class CDataManager(DataManager):
lang = CBB
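# --- Illustrative note (editor's addition, not part of the original module) ---
# The mapper entries above are plain callables. For example, the 'aligned'
# entry renders a GCC/Clang alignment attribute string,
#     CBB.mapper['aligned'](64)  ->  '__attribute__((aligned(64)))'
# while 'host-free' builds an IET Call node equivalent to Call('free', (ptr,))
# for a given pointer symbol ptr.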
|
4128
|
import os
import random
from flask import Flask, request, send_from_directory
from werkzeug.utils import secure_filename
from pianonet.core.pianoroll import Pianoroll
from pianonet.model_inspection.performance_from_pianoroll import get_performance_from_pianoroll
app = Flask(__name__)
base_path = "/app/"
# base_path = "/Users/angsten/PycharmProjects/pianonet"
performances_path = os.path.join(base_path, 'data', 'performances')
def get_random_midi_file_name():
"""
    Get a random midi file name that is effectively guaranteed never to collide.
"""
return str(random.randint(0, 10000000000000000000)) + ".midi"
def get_performance_path(midi_file_name):
"""
    Returns the full path to a performance midi file, given its file name.
"""
return os.path.join(performances_path, midi_file_name)
@app.route('/')
def alive():
return 'OK'
@app.route('/performances/', methods=['GET'])
def get_performance():
"""
    Returns the requested performance as a midi file.
    Expected query parameter is 'midi_file_name', e.g. 1234.midi
"""
performance_midi_file_name = request.args.get('midi_file_name')
performance_midi_file_name = secure_filename(performance_midi_file_name)
print(performance_midi_file_name)
    if performance_midi_file_name is None:
return {"http_code": 400, "code": "BadRequest", "message": "midi_file_name not found in request."}
midi_file_path = get_performance_path(performance_midi_file_name)
if not os.path.exists(midi_file_path):
return {
"http_code": 404,
"code": "Not Found",
"message": "midi_file " + performance_midi_file_name + " not found."
}
    return send_from_directory(performances_path, performance_midi_file_name)
@app.route('/create-performance', methods=['POST'])
def performance():
"""
Expects post form data as follows:
    seed_midi_file_data: Midi file bytes that seed the performance, encoded as a comma-separated string of integers, e.g. "8,2,3,4,5..."
seconds_to_generate: Number of seconds of new notes to generate
model_complexity: Quality of model to use, one of ['low', 'medium', 'high', 'highest']
"""
seed_midi_file_data = request.form.get('seed_midi_file_data')
    if seed_midi_file_data is None:
return {"http_code": 400, "code": "BadRequest", "message": "seed_midi_file_data not found in request."}
else:
seed_midi_file_int_array = [int(x) for x in seed_midi_file_data.split(',')]
frame = bytearray()
for i in seed_midi_file_int_array:
frame.append(i)
saved_seed_midi_file_path = os.path.join(base_path, 'data', 'seeds', get_random_midi_file_name())
with open(saved_seed_midi_file_path, 'wb') as midi_file:
midi_file.write(frame)
seconds_to_generate = request.form.get('seconds_to_generate')
    if seconds_to_generate is None:
return {"http_code": 400, "code": "BadRequest", "message": "seconds_to_generate not found in request."}
else:
seconds_to_generate = float(seconds_to_generate)
model_complexity = request.form.get('model_complexity', 'low')
if model_complexity == 'low':
model_name = "micro_1"
else:
model_name = "r9p0_3500kparams_approx_9_blocks_model"
model_path = os.path.join(base_path, 'models', model_name)
input_pianoroll = Pianoroll(saved_seed_midi_file_path, use_custom_multitrack=True)
input_pianoroll.trim_silence_off_ends()
final_pianoroll = get_performance_from_pianoroll(
pianoroll_seed=input_pianoroll,
num_time_steps=int(48 * seconds_to_generate),
model_path=model_path,
)
midi_file_name = get_random_midi_file_name()
midi_file_path = get_performance_path(midi_file_name)
final_pianoroll.save_to_midi_file(midi_file_path)
return {"http_code": 200, "code": "Success", "message": "", "midi_file_name": midi_file_name}
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
4131
|
from robot import __version__ as ROBOT_VERSION
import sys
import tempfile
import textwrap
import unittest
import shutil
import subprocess
class PabotOrderingGroupTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _run_tests_with(self, testfile, orderfile):
robot_file = open("{}/test.robot".format(self.tmpdir), "w")
robot_file.write(textwrap.dedent(testfile))
robot_file.close()
with open("{}/order.dat".format(self.tmpdir), "w") as f:
f.write(textwrap.dedent(orderfile))
process = subprocess.Popen(
[
sys.executable,
"-m" "pabot.pabot",
"--testlevelsplit",
"--ordering",
"{}/order.dat".format(self.tmpdir),
"{}/test.robot".format(self.tmpdir),
],
cwd=self.tmpdir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return process.communicate()
def test_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
def test_two_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Second And Quarter
Should Be Equal ${SCALAR} Hello, globe!
Second And Half
Should Be Equal ${SCALAR} Hello, globe!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
{
--test Test.Second And Quarter
--test Test.Second And Half
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = "5 critical tests, 5 passed, 0 failed"
else:
expected_write = "5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 3)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = b"5 critical tests, 5 passed, 0 failed"
else:
expected_write = b"5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 3)
def test_too_big_testname(self):
stdout, stderr = self._run_tests_with(
"""
*** Test Cases ***
Test Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus eget orci porta blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.
Log Test
""",
"""
--test Invalid
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 1)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 1)
def test_longnames_in_tests(self):
stdout, stderr = self._run_tests_with(
"""
*** Settings ***
Test Template Test1
*** Test Cases ***
The Somewhat Long Name Of The Test S1Test 01 1
The Somewhat Long Name Of The Test S1Test 02 1
The Somewhat Long Name Of The Test S1Test 03 1
The Somewhat Long Name Of The Test S1Test 04 1
The Somewhat Long Name Of The Test S1Test 05 1
The Somewhat Long Name Of The Test S1Test 06 1
The Somewhat Long Name Of The Test S1Test 07 1
The Somewhat Long Name Of The Test S1Test 08 1
The Somewhat Long Name Of The Test S1Test 09 1
The Somewhat Long Name Of The Test S1Test 10 1
The Somewhat Long Name Of The Test S1Test 11 1
The Somewhat Long Name Of The Test S1Test 12 1
*** Keywords ***
Test1
[Arguments] ${arg}
Log Test
""",
"""
{
--test Test.The Somewhat Long Name Of The Test S1Test 01
--test Test.The Somewhat Long Name Of The Test S1Test 02
--test Test.The Somewhat Long Name Of The Test S1Test 03
--test Test.The Somewhat Long Name Of The Test S1Test 04
--test Test.The Somewhat Long Name Of The Test S1Test 05
--test Test.The Somewhat Long Name Of The Test S1Test 06
}
{
--test Test.The Somewhat Long Name Of The Test S1Test 07
--test Test.The Somewhat Long Name Of The Test S1Test 08
--test Test.The Somewhat Long Name Of The Test S1Test 09
--test Test.The Somewhat Long Name Of The Test S1Test 10
--test Test.The Somewhat Long Name Of The Test S1Test 11
--test Test.The Somewhat Long Name Of The Test S1Test 12
}
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
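# --- Illustrative note (editor's addition, not part of the original test file) ---
# The order.dat contents used above follow pabot's --ordering file syntax:
# each "--test <long name>" line names one item to schedule, and a "{ ... }"
# block groups items so they are run together, in the given order, by a single
# worker. For example:
#
#     {
#     --test Test.First Test
#     --test Test.Second Test
#     }
#     --test Test.Third Test
#
# keeps the first two tests in one process (so the suite variable set in the
# first test is still visible in the second), while the third test may run in
# parallel elsewhere.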
|