| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
def create_cli(ctx):
"""
Create dataset in a CKAN instance.
"""
create(
ctx.obj['CKAN_HOST'],
ctx.obj['CKAN_KEY'],
ctx.obj['DATAPACKAGE'],
ctx.obj['DATASTORE'],
ctx.obj['EXIT_CODE'],
) | 5,353,300 |
def divideArray(array, factor):
    """Split the array into #factor sub-arrays; every sub-array has the same number of elements except the last one."""
    factor = min(factor, len(array))
    length = floor(len(array) * 1.0 / factor)
    res = []
    for i in range(factor - 1):
        res = res + list([array[i * length:(i + 1) * length]])
    return list(res + list([array[length * (factor - 1):]])) | 5,353,301 |
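A quick usage sketch for divideArray (assuming `from math import floor` is in scope, since the function relies on `floor`):

parts = divideArray([1, 2, 3, 4, 5, 6, 7], 3)
print(parts)  # [[1, 2], [3, 4], [5, 6, 7]] -- the remainder goes into the last sub-array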
async def open(command):
"""Open the shutter."""
command.info(text="Opening the shutter!")
# Here we would implement the actual communication
# with the shutter hardware.
command.finish(shutter="open")
return | 5,353,302 |
def argMax(scores):
    """
    Returns the key with the highest value.
    """
    if len(scores) == 0: return None
    all_items = list(scores.items())  # materialize the view so it can be indexed (Python 3)
    values = [x[1] for x in all_items]
    maxIndex = values.index(max(values))
    return all_items[maxIndex][0] | 5,353,303 |
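Illustrative calls for argMax, assuming a plain dict of scores:

scores = {'a': 1.0, 'b': 3.5, 'c': 2.0}
print(argMax(scores))  # 'b'
print(argMax({}))      # None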
def rotateright(arr, k) -> list:
    """
    Rotate the array right side k number of times (in place).
    """
    # Requires: from math import gcd
    n = len(arr)
    if n == 0:
        return arr
    k %= n
    if k == 0:
        return arr
    # Juggling rotation: walk each of the gcd(n, k) cycles, moving every
    # element k positions to the right.
    for start in range(gcd(n, k)):
        temp = arr[start]
        poi = start
        while True:
            poi = (poi + k) % n
            arr[poi], temp = temp, arr[poi]
            if poi == start:
                break
    return arr | 5,353,304 |
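Example call for the in-place rotation (hypothetical values; assumes `from math import gcd` is in scope, as noted in the function):

print(rotateright([1, 2, 3, 4, 5], 2))  # [4, 5, 1, 2, 3]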
def get_video_chunk_path(instance, filename):
    """
    Get the path to store a video chunk.
    The path will be of the format: project_id/chunks/chunk_no.mp4
    """
    if (not instance.project_id) or (not instance.chunk_no):
        raise ValidationError('Invalid project ID or chunk number')
    return os.path.join(str(instance.project_id) +
                        '/chunks/' +
                        str(instance.chunk_no) +
                        '.mp4') | 5,353,305 |
def scrap_page(targetURL):
    """ Scrapes a single URL for sentences, downloading the audio files """
# Extract post title from URL and use as .csv file name
name = targetURL.split("/")[3]
with open(f"csv/{name}.csv", "w+", encoding="utf8") as card:
# Headers for the GET request so it doesn't get easily rejected
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"
}
req = requests.get(targetURL, headers=headers)
if req.status_code == 200:
print("Successful GET request!")
# Initialize lists for later appending
englishSentences = list()
portugueseSentences = list()
audiosURLs = list()
audioFilenames = list()
content = req.content
html = BeautifulSoup(content, "html.parser")
# Extract sentences
div = html.find("div", class_="post-content")
# Sentences are under this div
# divStrings = "".join(list(map(str, div)))
# print(divStrings)
sentencesRegex = (
r"(?:<p.*?>(.*?)<em>(.*?)<br><span.*?><audio .*?src=(\".*?\"))"
)
findSentences = re.findall(
sentencesRegex, "".join(list(map(str, div))), re.MULTILINE
)
for matches in findSentences:
englishSentences.append(matches[0])
portugueseSentences.append(matches[1])
audiosURLs.append(matches[2])
del findSentences
audiosURLs = list(
# Add domain to audio URLs
map(lambda url: f"https://blog.influx.com.br{url}", audiosURLs)
)
portugueseSentences = list(
# Clean and format Portuguese
map(format_portuguese_sentence, portugueseSentences)
)
englishSentences = list(
# Clean and format English
map(format_english_sentence, englishSentences)
)
# Download audios
for url in audiosURLs:
print(f"Downloading {url}")
localFilename = download_file(url)
audioFilenames.append(localFilename)
            if not (len(portugueseSentences) == len(englishSentences) == len(audioFilenames)):
print(
f"""Lists don't have all the same length. Output may be compromised.
- in '{name}':
({len(englishSentences)} english sentences, {len(portugueseSentences)} portuguese sentences, {len(audioFilenames)} audio files.)"""
)
cardInfos = zip(englishSentences, portugueseSentences, audioFilenames)
for sentence in cardInfos:
card.write(
f"{sentence[0]}\t{sentence[1]}\t[sound:{sentence[2]}]\tenglish_influx\n"
) # Use TAB as separator
else:
print("Failed GET request.") | 5,353,306 |
def create_reverse_routes(rt):
"""Function to reverse TSP routes"""
if rt.tsp_solver_status == 1:
for key, value in rt.stop_order_dict.items():
if value != 0:
rt.stop_order_dict[key] = rt.num_stops - value | 5,353,307 |
def main(cfg: Config) -> None:
"""Main function."""
data_cfg = cfg.data
if data_cfg.dataset == DS.adult:
print("Using the adult dataset.")
elif data_cfg.dataset == DS.cmnist:
print("Using CMNIST.")
if cfg.misc.use_wandb:
print("Starting W&B.")
args_as_dict = flatten(OmegaConf.to_container(cfg, resolve=True)) # convert to dictionary
print("==========================\nAll args as dictionary:")
print(args_as_dict) | 5,353,308 |
def draw_string(turtle):
"""
This function draws the strings to the guitar
:param turtle: The name of the turtle
:return: None
"""
turtle.penup()
length = 35
turtle.setpos(60, -45)
for i in range(5):
turtle.penup()
turtle.left(90)
turtle.pendown()
turtle.forward(300)
turtle.left(90)
turtle.penup()
length = length - 6
turtle.forward(length - 6) | 5,353,309 |
def switch_to_sink(pulse, sink):
"""Switch output to the given sink."""
# set the default sink
pulse.default_set(sink)
# move existing streams over to the sink
for sink_input in pulse.sink_input_list():
pulse.sink_input_move(sink_input.index, sink.index) | 5,353,310 |
def get_ratio(numerator, denominator):
"""Get ratio from numerator and denominator."""
return (
0 if not denominator else round(float(numerator or 0) / float(denominator), 2)
) | 5,353,311 |
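Worked examples of get_ratio, including the zero-denominator and missing-numerator guards:

print(get_ratio(1, 3))     # 0.33
print(get_ratio(5, 0))     # 0
print(get_ratio(None, 4))  # 0.0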
def create_app():
"""
Create a Flask application for face alignment
Returns:
flask.Flask -> Flask application
"""
app = Flask(__name__)
model = setup_model()
app.config.from_mapping(MODEL=model)
@app.route("/", methods=["GET"])
def howto():
instruction = (
"Send POST request to /align to fix face orientation in input image"
"\nex."
"\n\tcurl -X POST -F 'image=@/path/to/face.jpg' --output output.jpg localhost:5000/align"
)
return instruction
@app.route("/align", methods=["POST"])
def align():
data = request.files["image"]
img_str = data.read()
        nparr = np.frombuffer(img_str, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
faces = model.detect(img)
if len(faces) == 0:
return "No face found. Try again", 400
elif len(faces) > 1:
return "Too many faces found. Try again", 400
else:
face = faces[0]
rotated_image = rotate_bound(img, face.angle)
# Encode image
is_completed, buf = cv2.imencode(".jpg", rotated_image)
if not is_completed:
return "Unexpected encoding error. Try again", 400
            byte_buffer = io.BytesIO(buf.tobytes())
return send_file(
byte_buffer,
"image/jpeg",
as_attachment=True,
attachment_filename="output.jpg",
)
return app | 5,353,312 |
def Red(n=1.0):
"""Change to red color"""
glColor3fv((n, 0.0, 0.0)) | 5,353,313 |
def test_load_images_and_stack_2d_random(test_output_dirs: OutputFolderForTests) -> None:
"""
Test load of 2D images
"""
image_size = (20, 30)
low = 0
high = 200
array1 = np.random.randint(low=low, high=high, size=image_size, dtype='uint16')
write_test_dicom(array1, test_output_dirs.root_dir / "file1.dcm")
array2 = np.random.randint(low=low, high=high, size=image_size, dtype='uint16')
write_test_dicom(array2, test_output_dirs.root_dir / "file2.dcm")
array3 = np.random.randint(low=low, high=high, size=image_size, dtype='uint16')
write_test_dicom(array3, test_output_dirs.root_dir / "file3.dcm")
expected_tensor = torch.from_numpy(np.expand_dims(np.stack([array1, array2, array3]).astype(float), axis=1))
file_list = [test_output_dirs.root_dir / f"file{i}.dcm" for i in range(1, 4)]
imaging_data = load_images_and_stack(file_list,
load_segmentation=False,
image_size=(1,) + image_size)
assert len(imaging_data.images.shape) == 4
assert imaging_data.images.shape[0] == 3
assert imaging_data.images.shape[1] == 1
assert imaging_data.images.shape[2:] == image_size
assert torch.allclose(imaging_data.images, expected_tensor) | 5,353,314 |
def grad_clip(x:Tensor) -> Tensor:
"""
Clips too big and too small gradients.
Example::
grad = grad_clip(grad)
Args:
x(:obj:`Tensor`): Gradient with too large or small values
Returns:
        :obj:`Tensor`: Clipped gradient
"""
x[x>5] = 5
x[x<-5] = -5
return x | 5,353,315 |
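A short sketch of the clipping behaviour, assuming Tensor here is torch.Tensor (boolean-mask assignment works the same way):

import torch

grad = torch.tensor([-10.0, -2.0, 0.0, 3.0, 12.0])
print(grad_clip(grad))  # tensor([-5., -2.,  0.,  3.,  5.])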
def unfold_kernel(kernel):
"""
In pytorch format, kernel is stored as [out_channel, in_channel, height, width]
    Unfold the kernel into 2-dimensional weights: [in_channel * height * width, out_channel]
:param kernel: numpy ndarray
:return:
"""
k_shape = kernel.shape
weight = np.zeros([k_shape[1] * k_shape[2] * k_shape[3], k_shape[0]])
for i in range(k_shape[0]):
weight[:, i] = np.reshape(kernel[i, :, :, :], [-1])
return weight | 5,353,316 |
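Shape check for unfold_kernel with a random NumPy kernel:

import numpy as np

kernel = np.random.rand(8, 3, 5, 5)  # [out_channel, in_channel, height, width]
weight = unfold_kernel(kernel)
print(weight.shape)  # (75, 8), i.e. (in_channel * height * width, out_channel)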
def write(message: str, color: str = None) -> None:
""" Write message. """
logger.write(message, color) | 5,353,317 |
def _parse_polyline_locations(locations, max_n_locations):
"""Parse and validate locations in Google polyline format.
The "locations" argument of the query should be a string of ascii characters above 63.
Args:
locations: The location query string.
max_n_locations: The max allowable number of locations, to keep query times reasonable.
Returns:
lats: List of latitude floats.
lons: List of longitude floats.
Raises:
ClientError: If too many locations are given, or if the location string can't be parsed.
"""
# The Google maps API prefixes their polylines with 'enc:'.
if locations and locations.startswith("enc:"):
locations = locations[4:]
try:
latlons = polyline.decode(locations)
    except Exception as e:
        msg = "Unable to parse locations as polyline."
        raise ClientError(msg) from e
    # Polyline decoding results in a list of (lat, lon) tuples.
lats = [p[0] for p in latlons]
lons = [p[1] for p in latlons]
# Check number.
n_locations = len(lats)
if n_locations > max_n_locations:
msg = f"Too many locations provided ({n_locations}), the limit is {max_n_locations}."
raise ClientError(msg)
return lats, lons | 5,353,318 |
def dense(data, weight, bias=None, out_dtype=None):
"""The default implementation of dense in topi.
Parameters
----------
data : tvm.Tensor
2-D with shape [batch, in_dim]
weight : tvm.Tensor
2-D with shape [out_dim, in_dim]
bias : tvm.Tensor, optional
1-D with shape [out_dim]
out_dtype : str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.Tensor
2-D with shape [batch, out_dim]
"""
assert len(data.shape) == 2 and len(weight.shape) == 2, \
"only support 2-dim dense"
if bias is not None:
assert len(bias.shape) == 1
if out_dtype is None:
out_dtype = data.dtype
batch, in_dim = data.shape
out_dim, _ = weight.shape
k = tvm.reduce_axis((0, in_dim), name='k')
matmul = tvm.compute((batch, out_dim), \
lambda i, j: tvm.sum(data[i, k].astype(out_dtype) * \
weight[j, k].astype(out_dtype), axis=k), \
name='T_dense', tag='dense')
if bias is not None:
matmul = tvm.compute((batch, out_dim), \
lambda i, j: matmul[i, j] + bias[j].astype(out_dtype), \
tag=tag.BROADCAST)
return matmul | 5,353,319 |
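The compute rule above is an ordinary matrix product against the transposed weight; a NumPy sketch of the same arithmetic (for reference only, not using TVM):

import numpy as np

data = np.random.rand(4, 16)    # [batch, in_dim]
weight = np.random.rand(8, 16)  # [out_dim, in_dim]
bias = np.random.rand(8)        # [out_dim]
out = data @ weight.T + bias    # same reduction over k as the tvm.sum above
print(out.shape)                # (4, 8) == [batch, out_dim]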
def choice(*choices: T, default: Union[T, _MISSING_TYPE] = MISSING, **kwargs: Any) -> T:
"""Makes a field which can be chosen from the set of choices from the
command-line.
Returns a regular `dataclasses.field()`, but with metadata which indicates
the allowed values.
(New:) If `choices` is a dictionary, then passing the 'key' will result in
the corresponding value being used. The values may be objects, for example.
    Similarly, for Enum types, passing the enum class will make the member names the
    available choices, with the corresponding enum members used as values.
Args:
default (T, optional): The default value of the field. Defaults to dataclasses.MISSING,
in which case the command-line argument is required.
Raises:
ValueError: If the default value isn't part of the given choices.
Returns:
T: the result of the usual `dataclasses.field()` function (a dataclass field/attribute).
"""
assert len(choices) > 0, "Choice requires at least one positional argument!"
if len(choices) == 1:
choices = choices[0]
if inspect.isclass(choices) and issubclass(choices, Enum):
# If given an enum, construct a mapping from names to values.
choice_enum: Type[Enum] = choices
choices = OrderedDict((e.name, e) for e in choice_enum)
if default is not MISSING and not isinstance(default, choice_enum):
if default in choices:
warnings.warn(
UserWarning(
f"Setting default={default} could perhaps be ambiguous "
f"(enum names vs enum values). Consider using the enum "
f"value {choices[default]} instead."
)
)
default = choices[default]
else:
raise ValueError(
f"'default' arg should be of type {choice_enum}, but got {default}"
)
if isinstance(choices, dict):
# if the choices is a dict, the options are the keys
# save the info about the choice_dict in the field metadata.
metadata = kwargs.setdefault("metadata", {})
choice_dict = choices
# save the choice_dict in metadata so that we can recover the values in postprocessing.
metadata["choice_dict"] = choice_dict
choices = list(choice_dict.keys())
# TODO: If the choice dict is given, then add encoding/decoding functions that just
# get/set the right key.
def _encoding_fn(value: Any) -> str:
"""Custom encoding function that will simply represent the value as the
the key in the dict rather than the value itself.
"""
if value in choice_dict.keys():
return value
elif value in choice_dict.values():
return [k for k, v in choice_dict.items() if v == value][0]
return value
kwargs.setdefault("encoding_fn", _encoding_fn)
def _decoding_fn(value: Any) -> str:
"""Custom decoding function that will retrieve the value from the
stored key in the dictionary.
"""
return choice_dict.get(value, value)
kwargs.setdefault("decoding_fn", _decoding_fn)
return field(default=default, choices=choices, **kwargs) | 5,353,320 |
def __set_config(key: str, value: Any):
""" Update config entry. You should not use this function in your code, once the cfg is set up it should not be changed again!
Args:
key: name of config entry
value: new value
"""
print('update ', (key, value), ' config')
setattr(cfg, key, value) | 5,353,321 |
def modulelink(module, baseurl=''):
"""Hyperlink to a module, either locally or on python.org"""
if module+'.py' not in local_files:
baseurl = 'http://www.python.org/doc/current/lib/module-'
return link(baseurl+module+'.html', module) | 5,353,322 |
def test_view_translate_not_authed_public_project(client, locale0,
settings_debug):
"""
If the user is not authenticated and we're translating project
ID 1, return a 200.
"""
# Clear out existing project with ID=1 if necessary.
Project.objects.filter(id=1).delete()
project = Project.objects.create(id=1, slug='valid-project')
ProjectLocale.objects.create(
project=project, locale=locale0)
resource = Resource.objects.create(
project=project,
path='foo.lang',
total_strings=1)
TranslatedResource.objects.create(
resource=resource, locale=locale0)
response = client.get(
'/%s/%s/%s/'
% (locale0.code, project.slug, resource.path))
assert response.status_code == 200 | 5,353,323 |
def print_copy_method(struct_name, field_count, struct_members):
"""Generate copy function for joint data structure."""
print("struct joint_{0} *joint_{0}_copy(void *(*mem_allocator)(size_t), struct joint_{0} *lhs, struct joint_{0} *rhs) {{".format(struct_name))
print(" if (lhs == rhs) {")
print(" return rhs;")
print(" }")
print(" assert(((void) \"lhs cannot be null\", lhs != NULL));")
print(" assert(((void) \"rhs cannot be null\", rhs != NULL));")
# Copy memory_block from lhs to rhs
print("\n if (lhs->memory_block != rhs->memory_block) {")
print(" rhs->memory_block = mem_allocator(lhs->offset[{0}]);".format(field_count - 1))
print(" if (rhs->memory_block == NULL) {")
print(" return NULL;")
print(" }")
print(" memcpy(rhs->memory_block, lhs->memory_block, lhs->offset[{0}]);".format(field_count - 1))
print(" }")
# Copy offsets from lhs to rhs
print("\n rhs->array_count = lhs->array_count;\n")
for (i, field) in enumerate(struct_members):
print(" rhs->offset[{0}] = lhs->offset[{0}];".format(i))
print()
for (i, field) in enumerate(struct_members):
if i == 0:
print(" rhs->{0} = ({1} *)(rhs->memory_block);".format(field['name'], field['type']))
else:
print(" rhs->{0} = ({1} *)((char *)(rhs->memory_block) + (rhs->offset[{2}]));".format(field['name'], field['type'], i - 1))
print("\n return rhs;")
print("}\n") | 5,353,324 |
def cleanup_command(
__cli_options=False,
artifactsdir=defaults['artifacts_dir'], # Where dexy should store working files.
logdir=defaults['log_dir'], # DEPRECATED
reports=True # Whether directories generated by reports should also be removed.
):
"""
Remove the directories which dexy created, including working directories
and reports.
"""
wrapper = init_wrapper(locals())
wrapper.remove_dexy_dirs()
wrapper.remove_reports_dirs(reports) | 5,353,325 |
def add_reference(
*, short_purpose: str, reference: Optional[str] = None, doi: Optional[str] = None
) -> Callable:
"""Decorator to link a reference to a function or method.
    Acts as a marker in code where particular algorithms/data/... originate.
General execution of code silently passes these markers, but remembers how and where
they were called. Which markers were passed in a particular program run
can be recalled with `print(BIBLIOGRAPHY)`.
One and only one method for providing the reference is allowed.
Args:
short_purpose (str): Identify the thing being referenced.
reference (Optional, str): The reference itself, as a plain text string.
doi (Optional, str): DOI of the reference.
Returns:
The decorated function.
"""
if reference and doi:
raise ValueError("Only one method for providing the reference is allowed.")
elif reference:
ref = reference
elif doi:
ref = doi if "doi.org" in doi else f"https://doi.org/{doi}"
else:
raise ValueError("No reference information provided!")
@wrapt.decorator(enabled=lambda: BIBLIOGRAPHY.track_references)
def wrapper(wrapped, instance, args, kwargs):
source = inspect.getsourcefile(wrapped)
line = inspect.getsourcelines(wrapped)[1]
identifier = f"{source}:{line}"
if identifier in BIBLIOGRAPHY and ref in BIBLIOGRAPHY[identifier].references:
return wrapped(*args, **kwargs)
if identifier not in BIBLIOGRAPHY:
BIBLIOGRAPHY[identifier] = FunctionReference(
wrapped.__name__, line, source, [], []
)
BIBLIOGRAPHY[identifier].short_purpose.append(short_purpose)
BIBLIOGRAPHY[identifier].references.append(ref)
return wrapped(*args, **kwargs)
return wrapper | 5,353,326 |
def test_autoimport_list(mocker, credentials):
"""Test list autoimport jobs being outputed to the shell."""
runner = CliRunner()
autoimport_response = {
"meta": {"count": 2, "next": None, "previous": None},
"results": [
{
"id": "41fd0397-62b0-4ef9-992f-423435b5d5ef",
"project_id": "290e0c5a-c87e-474e-8b32-fe56fc54cc4d",
"identifier": "identifier-in-basespace-project-name",
"metadata": None,
},
{
"id": "0f60ab5e-a34f-4afc-a428-66f81890565f",
"project_id": "290e0c5a-c87e-474e-8b32-fe56fc54cc4d",
"identifier": "Dummy",
"metadata": {},
},
],
}
mocked_list_basespace_autoimport_jobs = mocker.patch.object(
APIClient,
"list_basespace_autoimport_jobs",
return_value=BaseSpaceProjectImport(**autoimport_response),
)
res = runner.invoke(autoimport_list, credentials)
assert res.exit_code == 0
mocked_list_basespace_autoimport_jobs.assert_called()
autoimport_jobs = autoimport_response["results"]
jobs = "\n".join(
[
"\t".join(
[
job["id"],
job["project_id"],
job["identifier"],
]
)
for job in autoimport_jobs
]
)
assert f"{jobs}\n" == res.output | 5,353,327 |
def get_capital_ptd_act():
"""Get chart of accounts from shared drive."""
logging.info('Retrieving latest CIP project to date')
command = "smbclient //ad.sannet.gov/dfs " \
+ "--user={adname}%{adpass} -W ad -c " \
+ "'prompt OFF;" \
+ " cd \"FMGT-Shared/Shared/BUDGET/" \
+ "Open Data/Open Data Portal/" \
+ "Shared with Performance and Analytics/" \
+ "Actuals/Capital/P-T-D/\";" \
+ " lcd \"/data/temp/\";" \
+ " mget FY*ACTUALS.xlsx;'"
command = command.format(adname=conf['alb_sannet_user'],
adpass=conf['alb_sannet_pass'],
temp_dir=conf['temp_data_dir'])
logging.info(command)
try:
p = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
return p
except subprocess.CalledProcessError as e:
return e.output | 5,353,328 |
def eval_curvature(poly, x_vals):
"""
This function returns a vector with the curvature based on path defined by `poly`
evaluated on distance vector `x_vals`
"""
# https://en.wikipedia.org/wiki/Curvature# Local_expressions
def curvature(x):
a = abs(2 * poly[1] + 6 * poly[0] * x) / (1 + (3 * poly[0] * x**2 + 2 * poly[1] * x + poly[2])**2)**(1.5)
return a
return np.vectorize(curvature)(x_vals) | 5,353,329 |
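The expression inside curvature() is |y''| / (1 + y'**2)**1.5 for the cubic y = poly[0]*x**3 + poly[1]*x**2 + poly[2]*x + poly[3]. A hypothetical check at the vertex of a parabola:

import numpy as np

poly = [0.0, 0.5, 0.0, 0.0]  # y = 0.5 * x**2
print(eval_curvature(poly, np.array([0.0])))  # [1.] -- curvature equals |y''| where the slope is zero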
def calculate_folder_size(path, _type="mb") -> float:
"""Return the size of the given path in MB, bytes if wanted"""
p1 = subprocess.Popen(["du", "-sb", path], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["awk", "{print $1}"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close() # type: ignore
byte_size = 0.0
byte_size = float(p2.communicate()[0].decode("utf-8").strip())
if _type == "bytes":
return byte_size
else:
return byte_to_mb(byte_size) | 5,353,330 |
def templates():
"""Return all of the templates and settings."""
return settings | 5,353,331 |
def add_license(license_uri, based_on_uri, version, jurisdiction,
legalcode_uri, rdf_dir, license_code):
"""Create a new license based on an existing one. Write the resulting
graph to the rdf_dir."""
# make sure the license_uri ends with a slash
if license_uri[-1] != '/':
license_uri += '/'
# create the graph for the new license
license = graph()
if based_on_uri:
# we're starting from an existing license
# load the based on graph
based_on = load_graph(license_rdf_filename(based_on_uri, rdf_dir))
# copy base assertions
for (p, o) in based_on.predicate_objects(URIRef(based_on_uri)):
license.add((URIRef(license_uri), p, o))
replace_predicate(
license, URIRef(license_uri), NS_DC.source,
URIRef(based_on_uri))
# Record the existing FOAF:logos for reference
old_logos = [
result[2].split('/')[-1]
for result in license.triples(
(URIRef(license_uri), NS_FOAF.logo, None))]
# Add the FOAF:logos
license.remove(
(URIRef(license_uri), NS_FOAF.logo, None))
# Images get put into /l/ or /p/ depending on whether they are
# /licenses/ or /publicdomain/ respectively...
group_letter = urllib.parse.urlparse(license_uri)[2].lstrip('/')[0]
for old_logo in old_logos:
# http://i.creativecommons.org/l/by/3.0/88x31.png
logo_url = "http://i.creativecommons.org/%s/%s/%s/" % (
group_letter, license_code, version)
if jurisdiction:
logo_url += jurisdiction + "/"
image_name = old_logo.split('/')[-1]
logo_url += image_name
license.add(
((URIRef(license_uri), NS_FOAF.logo,
URIRef(logo_url))))
else:
# add the basic framework -- this is a license
license.add((URIRef(license_uri), NS_RDF.type, NS_CC.License))
# add the jurisdiction, version, source
if jurisdiction is not None:
jurisdiction_url = "http://creativecommons.org/international/%s/" % (
jurisdiction)
replace_predicate(license, URIRef(license_uri), NS_CC.jurisdiction,
URIRef(jurisdiction_url))
else:
# unported; remove any jurisdiction assertion
license.remove((URIRef(license_uri), NS_CC.jurisdiction, None))
# set/replace the version
replace_predicate(license, URIRef(license_uri), NS_DCQ.hasVersion,
Literal(version))
# determine the legalcode URI
if legalcode_uri is None:
legalcode_uri = license_uri + "legalcode"
# add the legalcode predicate
replace_predicate(license, URIRef(license_uri), NS_CC.legalcode,
URIRef(legalcode_uri))
# Add the x-i18n private use subtag (RFC 5646 Language Tag)
replace_predicate(
license, URIRef(license_uri), NS_DC['title'],
Literal(gen_license_i18n_title(license_code, version, jurisdiction),
lang="x-i18n"))
translate_graph(license)
# write the graph out
save_graph(license, license_rdf_filename(license_uri, rdf_dir)) | 5,353,332 |
def get_semitones(interval_tuplet):
"""
Takes an interval tuplet of the form returned by get_interval()
Returns an int representing the semitones within the interval.
"""
return mintervals.semitones_from_shorthand(interval_tuplet[0]) + 12*interval_tuplet[1] | 5,353,333 |
def test_fieldset_sample(fieldset, xdim=120, ydim=80):
""" Sample the fieldset using indexing notation. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([fieldset.V[0, 0., 70., x] for x in lon])
u_s = np.array([fieldset.U[0, 0., y, -45.] for y in lat])
assert np.allclose(v_s, lon, rtol=1e-7)
assert np.allclose(u_s, lat, rtol=1e-7) | 5,353,334 |
def svds(a, k=6, *, ncv=None, tol=0, which='LM', maxiter=None,
return_singular_vectors=True):
"""Finds the largest ``k`` singular values/vectors for a sparse matrix.
Args:
a (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): A real or complex
array with dimension ``(m, n)``
k (int): The number of singular values/vectors to compute. Must be
``1 <= k < min(m, n)``.
ncv (int): The number of Lanczos vectors generated. Must be
``k + 1 < ncv < min(m, n)``. If ``None``, default value is used.
tol (float): Tolerance for singular values. If ``0``, machine precision
is used.
which (str): Only 'LM' is supported. 'LM': finds ``k`` largest singular
values.
maxiter (int): Maximum number of Lanczos update iterations.
If ``None``, default value is used.
return_singular_vectors (bool): If ``True``, returns singular vectors
in addition to singular values.
Returns:
tuple:
If ``return_singular_vectors`` is ``True``, it returns ``u``, ``s``
and ``vt`` where ``u`` is left singular vectors, ``s`` is singular
values and ``vt`` is right singular vectors. Otherwise, it returns
only ``s``.
.. seealso:: :func:`scipy.sparse.linalg.svds`
.. note::
This is a naive implementation using cupyx.scipy.sparse.linalg.eigsh as
an eigensolver on ``a.H @ a`` or ``a @ a.H``.
"""
if a.ndim != 2:
raise ValueError('expected 2D (shape: {})'.format(a.shape))
if a.dtype.char not in 'fdFD':
        raise TypeError('unsupported dtype (actual: {})'.format(a.dtype))
m, n = a.shape
if k <= 0:
raise ValueError('k must be greater than 0 (actual: {})'.format(k))
if k >= min(m, n):
raise ValueError('k must be smaller than min(m, n) (actual: {})'
''.format(k))
aH = a.conj().T
if m >= n:
aa = aH @ a
else:
aa = a @ aH
if return_singular_vectors:
w, x = eigsh(aa, k=k, which=which, ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=True)
else:
w = eigsh(aa, k=k, which=which, ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=False)
w = cupy.maximum(w, 0)
t = w.dtype.char.lower()
factor = {'f': 1e3, 'd': 1e6}
cond = factor[t] * numpy.finfo(t).eps
cutoff = cond * cupy.max(w)
above_cutoff = (w > cutoff)
n_large = above_cutoff.sum()
s = cupy.zeros_like(w)
s[:n_large] = cupy.sqrt(w[above_cutoff])
if not return_singular_vectors:
return s
x = x[:, above_cutoff]
if m >= n:
v = x
u = a @ v / s[:n_large]
else:
u = x
v = aH @ u / s[:n_large]
u = _augmented_orthnormal_cols(u, k - n_large)
v = _augmented_orthnormal_cols(v, k - n_large)
return u, s, v.conj().T | 5,353,335 |
def register_mongodb(app: Flask) -> Flask:
"""Instantiates database and initializes collections."""
config = app.config
# Instantiate PyMongo client
mongo = create_mongo_client(app=app, config=config)
# Add database
db = mongo.db[get_conf(config, "database", "name")]
# Add database collection for '/service-info'
collection_service_info = mongo.db["service-info"]
# Add database collection for '/data_objects'
collection_data_objects = mongo.db["data_objects"]
collection_data_objects.create_index([("id", ASCENDING)], unique=True, sparse=True)
# Add database to app config
config["database"]["drs_db"] = collection_data_objects
config["database"]["service_info"] = collection_service_info
app.config = config
return app | 5,353,336 |
def epflux_boa(netcdf = False):
"""
Input boa variables U, V, T
    Output boa variable. Save to a netcdf file if netcdf is True.
"""
pass | 5,353,337 |
def assert_is_compatible(schema, required_args, optional_args):
"""Raises a :exc:`~cosmic.exceptions.SpecError` if function argument spec
(as returned by :func:`get_args`) is incompatible with the given schema.
By incompatible, it is meant that there exists such a piece of data that
is valid according to the schema, but that could not be applied to the
function by :func:`apply_to_func`.
"""
# No arguments
if len(required_args + optional_args) == 0:
raise SpecError("Function needs to accept arguments")
# One argument can accept anything
if len(required_args + optional_args) == 1:
return
# Multiple arguments means schema must be Struct
if not isinstance(schema, Struct):
raise SpecError("For a function that takes arguments, accepts schema"
" is expected to be a Struct")
# Each non-keyword argument in the function must have a corresponding
# required field in the schema
for r in required_args:
if r not in schema.param.keys() or not schema.param[r]["required"]:
raise SpecError("Action argument '%s' must have a corresponding"
" required field in the accepts schema" % r)
# All fields in the schema must have a corresponding function argument
for f in schema.param.keys():
if f not in set(required_args + optional_args):
raise SpecError("The '%s' field must have a corresponding"
" function argument" % f) | 5,353,338 |
def look(table, limit=0, vrepr=None, index_header=None, style=None,
truncate=None, width=None):
"""
Format a portion of the table as text for inspection in an interactive
session. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 1],
... ['b', 2]]
>>> etl.look(table1)
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | 1 |
+-----+-----+
| 'b' | 2 |
+-----+-----+
>>> # alternative formatting styles
... etl.look(table1, style='simple')
=== ===
foo bar
=== ===
'a' 1
'b' 2
=== ===
>>> etl.look(table1, style='minimal')
foo bar
'a' 1
'b' 2
>>> # any irregularities in the length of header and/or data
... # rows will appear as blank cells
... table2 = [['foo', 'bar'],
... ['a'],
... ['b', 2, True]]
>>> etl.look(table2)
+-----+-----+------+
| foo | bar | |
+=====+=====+======+
| 'a' | | |
+-----+-----+------+
| 'b' | 2 | True |
+-----+-----+------+
Three alternative presentation styles are available: 'grid', 'simple' and
'minimal', where 'grid' is the default. A different style can be specified
using the `style` keyword argument. The default style can also be changed
by setting ``petl.config.look_style``.
"""
# determine defaults
if limit == 0:
limit = config.look_limit
if vrepr is None:
vrepr = config.look_vrepr
if index_header is None:
index_header = config.look_index_header
if style is None:
style = config.look_style
if width is None:
width = config.look_width
return Look(table, limit=limit, vrepr=vrepr, index_header=index_header,
style=style, truncate=truncate, width=width) | 5,353,339 |
def refresh_devices(config, cache_path):
"""Refresh devices from configuration received"""
global DEBUG, m_devices, Device_Cache
if DEBUG: print("DEBUG: Refreshing device database")
print_progress("Refresh devices")
try:
m_devices = config['devices']
    except KeyError:
print("ERROR: No device found in config!!")
return None
else:
try:
filep = open(cache_path, 'w')
        except OSError:
print("ERROR: Cannot write to device cache %s" % cache_path)
else:
filep.write(ujson.dumps(m_devices))
filep.close()
if DEBUG: print("DEBUG: Written device DB to cache")
return m_devices | 5,353,340 |
def test_visiting_the_site():
"""Visiting the site.""" | 5,353,341 |
def listElements(server, elements, filesToGet):
"""
Description:
        Function to print the list of elements from a remote ssh server.
        Selected elements, those that are in filesToGet, are highlighted in red.
    Parameters:
        - server: the name of the server.
        - elements: list of elements to print out.
        - filesToGet: list of indexes of the selected files (highlighted in red).
Return: nothing
"""
print("\n[+] Available elements on " + server + ":")
for i, element in enumerate(elements):
if i in filesToGet:
print("\x1b[101m{0:4}{1:4}{2}\x1b[0m".format(str(i), ":", element))
else:
if i % 2 == 0:
print("\x1b[100m{0:4}{1:4}{2}\x1b[0m".format(str(i), ":", element))
else:
print("{0:4}{1:4}{2}".format(str(i), ":", element)) | 5,353,342 |
def warn_vars_naming_style(messages, line, style):
""" Check whether varibales and function argumens fit the naming rule."""
naming_style_name = style.Get('CHECK_VAR_NAMING_STYLE')
if not naming_style_name:
return
def is_expr(uwl):
return (uwl.tokens
and _find_parent(uwl.first.node, None, [syms.expr_stmt]))
def is_assignment(uwl):
return (is_expr(uwl)
and next(filter(lambda t: t.is_name, uwl.tokens), None))
def get_lhs_tokens(uwl):
root = _find_parent(uwl.first.node, None, [syms.expr_stmt])
lvalues = _FindLValues(root).lvalues
for tok in uwl.tokens:
if tok.name == 'EQUAL':
break
if tok.is_name and id(tok.node) in lvalues:
chain = lvalues[id(tok.node)]
if (len(chain) == 1
or (len(chain) == 2 and chain[0] == 'self')):
yield tok
def iter_token_range(first, last):
while True:
yield first
if first is last:
break
first = first.next_token
def iter_parameters(paramlist):
for item in paramlist:
tokens = iter_token_range(item.first_token, item.last_token)
tokens = filter(lambda t: t.name in {'NAME', 'STAR'}, tokens)
first = next(tokens, None)
if first is None:
# This is possible when a comment is added to a function
# argument (in some cases, when there is a trailing comma):
#
# def fn(arg1,
# arg2, #comment
# arg3,
# ):
# pass
#
assert item.first_token.name == 'COMMENT'
continue
if first.name == 'STAR':
yield next(tokens, first)
yield first
def get_func_args(uwl):
for tok in uwl.tokens:
if not tok.parameters:
continue
yield from iter_parameters(tok.parameters)
if is_assignment(line):
tokens = get_lhs_tokens(line)
elif line.tokens and line.is_func_definition:
tokens = get_func_args(line)
else:
return
naming_style = REGEXPS['varname'][naming_style_name]
for tok in tokens:
        # explicitly allow UPPER CASE names, because constants should be
        # named this way regardless of the naming style
if not (tok.value == 'self'
or tok.value.isupper()
or naming_style.match(tok.value)):
messages.add(tok, line.AsCode(), Warnings.VAR_NAMING_STYLE, variable=tok.value) | 5,353,343 |
def _create_sync_table_from_resource_df(
resource_df: DataFrame,
identity_columns: List[str],
resource_name: str,
sync_db: sqlalchemy.engine.base.Engine,
):
"""
Take fetched data and push to a new temporary sync table. Includes
hash and tentative extractor CreateDate/LastModifiedDates.
Parameters
----------
resource_df: DataFrame
a DataFrame with current fetched data.
identity_columns: List[str]
a List of the identity columns for the resource dataframe.
resource_name: str
the name of the API resource, e.g. "Courses", to be used in SQL
sync_db: sqlalchemy.engine.base.Engine
an Engine instance for creating database connections
"""
with sync_db.connect() as con:
# ensure sync table exists, need column ordering to be identical to regular table
con.execute(f"DROP TABLE IF EXISTS Sync_{resource_name}")
con.execute(
f"""
CREATE TABLE IF NOT EXISTS Sync_{resource_name} (
{SYNC_COLUMNS_SQL}
)
"""
)
sync_df: DataFrame = resource_df.copy()
sync_df = add_hash_and_json_to(sync_df)
# add (possibly composite) primary key, sorting for consistent ordering
add_sourceid_to(sync_df, identity_columns)
now: datetime = datetime.now()
sync_df["CreateDate"] = now
sync_df["LastModifiedDate"] = now
sync_df["SyncNeeded"] = 1
sync_df = sync_df[SYNC_COLUMNS]
sync_df.set_index("SourceId", inplace=True)
# push to temporary sync table
sync_df.to_sql(
f"Sync_{resource_name}", sync_db, if_exists="append", index=True, chunksize=1000
) | 5,353,344 |
def validate_schema(path, data, schema):
"""
Warns and returns the number of errors relating to JSON Schema validation.
Uses the `jsonschema <https://python-jsonschema.readthedocs.io/>`__ module.
:param object schema: the metaschema against which to validate
:returns: the number of errors
:rtype: int
"""
errors = 0
for error in validator(schema, format_checker=FormatChecker()).iter_errors(data):
errors += 1
warn(f"{json.dumps(error.instance, indent=2)}\n{error.message} ({'/'.join(error.absolute_schema_path)})\n",
SchemaWarning)
return errors | 5,353,345 |
def class_dict(base_module, node):
"""class_dict(base_module, node) -> dict
Returns the class dictionary for the module represented by node and
with base class base_module"""
class_dict_ = {}
def update_dict(name, callable_):
if class_dict_.has_key(name):
class_dict_[name] = callable_(class_dict_[name])
elif hasattr(base_module, name):
class_dict_[name] = callable_(getattr(base_module, name))
else:
class_dict_[name] = callable_(None)
def guarded_SimpleScalarTree_wrap_compute(old_compute):
# This builds the scalar tree and makes it cacheable
def compute(self):
self.is_cacheable = lambda *args, **kwargs: True
old_compute(self)
self.vtkInstance.BuildTree()
return compute
def guarded_SetFileName_wrap_compute(old_compute):
# This checks for the presence of file in VTK readers
def compute(self):
# Skips the check if it's a vtkImageReader or vtkPLOT3DReader, because
# it has other ways of specifying files, like SetFilePrefix for
# multiple files
skip = [vtk.vtkBYUReader,
vtk.vtkImageReader,
vtk.vtkDICOMImageReader,
vtk.vtkTIFFReader]
# vtkPLOT3DReader does not exist from version 6.0.0
v = vtk.vtkVersion()
version = [v.GetVTKMajorVersion(),
v.GetVTKMinorVersion(),
v.GetVTKBuildVersion()]
if version < [6, 0, 0]:
skip.append(vtk.vtkPLOT3DReader)
if any(issubclass(self.vtkClass, x) for x in skip):
old_compute(self)
return
if self.has_input('SetFileName'):
name = self.get_input('SetFileName')
elif self.has_input('SetFile'):
name = self.get_input('SetFile').name
else:
raise ModuleError(self, 'Missing filename')
if not os.path.isfile(name):
raise ModuleError(self, 'File does not exist')
old_compute(self)
return compute
def compute_SetDiffuseColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetDiffuseColorWidget(self, color):
self.vtkInstance.SetDiffuseColor(color.tuple)
return call_SetDiffuseColorWidget
def compute_SetAmbientColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetAmbientColorWidget(self, color):
self.vtkInstance.SetAmbientColor(color.tuple)
return call_SetAmbientColorWidget
def compute_SetSpecularColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetSpecularColorWidget(self, color):
self.vtkInstance.SetSpecularColor(color.tuple)
return call_SetSpecularColorWidget
def compute_SetColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetColorWidget(self, color):
self.vtkInstance.SetColor(color.tuple)
return call_SetColorWidget
def compute_SetEdgeColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetEdgeColorWidget(self, color):
self.vtkInstance.SetEdgeColor(color.tuple)
return call_SetEdgeColorWidget
def compute_SetBackgroundWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackgroundWidget(self, color):
self.vtkInstance.SetBackground(color.tuple)
return call_SetBackgroundWidget
def compute_SetBackground2Widget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackground2Widget(self, color):
self.vtkInstance.SetBackground2(color.tuple)
return call_SetBackground2Widget
def compute_SetVTKCell(old_compute):
if old_compute != None:
return old_compute
def call_SetRenderWindow(self, cellObj):
if cellObj.cellWidget:
self.vtkInstance.SetRenderWindow(cellObj.cellWidget.mRenWin)
return call_SetRenderWindow
def compute_SetTransferFunction(old_compute):
# This sets the transfer function
if old_compute != None:
return old_compute
def call_SetTransferFunction(self, tf):
tf.set_on_vtk_volume_property(self.vtkInstance)
return call_SetTransferFunction
def compute_SetPointData(old_compute):
if old_compute != None:
return old_compute
def call_SetPointData(self, pd):
self.vtkInstance.GetPointData().ShallowCopy(pd)
return call_SetPointData
def compute_SetCellData(old_compute):
if old_compute != None:
return old_compute
def call_SetCellData(self, cd):
self.vtkInstance.GetCellData().ShallowCopy(cd)
return call_SetCellData
def compute_SetPointIds(old_compute):
if old_compute != None:
return old_compute
def call_SetPointIds(self, point_ids):
self.vtkInstance.GetPointIds().SetNumberOfIds(point_ids.GetNumberOfIds())
for i in xrange(point_ids.GetNumberOfIds()):
self.vtkInstance.GetPointIds().SetId(i, point_ids.GetId(i))
return call_SetPointIds
def compute_CopyImportString(old_compute):
if old_compute != None:
return old_compute
def call_CopyImportVoidPointer(self, pointer):
self.vtkInstance.CopyImportVoidPointer(pointer, len(pointer))
return call_CopyImportVoidPointer
def guarded_Writer_wrap_compute(old_compute):
# The behavior for vtkWriter subclasses is to call Write()
# If the user sets a name, we will create a file with that name
# If not, we will create a temporary file from the file pool
def compute(self):
old_compute(self)
fn = self.vtkInstance.GetFileName()
if not fn:
o = self.interpreter.filePool.create_file(suffix='.vtk')
self.vtkInstance.SetFileName(o.name)
else:
o = PathObject(fn)
self.vtkInstance.Write()
self.set_output('file', o)
return compute
for var in dir(node.klass):
# Everyone that has a Set.*FileName should have a Set.*File port too
if set_file_name_pattern.match(var):
def get_compute_SetFile(method_name):
def compute_SetFile(old_compute):
if old_compute != None:
return old_compute
def call_SetFile(self, file_obj):
getattr(self.vtkInstance, method_name)(file_obj.name)
return call_SetFile
return compute_SetFile
update_dict('_special_input_function_' + var[:-4],
get_compute_SetFile(var))
if hasattr(node.klass, 'SetFileName'):
# ... BUT we only want to check existence of filenames on
# readers. VTK is nice enough to be consistent with names, but
# this is brittle..
if node.klass.__name__.endswith('Reader'):
if not node.klass.__name__.endswith('TiffReader'):
update_dict('compute', guarded_SetFileName_wrap_compute)
if hasattr(node.klass, 'SetRenderWindow'):
update_dict('_special_input_function_SetVTKCell',
compute_SetVTKCell)
#color gui wrapping
if hasattr(node.klass, 'SetDiffuseColor'):
update_dict('_special_input_function_SetDiffuseColorWidget',
compute_SetDiffuseColorWidget)
if hasattr(node.klass, 'SetAmbientColor'):
update_dict('_special_input_function_SetAmbientColorWidget',
compute_SetAmbientColorWidget)
if hasattr(node.klass, 'SetSpecularColor'):
update_dict('_special_input_function_SetSpecularColorWidget',
compute_SetSpecularColorWidget)
if hasattr(node.klass, 'SetEdgeColor'):
update_dict('_special_input_function_SetEdgeColorWidget',
compute_SetEdgeColorWidget)
if hasattr(node.klass, 'SetColor'):
update_dict('_special_input_function_SetColorWidget',
compute_SetColorWidget)
if (issubclass(node.klass, vtk.vtkRenderer) and
hasattr(node.klass, 'SetBackground')):
update_dict('_special_input_function_SetBackgroundWidget',
compute_SetBackgroundWidget)
if (issubclass(node.klass, vtk.vtkRenderer) and
hasattr(node.klass, 'SetBackground2')):
update_dict('_special_input_function_SetBackground2Widget',
compute_SetBackground2Widget)
if issubclass(node.klass, vtk.vtkWriter):
update_dict('compute', guarded_Writer_wrap_compute)
if issubclass(node.klass, vtk.vtkScalarTree):
update_dict('compute', guarded_SimpleScalarTree_wrap_compute)
if issubclass(node.klass, vtk.vtkVolumeProperty):
update_dict('_special_input_function_SetTransferFunction',
compute_SetTransferFunction)
if issubclass(node.klass, vtk.vtkDataSet):
update_dict('_special_input_function_SetPointData',
compute_SetPointData)
update_dict('_special_input_function_SetCellData',
compute_SetCellData)
if issubclass(node.klass, vtk.vtkCell):
update_dict('_special_input_function_SetPointIds',
compute_SetPointIds)
if issubclass(node.klass, vtk.vtkImageImport):
update_dict('_special_input_function_CopyImportString',
compute_CopyImportString)
return class_dict_ | 5,353,346 |
def assert_records_equal_nonvolatile(first, second, volatile_fields, indent=0):
"""Compare two test_record tuples, ignoring any volatile fields.
'Volatile' fields include any fields that are expected to differ between
successive runs of the same test, mainly timestamps. All other fields
are recursively compared.
"""
if isinstance(first, dict) and isinstance(second, dict):
if set(first) != set(second):
logging.error('%sMismatching keys:', ' ' * indent)
logging.error('%s %s', ' ' * indent, list(first.keys()))
logging.error('%s %s', ' ' * indent, list(second.keys()))
assert set(first) == set(second)
for key in first:
if key in volatile_fields:
continue
try:
assert_records_equal_nonvolatile(first[key], second[key],
volatile_fields, indent + 2)
except AssertionError:
logging.error('%sKey: %s ^', ' ' * indent, key)
raise
elif hasattr(first, '_asdict') and hasattr(second, '_asdict'):
# Compare namedtuples as dicts so we get more useful output.
assert_records_equal_nonvolatile(first._asdict(), second._asdict(),
volatile_fields, indent)
elif hasattr(first, '__iter__') and hasattr(second, '__iter__'):
for idx, (fir, sec) in enumerate(itertools.izip(first, second)):
try:
assert_records_equal_nonvolatile(fir, sec, volatile_fields, indent + 2)
except AssertionError:
logging.error('%sIndex: %s ^', ' ' * indent, idx)
raise
elif (isinstance(first, records.RecordClass) and
isinstance(second, records.RecordClass)):
assert_records_equal_nonvolatile(
{slot: getattr(first, slot) for slot in first.__slots__},
{slot: getattr(second, slot) for slot in second.__slots__},
volatile_fields, indent)
elif first != second:
logging.error('%sRaw: "%s" != "%s"', ' ' * indent, first, second)
assert first == second | 5,353,347 |
def get_playlist_decreasing_popularity():
"""This function is used to return playlists in decreasing popularity"""
all_ = PlaylistPopularityPrefixed.objects.all()
results = [{"playlist_name": obj.playlist_name, "popularity": obj.played} for obj in all_]
return results | 5,353,348 |
def validate_numeric_scalar(var: Any) -> bool:
"""Evaluates whether an argument is a single numeric value.
Args:
var: the input argument to validate
Returns:
var: the value if it passes validation
Raises:
AssertionError: `var` was not numeric.
"""
assert isinstance(var, (int, float)), "Argument must be single numeric value"
return var | 5,353,349 |
def fit_draw_func(data_for_plotting: Dict[base.FitType, histogram.Histogram1D], component: fit.FitComponent,
x: np.ndarray, ax: Axes) -> None:
""" Determine and draw the fit and data on a given axis.
Here, we will draw both the signal and the background dominated data, regardless of what was
actually used for the fitting.
Args:
data_for_plotting: Data to be used for plotting.
component: RP fit component.
x: x values where the points should be plotted.
ax: matplotlib axis where the information should be plotted.
Returns:
None. The current axis is modified.
"""
# Determine the region of the fit.
# It will be the same for all of the plotting data, so we just take the first one.
fit_type, hist = next(iter(data_for_plotting.items()))
# Need to scale the inclusive orientation background down by a factor of 3 because we haven't
# scaled by number of triggers. Note that this scaling is only approximate, and is just for
# convenience when plotting.
scale_factor = 1.
if fit_type.orientation == "inclusive":
scale_factor = 1. / 3.
# Determine the values and the errors of the fit function.
fit_values = component.evaluate_fit(x = x)
errors = component.fit_result.errors
fit_hist = histogram.Histogram1D(bin_edges = hist.bin_edges, y = fit_values, errors_squared = errors ** 2)
fit_hist *= scale_factor
# Plot the main values
plot = ax.plot(x, fit_hist.y, label = "Fit", color = AnalysisColors.fit)
# Plot the fit errors
ax.fill_between(
x, fit_hist.y - fit_hist.errors, fit_hist.y + fit_hist.errors,
facecolor = plot[0].get_color(), alpha = 0.8, zorder = 2
)
# Plot the data
for fit_type, hist in data_for_plotting.items():
h_component = hist.copy()
h_component *= scale_factor
ax.errorbar(
x, h_component.y, yerr = h_component.errors, label = f"{fit_type.region.capitalize()} dom. data",
marker = "o", linestyle = "", fillstyle = "none" if fit_type.region == "background" else "full",
color = AnalysisColors.signal if fit_type.region == "signal" else AnalysisColors.background,
)
# Also plot the background only fit function if the given component fit to the signal region and
# we have the signal data. We plot it last so that the colors are consistent throughout all axes.
if isinstance(component, fit.SignalFitComponent) and fit_type.region == "signal":
# Calculate background function values
values = component.evaluate_background(x)
errors = component.calculate_background_function_errors(x)
fit_hist_component = histogram.Histogram1D(
bin_edges = hist.bin_edges, y = values, errors_squared = errors ** 2
)
fit_hist_component *= scale_factor
# Plot background values and errors behind everything else
plot_background = ax.plot(
fit_hist_component.x, fit_hist_component.y, zorder = 1,
label = "Bkg. component", color = AnalysisColors.fit_background
)
ax.fill_between(
fit_hist_component.x,
fit_hist_component.y - fit_hist_component.errors, fit_hist_component.y + fit_hist_component.errors,
facecolor = plot_background[0].get_color(), alpha = 0.8, zorder = 1,
) | 5,353,350 |
def irrf(valor=0):
    """
    -> Function to calculate the IRRF (Brazilian withholding income tax) value.
    :param valor: Base salary value used to calculate the IRRF.
    :return: Returns the IRRF value and the tax rate applied.
    """
    irrf = []
    if valor < 1903.99:
        irrf.append(0)
        irrf.append(0)
    elif valor >= 1903.99 and valor <= 2826.65:
        irrf.append((valor * 7.5) / 100 - 142.80)  # 7.5% rate, minus the deduction amount.
        irrf.append('7,5')
    elif valor >= 2826.66 and valor <= 3751.05:
        irrf.append((valor * 15) / 100 - 354.80)  # 15% rate, minus the deduction amount.
        irrf.append('15')
    elif valor >= 3751.06 and valor <= 4664.68:
        irrf.append((valor * 22.5) / 100 - 636.13)  # 22.5% rate, minus the deduction amount.
        irrf.append('22,5')
    elif valor > 4664.68:
        irrf.append((valor * 27.5) / 100 - 869.36)  # 27.5% rate, minus the deduction amount.
        irrf.append('27,5')
    return irrf | 5,353,351 |
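A worked call of irrf, using the 15% bracket as an example (hypothetical salary value):

valor_irrf, aliquota = irrf(3000.00)
print(round(valor_irrf, 2), aliquota)  # 95.2 15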
def find_adcp_files_within_period(working_directory,max_gap=20.0,max_group_size=6):
"""
    Sorts a directory of ADCPRdiWorkHorseData raw files into groups by
    closeness in time, with groups being separated by more than
    'max_gap' minutes. This method first sorts the files by start time, and
    then splits the observations where they are more than
    'max_gap' minutes apart.
Inputs:
working_directory = directory path containing ADCP raw or netcdf files
max_gap = maximum time allowed between ADCP observations when grouping (minutes)
max_group_size = maximum number of ADCPData objects per group
Returns:
List of lists that contain groups of input ADCPData objects
"""
if os.path.exists(working_directory):
data_files = glob.glob(os.path.join(working_directory,'*[rR].000'))
data_files.extend(glob.glob(os.path.join(working_directory,'*.nc')))
else:
print "Path (%s) not found - exiting."%working_directory
exit()
start_times = list()
for data_file in data_files:
try:
a = adcpy.open_adcp(data_file,
file_type="ADCPRdiWorkhorseData",
num_av=1)
start_times.append(a.mtime[0])
except:
start_times.append(None)
if start_times:
gaps, nn, nnan = find_start_time_gaps(start_times)
data_files_sorted = [ data_files[i] for i in nn ]
# convert nnan boolean list to integer index
nnan_i = nnan * range(len(nnan))
data_files_sorted = [ data_files_sorted[i] for i in nnan_i ]
return group_according_to_gap(data_files_sorted,gaps,max_gap,max_group_size) | 5,353,352 |
def test_get_basic(client):
"""
Tests get endpoint (all crimes)
"""
client.delete("/streetlights")
insert_test_data(client)
rs = client.get("/streetlights")
collection = rs.json["result"]["streetlights"]
assert len(collection) == 5 | 5,353,353 |
def regular_transport_factory(host, port, env, config_file):
"""
Basic unencrypted Thrift transport factory function.
Returns instantiated Thrift transport for use with cql.Connection.
Params:
* host .........: hostname of Cassandra node.
* port .........: port number to connect to.
* env ..........: environment variables (os.environ) - not used by this implementation.
* config_file ..: path to cqlsh config file - not used by this implementation.
"""
tsocket = TSocket.TSocket(host, port)
return TTransport.TFramedTransport(tsocket) | 5,353,354 |
def frames_to_video(images, Fs, output_file_name, codec_spec='h264'):
"""
Given a list of image files and a sample rate, concatenate those images into
a video and write to [output_file_name].
"""
if len(images) == 0:
return
# Determine the width and height from the first image
frame = cv2.imread(images[0])
cv2.imshow('video',frame)
height, width, channels = frame.shape
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*codec_spec)
out = cv2.VideoWriter(output_file_name, fourcc, Fs, (width, height))
for image in images:
frame = cv2.imread(image)
out.write(frame)
out.release()
cv2.destroyAllWindows() | 5,353,355 |
def smoothed_abs(x, eps=1e-8):
"""A smoothed version of |x| with improved numerical stability."""
return jnp.sqrt(jnp.multiply(x, x) + eps) | 5,353,356 |
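A small sketch of why the smoothing helps: the derivative at zero is exactly 0 rather than a subgradient convention. Assumes JAX is installed:

import jax
import jax.numpy as jnp

print(smoothed_abs(jnp.array(0.0)))  # ~sqrt(eps), i.e. close to 0
print(jax.grad(smoothed_abs)(0.0))   # 0.0, since d/dx sqrt(x**2 + eps) = x / sqrt(x**2 + eps)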
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap,aspect='auto')
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('confusion_mat.png', bbox_inches='tight', format='png', dpi=300, pad_inches=0,transparent=True)
plt.show() | 5,353,357 |
def setup_release(keep_days=0):
""" Sets up a full release across the cluster
See :func:`_setup_release` for info
Example:
fab <env> setup_release:keep_days=10 # Makes a new release that will last for 10 days
"""
_setup_release(parse_int_or_exit(keep_days), full_cluster=True) | 5,353,358 |
def increment(t1, seconds):
"""Adds seconds to a Time object."""
assert valid_time(t1)
seconds += time_to_int(t1)
return int_to_time(seconds) | 5,353,359 |
def _apply_write(cls):
"""Add write method if any formats have a registered writer for `cls`."""
skbio_io_write = globals()['write']
write_formats = list_write_formats(cls)
if write_formats:
if not hasattr(cls, 'default_write_format'):
raise NotImplementedError(
"Classes with registered writers must provide a "
"`default_write_format`. Please add `default_write_format` to"
" '%s'." % cls.__name__)
def write(self, fp, format=cls.default_write_format, **kwargs):
skbio_io_write(self, into=fp, format=format, **kwargs)
write.__doc__ = _write_docstring % (
cls.__name__,
_formats_for_docs(write_formats),
cls.__name__,
cls.default_write_format,
_import_paths(write_formats)
)
cls.write = write | 5,353,360 |
def add_absname(file):
"""Prefix a file name with the working directory."""
work_dir = os.path.dirname(__file__)
return os.path.join(work_dir, file) | 5,353,361 |
def dense(input_shape, output_shape, output_activation='linear', name=None):
"""
Build a simple Dense model
Parameters
----------
input_shape: shape
Input shape
output_shape: int
Number of actions (Discrete only so far)
Returns
-------
model: Model
Keras tf model
"""
# Create inputs
inputs = Input(shape=input_shape)
x = Flatten()(inputs)
    # Create two dense hidden layers and one output layer
    x = Dense(256, activation='tanh')(x)
    x = Dense(256, activation='tanh')(x)
    predictions = Dense(output_shape, activation=output_activation)(x)
# Finally build model
model = Model(inputs=inputs, outputs=predictions, name=name)
model.summary()
return model | 5,353,362 |
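Hypothetical usage of the model builder above (assumes the Input/Flatten/Dense/Model imports used by the function are in scope):

model = dense(input_shape=(4,), output_shape=2, name="q_network")
print(model.output_shape)  # (None, 2)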
def test_missing_enable_ext(monkeypatch: pytest.MonkeyPatch) -> None:
"""Test missing enable-ext option fails.
Check that a workflow that needs `--enable-ext` and
`--enable-dev` fails without those options and passes with them.
"""
monkeypatch.delenv("CWLTOOL_OPTIONS", raising=False)
assert main([get_data("tests/wf/generator/zing.cwl"), "--zing", "zipper"]) == 1
assert (
main(
[
"--enable-ext",
"--enable-dev",
get_data("tests/wf/generator/zing.cwl"),
"--zing",
"zipper",
]
)
== 0
)
monkeypatch.setenv("CWLTOOL_OPTIONS", "--enable-ext --enable-dev")
assert main([get_data("tests/wf/generator/zing.cwl"), "--zing", "zipper"]) == 0 | 5,353,363 |
def dataset_labels(alldata, tag=None):
""" Return label for axis of dataset
Args:
        alldata (DataSet): dataset
tag (str): can be 'x', 'y' or 'z'
"""
if tag == 'x':
d = alldata.default_parameter_array()
return d.set_arrays[0].label
if tag == 'y':
d = alldata.default_parameter_array()
return d.set_arrays[1].label
if tag is None or tag == 'z':
d = alldata.default_parameter_array()
return d.label
return '?' | 5,353,364 |
def spread(
template: Template,
data: Optional[Any],
flavor: Flavor,
postprocess: Optional[Callable] = None,
start_at: int = 0,
replace_missing_with: Optional[str] = None,
) -> Tuple[List[Union["pygsheets.Cell"]], int]:
"""Spread data into cells.
Parameters
----------
template
A list of expressions which determines how the cells are layed out.
data
Data to render. Can be a dictionary, a dataclass, a list; just as long as the template
expressions can be applied to the data.
flavor
Determines what kind of cells to generate.
postprocess
An optional function to call for each cell once it has been created.
start_at
The row number where the layout begins. Zero-based.
replace_missing_with
An optional value to be used when a variable isn't found in the data. An exception is
raised if a variable is not found and this is not specified.
Returns
-------
cells
The list of cells.
n_rows
The number of rows which the cells span over.
"""
data = data or {}
# Unpack the template
table = []
for c, col in enumerate(template):
cells = []
if callable(col):
col = col(data)
for r, expr in enumerate(col if isinstance(col, list) else [col]):
if callable(expr):
expr = expr(data)
# expr can be:
# - expr
# - (expr, postprocessor)
# - (expr, postprocessor, note)
pp = None
note = None
if isinstance(expr, tuple):
if len(expr) == 2:
expr, pp = expr
else:
expr, pp, note, *_ = expr
cell = _Cell(
r=r + start_at,
c=c,
expr=_normalize_expression(expr),
note=note,
postprocess=pp,
)
cells.append(cell)
table.append(cells)
# We're going to add the positions of the named variables to the data
named_variables = {}
cell_names = {}
for c, col in enumerate(table):
for r, cell in enumerate(col):
if _is_named_formula(cell.expr):
name = cell.expr.split(" = ")[0]
named_variables[name] = cell.address
cell_names[len(cell_names)] = name
elif _is_variable(cell.expr):
cell_names[len(cell_names)] = cell.expr[1:]
else:
cell_names[len(cell_names)] = None
if flavor == Flavor.PYGSHEETS.value:
cells = [
cell.as_pygsheets(
data=data,
named_variables=named_variables,
replace_missing_with=replace_missing_with,
)
for col in table
for cell in col
]
else:
raise ValueError(
f"Unknown flavor {flavor}. Available options: {', '.join(f.value for f in Flavor)}"
)
if postprocess:
for i, cell in enumerate(cells):
cells[i] = postprocess(cell, cell_names[i])
n_rows = max(map(len, table))
return cells, n_rows | 5,353,365 |
def check_html(name):
"""
Given a name of graph to save or write, check if it is of valid syntax
    :param name: the name to check
:type name: str
"""
assert len(name.split(".")) == 2, "invalid file type for %s" % name
assert name.split(
".")[1] == "html", "%s is not a valid html file" % name | 5,353,366 |
def train_validate_test_split(DataFrame, ratios=(0.6,0.2,0.2)):
"""
Parameters
----------
DataFrame : pandas.DataFrame
DataFrame
ratios : tuple
E.g.
(train, validate, test) = (0.6, 0.25, 0.15)
(train, test) = (0.6, 0.4) -> validate = test
Returns
-------
TrainDataset : pandas.DataFrame
ValidateDataset : pandas.DataFrame
TestDataset : pandas.DataFrame
"""
from sklearn.model_selection import train_test_split
N = len(DataFrame.index)
if len(ratios)==3:
train_size = ratios[0]/np.sum(ratios)
test_size = ratios[2]/np.sum(ratios[1:3])
TrainDataset, TestDataset = train_test_split(DataFrame, train_size=train_size, random_state=42)
ValidateDataset, TestDataset = train_test_split(TestDataset, test_size=test_size, random_state=42)
elif len(ratios)==2:
train_size = ratios[0]/np.sum(ratios)
TrainDataset, TestDataset = train_test_split(DataFrame, train_size=train_size, random_state=42)
ValidateDataset = TestDataset
print('Validate = Test')
else:
print('ERROR in splitting train, validate, test')
return None, None, None
n_train = len(TrainDataset.index)
n_validate = len(ValidateDataset.index)
n_test = len(TestDataset.index)
print('Train Samples: {} [{:.1f}%]'.format(n_train, n_train/N*100))
print('Validate Samples: {} [{:.1f}%]'.format(n_validate, n_validate/N*100))
print('Test Samples: {} [{:.1f}%]'.format(n_test, n_test/N*100))
return TrainDataset, ValidateDataset, TestDataset | 5,353,367 |
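For reference, a small usage sketch with a toy DataFrame (the column names are arbitrary):

import numpy as np
import pandas as pd

# Toy dataset with 100 rows and two columns.
df = pd.DataFrame({"x": np.arange(100), "y": np.arange(100) * 2})

# 60/20/20 split: roughly 60, 20 and 20 rows respectively.
train_df, val_df, test_df = train_validate_test_split(df, ratios=(0.6, 0.2, 0.2))

# Two-element ratios: the validation set is simply the test set.
train_df, val_df, test_df = train_validate_test_split(df, ratios=(0.7, 0.3))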
def is_error(code: Union[Error, int]) -> bool:
"""Returns True, if error is a (fatal) error, not just a warning."""
if isinstance(code, Error): code = code.code
return code >= ERROR | 5,353,368 |
def unpack_triple(item):
"""Extracts the indices and values from an object.
The argument item can either be an instance of SparseTriple or a
sequence of length three.
Example usage:
>>> st = SparseTriple()
>>> ind1, ind2, val = unpack_triple(st)
>>> quad_expr = [[], [], []]
>>> ind1, ind2, val = unpack_triple(quad_expr)
"""
try:
assert item.isvalid()
ind1, ind2, val = item.unpack()
except AttributeError:
ind1, ind2, val = item[0:3]
validate_arg_lengths([ind1, ind2, val])
return ind1, ind2, val | 5,353,369 |
def class_report(data=None, label: str = None):
"""
This function calculates a class report of a given anomaly score.
:param data: dataset [DataFrame]
    :param label: anomaly score of a given feature [string]
:return: Classification report as console output
"""
print(classification_report(data['Jumps'], data[label], target_names=['normal', 'outlier'])) | 5,353,370 |
def test_skip_c() -> None:
""" Test with skip """
run_test(['-c', SKIP], 'tests/expected/skip.txt.c.out') | 5,353,371 |
def max_sequence(arr):
"""
The maximum sum subarray problem consists in finding the maximum sum of a contiguous subsequence in an array or
list of integers.
:param arr: an array or list of integers.
:return: the maximum value found within the subarray.
"""
best = 0
for x in range(len(arr)):
for y in range(len(arr)):
if sum(arr[x:y+1]) > best:
best = sum(arr[x:y+1])
return best | 5,353,372 |
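The nested loops above recompute sums repeatedly, which is cubic in the length of the array. For larger inputs, Kadane's algorithm returns the same answer in linear time while keeping the same convention that an empty subarray contributes 0; a sketch:

def max_sequence_kadane(arr):
    """O(n) maximum subarray sum; an empty subarray counts as 0."""
    best = current = 0
    for value in arr:
        current = max(0, current + value)  # best sum ending at this position
        best = max(best, current)
    return best

# Classic example: the best subarray is [4, -1, 2, 1] with sum 6.
print(max_sequence_kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6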
def get_by_username(username):
"""
Retrieve a user from the database by their username
:param username:
:return:
"""
return database.get(User, username, field="username") | 5,353,373 |
def plot_averaged_forecast(actual, predicted, first_column_act, last_column_act,
first_column_pr, last_column_pr):
"""
Function plot averaged forecasts and actual time series
:param actual: time series with averaged actual values
:param predicted: time series with averaged predicted values
:param first_column_act: numpy array with actual first days
:param last_column_act: numpy array with actual seventh days
:param first_column_pr: numpy array with predicted first days
:param last_column_pr: numpy array with predicted seventh days
"""
plt.plot(actual, label='7-day moving average actual')
plt.fill_between(range(0, len(actual)), first_column_act, last_column_act, alpha=0.4)
plt.plot(predicted, label='7-day moving average forecast')
plt.fill_between(range(0, len(actual)), first_column_pr, last_column_pr, alpha=0.4)
plt.ylabel('River level, cm', fontsize=14)
plt.xlabel('Time index', fontsize=14)
plt.grid()
plt.legend(fontsize=12)
plt.show() | 5,353,374 |
def evalPoint(u, v):
"""
Evaluates the surface point corresponding to normalized parameters (u, v)
"""
a, b, c, d = 0.5, 0.3, 0.5, 0.1
s = TWO_PI * u
t = (TWO_PI * (1 - v)) * 2
r = a + b * cos(1.5 * t)
x = r * cos(t)
y = r * sin(t)
z = c * sin(1.5 * t)
dv = PVector()
dv.x = (-1.5 * b * sin(1.5 * t) * cos(t) -
(a + b * cos(1.5 * t)) * sin(t))
dv.y = (-1.5 * b * sin(1.5 * t) * sin(t) +
(a + b * cos(1.5 * t)) * cos(t))
dv.z = 1.5 * c * cos(1.5 * t)
q = dv
q.normalize()
qvn = PVector(q.y, -q.x, 0)
qvn.normalize()
ww = q.cross(qvn)
pt = PVector()
pt.x = x + d * (qvn.x * cos(s) + ww.x * sin(s))
pt.y = y + d * (qvn.y * cos(s) + ww.y * sin(s))
pt.z = z + d * ww.z * sin(s)
return pt | 5,353,375 |
def decode_funcname2(subprogram_die, address):
""" Get the function name from an PC address"""
for DIE in subprogram_die:
try:
lowpc = DIE.attributes['DW_AT_low_pc'].value
# DWARF v4 in section 2.17 describes how to interpret the
# DW_AT_high_pc attribute based on the class of its form.
# For class 'address' it's taken as an absolute address
# (similarly to DW_AT_low_pc); for class 'constant', it's
# an offset from DW_AT_low_pc.
highpc_attr = DIE.attributes['DW_AT_high_pc']
highpc_attr_class = describe_form_class(highpc_attr.form)
if highpc_attr_class == 'address':
highpc = highpc_attr.value
elif highpc_attr_class == 'constant':
highpc = lowpc + highpc_attr.value
else:
print('Error: invalid DW_AT_high_pc class:',
highpc_attr_class)
continue
if lowpc <= address < highpc:
return DIE.attributes['DW_AT_name'].value
except KeyError:
continue
return None | 5,353,376 |
def load_cubes(filespecs, callback=None):
"""
Loads cubes from a list of ABF filenames.
Args:
* filenames - list of ABF filenames to load
Kwargs:
* callback - a function that can be passed to :func:`iris.io.run_callback`
.. note::
The resultant cubes may not be in the same order as in the file.
"""
if isinstance(filespecs, str):
filespecs = [filespecs]
for filespec in filespecs:
for filename in glob.glob(filespec):
field = ABFField(filename)
cube = field.to_cube()
# Were we given a callback?
if callback is not None:
cube = iris.io.run_callback(callback, cube, field, filename)
if cube is None:
continue
yield cube | 5,353,377 |
def get_reddit_slug(permalink):
"""
Get the reddit slug from a submission permalink, with '_' replaced by '-'
Args:
permalink (str): reddit submission permalink
Returns:
str: the reddit slug for a submission
"""
return list(filter(None, permalink.split("/")))[-1].replace("_", "-") | 5,353,378 |
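A usage sketch with a hypothetical permalink:

permalink = "/r/learnpython/comments/abc123/how_to_split_a_string/"
print(get_reddit_slug(permalink))  # how-to-split-a-string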
def newton_halley(func, x0, fprime, fprime2, args=(), tol=1.48e-8,
maxiter=50, disp=True):
"""
    Find a zero of a function using Halley's method (a jitted adaptation of
    SciPy's implementation).
`func`, `fprime`, `fprime2` must be jitted via Numba.
Parameters
----------
func : callable and jitted
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : callable and jitted
The derivative of the function (when available and convenient).
fprime2 : callable and jitted
The second order derivative of the function
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
disp : bool, optional
If True, raise a RuntimeError if the algorithm didn't converge
Returns
-------
results : namedtuple
root - Estimated location where function is zero.
function_calls - Number of times the function was called.
iterations - Number of iterations needed to find the root.
converged - True if the routine converged
"""
if tol <= 0:
raise ValueError("tol is too small <= 0")
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
# Convert to float (don't use float(x0); this works also for complex x0)
p0 = 1.0 * x0
funcalls = 0
status = _ECONVERR
# Halley Method
for itr in range(maxiter):
# first evaluate fval
fval = func(p0, *args)
funcalls += 1
# If fval is 0, a root has been found, then terminate
if fval == 0:
status = _ECONVERGED
p = p0
itr -= 1
break
fder = fprime(p0, *args)
funcalls += 1
# derivative is zero, not converged
if fder == 0:
p = p0
break
newton_step = fval / fder
# Halley's variant
fder2 = fprime2(p0, *args)
p = p0 - newton_step / (1.0 - 0.5 * newton_step * fder2 / fder)
if abs(p - p0) < tol:
status = _ECONVERGED
break
p0 = p
if disp and status == _ECONVERR:
msg = "Failed to converge"
raise RuntimeError(msg)
return _results((p, funcalls, itr + 1, status)) | 5,353,379 |
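A usage sketch, assuming Numba is installed; the quadratic below (root of x**2 - c) is only an illustration of the required jitted callables.

from numba import njit

@njit
def f(x, c):
    return x * x - c

@njit
def fprime(x, c):
    return 2.0 * x

@njit
def fprime2(x, c):
    return 2.0

# Find sqrt(2) starting from x0 = 1.0.
res = newton_halley(f, 1.0, fprime, fprime2, args=(2.0,))
print(res.root, res.converged)  # ~1.4142135..., True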
def staff_dash(request):
"""Route for displaying the staff dashboard of the site.
"""
# Empty context to populate:
context = {}
def get_account_name(path):
"""Method contains logic to extract the app name from a url path.
Method uses the django.urls.resolve method with basic string splitting.
"""
try:
appname = resolve(path).func.__module__.split(".")[1]
        except Exception:
appname = None
return appname
# Ensuring that the user is a staff member if not redirect home:
if request.user.is_staff is False:
return redirect("user_account_dashboard")
else:
        # Determining a one month window for querying request data:
prev_month = date.today() - timedelta(days=30)
# Querying all of the requests made to the database in the last month:
max_queryset = Request.objects.filter(time__gt=prev_month)
# QuerySet to Dataframe Conversions:
requests_timeseries = max_queryset.values_list("time", "response", "method", "path", "user")
timeframe_df = pd.DataFrame.from_records(requests_timeseries, columns=["time", "response", "method", "path", "user"])
# Adding columns:
timeframe_df["_count"] = 1
timeframe_df['app'] = timeframe_df["path"].apply(lambda x: get_account_name(x))
timeframe_df.set_index(timeframe_df['time'], inplace=True)
# Resampling/Transforming data:
daily_resample_get = timeframe_df.loc[timeframe_df['method'] == 'GET', "_count"].squeeze().resample('H').sum()
daily_resample_posts = timeframe_df.loc[timeframe_df['method'] != 'GET', "_count"].squeeze().resample('H').sum()
# Extracting Series for all response codes:
daily_200_response = timeframe_df.loc[timeframe_df["response"] < 300, "_count"]
daily_300_response = timeframe_df.loc[
(timeframe_df["response"] >= 300) & (timeframe_df["response"] < 400), "_count"]
daily_400_response = timeframe_df.loc[
(timeframe_df["response"] >= 400) & (timeframe_df["response"] < 500), "_count"]
daily_500_response = timeframe_df.loc[timeframe_df["response"] >= 500, "_count"]
# Building a dict of unique get/post timeseries based on unique apps:
app_timeseries_dict = {}
# Getting relevant list of installed apps:
third_party_apps = [app.split(".")[0] for app in settings.INSTALLED_APPS
if not app.startswith("django.") and
app not in ['rest_framework', 'rest_framework.authtoken', 'rest_auth', 'request']
]
for app in third_party_apps:
# Nested dict structure for GET and POST request storage:
application_dict = {}
# Populating application dict w/ GET and POST request timeseries:
try:
app_timeseries_get = timeframe_df.loc[
(timeframe_df["app"] == app) & (timeframe_df["method"] == "GET"), "_count"].resample("H").sum()
application_dict["GET"] = {
"Data" : app_timeseries_get.values.tolist(),
"Index": app_timeseries_get.index.tolist()
}
            except Exception:
                application_dict["GET"] = {"Data": [0] * len(daily_resample_get.index),
                                           "Index": daily_resample_get.index.tolist()}
try:
app_timeseries_post = timeframe_df.loc[
(timeframe_df["app"] == app) & (timeframe_df["method"] == "POST"), "_count"].resample("H").sum()
application_dict["POST"] = {
"Data": app_timeseries_post.values.tolist(),
"Index": app_timeseries_post.index.tolist()
}
            except Exception:
                application_dict["POST"] = {"Data": [0] * len(daily_resample_get.index),
                                            "Index": daily_resample_get.index.tolist()}
# Fully Building nested dict:
app_timeseries_dict[app] = application_dict
print(len(application_dict["GET"]["Data"]), len(application_dict["GET"]['Index']))
        # Serializing dataframe columns to pass to the template:
context['get_datetime'] = daily_resample_get.index.tolist()
# Error-Catching daily response codes when resampling:
response_code_dict = {}
try:
response_code_dict[200] = daily_200_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[200] = [0] * len(daily_resample_get.index)
try:
response_code_dict[300] = daily_300_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[300] = [0] * len(daily_resample_get.index)
try:
response_code_dict[400] = daily_400_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[400] = [0] * len(daily_resample_get.index)
try:
response_code_dict[500] = daily_500_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[500] = [0] * len(daily_resample_get.index)
# Populating Context:
context['app_timeseries'] = app_timeseries_dict
context['get_requests_count'] = daily_resample_get.values.tolist()
context['post_requests_count'] = daily_resample_posts.values.tolist()
context['response_codes'] = response_code_dict
return render(request, "accounts/staff_dashboard.html", context) | 5,353,380 |
def read_v1_file(path: str = "CBETHUSD.csv") -> tuple:
"""
    Read the data from the file path, reconstruct the format of the data,
    and return a tuple of (3d sample array, one-hot label array).
"""
lst = []
res = []
with open(path) as data:
reader = csv.reader(data)
next(reader) # skip the header row
for row in reader:
lst.append(float(row[1]))
lst_con = []
for i in range(len(lst) - 30):
temp = lst[i:i + 25]
lst_con.append(temp)
res_temp = lst[i + 30] - temp[-1]
res_cat = [0, 0, 0]
if abs(res_temp) < abs(temp[-1] * 0.05):
res_cat[1] = 1
elif res_temp < 0:
res_cat[0] = 1
else:
res_cat[2] = 1
res.append(res_cat)
np_lst = np.array(lst_con).reshape(len(lst_con), 25, 1)
np_res = np.array(res)
return (np_lst, np_res) | 5,353,381 |
def check_attribute(name, paginator, expected, params):
"""
Helper method that checks a single attribute and gives a nice error
message upon test failure.
"""
got = getattr(paginator, name)
assert expected == got | 5,353,382 |
def get_adjacent_th(spec: torch.Tensor, filter_length: int = 5) -> torch.Tensor:
"""Zero-pad and unfold stft, i.e.,
add zeros to the beginning so that, using the multi-frame signal model,
there will be as many output frames as input frames.
Args:
spec (torch.Tensor): input spectrum (B, F, T, 2)
filter_length (int): length for frame extension
Returns:
ret (torch.Tensor): output spectrum (B, F, T, filter_length, 2)
""" # noqa: D400
return (
torch.nn.functional.pad(spec, pad=[0, 0, filter_length - 1, 0])
.unfold(dimension=-2, size=filter_length, step=1)
.transpose(-2, -1)
.contiguous()
) | 5,353,383 |
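A quick shape check (the batch, frequency and frame counts below are arbitrary):

import torch

spec = torch.randn(2, 257, 100, 2)            # (B, F, T, 2)
ret = get_adjacent_th(spec, filter_length=5)
print(ret.shape)                              # torch.Size([2, 257, 100, 5, 2])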
def K2(eps):
""" Radar dielectric factor |K|**2
Parameters
----------
eps : complex
nd array of complex relative dielectric constants
Returns
-------
nd - float
Radar dielectric factor |K|**2 real
"""
K_complex = (eps-1.0)/(eps+2.0)
return (K_complex*K_complex.conj()).real | 5,353,384 |
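For example, using an illustrative complex relative permittivity for liquid water at microwave frequencies, |K|**2 comes out close to the commonly quoted 0.93:

import numpy as np

eps_water = np.array([80.0 + 25.0j])  # illustrative value, not a reference number
print(K2(eps_water))                  # roughly [0.93]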
def permutation_test_mi(x, y, B=100, random_state=None, **kwargs):
"""Permutation test for mutual information
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
n_classes : int
Number of classes
B : int
Number of permutations
random_state : int
Sets seed for random number generator
Returns
-------
p : float
Achieved significance level
"""
np.random.seed(random_state)
# Estimate correlation from original data
theta = mi(x, y)
# Permutations
y_ = y.copy()
theta_p = np.zeros(B)
for i in range(B):
np.random.shuffle(y_)
theta_p[i] = mi(x, y_)
# Achieved significance level
return np.mean(theta_p >= theta) | 5,353,385 |
def convert_time_units(value, value_unit="s", result_unit="s", case_sensitive=True):
"""
Convert `value` from `value_unit` to `result_unit`.
The possible time units are ``'s'``,``'ms'``, ``'us'``, ``'ns'``, ``'ps'``, ``'fs'``, ``'as'``.
If ``case_sensitive==True``, matching units is case sensitive.
"""
if string_utils.string_equal(value_unit,"s",case_sensitive=case_sensitive):
value_s=value
elif string_utils.string_equal(value_unit,"ms",case_sensitive=case_sensitive):
value_s=value*1E-3
elif string_utils.string_equal(value_unit,"us",case_sensitive=case_sensitive):
value_s=value*1E-6
elif string_utils.string_equal(value_unit,"ns",case_sensitive=case_sensitive):
value_s=value*1E-9
elif string_utils.string_equal(value_unit,"ps",case_sensitive=case_sensitive):
value_s=value*1E-12
elif string_utils.string_equal(value_unit,"fs",case_sensitive=case_sensitive):
value_s=value*1E-15
elif string_utils.string_equal(value_unit,"as",case_sensitive=case_sensitive):
value_s=value*1E-18
else:
raise IOError("unrecognized length unit: {0}".format(value_unit))
if string_utils.string_equal(result_unit,"s",case_sensitive=case_sensitive):
return value_s
elif string_utils.string_equal(result_unit,"ms",case_sensitive=case_sensitive):
return value_s*1E3
elif string_utils.string_equal(result_unit,"us",case_sensitive=case_sensitive):
return value_s*1E6
elif string_utils.string_equal(result_unit,"ns",case_sensitive=case_sensitive):
return value_s*1E9
elif string_utils.string_equal(result_unit,"ps",case_sensitive=case_sensitive):
return value_s*1E12
elif string_utils.string_equal(result_unit,"fs",case_sensitive=case_sensitive):
return value_s*1E15
elif string_utils.string_equal(result_unit,"as",case_sensitive=case_sensitive):
return value_s*1E18
else:
raise IOError("unrecognized length unit: {0}".format(result_unit)) | 5,353,386 |
def compute_distribution_clusters(columns: list, dataset_name: str, threshold: float, pool: Pool,
chunk_size: int = None, quantiles: int = 256):
"""
Algorithm 2 of the paper "Automatic Discovery of Attributes in Relational Databases" from M. Zhang et al. [1]. This
algorithm captures which columns contain data with similar distributions based on the EMD distance metric.
Parameters
---------
columns : list(str)
The columns of the database
dataset_name : str
        The name of the dataset
threshold : float
The conservative global EMD cutoff threshold described in [1]
pool: multiprocessing.Pool
The process pool that will be used in the pre-processing of the table's columns
chunk_size : int, optional
The number of chunks of each job process (default let the framework decide)
quantiles : int, optional
The number of quantiles that the histograms are split on (default is 256)
Returns
-------
list(list(str))
A list that contains the distribution clusters that contain the column names in the cluster
"""
combinations = list(column_combinations(columns, dataset_name, quantiles, intersection=False))
total = len(combinations)
if chunk_size is None:
chunk_size = int(calc_chunksize(pool._processes, total))
A: dict = transform_dict(dict(tqdm(pool.imap_unordered(process_emd, combinations, chunksize=chunk_size),
total=total)))
edges_per_column = list(pool.map(parallel_cutoff_threshold, list(cuttoff_column_generator(A, columns, dataset_name,
threshold))))
graph = create_graph(columns, edges_per_column)
connected_components = list(nx.connected_components(graph))
return connected_components | 5,353,387 |
def save_cpx_image(image, name):
"""Save a complex image (represented by real and imaginary channels) as two images."""
save_image(image[0], '{}_real.png'.format(name))
save_image(image[1], '{}_imag.png'.format(name)) | 5,353,388 |
def get_eventframe_sequence(event_deque, is_x_first, is_x_flipped,
is_y_flipped, shape, data_format, frame_width,
frame_gen_method):
"""
Given a single sequence of x-y-ts events, generate a sequence of binary
event frames.
"""
inp = []
while len(event_deque) > 0:
inp.append(get_binary_frame(event_deque, is_x_first, is_x_flipped,
is_y_flipped, shape, data_format,
frame_width, frame_gen_method))
return np.stack(inp, -1) | 5,353,389 |
def creation_sequence_to_weights(creation_sequence):
"""
Returns a list of node weights which create the threshold
graph designated by the creation sequence. The weights
are scaled so that the threshold is 1.0. The order of the
nodes is the same as that in the creation sequence.
"""
# Turn input sequence into a labeled creation sequence
first = creation_sequence[0]
if isinstance(first, str): # creation sequence
if isinstance(creation_sequence, list):
wseq = creation_sequence[:]
else:
wseq = list(creation_sequence) # string like 'ddidid'
elif isinstance(first, tuple): # labeled creation sequence
wseq = [v[1] for v in creation_sequence]
elif isinstance(first, int): # compact creation sequence
wseq = uncompact(creation_sequence)
else:
raise TypeError("Not a valid creation sequence type")
# pass through twice--first backwards
wseq.reverse()
w = 0
prev = 'i'
for j, s in enumerate(wseq):
if s == 'i':
wseq[j] = w
prev = s
elif prev == 'i':
prev = s
w += 1
wseq.reverse() # now pass through forwards
for j, s in enumerate(wseq):
if s == 'd':
wseq[j] = w
prev = s
elif prev == 'd':
prev = s
w += 1
# Now scale weights
if prev == 'd':
w += 1
wscale = 1. / float(w)
return [ww * wscale for ww in wseq]
# return wseq | 5,353,390 |
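A usage sketch; the returned list has one weight per node of the creation sequence, in the same order, scaled so that the threshold equals 1.0:

weights = creation_sequence_to_weights("ddid")
print(weights)  # one weight per character of the creation sequence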
def parse_args():
"""
Parsing shell command arguments, and override appropriate params
from setting module
:return: None
"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version', version=VERSION)
parser.add_argument('-u', action='store', dest='url')
parser.add_argument('-f', action='store', dest='url_file')
parser.add_argument('-t', action='store', dest='target_log_file')
parser.add_argument('-l', action='store', dest='log_file')
parser.add_argument('-p', action='store_true', dest='plotting', default=True)
parser.add_argument('-m', action='store', dest='max_allowed_concurrent', type=int)
parser.add_argument('-b', action='store', dest='base_concurrent', type=int)
parser.add_argument('-s', action='store', dest='step_concurrent', type=int)
result = parser.parse_args()
if result.url:
global url
url = result.url
if result.url_file:
global url_file
url_file = result.url_file
if result.target_log_file:
global target_file
target_file = result.target_log_file
if result.log_file:
global log_file
log_file = result.log_file
if result.plotting:
global plotting
plotting = result.plotting
if result.max_allowed_concurrent:
global max_concurrent
max_concurrent = result.max_allowed_concurrent
if result.base_concurrent:
global base_concurrent
base_concurrent = result.base_concurrent
if result.step_concurrent:
global step_concurrent
step_concurrent = result.step_concurrent | 5,353,391 |
def group():
"""
Model generator.
""" | 5,353,392 |
def get_seg_features(string):
"""
Segment text with jieba
features are represented in bies format
    s denotes a single-character word
"""
seg_feature = []
for word in jieba.cut(string):
if len(word) == 1:
seg_feature.append(0)
else:
tmp = [2] * len(word)
tmp[0] = 1
tmp[-1] = 3
seg_feature.extend(tmp)
return seg_feature | 5,353,393 |
def print_device_info(nodemap):
"""
This function prints the device information of the camera from the transport
layer; please see NodeMapInfo example for more in-depth comments on printing
device information from the nodemap.
:param nodemap: Transport layer device nodemap.
:type nodemap: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print('\n*** DEVICE INFORMATION ***\n')
try:
result = True
node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):
features = node_device_information.GetFeatures()
for feature in features:
node_feature = PySpin.CValuePtr(feature)
print('%s: %s' % (node_feature.GetName(),
node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))
else:
print('Device control information not available.')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex.message)
return False
return result | 5,353,394 |
def make_withdrawal(account):
"""Withdrawal Dialog."""
# @TODO: Use questionary to capture the withdrawal and set it equal to amount variable. Be sure that amount is a floating
# point number.
    amount = questionary.text("How much would you like to withdraw?").ask()
amount = float(amount)
# @TODO: Validates amount of withdrawal. If less than or equal to 0 system exits with error message.
if amount <= account["balance"]:
account["balance"] = account["balance"] - amount
print("Your withdrawl was successful")
return account
else:
sys.exit(
"Your do not have enough money in your account to make this withdrawl. PLease try again."
)
# @TODO: Validates if withdrawal amount is less than or equal to account balance, processes withdrawal and returns account.
# Else system exits with error messages indicating that the account is short of funds. | 5,353,395 |
def coerce_to_pendulum_date(x: PotentialDatetimeType,
assume_local: bool = False) -> Optional[Date]:
"""
Converts something to a :class:`pendulum.Date`.
Args:
x: something that may be coercible to a date
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.Date`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible
"""
p = coerce_to_pendulum(x, assume_local=assume_local)
return None if p is None else p.date() | 5,353,396 |
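A usage sketch, assuming the companion coerce_to_pendulum() helper accepts ISO-format strings, datetime objects and None:

import datetime

print(coerce_to_pendulum_date("2021-07-04T15:30:00Z"))                 # 2021-07-04
print(coerce_to_pendulum_date(datetime.datetime(2021, 7, 4, 23, 59)))  # 2021-07-04
print(coerce_to_pendulum_date(None))                                   # None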
def loss_fixed_depl_noquench(params, loss_data):
"""
MSE loss function for fitting individual stellar mass histories.
Only main sequence efficiency parameters. Quenching is deactivated.
    Depletion time is fixed at tau=0 Gyr, i.e. gas conversion is instantaneous.
"""
(
lgt,
dt,
dmhdt,
log_mah,
sm_target,
log_sm_target,
sfr_target,
fstar_target,
index_select,
fstar_indx_high,
fstar_tdelay,
ssfrh_floor,
weight,
weight_fstar,
t_fstar_max,
fixed_tau,
q_params,
) = loss_data
sfr_params = [*params[0:4], fixed_tau]
_res = calculate_sm_sfr_fstar_history_from_mah(
lgt,
dt,
dmhdt,
log_mah,
sfr_params,
q_params,
index_select,
fstar_indx_high,
fstar_tdelay,
)
mstar, sfr, fstar = _res
mstar = jnp.log10(mstar)
fstar = jnp.log10(fstar)
sfr_res = 1e8 * (sfr - sfr_target) / sm_target
sfr_res = jnp.clip(sfr_res, -1.0, 1.0)
loss = jnp.mean(((mstar - log_sm_target) / weight) ** 2)
loss += jnp.mean(((fstar - fstar_target) / weight_fstar) ** 2)
loss += jnp.mean((sfr_res / weight) ** 2)
qt = _get_bounded_qt(q_params[0])
loss += _sigmoid(qt - t_fstar_max, 0.0, 50.0, 100.0, 0.0)
return loss | 5,353,397 |
def plot_timeseries_comp(date1, value1, date2, value2, fname_list,
labelx='Time [UTC]', labely='Value',
label1='Sensor 1', label2='Sensor 2',
titl='Time Series Comparison', period1=0, period2=0,
ymin=None, ymax=None, dpi=72):
"""
plots 2 time series in the same graph
Parameters
----------
date1 : datetime object
time of the first time series
value1 : float array
values of the first time series
date2 : datetime object
time of the second time series
value2 : float array
values of the second time series
fname_list : list of str
list of names of the files where to store the plot
labelx : str
The label of the X axis
labely : str
The label of the Y axis
label1, label2 : str
legend label for each time series
titl : str
The figure title
period1, period2 : float
measurement period in seconds used to compute accumulation. If 0 no
accumulation is computed
dpi : int
dots per inch
ymin, ymax : float
The limits of the Y-axis. None will keep the default limit.
Returns
-------
fname_list : list of str
list of names of the created plots
History
--------
201?.??.?? -fvj- created
2017.08.21 -jgr- changed some graphical aspects
"""
if (period1 > 0) and (period2 > 0):
# TODO: document this and check (sometimes artefacts)
value1 *= (period1/3600.)
value1 = np.ma.cumsum(value1)
value2 *= (period2/3600.)
value2 = np.ma.cumsum(value2)
fig, ax = plt.subplots(figsize=[10, 6.5], dpi=dpi)
ax.plot(date1, value1, 'b', label=label1, linestyle='--', marker='o')
ax.plot(date2, value2, 'r', label=label2, linestyle='--', marker='s')
ax.legend(loc='best')
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_title(titl)
ax.grid()
ax.set_ylim(bottom=ymin, top=ymax)
ax.set_xlim([date2[0], date2[-1]])
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
# Make a tight layout
fig.tight_layout()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list | 5,353,398 |
def write_basissets(inp, basissets, folder):
"""Writes the unified BASIS_SETS file with the used basissets"""
_write_gdt(inp, basissets, folder, "BASIS_SET_FILE_NAME", "BASIS_SETS") | 5,353,399 |