id (string, 1-8 chars) | text (string, 6-1.05M chars) | dataset_id (string, 1 distinct value) |
---|---|---|
5127062
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
功能实现:返回列表中除第一个元素外的所有元素。
解读:
如果列表的长度大于1,则使用切片表示法返回最后一个元素。
否则,返回整个列表。
"""
def tail(lst):
return lst[1:] if len(lst) > 1 else lst
# Examples
print(tail([1, 2, 3]))
print(tail([1]))
# output:
# [2, 3]
# [1]
|
StarcoderdataPython
|
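A quick note on the tail helper above: the length check only matters for single-element lists, since plain slicing already handles the empty case. A minimal sketch, assuming tail as defined above (inputs are illustrative):

print([1][1:])    # [] -- a bare slice on a one-element list drops the element
print(tail([1]))  # [1] -- the guard returns the list unchanged instead
print(tail([]))   # [] -- empty lists fall through to the else branch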
11268924
|
"""PyMC4."""
from . import utils
from .coroutine_model import Model, model
from .scopes import name_scope, variable_name
from . import coroutine_model
from . import distributions
from . import flow
from .flow import (
    evaluate_model_transformed,
    evaluate_model,
    evaluate_model_posterior_predictive,
    evaluate_meta_model,
    evaluate_meta_posterior_predictive_model,
)
from . import inference
from .distributions import *
from .forward_sampling import sample_prior_predictive, sample_posterior_predictive
from .inference.sampling import sample
from .mcmc.samplers import *
from . import gp
from . import mcmc
from .variational import *
__version__ = "4.0a2"
|
StarcoderdataPython
|
6495656
|
#!/usr/bin/env python
from __future__ import print_function
import sys, os, glob, operator, time
if sys.version_info < (2, 7):
    sys.exit("ERROR: need python 2.7 or later for dep.py")

if __name__ == "__main__":
    dt = float(sys.argv[3]) - float(sys.argv[2])
    hours, rem = divmod(dt, 3600)
    minutes, seconds = divmod(rem, 60)
    dtstr = str(int(seconds)) + " seconds"
    if minutes > 0:
        dtstr = str(int(minutes)) + " minutes " + dtstr
    if hours > 0:
        dtstr = str(int(hours)) + " hours " + dtstr
    print("Total build time is", dtstr)
    print("More details are available at", sys.argv[1])
    log_file_name = sys.argv[1]
    log_file_dir = os.path.dirname(log_file_name)
    log_files = glob.glob(os.path.join(log_file_dir, "*.log"))
    build_time_results = {}
    for logf in log_files:
        f = open(logf, 'r')
        t0 = float(f.readline())
        t1 = float(f.readline())
        build_time_results[os.path.basename(logf)[:-4]] = t1 - t0
        f.close()
    f = open(log_file_name, 'w')
    f.write("# (File Name, Build Time in seconds)\n")
    for it in sorted(build_time_results.items(), key=operator.itemgetter(1), reverse=True):
        f.write(str(it) + '\n')
    f.close()
|
StarcoderdataPython
|
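For reference, the dep.py script above expects every per-target *.log file beside the summary log to contain two lines, the start and end epoch timestamps of that target, and it is invoked with the summary path plus the overall start/end times. A hedged sketch of the expected inputs (paths and timestamps are invented):

# each <target>.log holds two lines: start epoch, then end epoch
with open("build/foo.log", "w") as fh:
    fh.write("100.0\n163.5\n")
# invocation: python dep.py build/summary.txt 100.0 400.0
# -> prints "Total build time is 5 minutes 0 seconds" and writes the per-target
#    durations into build/summary.txt, longest first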
6570909
|
<reponame>devkral/oscar-web-payments
default_app_config = "demo.apps.OscarDemoConfig"
|
StarcoderdataPython
|
3455341
|
import os
import sys
import bioframe
import click
import cooler
import cooltools
import cooltools.expected
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import pairlib
import pairlib.scalings
import pairtools
from diskcache import Cache
# peri-centromeric/-telomeric region to remove from both sides of chromosomal arms
cache = Cache("~/.hic.cache")
@click.group()
def cli():
pass
def plot_scalings(
scalings, avg_trans_levels, plot_slope, label_subplots, labels, title, out_path
):
"""
Plot scaling curves from a list of (bin, pair frequencies) tuples.
"""
fig = plt.figure(constrained_layout=False)
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 2], wspace=0.4, figure=fig)
scale_ax = fig.add_subplot(gs[0, 0])
slope_ax = fig.add_subplot(gs[0, 1]) if plot_slope else None
for idx, scaling in enumerate(scalings):
dist_bin_mids, pair_frequencies = scaling
scale_ax.loglog(dist_bin_mids, pair_frequencies, label=labels[idx], lw=1)
if avg_trans_levels:
scale_ax.axhline(
avg_trans_levels[idx],
ls="dotted",
c=scale_ax.get_lines()[-1].get_color(),
lw=1,
)
if slope_ax is not None:
slope_ax.semilogx(
np.sqrt(dist_bin_mids.values[1:] * dist_bin_mids.values[:-1]),
np.diff(np.log10(pair_frequencies.values))
/ np.diff(np.log10(dist_bin_mids.values)),
label=labels[idx],
lw=1,
)
scale_ax.grid(lw=0.5, color="gray")
scale_ax.set_aspect(1.0)
scale_ax.set_xlim(1e3, 1e6)
scale_ax.set_ylim(0.0001, 2.0)
scale_ax.set_xlabel("genomic separation (bp)")
scale_ax.set_ylabel("contact frequency")
scale_ax.set_anchor("S")
handles, labels = scale_ax.get_legend_handles_labels()
if avg_trans_levels:
handles.append(Line2D([0], [0], color="black", lw=1, ls="dotted"))
labels.append("average trans")
scale_ax.legend(
handles, labels, loc="upper left", bbox_to_anchor=(1.1, 1.0), frameon=False
)
if slope_ax is not None:
slope_ax.grid(lw=0.5, color="gray")
slope_ax.set_xlim(1e3, 1e6)
slope_ax.set_ylim(-3.0, 0.0)
slope_ax.set_yticks(np.arange(-3, 0.5, 0.5))
slope_ax.set_aspect(1.0)
slope_ax.set_xlabel("distance (bp)")
slope_ax.set_ylabel("log-log slope")
slope_ax.set_anchor("S")
if label_subplots:
scale_ax.set_title("(a)")
slope_ax.set_title("(b)")
fig.suptitle(title)
plt.savefig(
out_path, dpi=300, bbox_inches="tight", facecolor="white", transparent=False
)
plt.show()
plt.close()
def open_pairs_file(path: str) -> pd.DataFrame:
header, pairs_body = pairtools._headerops.get_header(
pairtools._fileio.auto_open(path, "r")
)
cols = pairtools._headerops.extract_column_names(header)
return pd.read_csv(pairs_body, header=None, names=cols, sep="\t")
def calc_pair_freqs(scalings, trans_levels, calc_avg_trans, normalized):
dist_bin_mids = np.sqrt(scalings.min_dist * scalings.max_dist)
pair_frequencies = scalings.n_pairs / scalings.n_bp2
mask = pair_frequencies > 0
avg_trans = None
if calc_avg_trans:
avg_trans = (
trans_levels.n_pairs.astype("float64").sum()
/ trans_levels.np_bp2.astype("float64").sum()
)
if normalized:
norm_fact = pairlib.scalings.norm_scaling_factor(
dist_bin_mids, pair_frequencies, anchor=int(1e3)
)
pair_frequencies = pair_frequencies / norm_fact
avg_trans = avg_trans / norm_fact if avg_trans else None
return (dist_bin_mids[mask], pair_frequencies[mask]), avg_trans
@cli.command("compute-scaling")
@click.argument("pairs_paths", nargs=-1, type=click.Path(exists=True), required=True)
@click.option(
"--out",
"-o",
"out_path",
required=True,
type=click.Path(),
help="The path to the scaling plot output file.",
)
@click.option(
"--region",
"-r",
"region",
type=str,
help="UCSC-style coordinates of the genomic region to calculate scalings for.",
)
@click.option(
"--exclude-chrom",
"exclude_chroms",
type=str,
multiple=True,
help='Exclude the specified chromosome from the scalings. Optionally add ":left" or ":right" to the argument to only exclude the corresponding arm of the chromosome.',
)
@click.option(
"--exclude-end-regions",
"exclude_end_regions",
type=int,
default=10000,
help="Centromeric and telomeric regions of chromosomal arms in bp to exclude from scalings. Default is 10,000.",
)
@click.option(
"--assembly",
"-a",
"assembly",
type=str,
nargs=1,
help="Assembly name to be used for downloading chromsizes.",
)
@click.option(
"--centromeres",
"centromeres_path",
type=click.Path(exists=True),
help="Path to a text file containing centromere start and end positions. If not provided, a download will be attempted.",
)
@click.option(
"--normalized",
"-n",
is_flag=True,
help="Normalize the contact frequency up to 1.0.",
)
@click.option(
"--split-arms",
is_flag=True,
default=False,
help="Plot scalings of left and right chromosomal arms per chromosome, per pairs file. Caching is disabled for this option.",
)
@click.option(
"--plot-slope",
is_flag=True,
default=False,
help="Plot the slopes of the scaling curves.",
)
@click.option(
"--label-subplots",
is_flag=True,
default=False,
help="Label subplots as (a) and (b). Disabled by default.",
)
@click.option(
"--show-average-trans", is_flag=True, help="Show average trans contact frequency."
)
@click.option(
"--label",
"-l",
"labels",
type=str,
multiple=True,
help="One or more labels for the scaling plot curves.",
)
@click.option(
"--title", "-t", "title", type=str, nargs=1, help="Title text for the scaling plot."
)
@click.option(
"--no-cache",
is_flag=True,
help="Do not use cached values. Caching is enabled by default.",
)
def compute_scaling(
pairs_paths,
out_path,
region,
exclude_chroms,
exclude_end_regions,
assembly,
centromeres_path,
split_arms,
normalized,
plot_slope,
label_subplots,
show_average_trans,
labels,
title,
no_cache,
):
"""
Compute and plot contact frequency vs genomic separation curves for one or more pairs files.
"""
labels = list(labels)
# parse left/right arm parameter of chromosomes to exclude
exclude_chroms = [chrom.split(":") for chrom in exclude_chroms]
chromsizes = bioframe.fetch_chromsizes(assembly, filter_chroms=False, as_bed=True)
chromsizes = chromsizes[~chromsizes.chrom.isin(exclude_chroms)]
if centromeres_path:
centromeres = {}
with open(centromeres_path) as file:
for line in file:
cols = line.split(" ")
centromeres[cols[0]] = (int(cols[1]) + int(cols[2])) // 2
else:
centromeres = bioframe.fetch_centromeres(assembly)
centromeres.set_index("chrom", inplace=True)
centromeres = centromeres.mid.to_dict()
if len(labels) != 0 and len(pairs_paths) != len(labels) and not split_arms:
sys.exit("Please provide as many labels as pairs paths.")
if region:
regions = bioframe.select(chromsizes, region).reset_index()
else:
# use chromosomal arms as separate regions if no regions are specified
arms = bioframe.split(chromsizes, centromeres)
# remove user-excluded chromosomes/arms
for chrom in exclude_chroms:
if len(chrom) == 1:
# no arm specified, remove entire chromosome
arms = arms[arms.chrom != chrom[0]]
elif chrom[1] == "left":
# remove specified chromosome with start == 0 (left arm)
arms = arms[~((arms.chrom == chrom[0]) & (arms.start == 0))]
elif chrom[1] == "right":
# remove specified chromosome with start != 0 (right arm)
arms = arms[~((arms.chrom == chrom[0]) & (arms.start != 0))]
# trim exclude_end_regions bp from each side of an arm to drop centromere and telomere regions
arms = bioframe.ops.expand(arms, -exclude_end_regions)
# drop arms with non-positive length after trimming the end regions
regions = arms[arms.start < arms.end].reset_index()
all_scalings = []
all_avg_trans_levels = []
for idx, path in enumerate(pairs_paths):
cis_scalings, avg_trans = None, None
if split_arms:
# calculate scalings per arm per chromosome
cis_scalings, trans_levels = pairlib.scalings.compute_scaling(
path,
regions,
chromsizes,
dist_range=(int(1e1), int(1e9)),
n_dist_bins=128,
chunksize=int(1e7),
)
# remove unassigned pairs with start/end positions < 0
cis_scalings = cis_scalings[
(cis_scalings.start1 > 0)
& (cis_scalings.end1 > 0)
& (cis_scalings.start2 > 0)
& (cis_scalings.end2 > 0)
]
sc_agg = (
cis_scalings.groupby(["chrom1", "start1", "min_dist", "max_dist"])
.agg({"n_pairs": "sum", "n_bp2": "sum"})
.reset_index()
)
avail_chroms = set(sc_agg.chrom1)
for chrom in avail_chroms:
# calculate scalings for left/right arms (left arms start at position 0 + exclude_end_regions)
sc_left, avg_trans_left = calc_pair_freqs(
sc_agg[
(sc_agg.chrom1 == chrom)
& (sc_agg.start1 == exclude_end_regions)
],
trans_levels,
show_average_trans,
normalized,
)
sc_right, avg_trans_right = calc_pair_freqs(
sc_agg[
(sc_agg.chrom1 == chrom)
& (sc_agg.start1 != exclude_end_regions)
],
trans_levels,
show_average_trans,
normalized,
)
dir_path = os.path.join(
os.path.dirname(out_path), os.path.basename(path)
)
if not os.path.exists(dir_path):
os.mkdir(dir_path)
chrom_path = os.path.join(
dir_path, "_".join((chrom, os.path.basename(out_path)))
)
(
plot_scalings(
scalings=[sc_left, sc_right],
avg_trans_levels=[avg_trans_left, avg_trans_right],
plot_slope=plot_slope,
labels=["left", "right"],
title=chrom,
out_path=chrom_path,
)
)
else:
if not no_cache:
# get cached values
cached = cache.get(path)
if cached is not None:
cis_scalings = (
cached["cis_scalings"]
if cached["normalized"] == normalized
else None
)
avg_trans = cached["avg_trans"]
if (
no_cache
or cis_scalings is None
or (avg_trans is None and show_average_trans)
):
print(
f"Computing scalings for file {idx + 1}/{len(pairs_paths)} ...",
end="\r",
)
# caching disabled or no cached values found
cis_scalings, trans_levels = pairlib.scalings.compute_scaling(
path,
regions,
chromsizes,
dist_range=(int(1e1), int(1e9)),
n_dist_bins=128,
chunksize=int(1e7),
)
# remove unassigned pairs with start/end positions < 0
cis_scalings = cis_scalings[
(cis_scalings.start1 >= 0)
& (cis_scalings.end1 >= 0)
& (cis_scalings.start2 >= 0)
& (cis_scalings.end2 >= 0)
]
sc_agg = (
cis_scalings.groupby(["min_dist", "max_dist"])
.agg({"n_pairs": "sum", "n_bp2": "sum"})
.reset_index()
)
cis_scalings, avg_trans = calc_pair_freqs(
sc_agg, trans_levels, show_average_trans, normalized
)
if not no_cache:
cache.set(
path,
{
"cis_scalings": cis_scalings,
"avg_trans": avg_trans,
"normalized": normalized,
},
)
else:
print(
f"Retrieved cached values for file {idx + 1}/{len(pairs_paths)}.",
end="\r",
)
# use file names as labels if labels have not been provided
labels.append(os.path.basename(path)) if len(labels) < len(pairs_paths) else None
all_scalings.append(cis_scalings)
all_avg_trans_levels.append(avg_trans) if avg_trans is not None else None
if len(all_scalings) > 0 and not split_arms:
plot_scalings(
all_scalings,
all_avg_trans_levels,
plot_slope,
label_subplots,
labels,
title,
out_path,
)
@cli.command("compute-trans-scaling")
@click.argument("cooler_path", nargs=1, type=click.Path(exists=True), required=True)
@click.option(
"--out",
"-o",
"out_path",
type=click.Path(),
help="The path to the scaling plot output file.",
)
@click.option(
"--resolution",
nargs=1,
type=int,
default=1000,
help="Resolution of the Hi-C data in a .mcool file.",
)
@click.option(
"--region1",
"-r1",
"regions1",
type=str,
multiple=True,
help="The first region of interactions.",
)
@click.option(
"--region2",
"-r2",
"regions2",
type=str,
multiple=True,
help="The second region of interactions.",
)
@click.option(
"--label",
"-l",
"labels",
type=str,
multiple=True,
help="One or more labels for the interaction frequency curves.",
)
@click.option(
"--title", "-t", "title", type=str, nargs=1, help="Title text for the plot."
)
def compute_trans_scaling(
cooler_path, out_path, resolution, regions1, regions2, labels, title
):
chromsizes = bioframe.fetch_chromsizes("sacCer3", filter_chroms=False, as_bed=True)
avg_contacts = cooltools.expected.diagsum_asymm(
clr=cooler.Cooler("::/resolutions/".join((cooler_path, str(resolution)))),
supports1=list(regions1),
supports2=list(regions2),
transforms={"balanced": lambda p: p["count"] * p["weight1"] * p["weight2"]},
)
avg_contacts["balanced.avg"] = avg_contacts["balanced.sum"] / avg_contacts(
"n_valid"
)
print("...")
@cli.command("clear-cache")
def clear_cache():
"""
Erase all cached values.
"""
cache.clear()
print("Cache cleared.")
if __name__ == "__main__":
np.seterr(divide="ignore", invalid="ignore")
cli()
|
StarcoderdataPython
|
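Since the compute-scaling command above is configured entirely through click decorators, a hedged invocation sketch may help; the module name, pairs files, and assembly below are placeholders, not taken from the original:

# python scalings_cli.py compute-scaling wt.pairs.gz mutant.pairs.gz \
#     --assembly sacCer3 --out scalings.png \
#     --label wt --label mutant \
#     --normalized --plot-slope \
#     --exclude-chrom chrM --exclude-end-regions 10000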
5167637
|
<reponame>gmachadoads/mach556
import sqlite3
from sqlite3 import Error
from tkinter import *
from tkinter import messagebox
import os
c = os.path.dirname(__file__)
nomeArquivo = c+'\\nomes.txt'
def ConexaoBanco():
caminho = r'C:\Users\rjgug\OneDrive\Documentos\Python MACH556\MACH556.db'
con = None
try:
con = sqlite3.connect(caminho)
except Error as ex:
print('Erro na conexão com BD.')
print(ex)
return con
vcon = ConexaoBanco()
def inserir(conexao, sql):
try:
c = conexao.cursor()
c.execute(sql)
conexao.commit()
print('Registro inserido.')
except Error as ex:
print('Erro na inserção de dados.')
print(ex)
def gravarDados():
if tb_cpf.get() != '':
vcpf = tb_cpf.get()
vnome = tb_nome.get()
vtel = tb_tel.get()
vend = tb_end.get()
vnum = tb_num.get()
vbairro = tb_bairro.get()
vemail = tb_email.get()
vobs = tb_obs.get('1.0', END)
vsql = 'INSERT INTO tb_clientes' \
' (CPF, NOME, TELEFONE, ENDERECO, NUMERO, BAIRRO, EMAIL, OBS)' \
'VALUES("' + vcpf + '", "' + vnome + '", "' + vtel + '", "' + vend + '", "' + vnum + '", "' + vbairro + '", "' + vemail + '", "' + vobs + '")'
inserir(vcon, vsql)
tb_cpf.delete(0, END)
tb_nome.delete(0, END)
tb_tel.delete(0, END)
tb_end.delete(0, END)
tb_num.delete(0, END)
tb_bairro.delete(0, END)
tb_email.delete(0, END)
tb_obs.delete('1.0', END)
print('Dados gravados!')
else:
print('ERRO!')
def msg():
messagebox.showinfo(title='BD', message='Dados gravados!')
app = Tk()
app.title('MACH556 Informática')
app.geometry('700x550')
app.configure(bg='#585858')
imgLogo = PhotoImage(file=c+'\\Logo.gif')
l_logo = Label(app, image=imgLogo)
l_logo.place(x=80, y=5)
Label(app, text='CPF', bg='#585858', fg='#F2F2F2', anchor=W).place(x=10, y=110, width=100, height=20)
tb_cpf = Entry(app)
tb_cpf.place(x=10, y=130, width=200, height=20)
Label(app, text='Nome', bg='#585858', fg='#F2F2F2', anchor=W).place(x=230, y=110, width=100, height=20)
tb_nome = Entry(app)
tb_nome.place(x=230, y=130, width=200, height=20)
Label(app, text='Telefone', bg='#585858', fg='#F2F2F2', anchor=W).place(x=460, y=110, width=100, height=20)
tb_tel = Entry(app)
tb_tel.place(x=460, y=130, width=200, height=20)
Label(app, text='Endereço', bg='#585858', fg='#F2F2F2', anchor=W).place(x=10, y=160, width=100, height=20)
tb_end = Entry(app)
tb_end.place(x=10, y=180, width=200, height=20)
Label(app, text='Numero', bg='#585858', fg='#F2F2F2', anchor=W).place(x=230, y=160, width=100, height=20)
tb_num = Entry(app)
tb_num.place(x=230, y=180, width=200, height=20)
Label(app, text='Bairro', bg='#585858', fg='#F2F2F2', anchor=W).place(x=460, y=160, width=100, height=20)
tb_bairro = Entry(app)
tb_bairro.place(x=460, y=180, width=200, height=20)
Label(app, text='E-Mail', bg='#585858', fg='#F2F2F2', anchor=W).place(x=10, y=210, width=100, height=20)
tb_email = Entry(app)
tb_email.place(x=10, y=230, width=300, height=20)
Label(app, text='OBS', bg='#585858', fg='#F2F2F2', anchor=W).place(x=10, y=260, width=100, height=20)
tb_obs = Text(app)
tb_obs.place(x=10, y=280, width=650, height=100)
Button(app, text='GRAVAR', command=gravarDados, bg='#fff').place(x=300, y=450, width=100, height=50)
app.mainloop()
|
StarcoderdataPython
|
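One caveat about gravarDados above: the INSERT statement is assembled by string concatenation, so any quote in a form field breaks the query and the code is open to SQL injection. A minimal sketch of the same insert using sqlite3 placeholders (table and column names are taken from the snippet; the helper name is hypothetical):

def inserir_parametrizado(conexao, dados):
    # dados is an 8-tuple: (cpf, nome, telefone, endereco, numero, bairro, email, obs)
    sql = (
        "INSERT INTO tb_clientes "
        "(CPF, NOME, TELEFONE, ENDERECO, NUMERO, BAIRRO, EMAIL, OBS) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
    )
    cur = conexao.cursor()
    cur.execute(sql, dados)
    conexao.commit()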
11398113
|
# Step 1 - Authenticate
consumer_key= 'dNPO3EUKQLJSpofg5vm8oE1Mu'
consumer_secret= '<KEY>'
access_token='<KEY>'
access_token_secret='<KEY>'
from twitter import Api
api = Api(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_token,
access_token_secret=access_token_secret)
results = api.GetSearch( raw_query="max_id=1244222306924351487&q=corona&count=10&include_entities=1&result_type=recent" , return_json=True )
print(len(results['statuses']))
# print(type(results[0]))
print(results )
# for key , value in results[0].AsDict().items() :
# print(key , value ,"\n" )
|
StarcoderdataPython
|
9749555
|
<reponame>nishithshowri006/bonk-slaps
from django.apps import AppConfig
class BonkConfig(AppConfig):
    name = 'bonk'
|
StarcoderdataPython
|
1702789
|
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for instantiating rewriters.
NOTE: For your rewriter to be instantiated, please include it as an import and
a constant for ease of invoking the new rewriter.
"""
import importlib
from typing import Text
from tfx.components.trainer.rewriting import rewriter
TFLITE_REWRITER = 'TFLiteRewriter'
TFJS_REWRITER = 'TFJSRewriter'
def _load_tflite_rewriter():
    importlib.import_module('tfx.components.trainer.rewriting.tflite_rewriter')

def _load_tfjs_rewriter():
    try:
        importlib.import_module('tensorflowjs')
    except ImportError as e:
        raise RuntimeError(
            'tensorflowjs is not installed. Please install [tfjs] extra '
            'dependencies to use tfjs_rewriter.') from e
    else:
        importlib.import_module('tfx.components.trainer.rewriting.tfjs_rewriter')

class _RewriterFactory:
    """Factory class for rewriters."""
    _LOADERS = {
        TFLITE_REWRITER.lower(): _load_tflite_rewriter,
        TFJS_REWRITER.lower(): _load_tfjs_rewriter,
    }
    _loaded = set()

    @classmethod
    def _maybe_load_public_rewriter(cls, lower_rewriter_type: Text):
        if (lower_rewriter_type in cls._LOADERS
                and lower_rewriter_type not in cls._loaded):
            cls._LOADERS[lower_rewriter_type]()
            cls._loaded.add(lower_rewriter_type)

    @classmethod
    def get_rewriter_cls(cls, rewriter_type: Text):
        rewriter_type = rewriter_type.lower()
        cls._maybe_load_public_rewriter(rewriter_type)
        for subcls in rewriter.BaseRewriter.__subclasses__():
            if subcls.__name__.lower() == rewriter_type:
                return subcls
        raise ValueError('Failed to find rewriter: {}'.format(rewriter_type))

def create_rewriter(rewriter_type: Text, *args,
                    **kwargs) -> rewriter.BaseRewriter:
    """Instantiates a new rewriter with the given type and constructor arguments.
    Args:
      rewriter_type: The rewriter subclass to instantiate (can be all lowercase).
      *args: Positional initialization arguments to pass to the rewriter.
      **kwargs: Keyword initialization arguments to pass to the rewriter.
    Returns:
      The instantiated rewriter.
    Raises:
      ValueError: If unable to instantiate the rewriter.
    """
    return _RewriterFactory.get_rewriter_cls(rewriter_type)(*args, **kwargs)
|
StarcoderdataPython
|
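A hedged usage sketch of the factory above; it assumes the module is importable as rewriter_factory and that the concrete TFLiteRewriter can be constructed without arguments (any constructor kwargs would simply be forwarded by create_rewriter):

from tfx.components.trainer.rewriting import rewriter, rewriter_factory

rw = rewriter_factory.create_rewriter(rewriter_factory.TFLITE_REWRITER)
assert isinstance(rw, rewriter.BaseRewriter)
# the lookup lower-cases the name, so 'tfliterewriter' resolves to the same class
assert type(rewriter_factory.create_rewriter('tfliterewriter')) is type(rw)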
32840
|
#
# See top-level LICENSE.rst file for Copyright information
#
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from ..defs import (task_name_sep, task_state_to_int, task_int_to_state)
from ...util import option_list
from ...io import findfile
from .base import (BaseTask, task_classes)
from desiutil.log import get_logger
import sys,re,os,glob
import numpy as np
# NOTE: only one class in this file should have a name that starts with "Task".
class TaskPSFNight(BaseTask):
"""Class containing the properties of one PSF combined night task.
"""
def __init__(self):
super(TaskPSFNight, self).__init__()
# then put int the specifics of this class
# _cols must have a state
self._type = "psfnight"
self._cols = [
"night",
"band",
"spec",
"state"
]
self._coltypes = [
"integer",
"text",
"integer",
"integer"
]
# _name_fields must also be in _cols
self._name_fields = ["night","band","spec"]
self._name_formats = ["08d","s","d"]
def _paths(self, name):
"""See BaseTask.paths.
"""
props = self.name_split(name)
camera = "{}{}".format(props["band"], props["spec"])
return [ findfile("psfnight", night=props["night"],
camera=camera, groupname=None, nside=None, band=props["band"],
spectrograph=props["spec"]) ]
def _deps(self, name, db, inputs):
"""See BaseTask.deps.
"""
return dict()
def _run_max_procs(self):
# This is a serial task.
return 1
def _run_time(self, name, procs, db):
# Run time on one proc on machine with scale factor == 1.0
return 2.0
def _run_defaults(self):
"""See BaseTask.run_defaults.
"""
return {}
def _option_dict(self, name, opts):
"""Build the full list of options.
This includes appending the filenames and incorporating runtime
options.
"""
from .base import task_classes, task_type
options = OrderedDict()
options["output"] = self.paths(name)[0]
# look for psf for this night on disk
options["input"] = []
props = self.name_split(name)
camera = "{}{}".format(props["band"], props["spec"])
dummy_expid = 99999999
template_input = findfile("psf", night=props["night"], expid=dummy_expid,
camera=camera,
band=props["band"],
spectrograph=props["spec"])
template_input = template_input.replace("{:08d}".format(dummy_expid),"????????")
options["input"] = glob.glob(template_input)
return options
def _option_list(self, name, opts):
"""Build the full list of options.
This includes appending the filenames and incorporating runtime
options.
"""
return option_list(self._option_dict(name,opts))
def _run_cli(self, name, opts, procs, db):
"""See BaseTask.run_cli.
"""
optlist = self._option_list(name, opts)
com = "# command line for psfnight not implemented"
return com
def _run(self, name, opts, comm, db):
"""See BaseTask.run.
"""
from ...scripts import specex
optdict = self._option_dict(name, opts)
specex.mean_psf(optdict["input"], optdict["output"])
return
def getready(self, db, name, cur):
"""Checks whether dependencies are ready"""
log = get_logger()
# look for the state of psf with same night,band,spectro
props = self.name_split(name)
cmd = "select state from psf where night={} and band='{}' and spec={}".format(props["night"],props["band"],props["spec"])
cur.execute(cmd)
states = np.array([ x for (x,) in cur.fetchall() ])
log.debug("states={}".format(states))
# psfnight ready if all psf from the night have been processed, and at least one is done (failures are allowed)
n_done = np.sum(states==task_state_to_int["done"])
n_failed = np.sum(states==task_state_to_int["failed"])
ready = (n_done > 0) & ( (n_done + n_failed) == states.size )
if ready :
self.state_set(db=db,name=name,state="ready",cur=cur)
def postprocessing(self, db, name, cur):
"""For successful runs, postprocessing on DB"""
# run getready for all extraction with same night,band,spec
props = self.name_split(name)
log = get_logger()
tt = "traceshift"
cmd = "select name from {} where night={} and band='{}' and spec={} and state=0".format(tt,props["night"],props["band"],props["spec"])
cur.execute(cmd)
tasks = [ x for (x,) in cur.fetchall() ]
log.debug("checking {}".format(tasks))
for task in tasks :
task_classes[tt].getready( db=db,name=task,cur=cur)
|
StarcoderdataPython
|
4804463
|
<gh_stars>1-10
# coding=utf-8
from sentry_sdk import init as sentry_init
from sentry_sdk.integrations import sqlalchemy as sentry_sqlalchemy
from sqlalchemy.engine import create_engine
from app import settings
from ._base import (
Base,
session,
enable_time_logging,
)
from .task import Task
from .user import User
config = settings.load_config(
(settings.SQLAlchemySettings, settings.SentrySetting,)
)
if config.QUERY_TIME_LOGGING:
enable_time_logging(config.QUERY_TIME_THRESHOLD)
engine = create_engine( # pylint: disable=C0103
config.SQLALCHEMY_DATABASE_URI,
convert_unicode=True,
echo=bool(config.SQLALCHEMY_ENGINE_LOG)
)
session.configure(bind=engine)
sentry_init(
dsn=config.SENTRY_DSN,
integrations=(sentry_sqlalchemy.SqlalchemyIntegration(),)
)
|
StarcoderdataPython
|
6550053
|
<filename>lib/python/treadmill/traits.py<gh_stars>1-10
"""Server traits.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import plugin_manager
from treadmill import sysinfo
_LOGGER = logging.getLogger(__name__)
def format_traits(code, value):
    """Format traits as list of names.
    """
    result = []
    for trait in code:
        if value & code[trait]:
            result.append(trait)
    result.sort()
    return ','.join(result)

def detect(traits):
    """Detect traits using plugins.
    """
    result = []
    for trait in traits:
        try:
            plugin = plugin_manager.load('treadmill.server.traits', trait)
            if plugin():
                result.append(trait)
        except Exception:  # pylint: disable=W0703
            _LOGGER.exception('Error processing trait plugin: %s', trait)
    return result

def create_code(traits):
    """Assign bits to list of traits.
    """
    if not traits:
        return {}
    result = {}
    code = 1
    for trait in traits:
        result[trait] = code
        code = code << 1
    return result

def encode(code, traits):
    """Encode the list of traits into a number.
    """
    result = 0
    for trait in traits:
        if trait in code:
            result |= code[trait]
        else:
            _LOGGER.error('Unknown trait %s', trait)
    return result

def has_sse4():
    """Return true if current cpu has sse4 flag.
    """
    flags = sysinfo.cpu_flags()
    return 'sse4_1' in flags and 'sse4_2' in flags

def has_rdtscp():
    """Return true if current cpu has rdtscp flag.
    """
    flags = sysinfo.cpu_flags()
    return 'rdtscp' in flags
|
StarcoderdataPython
|
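A worked example of the bit-mask round trip implemented by create_code, encode, and format_traits above (the trait names are made up for illustration):

code = create_code(['gpu', 'infiniband', 'ssd'])
# -> {'gpu': 1, 'infiniband': 2, 'ssd': 4}, one bit per trait
value = encode(code, ['gpu', 'ssd'])
# -> 5, i.e. 1 | 4
print(format_traits(code, value))
# -> 'gpu,ssd' (matching names, sorted alphabetically)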
3412809
|
#!/usr/bin/env python3
import timeit
import numpy as np
from pwtools.crys import Trajectory
from pwtools import crys, num, timer
rand = np.random.rand
# example session on 4-core box, _flob compiled w/ OpenMP (make gfortran-omp)
# ---------------------------------------------------------------------------
#
# $ export OMP_NUM_THREADS=1
# --TagTimer--: py_bigmen: time: 3.56550598145
# --TagTimer--: py_loop: time: 0.456802129745
# --TagTimer--: f: time: 0.437112092972
#
# $ export OMP_NUM_THREADS=2
# --TagTimer--: f: time: 0.206064939499
#
# $ export OMP_NUM_THREADS=4
# --TagTimer--: f: time: 0.125560998917
def pydist_bigmem(traj, pbc=True):
# Pure numpy version w/ big temp arrays. Also slowest.
#
# (nstep, natoms, natoms, 3)
distvecs_frac = (
traj.coords_frac[:, :, None, :] - traj.coords_frac[:, None, :, :]
)
if pbc:
distvecs_frac = crys.min_image_convention(distvecs_frac)
distvecs = np.empty((nstep, natoms, natoms, 3))
for ii in range(traj.nstep):
distvecs[ii, ...] = np.dot(distvecs_frac[ii, ...], traj.cell[ii, ...])
# (nstep, natoms, natoms)
dists = np.sqrt((distvecs ** 2.0).sum(axis=-1))
return dists
def pydist_loop(traj, pbc=True):
dists = np.empty((nstep, natoms, natoms))
for ii, struct in enumerate(traj):
dists[ii, ...] = crys.distances(struct, pbc=pbc)
return dists
def fdist(traj):
return crys.distances_traj(traj, pbc=True)
if __name__ == "__main__":
natoms = 100
nstep = 1000
cell = rand(nstep, 3, 3)
stress = rand(nstep, 3, 3)
forces = rand(nstep, natoms, 3)
coords_frac = rand(nstep, natoms, 3)
symbols = ["H"] * natoms
traj = Trajectory(coords_frac=coords_frac, cell=cell, symbols=symbols)
##assert np.allclose(pydist_bigmem(traj), pydist_loop(traj))
##print("... ok")
##assert np.allclose(pydist_loop(traj), fdist(traj))
##print("... ok")
globs = globals()
statements = [
"pydist_bigmem(traj)",
"pydist_loop(traj)",
"fdist(traj)",
]
for stmt in statements:
number = 1
times = np.array(
timeit.repeat(stmt, globals=globs, number=number, repeat=5)
)
print(
f"{number} loops: {times.mean():6.3f} +- {times.std():.4f}: {stmt}"
)
|
StarcoderdataPython
|
4990884
|
import os
import sys
from PIL import Image
def jpg(filename: str, savename: str = "resized.jpg", width: int = 1024, height: int = 1024):
    if not filename.lower().endswith(".jpg"):
        filename += ".jpg"
    if not savename.lower().endswith(".jpg"):
        savename += ".jpg"
    try:
        imgjpg = Image.open(filename)
    except FileNotFoundError:
        sys.exit("No file found in " + str(os.getcwd()))
    except Exception as e:
        sys.exit("Error: " + str(e))
    imgjpg_resized = imgjpg.resize((width, height))
    imgjpg_resized.save(savename)

def jpeg(filename: str, savename: str = "resized.jpeg", width: int = 1024, height: int = 1024):
    if not filename.lower().endswith(".jpeg"):
        filename += ".jpeg"
    if not savename.lower().endswith(".jpeg"):
        savename += ".jpeg"
    try:
        imgjpeg = Image.open(filename)
    except FileNotFoundError:
        sys.exit("No file found in " + str(os.getcwd()))
    except Exception as e:
        sys.exit("Error: " + str(e))
    imgjpeg_resized = imgjpeg.resize((width, height))
    imgjpeg_resized.save(savename)

def png(filename: str, savename: str = "resized.png", width: int = 1024, height: int = 1024):
    if not filename.lower().endswith(".png"):
        filename += ".png"
    if not savename.lower().endswith(".png"):
        savename += ".png"
    try:
        imgpng = Image.open(filename)
    except FileNotFoundError:
        sys.exit("No file found in " + str(os.getcwd()))
    except Exception as e:
        sys.exit("Error: " + str(e))
    imgpng_resized = imgpng.resize((width, height))
    imgpng_resized.save(savename)

for filename in os.listdir(os.getcwd()):
    if filename.lower().endswith("jpg"):
        jpg(filename=filename, savename=filename, width=1024, height=1024)
    elif filename.lower().endswith("jpeg"):
        jpeg(filename=filename, savename=filename, width=1024, height=1024)
    elif filename.lower().endswith("png"):
        png(filename=filename, savename=filename, width=1024, height=1024)
|
StarcoderdataPython
|
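The three resize helpers above differ only in the extension they enforce; a hedged sketch of a single generic version with otherwise unchanged behaviour (it reuses the os, sys, and PIL.Image imports from the snippet):

def resize_image(filename: str, savename: str, ext: str, width: int = 1024, height: int = 1024):
    # append the extension if it is missing, mirroring the per-format helpers
    if not filename.lower().endswith(ext):
        filename += ext
    if not savename.lower().endswith(ext):
        savename += ext
    try:
        img = Image.open(filename)
    except FileNotFoundError:
        sys.exit("No file found in " + str(os.getcwd()))
    except Exception as e:
        sys.exit("Error: " + str(e))
    img.resize((width, height)).save(savename)

# e.g. resize_image(name, name, ".png") replaces the png() helper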
3446056
|
<reponame>UCBerkeley-SCET/DataX-Berkeley
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:59:55 2020
@author: tobias.grab
"""
from skimage.transform import rotate
from skimage.transform import downscale_local_mean
import keras
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Flatten, Reshape
from keras.models import Model
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
from skimage import exposure
import matplotlib.pyplot as plt
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
from os import listdir
import cv2
import numpy as np
from scipy import spatial
import tensorflow_addons as tfa
import time
import pandas as pd
if __name__ == "__main__":
top_k=5
implemented_algos=["sift","surf","brisk","akaze","kaze"]
# implemented_algos=["sift"]
database_size=[100,200,400,800,1600,3200]
# database_size=[100,200]
data_path=r"C:\Users\tobias.grab\IWK_data\timetest"
files=listdir(data_path)
path_testimgs=r"C:\Users\tobias.grab\IWK_data\test_testimgs"
test_files=listdir(path_testimgs)
test_imgs=[cv2.imread(path_testimgs + "\\" + filename,0) for ind,filename in enumerate(test_files)]
alg="brisk"
##### Init matching Algorithm
for alg in implemented_algos:
print(alg)
creation=[]
mean_times=[]
std_times=[]
for database_size_now in database_size:
if alg=="sift":
ftAlg = cv2.xfeatures2d.SIFT_create()
category="old but gold"
if alg=="surf":
ftAlg = cv2.xfeatures2d.SURF_create()
category="old but gold"
if alg=="brisk":
ftAlg = cv2.BRISK_create()
category="old but gold"
if alg=="akaze":
ftAlg = cv2.AKAZE_create()
category="old but gold"
if alg=="kaze":
ftAlg = cv2.KAZE_create()
category="old but gold"
bf = cv2.BFMatcher()
start=time.time()
nrOfFiles=len(files)
precomputed_fts=[]
start_creation=time.time()
for file in files[:database_size_now]:
img_from_database = cv2.imread(data_path+'\\'+file,0)
precomputed_fts.append(ftAlg.detectAndCompute(img_from_database, None))
end_creation=time.time()-start_creation
creation.append(end_creation)
times=[]
for ind, test_img in enumerate(test_imgs):
name_testimg=test_files[ind]
(kps1, descs1) = ftAlg.detectAndCompute(test_img, None)
nrOfGoodPerImage=np.zeros([nrOfFiles,1])
DistPerImage=np.zeros([nrOfFiles,1])
bauteilnr=0
start_matching=time.time()
for kps2, descs2 in precomputed_fts:
matches = bf.knnMatch(descs1,descs2,k=2)
matchesMask = [[0,0] for i in range(len(matches))]
for i,(m,n) in enumerate(matches):
if m.distance < 0.75*n.distance:
matchesMask[i]=[1,0]
nrOfGoodPerImage[bauteilnr]=np.sum(matchesMask[:])
bauteilnr=bauteilnr+1
idx = np.argpartition(-nrOfGoodPerImage[:,0], top_k)
topk_scores=abs(nrOfGoodPerImage[idx[:top_k]])
stop_matching=elapsed=time.time()-start_matching
# print(stop_matching)
times.append(stop_matching)
mean_times.append(np.mean(times))
std_times.append(np.std(times))
print(alg)
print("mean\n")
[print(t) for t in mean_times]
print("std\n")
[print(t) for t in std_times]
print("create\n")
[print(t) for t in creation]
|
StarcoderdataPython
|
8147087
|
from robpy import kw, test
@test
def foo():
bar()
@kw
def bar():
print 'bar'
|
StarcoderdataPython
|
6603595
|
<filename>reader.py
# -*- coding: utf-8 -*-
"""
File Name: reader
Description : reads image information
Author : mick.yi
date: 2018/12/26
"""
import os
import codecs
import matplotlib.pyplot as plt
def get_mslm_infos(annotation_file, img_dir):
    """
    Read the mslm dataset information.
    :param annotation_file: path to the annotation file
    :param img_dir: directory containing the images
    :return:
    """
    with codecs.open(annotation_file, 'r', 'utf-8') as f:
        lines = f.readlines()
    img_infos = []
    label_set = set()
    for id, line in enumerate(lines):
        img_name, label = line.split('\t')
        img_info = dict()
        img_info['img_path'] = os.path.join(img_dir, img_name)
        img_info['label'] = label
        img_info['img_id'] = id  # add an image id number
        img_infos.append(img_info)
        label_set.add(label)
    return img_infos, label_set

def load_img(img_path):
    img = plt.imread(img_path)
    return img[:, :, :3]
|
StarcoderdataPython
|
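A hedged usage sketch for get_mslm_infos above: the annotation file is expected to hold one tab-separated "<image name>\t<label>" pair per line (file names below are placeholders). Note that the label keeps its trailing newline because the line is never stripped:

infos, labels = get_mslm_infos("annotations.txt", "images/")
print(infos[0])  # {'img_path': 'images/cat.jpg', 'label': 'cat\n', 'img_id': 0}
print(labels)    # set of the raw label strings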
11375576
|
r"""Compute action detection performance for the AVA dataset.
Please send any questions about this code to the Google Group ava-dataset-users:
https://groups.google.com/forum/#!forum/ava-dataset-users
Example usage:
python -O get_ava_performance.py \
-l ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt \
-g ava_val_v2.1.csv \
-e ava_val_excluded_timestamps_v2.1.csv \
-d your_results.csv
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import defaultdict
import csv
import logging
import pprint
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from ava import object_detection_evaluation
from ava import standard_fields
def print_time(message, start):
logging.info("==> %g seconds to %s", time.time() - start, message)
def make_image_key(video_id, timestamp):
"""Returns a unique identifier for a video id & timestamp."""
return "%s,%04d" % (video_id, int(timestamp))
def read_csv(csv_file, class_whitelist=None):
"""Loads boxes and class labels from a CSV file in the AVA format.
CSV file format described at https://research.google.com/ava/download.html.
Args:
csv_file: A file object.
class_whitelist: If provided, boxes corresponding to (integer) class labels
not in this set are skipped.
Returns:
boxes: A dictionary mapping each unique image key (string) to a list of
boxes, given as coordinates [y1, x1, y2, x2].
labels: A dictionary mapping each unique image key (string) to a list of
integer class labels, matching the corresponding box in `boxes`.
scores: A dictionary mapping each unique image key (string) to a list of
score values, matching the corresponding label in `labels`. If
scores are not provided in the csv, then they will default to 1.0.
"""
start = time.time()
boxes = defaultdict(list)
labels = defaultdict(list)
scores = defaultdict(list)
reader = csv.reader(csv_file)
for row in reader:
assert len(row) in [7, 8], "Wrong number of columns: " + str(row)
image_key = make_image_key(row[0], row[1])
x1, y1, x2, y2 = [float(n) for n in row[2:6]]
action_id = int(row[6])
if class_whitelist and action_id not in class_whitelist:
continue
score = 1.0
if len(row) == 8:
score = float(row[7])
boxes[image_key].append([y1, x1, y2, x2])
labels[image_key].append(action_id)
scores[image_key].append(score)
print_time("read file " + csv_file.name, start)
return boxes, labels, scores
def read_exclusions(exclusions_file):
"""Reads a CSV file of excluded timestamps.
Args:
exclusions_file: A file object containing a csv of video-id,timestamp.
Returns:
A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904",
or an empty set if exclusions file is None.
"""
excluded = set()
if exclusions_file:
reader = csv.reader(exclusions_file)
for row in reader:
assert len(row) == 2, "Expected only 2 columns, got: " + str(row)
excluded.add(make_image_key(row[0], row[1]))
return excluded
def read_labelmap(labelmap_file):
"""Reads a labelmap without the dependency on protocol buffers.
Args:
labelmap_file: A file object containing a label map protocol buffer.
Returns:
labelmap: The label map in the form used by the object_detection_evaluation
module - a list of {"id": integer, "name": classname } dicts.
class_ids: A set containing all of the valid class id integers.
"""
labelmap = []
class_ids = set()
name = ""
class_id = ""
for line in labelmap_file:
if line.startswith(" name:"):
name = line.split('"')[1]
elif line.startswith(" id:") or line.startswith(" label_id:"):
class_id = int(line.strip().split(" ")[-1])
labelmap.append({"id": class_id, "name": name})
class_ids.add(class_id)
return labelmap, class_ids
def split_list(alist, wanted_parts=1):
length = len(alist)
return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]
for i in range(wanted_parts)]
def split_interleave(A):
lists = split_list(A, wanted_parts=4)
D = [val for tup in zip(*lists) for val in tup]
return D
def run_evaluation_threshold(labelmap, groundtruth, exclusions, iou):
# sns.palplot(sns.diverging_palette(128, 240, n=10))
# seq_col_brew = sns.color_palette("Blues_r", 4) # For sequential, blue gradient in reverse
# Qualitative data palette
# current_palette = sns.color_palette("Paired")
# sns.set_palette(current_palette)
# Make sure not to mess this up
filters = []
filters.append("0.1")
filters.append("0.2")
filters.append("0.3")
filters.append("0.4")
filters.append("0.5")
filters.append("0.6")
filters.append("0.7")
filters.append("0.8")
filters.append("0.9")
root_dir = '../../../data/AVA/files/'
ftype = "fusion"
all_detections = []
ts = "1809281055"
for f in filters:
all_detections.append(open("../thresholds/context_" + ftype + "/predictions_fusion_avg_fovea_" + ts + "_" + f + ".csv", 'rb'))
all_gndtruths = []
for i in range(len(filters)):
all_gndtruths.append(open(root_dir + "AVA_Val_Custom_Corrected.csv", 'rb'))
#all_gndtruths.append(open("AVA_Test_Custom_Corrected.csv", 'rb'))
#all_gndtruths.append(open("AVA_Test_Custom_Corrected.csv", 'rb'))
"""Runs evaluations given input files.
Args:
labelmap: file object containing map of labels to consider, in pbtxt format
groundtruth: file object
detections: file object
exclusions: file object or None.
"""
categories, class_whitelist = read_labelmap(labelmap)
logging.info("CATEGORIES (%d):\n%s", len(categories),
pprint.pformat(categories, indent=2))
excluded_keys = read_exclusions(exclusions)
# Reads detections data.
x_axis = []
xpose_ax = []
xobj_ax = []
xhuman_ax = []
ypose_ax = []
yobj_ax = []
yhuman_ax = []
colors_pose = []
colors_obj = []
colors_human = []
finalmAPs = []
colors = []
maxY = -1.0
for detections, gndtruth, filter_type in zip(all_detections, all_gndtruths, filters):
pascal_evaluator = None
metrics = None
actions = None
start = 0
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories, matching_iou_threshold=iou)
# Reads the ground truth data.
boxes, labels, _ = read_csv(gndtruth, class_whitelist)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in ground truth: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.InputDataFields.groundtruth_classes:
np.array(labels[image_key], dtype=int),
standard_fields.InputDataFields.groundtruth_difficult:
np.zeros(len(boxes[image_key]), dtype=bool)
})
print_time("convert groundtruth", start)
# Run evaluation
boxes, labels, scores = read_csv(detections, class_whitelist)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in detections: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.DetectionResultFields.detection_classes:
np.array(labels[image_key], dtype=int),
standard_fields.DetectionResultFields.detection_scores:
np.array(scores[image_key], dtype=float)
})
print_time("convert detections", start)
start = time.time()
metrics = pascal_evaluator.evaluate()
print_time("run_evaluator", start)
# TODO Show a pretty histogram here besides pprint
actions = list(metrics.keys())
final_value = 0.0
for m in actions:
ms = m.split("/")[-1]
if ms == 'mAP@' + str(iou) + 'IOU':
final_value = metrics[m]
finalmAPs.append(final_value)
else:
# x_axis.append(ms)
# y_axis.append(metrics[m])
for cat in categories:
if cat['name'].split("/")[-1] == ms:
if maxY < metrics[m]:
maxY = metrics[m]
if cat['id'] <= 10:
xpose_ax.append("(" + filter_type + ") " + ms)
ypose_ax.append(metrics[m])
colors_pose.append('red')
elif cat['id'] <= 22:
xobj_ax.append("(" + filter_type + ") " + ms)
yobj_ax.append(metrics[m])
colors_obj.append('blue')
else:
xhuman_ax.append("(" + filter_type + ") " + ms)
yhuman_ax.append(metrics[m])
colors_human.append('green')
# Make a confusion matrix for this run
pascal_evaluator = None
x_axis = split_interleave(xpose_ax) + split_interleave(xobj_ax) + split_interleave(xhuman_ax)
y_axis = split_interleave(ypose_ax) + split_interleave(yobj_ax) + split_interleave(yhuman_ax)
colors = split_interleave(colors_pose) + split_interleave(colors_obj) + split_interleave(colors_human)
print(filters)
print(finalmAPs)
plt.ylabel('frame-mAP')
top = 0.1 # offset a bit so it looks good
sns.set_style("whitegrid")
clrs = ['blue' if (x < max(finalmAPs)) else 'red' for x in finalmAPs]
g = sns.barplot(filters, finalmAPs, palette=clrs)
ax = g
# annotate axis = seaborn axis
for p in ax.patches:
ax.annotate("%.4f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=10, color='gray', rotation=90, xytext=(0, 20),
textcoords='offset points')
_ = g.set_ylim(0, top) # To make space for the annotations
plt.show()
def run_evaluation(labelmap, groundtruth, exclusions, iou):
root_dir = '../../../data/AVA/files/'
test_dir = "../test_outputs/"
# Make sure not to mess this up
experiments_filters = {}
experiments_detections = {}
experiments_filters['pose'] = ['Pose']
experiments_detections['pose'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb')]
experiments_filters['rgb-streams-aug'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['rgb-streams-aug'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['flow vs flowcrop'] = ['Flow', 'Flowcrop']
experiments_detections['flow vs flowcrop'] = [open(test_dir + "/flow/output_test_flowcrop.csv", 'rb'), ]
#all_detections.append(open(test_dir + "/flow/output_test_flow.csv", 'rb'))
experiments_filters['two-streams'] = ['Two-Stream-RGB', 'Two-Stream-Crop', 'Two-Stream-Gauss', 'Two-Stream-Fovea']
experiments_detections['two-streams'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['two-streams-aug'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['two-streams-aug'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['mlp vs lstm'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['mlp vs lstm'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['lstmA vs lstmB'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['lstmA vs lstmB'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['context-fusion mlp vs lstm'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['context-fusion mlp vs lstm'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['balancing sampling'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['balancing sampling'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['balancing weights'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['balancing weights'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['balancing prior'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['balancing prior'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
# experiment =
filters = []
# filters.append("pose")
# filters.append("rgb-base")
# filters.append("rgb-prior")
# filters.append("rgb-sampling")
# filters.append("rgb-weights")
# filters.append("rgb-kinetics")
# filters.append("flow-kinetics")
# filters.append("rgb")
# filters.append("crop")
# filters.append("gauss")
# filters.append("fovea")
# filters.append("flowcrop")
# filters.append("flow")
# filters.append("MLP")
#filters.append("best case scenario thresh 0.1")
#filters.append("two pass scenario thresh 0.1")
filters.append("fovea")
filters.append("dense-gt")
#filters.append("sampling no aug")
filters.append("dense-2pass")
#filters.append("weights no aug")
# filters.append("LSTM5-A-512")
# filters.append("random")
# filters.append("LSTM5-B-512")
# filters.append("LSTM10")
# filters.append("2st(rgb)")
# filters.append("2st(crop)")
# filters.append("2st(gauss)")
# filters.append("2st(fovea)")
#filters.append("2st(crop) + flowcrop")
#filters.append("2st(gauss) + flowcrop")
#filters.append("2st(fovea) + flowcrop")
#filters.append("2st(fovea) + mlp")
#filters.append("2st(crop) + mlp")
#filters.append("2st(gauss) + mlp")
# filters.append("2stream")
#filters.append("2stream + lstm (extra pass)")
# filters.append("gauss")
#filters.append("gauss aug")
#filters.append("LSTMA 512 5 2")
#filters.append("LSTMA 512 5 3")
#filters.append("LSTMA 512 5 3")
#filters.append("LSTMA 1024 5 3")
#filters.append("LSTMA 2048 5 3")
#filters.append("LSTMB 512 3 3")
#filters.append("LSTMB 1024 3 3")
#filters.append("LSTMB 2048 3 3")
# filters.append("2st(gauss) + lstm")
all_detections = []
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_1024_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_2048_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_1024_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_2048_3_3.csv", 'rb'))
# Pose
# all_detections.append(open(test_dir + "output_test_flowcrop.csv", 'rb'))
# Balancing
#all_detections.append(open(test_dir + "output_test_flowcrop.csv", 'rb'))
#all_detections.append(open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'))
#all_detections.append(open(test_dir + "/augmentation/predictions_rgb_gauss_1807241628_1000.csv", 'rb'))
#all_detections.append(open(test_dir + "/augmentation/output_test_sampling_gauss_1809221859.csv", 'rb'))
#all_detections.append(open(test_dir + "/augmentation/output_test_weights_gauss_1809221904.csv", 'rb'))
# RGB Streams
#all_detections.append(open(test_dir + "/kinetics_init/output_test_rgb_kineticsinit_gauss_1809220212.csv", 'rb'))
#all_detections.append(open(test_dir + "/kinetics_init/output_test_flow_kineticsinit_1809220244.csv", 'rb'))
# Flow Streams
# Context (LSTMs)
#filters.append("LSTMB 512 3 3")
#filters.append("LSTMB 512 3 2")
#filters.append("LSTMB 512 3 1")
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_1.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_2.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_1.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_2.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_32_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_32_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_32_10_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/mlp/output_test_ctx.csv", 'rb'))
#all_detections.append(open(test_dir + "context/mlp/output_test_ctx_mlp_1809212356.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_3_1809220010.csv", 'rb'))
#all_detections.append(open(test_dir + "random/output_test_random_1809221552.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_5_3_1809211924.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_128_10_3_1809211930.csv", 'rb'))
# 6 2-streams + baseline
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_rgb_1809220100.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_crop.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_gauss.csv", 'rb'))
all_detections.append(open(test_dir + "/two-streams/output_test_2stream_fovea.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_crop_1809220117.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_gauss_1809220152.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_fovea_1809220136.csv", 'rb'))
# Context Fusions
# all_detections.append(open(test_dir + "/context_fusion/output_test_3stream_fovea.csv", 'rb'))
# all_detections.append(open(test_dir + "/context_fusion/output_test_3stream_crop.csv", 'rb'))
# all_detections.append(open(test_dir + "/context_fusion/output_test_3stream_gauss.csv", 'rb'))
all_detections.append(open(test_dir + "/context_fusion/output_test_LSTM_FCfusion_contextGT_gauss_1810011737.csv", 'rb'))
all_detections.append(open(test_dir + "/context_fusion/output_test_LSTM_FCfusion_context_secondpass_gauss_1810011754.csv", 'rb'))
# LSTMs
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_rgb_1809220100.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_crop.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_gauss.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_fovea.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstm_fusion_thresh_512_5_3_1809242315.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstm_512_5_3_1809242252.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavggoodpedro_512_5_3_1809242338.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavg_twophase_thresh02_512_5_3_1809281219.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavg_threephase_512_5_3_1809281317.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstm_fusion_thresh01_512_5_3_1809281400.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavg_twophase_thresh01_512_5_3_1809281423.csv", 'rb'))
#all_detections.append(open(test_dir + "rgb_gauss/output_test_gauss.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_sampling_gauss_1809221859.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_samplingnoaug_gauss_1809281439.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_weightsnew_gauss_1809291104.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_weightsaug_gauss_1809261228.csv", 'rb'))
# output_test_ctx_lstm_512_5_3_1809242252.csv
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_crop_1809220117.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_gauss_1809220152.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_fovea_1809220136.csv", 'rb'))
# ---------------------------------
# New run to compare new flow
#all_detections.append(open(test_dir + "/flow/output_test_flowcrop.csv", 'rb'))
#all_detections.append(open(test_dir + "/flow/output_test_flow.csv", 'rb'))
# New 2 and 3 streams
# all_detections.append(open(test_dir + "output_test_gauss.csv", 'rb'))
# all_detections.append(open(test_dir + "output_test_gauss_extra.csv", 'rb'))
# all_detections.append(open(test_dir + "output_test_3stream_gauss.csv", 'rb'))
# all_detections.append(open(test_dir + "output_test_3stream_crop.csv", 'rb'))
# Flow, context, 2-stream, 3-stream run
#all_detections.append(open(test_dir + "output_test_ctx.csv", 'rb'))
#all_detections.append(open(test_dir + "output_test_flow.csv", 'rb'))
#all_detections.append(open(test_dir + "output_test_2stream.csv", 'rb'))
#all_detections.append(open(test_dir + "output_test_3stream.csv", 'rb'))
# RGB run
# all_detections.append(open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'))
# all_detections.append(open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'))
# all_detections.append(open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'))
# all_detections.append(open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb'))
balancing = False
all_gndtruths = []
for i in range(len(all_detections)):
if balancing is False:
all_gndtruths.append(open(root_dir + "AVA_Test_Custom_Corrected.csv", 'rb'))
else:
all_gndtruths.append(open(root_dir + "AVA_Test_Custom_Corrected_Balanced.csv", 'rb'))
"""Runs evaluations given input files.
Args:
labelmap: file object containing map of labels to consider, in pbtxt format
groundtruth: file object
detections: file object
exclusions: file object or None.
"""
categories, class_whitelist = read_labelmap(labelmap)
logging.info("CATEGORIES (%d):\n%s", len(categories), pprint.pformat(categories, indent=2))
excluded_keys = read_exclusions(exclusions)
# Reads detections data.
x_axis = []
xpose_ax = []
xobj_ax = []
xhuman_ax = []
ypose_ax = []
yobj_ax = []
yhuman_ax = []
colors_pose = []
colors_obj = []
colors_human = []
finalmAPs = []
colors = []
maxY = -1.0
for detections, gndtruth, filter_type in zip(all_detections, all_gndtruths, filters):
pascal_evaluator = None
metrics = None
actions = None
start = 0
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories, matching_iou_threshold=iou)
# Reads the ground truth data.
boxes, labels, _ = read_csv(gndtruth, class_whitelist)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in ground truth: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.InputDataFields.groundtruth_classes:
np.array(labels[image_key], dtype=int),
standard_fields.InputDataFields.groundtruth_difficult:
np.zeros(len(boxes[image_key]), dtype=bool)
})
print_time("convert groundtruth", start)
# Run evaluation
boxes, labels, scores = read_csv(detections, class_whitelist)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in detections: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.DetectionResultFields.detection_classes:
np.array(labels[image_key], dtype=int),
standard_fields.DetectionResultFields.detection_scores:
np.array(scores[image_key], dtype=float)
})
print_time("convert detections", start)
start = time.time()
metrics = pascal_evaluator.evaluate()
print_time("run_evaluator", start)
# TODO Show a pretty histogram here besides pprint
actions = list(metrics.keys())
final_value = 0.0
for m in actions:
ms = m.split("/")[-1]
if ms == 'mAP@' + str(iou) + 'IOU':
final_value = metrics[m]
finalmAPs.append(final_value)
else:
# x_axis.append(ms)
# y_axis.append(metrics[m])
for cat in categories:
if cat['name'].split("/")[-1] == ms:
if maxY < metrics[m]:
maxY = metrics[m]
if cat['id'] <= 10:
xpose_ax.append("[" + filter_type + "] " + ms)
ypose_ax.append(metrics[m])
colors_pose.append('pose')
elif cat['id'] <= 22:
xobj_ax.append("[" + filter_type + "] " + ms)
yobj_ax.append(metrics[m])
colors_obj.append('human-object')
else:
xhuman_ax.append("[" + filter_type + "] " + ms)
yhuman_ax.append(metrics[m])
colors_human.append('human-human')
# Make a confusion matrix for this run
pascal_evaluator = None
x_axis = split_interleave(xpose_ax) + split_interleave(xobj_ax) + split_interleave(xhuman_ax)
y_axis = split_interleave(ypose_ax) + split_interleave(yobj_ax) + split_interleave(yhuman_ax)
colors = split_interleave(colors_pose) + split_interleave(colors_obj) + split_interleave(colors_human)
plt.ylabel('frame-mAP')
top = maxY + 0.1 # offset a bit so it looks good
sns.set_style("whitegrid")
g = sns.barplot(y_axis, x_axis, hue=colors, palette=['red', 'blue', 'green'])
ax = g
#ax.legend(loc='lower right')
# annotate axis = seaborn axis
# for p in ax.patches:
# ax.annotate("%.3f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
# ha='center', va='center', fontsize=10, color='gray', rotation=90, xytext=(0, 20),
# textcoords='offset points')
# ax.set_ylim(-1, len(y_axis))
sns.set()
ax.tick_params(labelsize=6)
for p in ax.patches:
p.set_height(p.get_height() * 3)
ax.annotate("%.3f" % p.get_width(), (p.get_x() + p.get_width(), p.get_y()),
xytext=(5, -5), fontsize=8, color='gray', textcoords='offset points')
_ = g.set_xlim(0, top) # To make space for the annotations
pprint.pprint(metrics, indent=2)
ax.set(ylabel="", xlabel="AP")
plt.xticks(rotation=0)
title = ""
file = open("results.txt", "w")
for filter_type, mAP in zip(filters, finalmAPs):
ft = filter_type + ': mAP@' + str(iou) + 'IOU = ' + str(mAP) + '\n'
title += ft
file.write(ft)
file.close()
# ax.figure.tight_layout()
ax.figure.subplots_adjust(left=0.2)  # change 0.2 to suit your needs.
plt.title(title)
plt.gca().xaxis.grid(True)
plt.show()
if len(all_detections) == 1:
sz = 2
grid_sz = [1, 1]
elif len(all_detections) == 2:
sz = 3
grid_sz = [1, 2]
elif len(all_detections) == 3:
sz = 4
grid_sz = [2, 2]
else:
sz = 5
grid_sz = [2, 2]
for i in range(1, sz):
print(i)
plt.subplot(grid_sz[0], grid_sz[1], i)
if i <= len(all_detections):
# Confusion matrix
classes = []
for k in categories:
classes.append(k['name'])
cm = confusion_matrix(all_gndtruths[i - 1], all_detections[i - 1], x_axis)
g = sns.heatmap(cm, annot=True, fmt="d", xticklabels=classes[:10], yticklabels=classes[:10], linewidths=0.5, linecolor='black', cbar=True)
#t = 0
# for ytick_label, xtick_label in zip(g.axes.get_yticklabels(), g.axes.get_xticklabels()):
# if t <= 9:
# ytick_label.set_color("r")
# xtick_label.set_color("r")
# elif t <= 22:
# ytick_label.set_color("b")
# xtick_label.set_color("b")
# else:
# ytick_label.set_color("g")
# xtick_label.set_color("g")
# t += 1
plt.xticks(rotation=-90)
plt.title("Pose Confusion Matrix (" + filters[i - 1] + ")")
plt.show()
def confusion_matrix(groundtruth, detections, x_axis):
# cm = np.zeros([len(x_axis), len(x_axis)], np.int32)
cm = np.zeros([10, 10], np.int32)
gnd_dict = {}
det_dict = {}
# print(groundtruth)
# print(detections)
# Load gndtruth
groundtruth.seek(0)
reader = csv.reader(groundtruth)
# print("Parsing file")
sep = "@"
for row in reader:
video = row[0]
kf = row[1]
#bbs = str(row[2]) + "@" + str(row[3]) + "@" + str(row[4]) + "@" + str(row[5])
bbs = str("{:.3f}".format(float(row[2]))) + sep + str("{:.3f}".format(float(row[3]))) + sep + \
str("{:.3f}".format(float(row[4]))) + sep + str("{:.3f}".format(float(row[5])))
i = video + "@" + kf.lstrip("0") + "@" + bbs
gnd_dict[i] = []
groundtruth.seek(0)
for row in reader:
video = row[0]
kf = row[1]
#bbs = str(row[2]) + "@" + str(row[3]) + "@" + str(row[4]) + "@" + str(row[5])
bbs = str("{:.3f}".format(float(row[2]))) + sep + str("{:.3f}".format(float(row[3]))) + sep + \
str("{:.3f}".format(float(row[4]))) + sep + str("{:.3f}".format(float(row[5])))
i = video + "@" + kf.lstrip("0") + "@" + bbs
gnd_dict[i].append(row[6])
# Load predictions
detections.seek(0)
reader = csv.reader(detections)
for row in reader:
video = row[0]
kf = row[1]
# bbs = str(row[2]) + "@" + str(row[3]) + "@" + str(row[4]) + "@" + str(row[5])
bbs = str("{:.3f}".format(float(row[2]))) + sep + str("{:.3f}".format(float(row[3]))) + sep + \
str("{:.3f}".format(float(row[4]))) + sep + str("{:.3f}".format(float(row[5])))
i = video + "@" + kf.lstrip("0") + "@" + bbs
det_dict[i] = []
detections.seek(0)
for row in reader:
video = row[0]
kf = row[1]
# bbs = str(row[2]) + "@" + str(row[3]) + "@" + str(row[4]) + "@" + str(row[5])
bbs = str("{:.3f}".format(float(row[2]))) + sep + str("{:.3f}".format(float(row[3]))) + sep + \
str("{:.3f}".format(float(row[4]))) + sep + str("{:.3f}".format(float(row[5])))
i = video + "@" + kf.lstrip("0") + "@" + bbs
det_dict[i].append(row[6])
# TODO For softmax actions normal count
for key, gnd_acts in gnd_dict.items():
# print("KEY: " + key)
det_acts = det_dict[key]
# print(gnd_acts)
# print(det_acts)
gnd_pose = -1
det_pose = -1
for a in gnd_acts:
if int(a) <= 10:
# print(a)
gnd_pose = int(a) - 1
for a in det_acts:
if int(a) <= 10:
det_pose = int(a) - 1
if gnd_pose != -1 and det_pose != -1:
cm[gnd_pose, det_pose] += 1
cm[det_pose, gnd_pose] += 1
# TODO For the other two categories: if there is a correctly predicted action, count it; if the prediction is incorrect,
# either count it as None (if there was no action) or add 1 to all the other correct actions
return cm
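# Illustrative note (a sketch derived from the code above; the example values are hypothetical):
# both dictionaries are keyed by "<video>@<keyframe without leading zeros>@<x1>@<y1>@<x2>@<y2>",
# with the box coordinates formatted to three decimals, e.g.
#   "abc123@902@0.120@0.340@0.560@0.780"
# built from CSV rows of the form video,keyframe,x1,y1,x2,y2,action_id. Only pose classes
# (action ids <= 10) are accumulated, and each match increments the matrix in both
# directions, so the resulting confusion matrix is symmetric.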
def parse_arguments():
"""Parses command-line flags.
Returns:
args: a named tuple containing three file objects args.labelmap,
args.groundtruth, and args.detections.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--labelmap",
help="Filename of label map",
type=argparse.FileType("r"),
default="../ava_action_list_newsplit_v2.1_for_activitynet_2018.pbtxt.txt")
parser.add_argument(
"-g",
"--groundtruth",
help="CSV file containing ground truth.",
type=argparse.FileType("rb"),
required=True)
parser.add_argument(
"-e",
"--exclusions",
help=("Optional CSV file containing videoid,timestamp pairs to exclude "
"from evaluation."),
type=argparse.FileType("r"),
required=False)
parser.add_argument(
"-i",
"--iou",
help="Optional IoU value ",
type=float,
required=False)
return parser.parse_args()
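# Illustrative invocation (the script name is hypothetical; the flags come from the parser
# above). Note that the detection CSVs are hard-coded in the lists at the top of
# run_evaluation rather than passed on the command line:
#   python evaluate_custom_ava.py -g AVA_Test_Custom_Corrected.csv -i 0.5
# -g is required; -l defaults to the AVA v2.1 pbtxt above; -e and -i are optional.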
def main():
# Whether or not to test thresholds
threshold = False
logging.basicConfig(level=logging.INFO)
args = parse_arguments()
print(args)
if threshold is False:
run_evaluation(**vars(args))
else:
run_evaluation_threshold(**vars(args))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4838949
|
import glob
import logging
import os
import docker
from tabulate import tabulate
import yaml
logging.basicConfig(
format='%(asctime)s - %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
SKIP_IMAGES = [
'ceph-ansible',
'installer',
'kolla-ansible',
'osism-ansible',
'rally'
]
SKIP_LATEST_IMAGES = [
'aptly',
'ara_server',
'ara_web',
'cobbler'
]
OSISM_VERSION = os.environ.get("OSISM_VERSION", "latest")
DRY_RUN = os.environ.get("DRY_RUN", False) == "True"
if not DRY_RUN:
DOCKER_CLIENT = docker.APIClient(base_url='unix:///var/run/docker.sock')
IMAGES = os.environ.get("IMAGES", None)
if IMAGES:
IMAGES = IMAGES.split(",")
def process(version):
logging.info("processing version %s" % version)
with open("etc/images.yml", "rb") as fp:
images = yaml.load(fp, Loader=yaml.SafeLoader)
result = []
all_docker_images = []
repository_version = version
for filename in glob.glob("%s/*.yml" % version):
with open(filename, "rb") as fp:
versions = yaml.load(fp, Loader=yaml.SafeLoader)
all_docker_images.append(versions.get('docker_images', {}))
if os.path.basename(filename) == 'base.yml' and version != 'latest':
repository_version = versions['repository_version']
for docker_images in all_docker_images:
for image in docker_images:
if IMAGES and image not in IMAGES:
logging.info("skipping - %s" % image)
continue
logging.info("checking - %s" % image)
if image in SKIP_IMAGES:
logging.info("skipping - %s" % image)
continue
if image in SKIP_LATEST_IMAGES and repository_version == 'latest':
logging.info("skipping - %s" % image)
continue
if image not in images:
logging.error("skipping - definiton of %s is missing in etc/images.yml" % image)
continue
if not images[image][:5] == 'osism':
if image == 'ceph':
target = 'osism/ceph-daemon'
else:
target = "osism/" + images[image][images[image].find('/') + 1:]
else:
target = images[image]
source = images[image]
target_tag = repository_version
source_tag = docker_images[image]
if image in ['cephclient', 'openstackclient']:
target_tag = docker_images[image] + '-' + target_tag
if image == 'ceph' and 'stable-3.1' in source_tag:
target_tag = "%s-%s" % (source_tag.split("-")[-1], target_tag)
source_tag = "%s-ubuntu-16.04-x86_64" % source_tag
if image == 'ceph' and ('stable-3.2' in source_tag or 'stable-4.0' in source_tag):
target_tag = "%s-%s" % (source_tag.split("-")[-1], target_tag)
source = 'osism/ceph-daemon'
source_tag = "%s-centos-7-x86_64" % source_tag
if image == 'ceph' and ('stable-5.0' in source_tag):
target_tag = "%s-%s" % (source_tag.split("-")[-1], target_tag)
source = 'osism/ceph-daemon'
source_tag = "%s-centos-8-x86_64" % source_tag
if image == 'ceph' and 'latest' in source_tag:
logging.info("skipping - %s (latest)" % image)
continue
if image == 'cephclient' and 'latest' in source_tag:
logging.info("skipping - %s (latest)" % image)
continue
if image == 'openstackclient' and 'latest' in source_tag:
logging.info("skipping - %s (latest)" % image)
continue
logging.info("pulling - %s:%s" % (source, source_tag))
if not DRY_RUN:
DOCKER_CLIENT.pull(source, source_tag)
docker_image = DOCKER_CLIENT.inspect_image("%s:%s" % (source, source_tag))
result.append([source, source_tag, docker_image["Id"], docker_image["Created"]])
logging.info("tagging - %s:%s" % (target, target_tag))
if not DRY_RUN:
DOCKER_CLIENT.tag("%s:%s" % (source, source_tag), target, target_tag)
logging.info("pushing - %s:%s" % (target, target_tag))
if not DRY_RUN:
DOCKER_CLIENT.push(target, target_tag)
logging.info("removing - %s:%s" % (source, source_tag))
if not DRY_RUN:
DOCKER_CLIENT.remove_image("%s:%s" % (source, source_tag))
logging.info("removing - %s:%s" % (target, target_tag))
if not DRY_RUN:
DOCKER_CLIENT.remove_image("%s:%s" % (target, target_tag))
return result
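# Illustrative note on the expected file layout (inferred from the code above; the example
# values are hypothetical): etc/images.yml maps image names to source repositories, e.g.
#   ceph: ceph/daemon
#   openstackclient: kolla/ubuntu-source-openstackclient
# while <version>/*.yml files provide a docker_images: {image: tag} mapping and, for pinned
# releases, base.yml provides the repository_version used as the target tag.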
result = process(OSISM_VERSION)
if not DRY_RUN:
print(tabulate(result, headers=["Image", "Tag", "Hash", "Created"]))
|
StarcoderdataPython
|
3406192
|
<reponame>CareBT/carebt
# Copyright 2021 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from unittest.mock import call
from carebt.abstractLogger import LogLevel
from carebt.behaviorTreeRunner import BehaviorTreeRunner
from carebt.nodeStatus import NodeStatus
from tests.actionNodes import AddTwoNumbersAction
from tests.actionNodes import AddTwoNumbersActionMissingOutput
from tests.actionNodes import AddTwoNumbersActionWithFailure
from tests.actionNodes import AddTwoNumbersLongRunningAction
from tests.actionNodes import AddTwoNumbersLongRunningActionMissingCallback
from tests.actionNodes import AddTwoNumbersLongRunningActionMissingCallback2
from tests.actionNodes import AddTwoNumbersLongRunningActionWithAbort
from tests.actionNodes import AddTwoNumbersMultiTickActionWithTimeout
from tests.actionNodes import AddTwoNumbersThrottledMultiTickAction
from tests.actionNodes import HelloWorldAction
from tests.global_mock import mock
class TestActionNode:
"""Tests the `ActionNode`."""
########################################################################
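# Note on the parameter strings used throughout these tests (as exercised here, not a formal
# grammar): input values are listed before '=>' and output bindings are named after it,
# e.g. '3 5 => ?result' feeds the node the inputs 3 and 5 and exposes its result as ?result,
# while '"Alice"' passes a single string input.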
def test_HelloWorldAction(self):
"""Tests HelloWorldAction."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.get_logger().set_log_level(LogLevel.INFO)
bt_runner.run(HelloWorldAction)
assert mock.called
assert bt_runner.get_tick_count() == 1
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ HelloWorldAction'),
call('HelloWorldAction: Hello World !!!'),
call('__del__ HelloWorldAction')]
def test_HelloWorldAction_alice(self):
"""Tests HelloWorldAction with name = Alice."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.get_logger().set_log_level(LogLevel.INFO)
bt_runner.run(HelloWorldAction, '"Alice"')
assert mock.called
assert bt_runner.get_tick_count() == 1
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ HelloWorldAction'),
call('HelloWorldAction: Hello Alice !!!'),
call('__del__ HelloWorldAction')]
def test_HelloWorldAction_empty(self):
"""Tests HelloWorldAction with name = empty string."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.get_logger().set_log_level(LogLevel.INFO)
bt_runner.run(HelloWorldAction, '""')
assert mock.called
assert bt_runner.get_tick_count() == 1
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ HelloWorldAction'),
call('HelloWorldAction: Hello !!!'),
call('__del__ HelloWorldAction')]
########################################################################
def test_AddTwoNumbersActionMissingOutput(self):
"""Tests the case when the output is not bound."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.get_logger().set_log_level(LogLevel.INFO)
bt_runner.run(AddTwoNumbersActionMissingOutput, '2 3 => ?result')
assert mock.called
assert bt_runner.get_tick_count() == 1
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersActionMissingOutput'),
call('on_init AddTwoNumbersActionMissingOutput'),
call('on_tick AddTwoNumbersActionMissingOutput'),
call('on_delete AddTwoNumbersAction'),
call('__del__ AddTwoNumbersAction')]
########################################################################
def test_AddTwoNumbersAction_3_5(self):
"""Test two valid inputs one output."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.run(AddTwoNumbersAction, '3 5 => ?result')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersAction'),
call('on_init AddTwoNumbersAction'),
call('AddTwoNumbersAction: calculating: 3 + 5 = 8'),
call('on_delete AddTwoNumbersAction'),
call('__del__ AddTwoNumbersAction')]
assert bt_runner._instance._result == 8
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
def test_AddTwoNumbersAction_0_0(self):
"""Test two valid inputs (0 + 0) one output."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.run(AddTwoNumbersAction, '0 0 => ?result')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersAction'),
call('on_init AddTwoNumbersAction'),
call('AddTwoNumbersAction: calculating: 0 + 0 = 0'),
call('on_delete AddTwoNumbersAction'),
call('__del__ AddTwoNumbersAction')]
assert bt_runner._instance._result == 0
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
def test_AddTwoNumbersAction_9(self):
"""Test one input is missing, but default of missing ?y is 5678."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.run(AddTwoNumbersAction, '9 => ?result')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersAction'),
call('on_init AddTwoNumbersAction'),
call('AddTwoNumbersAction: calculating: 9 + 5678 = 5687'),
call('on_delete AddTwoNumbersAction'),
call('__del__ AddTwoNumbersAction')]
assert bt_runner._instance._result == 5687
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
def test_AddTwoNumbersAction(self):
"""Test both inputs are missing, but default of missing ?x is 1234 and ?y is 5678."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.run(AddTwoNumbersAction, '=> ?result')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersAction'),
call('on_init AddTwoNumbersAction'),
call('AddTwoNumbersAction: calculating: 1234 + 5678 = 6912'), # noqa: E501
call('on_delete AddTwoNumbersAction'),
call('__del__ AddTwoNumbersAction')]
assert bt_runner._instance._result == 6912
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
def test_AddTwoNumbersAction_missing_out(self):
"""Test both inputs present, but output is missing."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.run(AddTwoNumbersAction, '1 2')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersAction'),
call('on_init AddTwoNumbersAction'),
call('AddTwoNumbersAction: calculating: 1 + 2 = 3'),
call('on_delete AddTwoNumbersAction'),
call('__del__ AddTwoNumbersAction')]
assert not hasattr(bt_runner._instance, '_result')
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
########################################################################
def test_AddTwoNumbersActionWithFailure_3_5(self):
"""Test two valid inputs one output."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.run(AddTwoNumbersActionWithFailure, '3 5 => ?result')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersActionWithFailure'),
call('on_init AddTwoNumbersActionWithFailure'),
call('AddTwoNumbersActionWithFailure: calculating: 3 + 5 = 8'), # noqa: E501
call('on_delete AddTwoNumbersActionWithFailure'),
call('__del__ AddTwoNumbersActionWithFailure')]
assert bt_runner._instance._result == 8
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
def test_AddTwoNumbersActionWithFailure_7(self):
"""Test one input missing -> node should fail."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.run(AddTwoNumbersActionWithFailure, '7 => ?result')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersActionWithFailure'),
call('on_init AddTwoNumbersActionWithFailure'),
call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501
call('on_delete AddTwoNumbersActionWithFailure'),
call('__del__ AddTwoNumbersActionWithFailure')]
assert not hasattr(bt_runner._instance, '_result')
assert bt_runner._instance.get_status() == NodeStatus.FAILURE
assert bt_runner._instance.get_contingency_message() == 'NOT_TWO_NUMBERS_PROVIDED'
########################################################################
def test_AddTwoNumbersMultiTickActionWithTimeout_5_3_5(self):
"""Test that calculation takes 5 ticks and one tick takes 10ms."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.set_tick_rate_ms(10)
start = datetime.now()
bt_runner.run(AddTwoNumbersMultiTickActionWithTimeout, '5 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 55
assert int(delta.total_seconds() * 1000) < 70
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersMultiTickActionWithTimeout'),
call('on_init AddTwoNumbersMultiTickActionWithTimeout'),
call('AddTwoNumbersMultiTickActionWithTimeout: (tick_count = 1/5)'), # noqa: E501
call('AddTwoNumbersMultiTickActionWithTimeout: (tick_count = 2/5)'), # noqa: E501
call('AddTwoNumbersMultiTickActionWithTimeout: (tick_count = 3/5)'), # noqa: E501
call('AddTwoNumbersMultiTickActionWithTimeout: (tick_count = 4/5)'), # noqa: E501
call('AddTwoNumbersMultiTickActionWithTimeout: (tick_count = 5/5)'), # noqa: E501
call('AddTwoNumbersMultiTickActionWithTimeout: DONE 3 + 5 = 8'), # noqa: E501
call('on_delete AddTwoNumbersMultiTickActionWithTimeout'),
call('__del__ AddTwoNumbersMultiTickActionWithTimeout')]
assert bt_runner._instance._result == 8
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
assert bt_runner.get_tick_count() == 6
def test_AddTwoNumbersMultiTickActionWithTimeout_5_3_5_timeout(self):
"""Test that calculation takes 5 ticks and one tick takes 500ms -> the timeout occures."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.set_tick_rate_ms(500)
bt_runner.run(AddTwoNumbersMultiTickActionWithTimeout, '5 3 5 => ?result')
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersMultiTickActionWithTimeout'),
call('on_init AddTwoNumbersMultiTickActionWithTimeout'),
call('AddTwoNumbersMultiTickActionWithTimeout: (tick_count = 1/5)'), # noqa: E501
call('AddTwoNumbersMultiTickActionWithTimeout: (tick_count = 2/5)'), # noqa: E501
call('on_timeout AddTwoNumbersMultiTickActionWithTimeout'), # noqa: E501
call('on_delete AddTwoNumbersMultiTickActionWithTimeout'),
call('__del__ AddTwoNumbersMultiTickActionWithTimeout')]
assert not hasattr(bt_runner._instance, '_result')
assert bt_runner._instance.get_status() == NodeStatus.ABORTED
assert bt_runner._instance.get_contingency_message() == 'TIMEOUT'
########################################################################
def test_AddTwoNumbersThrottledMultiTickAction_5_3_5(self):
"""Test that calculation takes 5 ticks and this forwarded ticks are throttled to 500ms."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.set_tick_rate_ms(10)
start = datetime.now()
bt_runner.run(AddTwoNumbersThrottledMultiTickAction, '5 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 2400
assert int(delta.total_seconds() * 1000) < 2600
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersThrottledMultiTickAction'),
call('on_init AddTwoNumbersThrottledMultiTickAction'),
call('AddTwoNumbersThrottledMultiTickAction: (tick_count = 1/5)'), # noqa: E501
call('AddTwoNumbersThrottledMultiTickAction: (tick_count = 2/5)'), # noqa: E501
call('AddTwoNumbersThrottledMultiTickAction: (tick_count = 3/5)'), # noqa: E501
call('AddTwoNumbersThrottledMultiTickAction: (tick_count = 4/5)'), # noqa: E501
call('AddTwoNumbersThrottledMultiTickAction: (tick_count = 5/5)'), # noqa: E501
call('AddTwoNumbersThrottledMultiTickAction: DONE 3 + 5 = 8'), # noqa: E501
call('on_delete AddTwoNumbersThrottledMultiTickAction'),
call('__del__ AddTwoNumbersThrottledMultiTickAction')]
assert bt_runner._instance._result == 8
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
########################################################################
def test_AddTwoNumbersLongRunningAction_500_3_5(self):
"""Test a long running calculation."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
start = datetime.now()
bt_runner.run(AddTwoNumbersLongRunningAction, '500 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 500
assert int(delta.total_seconds() * 1000) < 600
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersLongRunningAction'),
call('on_init AddTwoNumbersLongRunningAction'),
call('AddTwoNumbersLongRunningAction: calculating 500 ms ...'), # noqa: E501
call('AddTwoNumbersLongRunningAction: done: 3 + 5 = 8'), # noqa: E501
call('on_delete AddTwoNumbersLongRunningAction'),
call('__del__ AddTwoNumbersLongRunningAction')]
assert bt_runner._instance._result == 8
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
########################################################################
def test_AddTwoNumbersLongRunningActionWithAbort_100_3_5(self):
"""Test a long running calculation which is faster (100ms) than the timeout (1000ms)."""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.set_tick_rate_ms(10)
start = datetime.now()
bt_runner.run(AddTwoNumbersLongRunningActionWithAbort, '100 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 100
assert int(delta.total_seconds() * 1000) < 200
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersLongRunningActionWithAbort'),
call('on_init AddTwoNumbersLongRunningActionWithAbort'),
call('AddTwoNumbersLongRunningActionWithAbort: calculating 100 ms ... (timeout = 1000 ms)'), # noqa: E501
call('AddTwoNumbersLongRunningActionWithAbort: done_callback: 3 + 5 = 8'), # noqa: E501
call('on_delete AddTwoNumbersLongRunningActionWithAbort'),
call('__del__ AddTwoNumbersLongRunningActionWithAbort')]
assert bt_runner._instance._result == 8
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
def test_AddTwoNumbersLongRunningActionWithAbort_1500_3_5(self):
"""Test AddTwoNumbersLongRunningActionWithAbort example node.
Test a long running calculation which is slower (1500ms) than
the timeout (1000ms). In the timeout handler, the node is aborted
with message `TIMEOUT`. The `?result` is not set.
"""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
start = datetime.now()
bt_runner.run(AddTwoNumbersLongRunningActionWithAbort, '1500 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 1000
assert int(delta.total_seconds() * 1000) < 1100
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersLongRunningActionWithAbort'),
call('on_init AddTwoNumbersLongRunningActionWithAbort'),
call('AddTwoNumbersLongRunningActionWithAbort: calculating 1500 ms ... (timeout = 1000 ms)'), # noqa: E501
call('on_timeout AddTwoNumbersLongRunningActionWithAbort'),
call('on_abort AddTwoNumbersLongRunningActionWithAbort'),
call('on_delete AddTwoNumbersLongRunningActionWithAbort'),
call('__del__ AddTwoNumbersLongRunningActionWithAbort')]
assert not hasattr(bt_runner._instance, '_result')
assert bt_runner._instance.get_status() == NodeStatus.ABORTED
assert bt_runner._instance.get_contingency_message() == 'TIMEOUT'
########################################################################
def test_AddTwoNumbersLongRunningActionMissingCallback_100_3_5(self):
"""Test AddTwoNumbersLongRunningActionMissingCallback example node.
Test a long running calculation which is faster (100ms) than
the timeout (1000ms)
"""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
bt_runner.set_tick_rate_ms(10)
start = datetime.now()
bt_runner.run(AddTwoNumbersLongRunningActionMissingCallback, '100 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 100
assert int(delta.total_seconds() * 1000) < 200
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersLongRunningActionMissingCallback'), # noqa: E501
call('on_init AddTwoNumbersLongRunningActionMissingCallback'), # noqa: E501
call('AddTwoNumbersLongRunningActionMissingCallback: calculating 100 ms ... (timeout = 1000 ms)'), # noqa: E501
call('AddTwoNumbersLongRunningActionMissingCallback: done_callback: 3 + 5 = 8'), # noqa: E501
call('on_delete AddTwoNumbersLongRunningActionMissingCallback'), # noqa: E501
call('__del__ AddTwoNumbersLongRunningActionMissingCallback')] # noqa: E501
assert bt_runner._instance._result == 8
assert bt_runner._instance.get_status() == NodeStatus.SUCCESS
assert bt_runner._instance.get_contingency_message() == ''
def test_AddTwoNumbersLongRunningActionMissingCallback_1500_3_5(self):
"""Test AddTwoNumbersLongRunningActionMissingCallback example node.
Tests a long running calculation which is slower (1500ms) than
the timeout (1000ms). The timeout handler is not overridden.
"""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
start = datetime.now()
bt_runner.run(AddTwoNumbersLongRunningActionMissingCallback, '1500 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 1000
assert int(delta.total_seconds() * 1000) < 1100
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersLongRunningActionMissingCallback'), # noqa: E501
call('on_init AddTwoNumbersLongRunningActionMissingCallback'), # noqa: E501
call('AddTwoNumbersLongRunningActionMissingCallback: calculating 1500 ms ... (timeout = 1000 ms)'), # noqa: E501
call('on_abort AddTwoNumbersLongRunningActionMissingCallback'), # noqa: E501
call('on_delete AddTwoNumbersLongRunningActionMissingCallback'), # noqa: E501
call('__del__ AddTwoNumbersLongRunningActionMissingCallback')] # noqa: E501
assert not hasattr(bt_runner._instance, '_result')
assert bt_runner._instance.get_status() == NodeStatus.ABORTED
assert bt_runner._instance.get_contingency_message() == 'TIMEOUT'
########################################################################
def test_AddTwoNumbersLongRunningActionMissingCallback2_1500_3_5(self):
"""Test AddTwoNumbersLongRunningActionMissingCallback2 example node.
Test a long running calculation which is slower (1500ms) than
the timeout (1000ms). on_timeout handler is not overridden and
also on_abort is not overridden.
"""
mock.reset_mock()
bt_runner = BehaviorTreeRunner()
start = datetime.now()
bt_runner.run(AddTwoNumbersLongRunningActionMissingCallback2, '1500 3 5 => ?result')
end = datetime.now()
delta = end - start
assert int(delta.total_seconds() * 1000) > 1000
assert int(delta.total_seconds() * 1000) < 1100
print(mock.call_args_list)
assert mock.call_args_list == [call('__init__ AddTwoNumbersLongRunningActionMissingCallback2'), # noqa: E501
call('on_init AddTwoNumbersLongRunningActionMissingCallback2'), # noqa: E501
call('AddTwoNumbersLongRunningActionMissingCallback2: calculating 1500 ms ... (timeout = 1000 ms)'), # noqa: E501
call('on_delete AddTwoNumbersLongRunningActionMissingCallback2'), # noqa: E501
call('__del__ AddTwoNumbersLongRunningActionMissingCallback2')] # noqa: E501
assert not hasattr(bt_runner._instance, '_result')
assert bt_runner._instance.get_status() == NodeStatus.ABORTED
assert bt_runner._instance.get_contingency_message() == 'TIMEOUT'
|
StarcoderdataPython
|
119551
|
from gym_pcgrl.envs.pcgrl_env import PcgrlEnv
from gym_pcgrl.envs.pcgrl_env_3D import PcgrlEnv3D
|
StarcoderdataPython
|
1932210
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
try:
import socketserver
except ImportError:
import SocketServer as socketserver
import socket
import ssl
import mimetypes
import webbrowser
import struct
import base64
import hashlib
import sys
import threading
import signal
import time
import os
import re
try:
from urllib import unquote
from urllib import quote
from urlparse import urlparse
from urlparse import parse_qs
except ImportError:
from urllib.parse import unquote
from urllib.parse import quote
from urllib.parse import unquote_to_bytes
from urllib.parse import urlparse
from urllib.parse import parse_qs
import cgi
import weakref
import zlib
import select
def gzip_encode(content):
gzip_compress = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
data = gzip_compress.compress(content) + gzip_compress.flush()
return data
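# Quick sanity check (illustrative, not part of the original module): the wbits value
# zlib.MAX_WBITS | 16 makes compressobj emit a gzip container, so the result can be decoded
# with the standard gzip module (Python 3):
#   import gzip
#   assert gzip.decompress(gzip_encode(b"hello world" * 10)) == b"hello world" * 10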
clients = {}
runtimeInstances = weakref.WeakValueDictionary()
pyLessThan3 = sys.version_info < (3,)
_MSG_ACK = '3'
_MSG_JS = '2'
_MSG_UPDATE = '1'
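# Summary of the client-bound message prefixes (derived from how these constants are used
# later in this module, not from a separate protocol document):
#   '0' + root_id + ',' + percent-encoded html    full page content after the handshake
#   _MSG_UPDATE ('1') + widget_id + ',' + html    partial update of a single widget
#   _MSG_JS ('2') + javascript code               code to execute in the browser
#   _MSG_ACK ('3')                                acknowledges a received client message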
def to_websocket(data):
# encoding and decoding utility function
if pyLessThan3:
return quote(data)
return quote(data, encoding='utf-8')
def from_websocket(data):
# encoding and decoding utility function
if pyLessThan3:
return unquote(data)
return unquote(data, encoding='utf-8')
def encode_text(data):
if not pyLessThan3:
return data.encode('utf-8')
return data
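# Illustrative round trip (a sketch, not part of the original module; Python 3): outgoing
# payloads are percent-encoded by to_websocket and decoded again by from_websocket, so
# non-ASCII text survives transport unchanged:
#   assert from_websocket(to_websocket(u"héllo wörld")) == u"héllo wörld"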
def get_method_by_name(root_node, name):
val = None
if hasattr(root_node, name):
val = getattr(root_node, name)
return val
def get_method_by_id(_id):
global runtimeInstances
return runtimeInstances.get(str(_id), None)
def parse_session_cookie(cookie_to_cook):
""" cookie_to_cook = http_header['cookie']
"""
#print("cookie_to_cook: %s"%str(cookie_to_cook))
session_value = None
tokens = cookie_to_cook.split(";")
for tok in tokens:
if 'remi_session=' in tok:
#print("found session id: %s"%str(tok))
try:
session_value = int(tok.replace('remi_session=', ''))
except Exception:
pass
return session_value
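# Illustrative examples (a sketch; the cookie values are hypothetical): the helper above
# extracts the numeric remi_session value from a raw Cookie header and returns None when
# it is absent:
#   parse_session_cookie("foo=bar; remi_session=1618033988749")  ->  1618033988749
#   parse_session_cookie("foo=bar")                              ->  None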
class WebSocketsHandler(socketserver.StreamRequestHandler):
magic = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
def __init__(self, headers, *args, **kwargs):
self.headers = headers
self.handshake_done = False
self._log = logging.getLogger('remi.server.ws')
socketserver.StreamRequestHandler.__init__(self, *args, **kwargs)
def setup(self):
socketserver.StreamRequestHandler.setup(self)
self._log.info('connection established: %r' % (self.client_address,))
self.handshake_done = False
def handle(self):
global clients
self._log.debug('handle')
# on some systems (e.g. ROS) the default socket timeout
# is less than expected; we force it to infinite (None), the default socket value
self.request.settimeout(None)
if self.handshake():
while True:
if not self.read_next_message():
clients[self.session].websockets.discard(self)
self.handshake_done = False
self._log.debug('ws ending websocket service')
break
@staticmethod
def bytetonum(b):
if pyLessThan3:
b = ord(b)
return b
def read_next_message(self):
# noinspection PyBroadException
try:
try:
length = self.rfile.read(2)
except ValueError:
# socket was closed, just return without errors
return False
length = self.bytetonum(length[1]) & 127
if length == 126:
length = struct.unpack('>H', self.rfile.read(2))[0]
elif length == 127:
length = struct.unpack('>Q', self.rfile.read(8))[0]
masks = [self.bytetonum(byte) for byte in self.rfile.read(4)]
decoded = ''
for char in self.rfile.read(length):
decoded += chr(self.bytetonum(char) ^ masks[len(decoded) % 4])
self.on_message(from_websocket(decoded))
except socket.timeout:
return False
except Exception:
self._log.error('Error managing incoming websocket message', exc_info=True)
return False
return True
def send_message(self, message):
if not self.handshake_done:
self._log.warning("ignoring message %s (handshake not done)" % message[:10])
return False
self._log.debug('send_message: %s... -> %s' % (message[:10], self.client_address))
out = bytearray()
out.append(129)
length = len(message)
if length <= 125:
out.append(length)
elif 126 <= length <= 65535:
out.append(126)
out += struct.pack('>H', length)
else:
out.append(127)
out += struct.pack('>Q', length)
if not pyLessThan3:
message = message.encode('utf-8')
out = out + message
readable, writable, errors = select.select([], [self.request,], [], 0) #last parameter is timeout, when 0 is non blocking
#self._log.debug('socket status readable=%s writable=%s errors=%s' % ((self.request in readable), (self.request in writable), (self.request in errors)))
writable = self.request in writable
if not writable:
return False
self.request.sendall(out)
return True
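# Framing recap (matches the code above and RFC 6455): 129 is 0x81, i.e. FIN set with the
# text-frame opcode; payloads up to 125 bytes carry the length in the second byte, 126
# signals a 2-byte extended length ('>H') and 127 an 8-byte one ('>Q'). Server-to-client
# frames are sent unmasked, which is why no mask bit or masking key is appended here.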
def handshake(self):
self._log.debug('handshake')
key = self.headers['Sec-WebSocket-Key']
self.session = None
if 'cookie' in self.headers:
if self.headers['cookie']!=None:
self.session = parse_session_cookie(self.headers['cookie'])
if self.session == None:
return False
if not self.session in clients.keys():
return False
digest = hashlib.sha1((key.encode("utf-8")+self.magic))
digest = digest.digest()
digest = base64.b64encode(digest)
response = 'HTTP/1.1 101 Switching Protocols\r\n'
response += 'Upgrade: websocket\r\n'
response += 'Connection: Upgrade\r\n'
response += 'Sec-WebSocket-Accept: %s\r\n\r\n' % digest.decode("utf-8")
self._log.info('handshake complete')
self.request.sendall(response.encode("utf-8"))
self.handshake_done = True
# if an update happens between the websocket connection and its handshake,
# it does not get displayed; the App has to be informed that the handshake is done
# so that it can send a full refresh
clients[self.session].websocket_handshake_done(self)
return True
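# Worked example of the accept-key derivation above, using the sample handshake from
# RFC 6455 (not a value produced by this module at runtime): for the client key
# 'dGhlIHNhbXBsZSBub25jZQ==', base64(sha1(key + magic)) yields
# 's3pPLMBiTxaQ9kYGzzhZRbK+xOo=', which is what the server must echo back in
# Sec-WebSocket-Accept.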
def on_message(self, message):
global runtimeInstances
self.send_message(_MSG_ACK)
with clients[self.session].update_lock:
# noinspection PyBroadException
try:
# saving the websocket in order to update the client
if self not in clients[self.session].websockets:
clients[self.session].websockets.add(self)
# parsing messages
chunks = message.split('/')
self._log.debug('on_message: %s' % chunks[0])
if len(chunks) > 3: # msgtype,widget,function,params
# if this is a callback
msg_type = 'callback'
if chunks[0] == msg_type:
widget_id = chunks[1]
function_name = chunks[2]
params = message[
len(msg_type) + len(widget_id) + len(function_name) + 3:]
param_dict = parse_parametrs(params)
callback = get_method_by_name(runtimeInstances[widget_id], function_name)
if callback is not None:
callback(**param_dict)
except Exception:
self._log.error('error parsing websocket', exc_info=True)
def close(self, terminate_server=True):
try:
self.request.setblocking(False)
self.request.shutdown(socket.SHUT_RDWR)
self.finish()
if terminate_server:
self.server.shutdown()
except Exception:
self._log.error("exception in WebSocketsHandler.close method", exc_info=True)
def parse_parametrs(p):
"""
Parses the parameters given by POST or websocket requests,
expecting them in the form: "11|par1='asd'|6|par2=1"
returns a dict like {par1:'asd', par2:1}
"""
ret = {}
while len(p) > 1 and p.count('|') > 0:
s = p.split('|')
l = int(s[0]) # length of param field
if l > 0:
p = p[len(s[0]) + 1:]
field_name = p.split('|')[0].split('=')[0]
field_value = p[len(field_name) + 1:l]
p = p[l + 1:]
ret[field_name] = field_value
return ret
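# Illustrative sketch (not part of the original remi module): a tiny encoder for the
# length-prefixed "<len>|name=value|" format parsed above, handy for exercising
# parse_parametrs in isolation. The trailing '|' per field is an assumption made here,
# not necessarily the exact wire format produced by the client-side javascript.
def _example_encode_parametrs(params):
    out = ''
    for name, value in params.items():
        field = '%s=%s' % (name, value)
        out += '%d|%s|' % (len(field), field)
    return out
# e.g. parse_parametrs(_example_encode_parametrs({'par1': 'asd'})) == {'par1': 'asd'}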
# noinspection PyPep8Naming
class App(BaseHTTPRequestHandler, object):
"""
This class handles any incoming request from the browser
The main application class can subclass this
In the do_POST and do_GET methods it is expected to receive requests such as:
- function calls with parameters
- file requests
"""
re_static_file = re.compile(r"^([\/]*[\w\d]+:[-_. $@?#£'%=()\/\[\]!+°§^,\w\d]+)") #https://regex101.com/r/uK1sX1/6
re_attr_call = re.compile(r"^/*(\w+)\/(\w+)\?{0,1}(\w*\={1}(\w|\.)+\&{0,1})*$")
def __init__(self, request, client_address, server, **app_args):
self._app_args = app_args
self.root = None
self._log = logging.getLogger('remi.request')
super(App, self).__init__(request, client_address, server)
def _get_list_from_app_args(self, name):
try:
v = self._app_args[name]
if isinstance(v, (tuple, list)):
vals = v
else:
vals = [v]
except KeyError:
vals = []
return vals
def _instance(self):
global clients
global runtimeInstances
"""
This method is used to get the Application instance previously created.
By managing this, it is possible to switch between a "single instance for
multiple clients" and a "multiple instances for multiple clients" execution model.
"""
self.session = 0
#checking previously defined session
if 'cookie' in self.headers:
self.session = parse_session_cookie(self.headers['cookie'])
#if not a valid session id
if self.session == None:
self.session = 0
if not self.session in clients.keys():
self.session = 0
#if no session id
if self.session == 0:
if self.server.multiple_instance:
self.session = int(time.time()*1000)
#send session to browser
del self.headers['cookie']
#if the client instance doesn't exist
if not(self.session in clients):
self.update_interval = self.server.update_interval
from remi import gui
head = gui.HEAD(self.server.title)
# use the default css, but append a version based on its hash, to stop browser caching
head.add_child('internal_css', "<link href='/res:style.css' rel='stylesheet' />\n")
body = gui.BODY()
body.add_class('remi-main')
body.onload.connect(self.onload)
body.ononline.connect(self.ononline)
body.onpagehide.connect(self.onpagehide)
body.onpageshow.connect(self.onpageshow)
body.onresize.connect(self.onresize)
self.page = gui.HTML()
self.page.add_child('head', head)
self.page.add_child('body', body)
if not hasattr(self, 'websockets'):
self.websockets = set()
self.update_lock = threading.RLock()
if not hasattr(self, '_need_update_flag'):
self._need_update_flag = False
self._stop_update_flag = False
if self.update_interval > 0:
self._update_thread = threading.Thread(target=self._idle_loop)
self._update_thread.setDaemon(True)
self._update_thread.start()
runtimeInstances[str(id(self))] = self
clients[self.session] = self
else:
#restore instance attributes
client = clients[self.session]
self.websockets = client.websockets
self.page = client.page
self.update_lock = client.update_lock
self.update_interval = client.update_interval
self._need_update_flag = client._need_update_flag
if hasattr(client, '_update_thread'):
self._update_thread = client._update_thread
net_interface_ip = self.headers.get('Host', "%s:%s"%(self.connection.getsockname()[0],self.server.server_address[1]))
websocket_timeout_timer_ms = str(self.server.websocket_timeout_timer_ms)
pending_messages_queue_length = str(self.server.pending_messages_queue_length)
self.page.children['head'].set_internal_js(str(id(self)), net_interface_ip, pending_messages_queue_length, websocket_timeout_timer_ms)
def main(self, *_):
""" Subclasses of App class *must* declare a main function
that will be the entry point of the application.
Inside the main function you have to declare the GUI structure
and return the root widget. """
raise NotImplementedError("Applications must implement 'main()' function.")
def _idle_loop(self):
""" This is used to exec the idle function in a safe context and a separate thread
"""
while not self._stop_update_flag:
time.sleep(self.update_interval)
with self.update_lock:
try:
self.idle()
except Exception:
self._log.error("exception in App.idle method", exc_info=True)
if self._need_update_flag:
try:
self.do_gui_update()
except Exception:
self._log.error('''exception during gui update. It is advisable to
use App.update_lock when updating the GUI from external threads.''', exc_info=True)
def idle(self):
""" Idle function called every UPDATE_INTERVAL before the gui update.
Useful to schedule tasks. """
pass
def _need_update(self, emitter=None, child_ignore_update=False):
if child_ignore_update:
# the widget tree is processed to make it available for an intentional
# client update and to reset the changed flags of the changed widgets;
# otherwise they would be updated on the next update cycle
changed_widget_dict = {}
self.root.repr(changed_widget_dict)
return
if self.update_interval == 0:
# no interval, immediate update
self.do_gui_update()
else:
#will be updated after idle loop
self._need_update_flag = True
def do_gui_update(self):
""" This method gets called also by Timer, a new thread, and so needs to lock the update
"""
with self.update_lock:
changed_widget_dict = {}
self.root.repr(changed_widget_dict)
for widget in changed_widget_dict.keys():
html = changed_widget_dict[widget]
__id = str(widget.identifier)
self._send_spontaneous_websocket_message(_MSG_UPDATE + __id + ',' + to_websocket(html))
self._need_update_flag = False
def websocket_handshake_done(self, ws_instance_to_update):
msg = ""
with self.update_lock:
msg = "0" + self.root.identifier + ',' + to_websocket(self.page.children['body'].innerHTML({}))
ws_instance_to_update.send_message(msg)
def set_root_widget(self, widget):
self.page.children['body'].append(widget, 'root')
self.root = widget
self.root.disable_refresh()
self.root.attributes['data-parent-widget'] = str(id(self))
self.root._parent = self
self.root.enable_refresh()
msg = "0" + self.root.identifier + ',' + to_websocket(self.page.children['body'].innerHTML({}))
self._send_spontaneous_websocket_message(msg)
def _send_spontaneous_websocket_message(self, message):
for ws in list(self.websockets):
# noinspection PyBroadException
try:
if ws.send_message(message):
# if the message was sent ok, continue with the next client
continue
except Exception:
self._log.error("sending websocket spontaneous message", exc_info=True)
self._log.debug("removing websocket instance, communication error with client")
# we get here if the message was not sent successfully; the client is then removed
try:
self.websockets.remove(ws)
except Exception:
pass # happens when there are multiple clients
else:
ws.close(terminate_server=False)
def execute_javascript(self, code):
self._send_spontaneous_websocket_message(_MSG_JS + code)
def notification_message(self, title, content, icon=""):
"""This function sends "javascript" message to the client, that executes its content.
In this particular code, a notification message is shown
"""
code = """
var options = {
body: "%(content)s",
icon: "%(icon)s"
}
if (!("Notification" in window)) {
alert("%(content)s");
}else if (Notification.permission === "granted") {
var notification = new Notification("%(title)s", options);
}else if (Notification.permission !== 'denied') {
Notification.requestPermission(function (permission) {
if (permission === "granted") {
var notification = new Notification("%(title)s", options);
}
});
}
""" % {'title': title, 'content': content, 'icon': icon}
self.execute_javascript(code)
def do_POST(self):
self._instance()
file_data = None
# listener_widget = None
# listener_function = None
try:
# Parse the form data posted
filename = self.headers['filename']
listener_widget = runtimeInstances[self.headers['listener']]
listener_function = self.headers['listener_function']
form = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
# Echo back information about what was posted in the form
for field in form.keys():
field_item = form[field]
if field_item.filename:
# The field contains an uploaded file
file_data = field_item.file.read()
file_len = len(file_data)
self._log.debug('post: uploaded %s as "%s" (%d bytes)\n' % (field, field_item.filename, file_len))
get_method_by_name(listener_widget, listener_function)(file_data, filename)
else:
# Regular form value
self._log.debug('post: %s=%s\n' % (field, form[field].value))
if file_data is not None:
# the filedata is sent to the listener
self._log.debug('GUI - server.py do_POST: fileupload name= %s' % (filename))
self.send_response(200)
except Exception:
self._log.error('post: failed', exc_info=True)
self.send_response(400)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def do_HEAD(self):
self.send_response(200)
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Protected\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
# check the request header here to identify the type of request, HTTP or websocket;
# if this is a websocket request, instantiate a websocket handler, add it to the App's websocket list, and return
if "Upgrade" in self.headers:
if self.headers['Upgrade'].lower() == 'websocket':
# pass the arguments to the websocket handler, otherwise it would lose the last message
# and be unable to complete the handshake
ws = WebSocketsHandler(self.headers, self.request, self.client_address, self.server)
return
"""Handler for the GET requests."""
do_process = False
if self.server.auth is None:
do_process = True
else:
if not ('Authorization' in self.headers) or self.headers['Authorization'] is None:
self._log.info("Authenticating")
self.do_AUTHHEAD()
self.wfile.write(encode_text('no auth header received'))
elif self.headers['Authorization'] == 'Basic ' + self.server.auth.decode():
do_process = True
else:
self.do_AUTHHEAD()
self.wfile.write(encode_text(self.headers['Authorization']))
self.wfile.write(encode_text('not authenticated'))
if do_process:
path = str(unquote(self.path))
# noinspection PyBroadException
try:
self._instance()
# build the page (call main()) in user code, if not built yet
with self.update_lock:
# build the root page once if necessary
if not 'root' in self.page.children['body'].children.keys():
self._log.info('built UI (path=%s)' % path)
self.set_root_widget(self.main(*self.server.userdata))
self._process_all(path)
except Exception:
self._log.error('error processing GET request', exc_info=True)
def _get_static_file(self, filename):
filename = filename.replace("..", "") #avoid backdirs
__i = filename.find(':')
if __i < 0:
return None
key = filename[:__i]
path = filename[__i+1:]
key = key.replace("/","")
paths = {'res': os.path.join(os.path.dirname(__file__), "res")}
static_paths = self._app_args.get('static_file_path', {})
if not type(static_paths)==dict:
self._log.error("App's parameter static_file_path must be a Dictionary.", exc_info=False)
static_paths = {}
paths.update(static_paths)
if not key in paths:
return None
return os.path.join(paths[key], path)
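# Illustrative example (hypothetical key and paths): if the App is constructed with
# static_file_path={'my_res': './res/'} passed through its keyword arguments, a request
# for '/my_res:logo.png' resolves to './res/logo.png', while the built-in 'res' key always
# points at the package's own res directory (e.g. '/res:style.css' above).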
def _process_all(self, func):
self._log.debug('get: %s' % func)
static_file = self.re_static_file.match(func)
attr_call = self.re_attr_call.match(func)
if (func == '/') or (not func):
self.send_response(200)
self.send_header("Set-Cookie", "remi_session=%s; SameSite=Lax"%(self.session))
self.send_header('Content-type', 'text/html')
self.end_headers()
with self.update_lock:
# render the HTML
page_content = self.page.repr()
self.wfile.write(encode_text("<!DOCTYPE html>\n"))
self.wfile.write(encode_text(page_content))
elif static_file:
filename = self._get_static_file(static_file.groups()[0])
if not filename:
self.send_response(404)
return
mimetype, encoding = mimetypes.guess_type(filename)
self.send_response(200)
self.send_header('Content-type', mimetype if mimetype else 'application/octet-stream')
if self.server.enable_file_cache:
self.send_header('Cache-Control', 'public, max-age=86400')
self.end_headers()
with open(filename, 'rb') as f:
content = f.read()
self.wfile.write(content)
elif attr_call:
with self.update_lock:
param_dict = parse_qs(urlparse(func).query)
# parse_qs returns parameters as lists; here we take the first element
for k in param_dict:
param_dict[k] = param_dict[k][0]
widget, func = attr_call.group(1, 2)
try:
content, headers = get_method_by_name(get_method_by_id(widget), func)(**param_dict)
if content is None:
self.send_response(503)
return
self.send_response(200)
except IOError:
self._log.error('attr %s/%s call error' % (widget, func), exc_info=True)
self.send_response(404)
return
except (TypeError, AttributeError):
self._log.error('attr %s/%s not available' % (widget, func))
self.send_response(503)
return
for k in headers:
self.send_header(k, headers[k])
self.end_headers()
try:
self.wfile.write(content)
except TypeError:
self.wfile.write(encode_text(content))
def close(self):
""" Command to initiate an App to close
"""
self._log.debug('shutting down...')
self.server.server_starter_instance.stop()
def on_close(self):
""" Called by the server when the App have to be terminated
"""
self._stop_update_flag = True
for ws in list(self.websockets):
ws.close()
def onload(self, emitter):
""" WebPage Event that occurs on webpage loaded
"""
self._log.debug('App.onload event occurred')
def onerror(self, message, source, lineno, colno, error):
""" WebPage Event that occurs on webpage errors
"""
self._log.debug("""App.onerror event occurred in webpage:
\nMESSAGE:%s\nSOURCE:%s\nLINENO:%s\nCOLNO:%s\nERROR:%s\n"""%(message, source, lineno, colno, error))
def ononline(self, emitter):
""" WebPage Event that occurs on webpage goes online after a disconnection
"""
self._log.debug('App.ononline event occurred')
def onpagehide(self, emitter):
""" WebPage Event that occurs on webpage when the user navigates away
"""
self._log.debug('App.onpagehide event occurred')
def onpageshow(self, emitter, width, height):
""" WebPage Event that occurs on webpage gets shown
"""
self._log.debug('App.onpageshow event occurred')
def onresize(self, emitter, width, height):
""" WebPage Event that occurs on webpage gets resized
"""
self._log.debug('App.onresize event occurred. Width:%s Height:%s'%(width, height))
class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
daemon_threads = False
# noinspection PyPep8Naming
def __init__(self, server_address, RequestHandlerClass,
auth, multiple_instance, enable_file_cache, update_interval,
websocket_timeout_timer_ms, pending_messages_queue_length,
title, server_starter_instance, certfile, keyfile, ssl_version, *userdata):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self.auth = auth
self.multiple_instance = multiple_instance
self.enable_file_cache = enable_file_cache
self.update_interval = update_interval
self.websocket_timeout_timer_ms = websocket_timeout_timer_ms
self.pending_messages_queue_length = pending_messages_queue_length
self.title = title
self.server_starter_instance = server_starter_instance
self.userdata = userdata
self.certfile = certfile
self.keyfile = keyfile
self.ssl_version = ssl_version
        if self.ssl_version is not None:
self.socket = ssl.wrap_socket(self.socket, keyfile=self.keyfile, certfile=self.certfile, server_side=True, ssl_version=self.ssl_version, do_handshake_on_connect=True)
class Server(object):
# noinspection PyShadowingNames
def __init__(self, gui_class, title='', start=True, address='127.0.0.1', port=0, username=None, password=<PASSWORD>,
multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=True,
websocket_timeout_timer_ms=1000, pending_messages_queue_length=1000,
certfile=None, keyfile=None, ssl_version=None, userdata=()):
self._gui = gui_class
self._title = title or gui_class.__name__
self._sserver = None
self._sth = None
self._base_address = ''
self._address = address
self._sport = port
self._multiple_instance = multiple_instance
self._enable_file_cache = enable_file_cache
self._update_interval = update_interval
self._start_browser = start_browser
self._websocket_timeout_timer_ms = websocket_timeout_timer_ms
self._pending_messages_queue_length = pending_messages_queue_length
self._certfile = certfile
self._keyfile = keyfile
self._ssl_version = ssl_version
self._userdata = userdata
if username and password:
self._auth = base64.b64encode(encode_text("%s:%s" % (username, password)))
else:
self._auth = None
if not isinstance(userdata, tuple):
raise ValueError('userdata must be a tuple')
self._log = logging.getLogger('remi.server')
self._alive = True
if start:
            self._myid = threading.get_ident()
self.start()
self.serve_forever()
@property
def title(self):
return self._title
@property
def address(self):
return self._base_address
def start(self):
# Create a web server and define the handler to manage the incoming
# request
self._sserver = ThreadedHTTPServer((self._address, self._sport), self._gui, self._auth,
self._multiple_instance, self._enable_file_cache,
self._update_interval, self._websocket_timeout_timer_ms,
self._pending_messages_queue_length, self._title,
self, self._certfile, self._keyfile, self._ssl_version, *self._userdata)
shost, sport = self._sserver.socket.getsockname()[:2]
self._log.info('Started httpserver http://%s:%s/'%(shost,sport))
# when listening on multiple net interfaces the browsers connects to localhost
if shost == '0.0.0.0':
shost = '127.0.0.1'
self._base_address = 'http://%s:%s/' % (shost,sport)
if self._start_browser:
try:
import android
android.webbrowser.open(self._base_address)
except ImportError:
# use default browser instead of always forcing IE on Windows
if os.name == 'nt':
webbrowser.get('windows-default').open(self._base_address)
else:
webbrowser.open(self._base_address)
self._sth = threading.Thread(target=self._sserver.serve_forever)
self._sth.daemon = False
self._sth.start()
def serve_forever(self):
# we could join on the threads, but join blocks all interrupts (including
# ctrl+c, so just spin here
# noinspection PyBroadException
try:
def sig_manager(sig, callstack):
self.stop()
self._log.info('*** signal %d received.' % sig)
return signal.SIG_IGN
prev_handler = signal.signal(signal.SIGINT, sig_manager)
except Exception:
# signal.pause() is missing for Windows; wait 1ms and loop instead
pass
except KeyboardInterrupt:
pass
while self._alive:
try:
time.sleep(1)
except Exception:
self._alive = False
self._log.debug(' ** serve_forever() quitting')
def stop(self):
global clients
self._alive = False
self._sserver.shutdown()
for client in clients.values():
client.on_close()
class StandaloneServer(Server):
def __init__(self, gui_class, title='', width=800, height=600, resizable=True, fullscreen=False, start=True,
userdata=()):
Server.__init__(self, gui_class, title=title, start=False, address='127.0.0.1', port=0, username=None,
password=<PASSWORD>,
multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=False,
websocket_timeout_timer_ms=1000, pending_messages_queue_length=1000, userdata=userdata)
self._application_conf = {'width': width, 'height': height, 'resizable': resizable, 'fullscreen': fullscreen}
if start:
self.serve_forever()
def serve_forever(self):
try:
import webview
except ImportError:
raise ImportError('PyWebView is missing. Please install it by:\n '
'pip install pywebview\n '
'more info at https://github.com/r0x0r/pywebview')
else:
Server.start(self)
webview.create_window(self.title, self.address, **self._application_conf)
webview.start()
Server.stop(self)
def start(main_gui_class, **kwargs):
"""This method starts the webserver with a specific App subclass."""
debug = True if sys.flags.debug else kwargs.pop('debug', False)
standalone = kwargs.pop('standalone', False)
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO,
format='%(name)-16s %(levelname)-8s %(message)s')
logging.getLogger('remi').setLevel(
level=logging.DEBUG if debug else logging.INFO)
if standalone:
s = StandaloneServer(main_gui_class, start=True, **kwargs)
else:
s = Server(main_gui_class, start=True, **kwargs)
|
StarcoderdataPython
|
3476835
|
version = '1.0.0b5'
|
StarcoderdataPython
|
22251
|
# -*- coding: utf-8 -*-
# Citations
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_required, current_user
import logging, sys, re
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from DictionaryOfNewZealandEnglish.database import db
from DictionaryOfNewZealandEnglish.headword.citation.forms import *
from DictionaryOfNewZealandEnglish.headword.citation.models import *
import datetime as dt
blueprint = Blueprint("citations", __name__, url_prefix='/headwords/citations',
static_folder="../static")
@blueprint.route("/edit", methods=["GET", "POST"])
@login_required
def edit():
if not current_user.is_admin:
return redirect(url_for('public.home'))
headword = Headword.query.get( request.args.get('headword_id') )
citation_id = request.args.get('citation_id')
citation = Citation.query.get( citation_id )
form = CitationForm(request.form, obj=citation)
if request.method == "GET":
date = __pretty_print_date(citation)
return render_template("headwords/citations/edit.html", form=form,
citation_id=citation_id,
date=date,
headword=headword)
if request.method == "POST":
data = __set_data_for_citation(citation, form)
citation = Citation.query.get( citation_id )
date = __pretty_print_date(citation)
return render_template("headwords/citations/edit.html", form=form,
citation_id=citation_id,
date=date,
headword=headword)
@blueprint.route("/new", methods=["GET"])
@login_required
def new():
if not current_user.is_admin:
return redirect(url_for('public.home'))
headword = Headword.query.get( request.args.get('headword_id') )
form = CitationForm(request.form)
return render_template("headwords/citations/new.html", form=form,
headword=headword)
@blueprint.route("/create", methods=["POST"])
@login_required
def create():
if not current_user.is_admin:
return redirect(url_for('public.home'))
form = CitationForm(request.form)
headword = Headword.query.get( request.args.get('headword_id') )
try:
citation_id = __create_citation(form, headword)
circa = ""
if form.circa.data:
circa = "circa "
date_obj = __form_date(form)
date = __pretty_print_date(date_obj, form.circa.data)
flash("New citation created: {0} ({1}{2})".format(form.author.data,
circa,
date, 'success'))
return render_template("headwords/citations/edit.html",
form=form,
citation_id=citation_id,
date=date,
headword=headword)
except (IntegrityError) as e:
db.session.rollback()
flash("Input error %s" % e)
return render_template("headwords/citations/new.html",
form=form,
headword=headword)
except (InvalidRequestError):
return render_template("headwords/citations/new.html",
form=form,
headword=headword)
@blueprint.route("/delete", methods=["GET"])
@login_required
def delete():
if not current_user.is_admin:
return redirect(url_for('public.home'))
citation = Citation.query.get( request.args.get('citation_id') )
headword = Headword.query.get( request.args.get('headword_id') )
if citation in headword.citations:
headword.citations.remove(citation)
db.session.add(headword)
db.session.commit()
citations = headword.citations
return render_template("headwords/show.html", headword=headword,
citations=citations)
#############################################################################
### Private
def __create_citation(form, headword):
date = __form_date(form)
citation = Citation.create(
date = date,
circa = form.circa.data,
author = form.author.data,
source_id = form.source.data.id,
vol_page = form.vol_page.data,
edition = form.edition.data,
quote = form.quote.data,
notes = form.notes.data,
archived = False,
updated_at = dt.datetime.utcnow(),
updated_by = current_user.username
)
h = Headword.query.get(headword.id)
h.citations.append(citation)
db.session.add(h)
db.session.commit()
return citation.id
def __form_date(form):
if form.date.data == "":
flash("No date entered.", 'warning')
raise InvalidRequestError
form_date = re.split(r'/\s*', form.date.data)
if len(form_date) < 3:
if form.circa.data:
# pad out data to fit into datetime type
if len(form_date) == 2:
y = form_date[1].strip()
m = form_date[0].strip()
d = "1"
if len(form_date) == 1:
y = form_date[0].strip()
m = "1"
d = "1"
else:
flash("Partial date entered, perhaps 'Circa' should be checked.", 'warning')
raise InvalidRequestError
else:
y = form_date[2].strip()
m = form_date[1].strip()
d = form_date[0].strip()
# dt.datetime(y, m, d)
print "### form_date {0} / {1} / {2}".format(y,m,d)
date = dt.datetime(int(y), int(m), int(d))
return date
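# Worked example (illustrative only, not from the original code): with 'Circa'
# checked, a partial date such as "12 / 2020" is padded to day 1 and parsed as
# dt.datetime(2020, 12, 1); a full "5 / 12 / 2020" parses day/month/year into
# dt.datetime(2020, 12, 5).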
def __pretty_print_date(obj, circa=False):
print "### citation {0} {1}".format(obj, circa)
if isinstance(obj, Citation):
d = obj.date.day
m = obj.date.month
y = obj.date.year
circa = obj.circa
if isinstance(obj, dt.datetime):
d = obj.day
m = obj.month
y = obj.year
if circa:
if d == 1:
if m == 1:
m = ""
else:
m = "{0} / ".format(m)
d = ""
else:
d = "{0} / ".format(d)
m = "{0} / ".format(m)
print "test 1 {0}{1}{2}".format(d, m, y)
return "{0}{1}{2}".format(d, m, y)
else:
print "test 2 {0} / {1} / {2}".format(d, m, y)
return "{0} / {1} / {2}".format(d, m, y)
def __set_data_for_citation(citation, form):
try:
date = __form_date(form)
Citation.update(citation,
date = date,
circa = form.circa.data,
author = form.author.data,
source_id = form.source.data.id,
vol_page = form.vol_page.data,
edition = form.edition.data,
quote = form.quote.data,
notes = form.notes.data,
archived = form.archived.data,
updated_at = dt.datetime.utcnow(),
updated_by = current_user.username
)
flash("Edit of citation is saved.", 'success')
return True
except (IntegrityError, InvalidRequestError):
db.session.rollback()
flash("Edit of citation failed.", 'warning')
return False
|
StarcoderdataPython
|
4879493
|
<filename>engine/__init__.py
from .celery import app as celery_app
__all__ = ('celery_app',)
|
StarcoderdataPython
|
3337335
|
from collections import defaultdict
N, NUM_EDGES, Q = [int(x) for x in input().split()]
edges = defaultdict(list)
for i in range(NUM_EDGES):
v1, v2 = [int(x) for x in input().split()]
edges[v1].append(v2)
edges[v2].append(v1)
colors = [int(x) for x in input().split()]
for i in range(Q):
# print(colors)
f, *v = [int(x) for x in input().split()]
# print(f, v)
if f == 1:
n, = v
c = colors[n - 1]
print(c)
        # sprinkle
# print(edges)
for v in edges[n]:
colors[v - 1] = c
elif f == 2:
n, c = v
print(colors[n - 1])
colors[n - 1] = c
|
StarcoderdataPython
|
3451303
|
<reponame>santiagomvc/code2doc
import pandas as pd
import os
import yaml
from datetime import datetime
from utils.transform_text_utils import create_vocab, transform_examples
from utils.code2doc_utils import Code2DocTrain
class ReadParams:
def __init__(self, config):
super().__init__()
self.config = config
def run(self):
# Reading files
config = self.config
with open(config['FILES']['base_params_file'], 'r') as base_params_file:
params = yaml.safe_load(base_params_file)
if os.path.exists(config['FILES']['train_params_file']):
with open(config['FILES']['train_params_file'], 'r') as train_params_file:
train_params = yaml.safe_load(train_params_file)
params = {**params, **train_params}
if 'run_date' in params.keys():
params['run_date'] = datetime.strptime(params['run_date'], '%Y-%m-%d')
else:
params['run_date'] = datetime.now()
# updating config with params
language = params['language']
config_names = config['NAMES']
data_dir = config['PATHS']['data_dir']
extra_data_path = config['PATHS']['extra_data_path']
language_dir = f'{data_dir}/{language}'
raw_data_dir = f'{language_dir}/{language}/{extra_data_path}'
config['PATHS']['language_dir'] = language_dir
config['PATHS']['raw_data_dir'] = raw_data_dir
config['PATHS']['train_dir'] = f"{raw_data_dir}/{config_names['train_dir_name']}"
config['PATHS']['valid_dir'] = f"{raw_data_dir}/{config_names['valid_dir_name']}"
config['PATHS']['test_dir'] = f"{raw_data_dir}/{config_names['test_dir_name']}"
return config, params
class DownloadData:
def __init__(self, config, params):
super().__init__()
self.config = config
self.params = params
def run(self):
        # Importing libraries
import os
import shutil
import zipfile
import requests
# Reading configuations
language = self.params['language']
language_dir = self.config['PATHS']['language_dir']
zip_name = f'{language}.zip'
raw_data_url = self.config['PATHS']['raw_data_url']
url = f'{raw_data_url}/{zip_name}'
# Deleting folder if already exists
if os.path.exists(language_dir) and os.path.isdir(language_dir):
shutil.rmtree(language_dir)
# Creating folder to save the files
os.makedirs(language_dir, exist_ok=True)
# Download the zipped files to a temporal location
r = requests.get(url = url)
with open(zip_name,'wb') as fd:
fd.write(r.content)
# Unzipping files into defined folder
with zipfile.ZipFile(zip_name, 'r') as zip_ref:
zip_ref.extractall(language_dir)
# removing temp file
os.remove(zip_name)
class ReadData:
def __init__(self, config, params):
super().__init__()
self.config = config
self.params = params
def run(self):
# Reading folder paths
train_dir = self.config['PATHS']['train_dir']
valid_dir = self.config['PATHS']['valid_dir']
test_dir = self.config['PATHS']['test_dir']
training_data = []
# iterates over each folder
for data_dir in [train_dir, valid_dir, test_dir]:
# Empty list to save results
dfs_list = []
# List files in the folder
data_dir_files = os.listdir(data_dir)
# Saves each file as a DF in a list
for data_file in data_dir_files:
train_df_partition = pd.read_json(f'{data_dir}/{data_file}', compression = 'gzip', lines=True)
dfs_list.append(train_df_partition)
# Creates ands saves a df with all the data in the folder
df = pd.concat(dfs_list, ignore_index=True)
training_data.append(df)
return training_data
class PrepareData:
def __init__(self, config, params):
super().__init__()
self.config = config
self.params = params
def run(self, training_data):
training_sets_list = []
for df in training_data:
training_sets_list.append(df.code_tokens.values.tolist())
training_sets_list.append(df.docstring_tokens.values.tolist())
training_sets_names = ['X_train', 'Y_train',
'X_valid', 'Y_valid',
'X_test', 'Y_test']
training_sets = dict(zip(training_sets_names, training_sets_list))
return(training_sets)
class TransformText:
def __init__(self, config, params):
super().__init__()
self.config = config
self.params = params
def run(self, training_sets):
# import libraries
import numpy as np
# Extracting parameters
min_input_len = self.params['min_input_len']
min_output_len = self.params['min_output_len']
max_input_len = self.params['max_input_len']
max_output_len = self.params['max_output_len']
# Extracting training sets
code_train, doc_train = training_sets['X_train'], training_sets['Y_train']
code_valid, doc_valid = training_sets['X_valid'], training_sets['Y_valid']
# Creating vocabulary
(code_idx2tkn, code_tkn2idx) = create_vocab(code_train)
(doc_idx2tkn, doc_tkn2idx) = create_vocab(doc_train)
# defining parameters for transforming examples
code_params = {'tkn2idx': code_tkn2idx, 'min_len': min_input_len, 'max_len': max_input_len}
doc_params = {'tkn2idx': doc_tkn2idx, 'min_len': min_output_len, 'max_len': max_output_len}
# indexing and padding the sequences
X_train, Y_train = transform_examples(code_train, doc_train, code_params, doc_params)
X_valid, Y_valid = transform_examples(code_valid, doc_valid, code_params, doc_params)
# Creating a dictionary with the training data
training_input = {
'X_train': X_train,
'Y_train': Y_train,
'X_valid': X_valid,
'Y_valid': Y_valid
}
# Creating a dictionary with all the vocabs
vocabs = {
'code_i2t': code_idx2tkn,
'code_t2i': code_tkn2idx,
'doc_i2t': doc_idx2tkn,
'doc_t2i': doc_tkn2idx
}
# Return X_train and Y_train
return(training_input, vocabs)
class BuildModel:
def __init__(self, config, params):
super().__init__()
self.config = config
self.params = params
def run(self, training_input, vocabs):
# Importing required libraries
from datetime import datetime
from tensorflow import keras
# Extracting needed params
loss = self.params['loss']
optimizer = self.params['optimizer']
metrics = self.params['metrics']
batch_size = self.params['batch_size']
epochs = self.params['epochs']
model_save_dir = self.config['PATHS']['trainings_dir']
model_save_file = self.config['FILES']['model_file']
model_save_logs = self.config['PATHS']['trainings_log_dir']
# Creating folder to save model if it doesn't exists
os.makedirs(model_save_dir, exist_ok=True)
# Extracting training input
X_train, Y_train = training_input['X_train'], training_input['Y_train']
X_valid, Y_valid = training_input['X_valid'], training_input['Y_valid']
        # defining inputs for training data
enc_input_train = X_train
dec_input_train = Y_train[:,:-1]
dec_output_train = Y_train[:,1:]
        # defining inputs for validation data
enc_input_valid = X_valid
dec_input_valid = Y_valid[:,:-1]
dec_output_valid = Y_valid[:,1:]
# Extracting needed data
_, enc_dim = enc_input_train.shape
_, dec_dim = dec_input_train.shape
enc_vocab_size = len(vocabs['code_t2i'].keys())
dec_vocab_size = len(vocabs['doc_t2i'].keys())
# Defining keras logging callback
logdir = model_save_logs + '/' + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
# Creating the model class
code2doc = Code2DocTrain(enc_dim, dec_dim, enc_vocab_size, dec_vocab_size)
# Extracting the model object
model = code2doc.enc_dec_model()
# Defining hyperparameters for the model
model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
# Fitting the model to the data
model.fit([enc_input_train, dec_input_train], dec_output_train,
batch_size=batch_size, epochs=epochs,
validation_data=([enc_input_valid, dec_input_valid], dec_output_valid),
callbacks=[tensorboard_callback])
#Saving the model
model.save(model_save_file)
return(model)
# class EvalModel:
# def __init__(self, config, params):
# super().__init__()
# self.config = config
# self.params = params
# def run(self, model, training_sets, vocabs):
# # Extracting parameters
# min_input_len = self.params['min_input_len']
# min_output_len = self.params['min_output_len']
# max_input_len = self.params['max_input_len']
# max_output_len = self.params['max_output_len']
# score_save_dir = self.config['PATHS']['trainings_dir']
# score_save_file = self.config['FILES']['model_score']
# # Creating folder to save score if it doesn't exists
# os.makedirs(score_save_dir, exist_ok=True)
# # getting dictionaries with vocabulary
# code_tkn2idx = vocabs['code_t2i']
# doc_tkn2idx = vocabs['doc_t2i']
# # defining parameters for transforming examples
# code_params = {'tkn2idx': code_tkn2idx, 'min_len': min_input_len, 'max_len': max_input_len}
# doc_params = {'tkn2idx': doc_tkn2idx, 'min_len': min_output_len, 'max_len': max_output_len}
# # Extracting eval data
# code_val, doc_val = training_sets['X_valid'], training_sets['Y_valid']
# # indexing and padding the sequences
# X_valid, Y_valid = transform_examples(code_val, doc_val, code_params, doc_params)
# # Defining inputs and outputs
# enc_input = X_valid
# dec_input = Y_valid[:,:-1]
# dec_output = Y_valid[:,1:]
# # evaluating model performance
# score = model.evaluate([enc_input, dec_input], dec_output)
# # Saving model performance
# with open(score_save_file, 'w') as f:
# print(score, file=f)
# return(score)
|
StarcoderdataPython
|
3427177
|
import os,json
import snakemake as smk
from utils import *
import pathlib
channel_files = snakemake.input
if isinstance(channel_files,str):
if channel_files.split('.')[-1] == 'chanlist':
with open(channel_files) as f:
channel_files = f.read().splitlines()
else:
channel_files = [channel_files]
out_dict = {}
for channel in channel_files:
with open(channel,"r") as r:
channel_dict = json.load(r)
name, experiment, period, run,channel_name, tier = os.path.basename(channel).split("-")
out_dict[channel_name] = channel_dict
pathlib.Path(os.path.dirname(snakemake.output[0])).mkdir(parents=True, exist_ok=True)
with open(snakemake.output[0],"w") as w:
json.dump(out_dict, w,indent=4)
|
StarcoderdataPython
|
11242472
|
<gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path("create_user/<slug:name>/<slug:email>", views.create_user, name="create_user"),
path("get_user/<slug:name>", views.get_user, name="get user info"),
path("transfer/<slug:src>/<slug:dst>/<int:amount>", views.transfer, name="transfer")
]
|
StarcoderdataPython
|
9644405
|
<gh_stars>0
# Program to handle unique game IDs per player
def new_game_id():
game_id = 0 # REIMPLEMENT TO GET UNIQUE VALUE NOT IN GLOBAL ARRAY
return game_id
|
StarcoderdataPython
|
11207815
|
"""
Implement atoi which converts a string to an integer.
The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
If no valid conversion could be performed, a zero value is returned.
Note:
Only the space character ' ' is considered as whitespace character.
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−231, 231 − 1]. If the numerical value is out of the range of representable values, INT_MAX (231 − 1) or INT_MIN (−231) is returned.
Example 1:
Input: "42"
Output: 42
Example 2:
Input: " -42"
Output: -42
Explanation: The first non-whitespace character is '-', which is the minus sign.
Then take as many numerical digits as possible, which gets 42.
Example 3:
Input: "4193 with words"
Output: 4193
Explanation: Conversion stops at digit '3' as the next character is not a numerical digit.
Example 4:
Input: "words and 987"
Output: 0
Explanation: The first non-whitespace character is 'w', which is not a numerical
digit or a +/- sign. Therefore no valid conversion could be performed.
Example 5:
Input: "-91283472332"
Output: -2147483648
Explanation: The number "-91283472332" is out of the range of a 32-bit signed integer.
Thefore INT_MIN (−231) is returned.
"""
# V0
# (imports added so the regex- and sys-based solutions below are self-contained)
import re
import sys
class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.strip()
try:
res = re.search('(^[\+\-]?\d+)', str).group()
# res = re.search(r"\d+", s).group()
res = int(res)
res = res if res <= 2147483647 else 2147483647 # 2**31 == 2147483648
res = res if res >= -2147483648 else -2147483648 # -(1)*(2**31) == - 2147483648
except:
res = 0
return res
# V1
# https://blog.csdn.net/coder_orz/article/details/52053932
class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
if not str:
return 0
str = str.strip()
number, flag = 0, 1
if str[0] == '-':
str = str[1:]
flag = -1
elif str[0] == '+':
str = str[1:]
for c in str:
if c >= '0' and c <= '9': # '3' > '2' -> True
number = 10*number + ord(c) - ord('0') # string to integer
else:
break
number = flag * number
number = number if number <= 2147483647 else 2147483647
number = number if number >= -2147483648 else -2147483648
return number
# V1'
# https://blog.csdn.net/coder_orz/article/details/52053932
# IDEA : REGULAR EXPRESSION
class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.strip()
try:
res = re.search('(^[\+\-]?\d+)', str).group()
res = int(res)
res = res if res <= 2147483647 else 2147483647
res = res if res >= -2147483648 else -2147483648
except:
res = 0
return res
# V1''
# https://www.jiuzhang.com/solution/string-to-integer-atoi/#tag-highlight-lang-python
class Solution(object):
# string -> int, remove prefix and +, -. Please note the max, min interval of int when transform
def atoi(self, str):
str = str.strip()
if str == "" :
return 0
i = 0
sign = 1
ret = 0
length = len(str)
MaxInt = (1 << 31) - 1
if str[i] == '+':
i += 1
elif str[i] == '-' :
i += 1
sign = -1
for i in range(i, length) :
if str[i] < '0' or str[i] > '9' :
break
ret = ret * 10 + int(str[i])
if ret > sys.maxint:
break
ret *= sign
if ret >= MaxInt:
return MaxInt
if ret < MaxInt * -1 :
return MaxInt * - 1 - 1
return ret
# V2
class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
INT_MAX = 2147483647
INT_MIN = -2147483648
result = 0
if not str:
return result
i = 0
while i < len(str) and str[i].isspace():
i += 1
if len(str) == i:
return result
sign = 1
if str[i] == "+":
i += 1
elif str[i] == "-":
sign = -1
i += 1
while i < len(str) and '0' <= str[i] <= '9':
if result > (INT_MAX - int(str[i])) / 10:
return INT_MAX if sign > 0 else INT_MIN
result = result * 10 + int(str[i])
i += 1
return sign * result
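# Minimal usage sketch (added for illustration; not part of the original
# solutions). It exercises the last-defined Solution class (V2) against the
# examples from the problem statement above.
if __name__ == "__main__":
    _s = Solution()
    assert _s.myAtoi("42") == 42
    assert _s.myAtoi("   -42") == -42
    assert _s.myAtoi("4193 with words") == 4193
    assert _s.myAtoi("words and 987") == 0
    assert _s.myAtoi("-91283472332") == -2147483648
    print("all atoi examples passed")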
|
StarcoderdataPython
|
3307299
|
<reponame>begyy/PayMe
from .methods_subscribe_api import PayComResponse
class Paycom(PayComResponse):
ORDER_FOUND = 200
ORDER_NOT_FOND = -31050
INVALID_AMOUNT = -31001
def check_order(self, amount, account, *args, **kwargs):
"""
>>> self.check_order(amount=amount, account=account)
"""
pass
def successfully_payment(self, account, transaction, *args, **kwargs):
"""
>>> self.successfully_payment(account=account, transaction=transaction)
"""
pass
def cancel_payment(self, account, transaction, *args, **kwargs):
"""
>>> self.cancel_payment(account=account,transaction=transaction)
"""
pass
|
StarcoderdataPython
|
11376515
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipaySocialQuestionnareTaskPublishResponse(AlipayResponse):
def __init__(self):
super(AlipaySocialQuestionnareTaskPublishResponse, self).__init__()
self._ext_info = None
self._out_request_no = None
self._qstn_id = None
self._rmt_qstn_id = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def out_request_no(self):
return self._out_request_no
@out_request_no.setter
def out_request_no(self, value):
self._out_request_no = value
@property
def qstn_id(self):
return self._qstn_id
@qstn_id.setter
def qstn_id(self, value):
self._qstn_id = value
@property
def rmt_qstn_id(self):
return self._rmt_qstn_id
@rmt_qstn_id.setter
def rmt_qstn_id(self, value):
self._rmt_qstn_id = value
def parse_response_content(self, response_content):
response = super(AlipaySocialQuestionnareTaskPublishResponse, self).parse_response_content(response_content)
if 'ext_info' in response:
self.ext_info = response['ext_info']
if 'out_request_no' in response:
self.out_request_no = response['out_request_no']
if 'qstn_id' in response:
self.qstn_id = response['qstn_id']
if 'rmt_qstn_id' in response:
self.rmt_qstn_id = response['rmt_qstn_id']
|
StarcoderdataPython
|
12831199
|
from chess_game.models.board import Board
def test_board_init():
board = Board()
assert 8 == len(board.board)
for row in board.board:
assert 8 == len(row)
def test_game_board_has_pawns():
board = Board()
board_white_pawn_row = board.board[1]
for cell in board_white_pawn_row:
assert 'wp0' == str(cell)
board_black_pawn_row = board.board[6]
for cell in board_black_pawn_row:
assert 'bp0' == str(cell)
def test_game_board_has_empty_spaces():
board = Board()
for row in range(2, 6):
board_row = board.board[row]
for cell in board_row:
assert '' == str(cell)
def test_game_board_has_white_pieces():
expected_white_pieces = ["wr0", "wh0", "wb0", "wk0", "wq0", "wb0", "wh0", "wr0"]
board = Board()
white_row = board.board[0]
for index, cell in enumerate(white_row):
assert str(cell) == expected_white_pieces[index]
def test_game_board_has_black_pieces():
expected_black_pieces = ["br0", "bh0", "bb0", "bk0", "bq0", "bb0", "bh0", "br0"]
board = Board()
black_row = board.board[7]
for index, cell in enumerate(black_row):
assert str(cell) == expected_black_pieces[index]
|
StarcoderdataPython
|
8114282
|
import math
import random
import obnlib as z
APP_CODE = "#H02"
APP_NAME = "<NAME>"
APP_RELEASE = "2022"
APP_VERSION = "V0.1"
APP_FPS = 50
last_score = 0
#------------------------------------------------------------------------------
IMAGE_TITLE = z.Image(\
b"\xFF\xFF\xFF\x30\x30\x30\x30\xFF\xFF\xFF\x00\xE0\xF0\xF8\x18\x18" \
b"\x18\xF8\xF0\xE0\x00\xFF\xFF\xFF\x00\xFF\xFF\xFF\x00\xE0\xF0\xF8" \
b"\x18\x18\x18\xF8\xF0\xE0\x00\x18\xF8\xF8\xF8\x00\xF0\x38\xF8\xF8" \
b"\xC0\xF0\xF8\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x07\x87\xC7\xE0\xE0\xE0\xF0\xF7\xF7\xF7" \
b"\xF0\xF1\xE3\xC7\x06\x06\x06\x07\x03\x01\x00\xC7\xE7\xF7\x30\x37" \
b"\x77\x07\x00\x01\x03\x87\x86\x86\x86\x07\x03\x01\x00\x00\x81\x87" \
b"\x87\x87\x01\x00\x01\xF7\xF7\xF7\x01\x80\x80\x80\x80\x00\x00\x80" \
b"\x80\x80\x80\x00\x00\x00\x80\x80\x80\x00\x80\x80\x00\x03\x07\x1F" \
b"\x7F\xFF\xFF\xFF\x71\x3F\x3F\x7F\x78\x7F\x3F\x00\x00\x00\x00\x00" \
b"\x00\x71\x63\x67\x67\x7F\x3E\x1C\x00\x1E\x3F\x7F\x6D\x6D\x6F\x6F" \
b"\x0E\x00\x1E\x3F\x7F\x6D\x6D\x6F\x6F\x0E\x00\x7F\x7F\x7F\x0C\x7F" \
b"\x7F\x73\x40\x1E\x3F\x7F\x6D\x6D\x6F\x6F\x0E\x00\x7F\x7F\x7F\x01" \
b"\x01\x01", 70, 24)
class TitleState:
ID = 0
def prepare(self):
menu_items = [z.Menu.Item("START GAME", self.menu_start_game),
z.Menu.Item(None, self.menu_sound),
z.Menu.Item("CREDIT", self.menu_credit)]
self.menu_item_sound = menu_items[1]
self.set_sound_menu_label()
self.menu = z.Menu(menu_items)
self.start = False
self.credit = False
self.dirty = True
def update(self):
if self.credit:
if z.btn_d(z.BTN_A|z.BTN_B):
self.credit = False
self.dirty = True
z.click()
else:
self.menu.update()
return GameState.ID if self.start else self.ID
def draw(self):
if self.dirty:
z.cls()
if self.credit:
z.credit(APP_NAME, APP_RELEASE)
else:
z.blit(IMAGE_TITLE, 1, 0)
global last_score
if last_score > 0:
z.text(str(last_score), z.SCRN_W + 1, 0, z.TEXT_R, 1, True)
if not self.credit and (self.dirty or self.menu.dirty):
self.menu.draw()
self.dirty = False
def menu_start_game(self):
self.start = True
def menu_sound(self):
z.sound(not z.sound_on)
self.set_sound_menu_label()
z.click()
def menu_credit(self):
self.credit = True
self.dirty = True
z.click()
def set_sound_menu_label(self):
label = "SOUND " + ("ON" if z.sound_on else "OFF")
self.menu_item_sound.label = label
#------------------------------------------------------------------------------
UNIT = 5
IMAGE_PLAYER = z.Image([
b"\x03\x1F\x0F\x19\x0E",
b"\x13\x0F\x0F\x09\x1E",
b"\x16\x1E\x1E\x12\x1C",
b"\x03\x1F\x0F\x1B\x0E",
b"\x03\x0F\x1F\x09\x0E"], 5, 5)
class GameState:
ID = 1
def prepare(self):
global last_score
last_score = 0
self.cave = Cave()
self.player = Player()
self.dots = Dots()
self.pause = False
self.gameover = False
self.counter = APP_FPS * 2
self.dirty = True
z.play("O4S6CDEFG12", 3)
def update(self):
next_state = self.ID
dir = 0
forward = 0.0
if self.counter == 0:
dir = (z.btn(z.BTN_R) or z.btn(z.BTN_A)) - z.btn(z.BTN_L)
forward = 1.0
if z.btn_d(z.BTN_B):
self.pause = not self.pause
self.dirty = True
else:
self.counter -= 1
if self.gameover:
forward = 0.5
if z.btn_d(z.BTN_A):
self.prepare()
elif z.btn_d(z.BTN_B) or self.counter == 0:
next_state = TitleState.ID
else:
dir = 1
if not self.pause and z.frames & 1:
self.dirty = True
self.cave.update(forward)
self.player.update(self.cave, dir)
if forward > 0:
self.dots.update(self.cave)
if self.cave.phase >= Cave.PHASE_SHAKE:
z.tone(random.randint(40, 100), 40)
if self.player.move == UNIT - 1 and self.counter == 0:
z.play("O6S1DBF", 1)
if not self.gameover and self.player.dead:
global last_score
last_score = self.cave.score
self.gameover = True
self.counter = APP_FPS * 8
z.play("O4S6ED+DS8C+C<BS12A+A", 2)
return next_state
def draw(self):
if not self.dirty:
return
z.cls()
self.cave.draw()
self.player.draw(self.cave)
self.dots.draw()
if self.gameover:
z.text("GAMEOVER", 36, 0, z.TEXT_C, 1, True)
z.text("SCORE " + str(self.cave.score), 36, 35, z.TEXT_C, 1, True)
elif self.counter > 0:
z.text("READY?", 36, 10, z.TEXT_C, 1, True)
elif self.pause:
z.text("PAUSE", 36, 10, z.TEXT_C, 1, True)
elif self.player.pos >= Cave.COLUMN_MID - 1:
z.text(str(self.cave.score), 0, 0, z.TEXT_L, 1, True)
self.dirty = False
class Cave:
HEIGHT = z.SCRN_H
GAP_MAX_INIT = HEIGHT / 2
COLUMN_MAX = 16
COLUMN_MID = COLUMN_MAX // 2
PHASE_MAX = 256
PHASE_SHAKE = PHASE_MAX - 12
def __init__(self):
self.score = 0
self.hollow_cnt = 2
self.columns = []
top = self.HEIGHT/2
bottom = top + Player.HEIGHT
for _ in range(self.COLUMN_MID):
self.columns.append(Column(top, bottom))
for _ in range(self.COLUMN_MID, self.COLUMN_MAX):
self.add_column()
self.phase = 0
self.gap_max = self.GAP_MAX_INIT
self.gap = 0
self.base_top = 0
self.base_bottom = 0
self.offset = 0
def update(self, forward):
self.gap = int((1.0 - math.cos(self.phase*math.pi*2.0/self.PHASE_MAX))
* self.gap_max / 2.0)
self.base_bottom = self.gap // 2
self.base_top = self.base_bottom - self.gap
self.phase += forward
if self.phase >= self.PHASE_MAX:
self.phase = 0
self.gap_max += 0.5
elif self.phase >= self.PHASE_SHAKE and self.phase % 2 < 1:
self.base_top += 1
self.base_bottom += 1
def add_column(self):
c = self.columns[-1]
last_diff = c.bottom - c.top
diff = random.randrange(2)
self.hollow_cnt -= 1
if self.hollow_cnt <= 0:
diff = Player.HEIGHT - diff
self.hollow_cnt = random.randint(2 + self.score//128,
2 + self.score//64)
adjust = (c.bottom - (self.HEIGHT+Player.HEIGHT)/2 + 1.5) // 3
r = Player.HEIGHT*2 + 1 - abs(diff - last_diff) - abs(adjust)
bottom = c.bottom + random.randrange(int(r)) - Player.HEIGHT
if diff > last_diff:
bottom += diff - last_diff
if adjust < 0:
bottom -= adjust
self.columns.append(Column(bottom - diff, bottom))
def draw(self):
for i, c in enumerate(self.columns):
x = int(i*UNIT - self.offset)
top = int(self.base_top + c.top - 1)
bottom = int(self.base_bottom + c.bottom)
if i > 0:
if top < last_top:
z.line(x - 1, top, x - 1, last_top)
if top > last_top:
z.line(x, last_top, x, top)
if bottom < last_bottom:
z.line(x, bottom, x, last_bottom)
if bottom > last_bottom:
z.line(x - 1, last_bottom, x - 1, bottom)
z.line(x, top, x + 4, top)
z.line(x, bottom, x + 4, bottom)
z.pset(x + c.tx, top - c.ty)
z.pset(x + c.bx, bottom + c.by)
last_top = top
last_bottom = bottom
class Column:
def __init__(self, top, bottom):
self.top = top
self.bottom = bottom
self.tx = random.randrange(UNIT)
self.ty = random.randrange(Cave.HEIGHT // 2)
self.bx = random.randrange(UNIT)
self.by = random.randrange(Cave.HEIGHT // 2)
class Player:
HEIGHT = IMAGE_PLAYER.h
def __init__(self):
self.dir = 0
self.pos = 0
self.jump = 0
self.move = 0
self.dead = False
def update(self, cave, dir):
if self.dead:
return
c = cave.columns[self.pos]
if dir != 0 and self.move == 0 and self.pos + dir >= 0:
self.dir = dir
c = cave.columns[self.pos]
n = cave.columns[self.pos + dir]
diff = min(c.bottom, n.bottom) - max(c.top, n.top)
if diff + cave.gap >= Player.HEIGHT:
self.pos += dir
self.move = UNIT
self.jump = c.bottom - n.bottom
c = n
if self.move > 0:
self.move -= 1
if self.pos >= Cave.COLUMN_MID:
cave.offset += 1
if self.move == 0:
cave.score += 1
cave.offset -= UNIT
cave.columns.pop(0)
cave.add_column()
self.pos -= 1
if cave.phase == 0 and c.bottom - c.top < self.HEIGHT/2:
self.dead = True
def draw(self, cave):
c = cave.columns[self.pos]
x = self.pos*UNIT - self.move*self.dir - cave.offset
if self.dead:
y = cave.base_bottom + c.top
else:
y = cave.base_bottom + c.bottom + self.jump*self.move/UNIT + 0.5
if y > z.SCRN_H:
y = z.SCRN_H
y -= self.HEIGHT
if cave.base_top + c.top > y:
y = cave.base_top + c.top
z.blit(IMAGE_PLAYER, int(x), int(y), self.move, 0, self.dir<0)
class Dots:
def __init__(self):
self.dots = []
self.offset = 0
def update(self, cave):
scroll = 1 if self.offset != cave.offset else 0
self.offset = cave.offset
base = cave.base_bottom
for d in reversed(self.dots):
if d.update(scroll, base):
self.dots.remove(d)
if random.random() > cave.phase/Cave.PHASE_MAX + 0.25:
x = random.randrange(z.SCRN_W)
c = cave.columns[(x+cave.offset) // UNIT]
self.dots.append(Dot(x, c.top + cave.base_top - 1, c.bottom))
def draw(self):
for d in self.dots:
d.draw()
class Dot:
def __init__(self, x, y, b):
self.x = x
self.y = y
self.b = b
self.v = 0
def update(self, scroll, base):
self.x -= scroll
self.y += self.v
self.v += 0.125
return self.x < 0 or self.y >= self.b + base
def draw(self):
z.pset(self.x, int(self.y))
#------------------------------------------------------------------------------
if z.check(0.02):
z.start(APP_FPS, APP_CODE, APP_VERSION, [TitleState(), GameState()])
|
StarcoderdataPython
|
355198
|
<gh_stars>1-10
from __future__ import unicode_literals
from django.apps import apps
def handler_process_signal(sender, **kwargs):
Quota = apps.get_model(app_label='quotas', model_name='Quota')
for quota in Quota.objects.filter(enabled=True):
backend_instance = quota.get_backend_instance()
if backend_instance.sender == sender and kwargs['signal'].__class__ == backend_instance.signal.__class__:
backend_instance.process(**kwargs)
|
StarcoderdataPython
|
6439159
|
<gh_stars>1-10
from abaqusConstants import *
from .Constraint import Constraint
from ..Region.Region import Region
class MultipointConstraint(Constraint):
"""The MultipointConstraint object defines a constraint between a group of
MultipointConstraint nodes located on a region and a reference point.
The MultipointConstraint object is derived from the ConstrainedSketchConstraint object.
Attributes
----------
suppressed: Boolean
A Boolean specifying whether the constraint is suppressed or not. The default value is
OFF.
Notes
-----
This object can be accessed by:
.. code-block:: python
import interaction
mdb.models[name].constraints[name]
The corresponding analysis keywords are:
- MPC
"""
# A Boolean specifying whether the constraint is suppressed or not. The default value is
# OFF.
suppressed: Boolean = OFF
def __init__(self, name: str, surface: Region, controlPoint: Region, mpcType: SymbolicConstant,
csys: str = None, userType: int = 0, userMode: SymbolicConstant = DOF_MODE_MPC):
"""This method creates a MultipointConstraint object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].MultipointConstraint
Parameters
----------
name
A String specifying the constraint repository key.
surface
A Region object specifying the surface on which the MultipointConstraint nodes are
located.
controlPoint
A Region object specifying the constraint control point.
mpcType
A SymbolicConstant specifying the MPC type of the constraint. Possible values are
BEAM_MPC, ELBOW_MPC, PIN_MPC, LINK_MPC, TIE_MPC, and USER_MPC.
csys
None or a DatumCsys object specifying the initial orientation of the local coordinate
system for the MultipointConstraint's degrees of freedom. If *localCsys*=None, the
MultipointConstraint is defined in the global coordinate system. The default value is
None.
userType
An Int specifying to differentiate between different constraint types in a user-defined
MultipointConstraint. The default value is 0.The *userType* argument applies only when
*mpcType*=USER_MPC.
userMode
A SymbolicConstant specifying the mode of the constraint when it is user-defined.
Possible values are DOF_MODE_MPC and NODE_MODE_MPC. The default value is
DOF_MODE_MPC.The *userMode* argument applies only when *mpcType*=USER_MPC.
Returns
-------
A MultipointConstraint object.
"""
super().__init__()
pass
def setValues(self, csys: str = None, userType: int = 0, userMode: SymbolicConstant = DOF_MODE_MPC):
"""This method modifies the MultipointConstraint object.
Parameters
----------
csys
None or a DatumCsys object specifying the initial orientation of the local coordinate
system for the MultipointConstraint's degrees of freedom. If *localCsys*=None, the
MultipointConstraint is defined in the global coordinate system. The default value is
None.
userType
An Int specifying to differentiate between different constraint types in a user-defined
MultipointConstraint. The default value is 0.The *userType* argument applies only when
*mpcType*=USER_MPC.
userMode
A SymbolicConstant specifying the mode of the constraint when it is user-defined.
Possible values are DOF_MODE_MPC and NODE_MODE_MPC. The default value is
DOF_MODE_MPC.The *userMode* argument applies only when *mpcType*=USER_MPC.
"""
pass
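# Hedged usage sketch (comments only; requires an Abaqus/CAE session, and the
# Region objects `surf` and `ctrl_pt` are assumed to exist already):
#
#   mdb.models['Model-1'].MultipointConstraint(name='MPC-1', surface=surf,
#                                              controlPoint=ctrl_pt,
#                                              mpcType=BEAM_MPC)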
|
StarcoderdataPython
|
3358294
|
# Copyright (C) 2010 by the Massachusetts Institute of Technology.
# All rights reserved.
# Export of this software from the United States of America may
# require a specific license from the United States Government.
# It is the responsibility of any person or organization contemplating
# export to obtain such a license before exporting.
#
# WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
# distribute this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both that copyright notice and
# this permission notice appear in supporting documentation, and that
# the name of M.I.T. not be used in advertising or publicity pertaining
# to distribution of the software without specific, written prior
# permission. Furthermore if you modify this software you must label
# your software as modified software and not distribute it in such a
# fashion that it might be confused with the original M.I.T. software.
# M.I.T. makes no representations about the suitability of
# this software for any purpose. It is provided "as is" without express
# or implied warranty.
# Invoked by the testrealm target in the top-level Makefile. Creates
# a test realm and spawns a shell pointing at it, for convenience of
# manual testing. If a numeric argument is present after options,
# creates that many fully connected test realms and point the shell at
# the first one.
from k5test import *
# A list of directories containing programs in the build tree.
progpaths = [
'kdc',
os.path.join('kadmin', 'server'),
os.path.join('kadmin', 'cli'),
os.path.join('kadmin', 'dbutil'),
os.path.join('kadmin', 'ktutil'),
os.path.join('clients', 'kdestroy'),
os.path.join('clients', 'kinit'),
os.path.join('clients', 'klist'),
os.path.join('clients', 'kpasswd'),
os.path.join('clients', 'ksu'),
os.path.join('clients', 'kvno'),
os.path.join('clients', 'kswitch'),
'slave'
]
# Add program directories to the beginning of PATH.
def supplement_path(env):
# Construct prefixes; these will end in a trailing separator.
path_prefix = manpath_prefix = ''
for dir in progpaths:
path_prefix += os.path.join(buildtop, dir) + os.pathsep
# Assume PATH exists in env for simplicity.
env['PATH'] = path_prefix + env['PATH']
if args:
realms = cross_realms(int(args[0]), start_kadmind=True)
realm = realms[0]
else:
realm = K5Realm(start_kadmind=True)
env = realm.env.copy()
supplement_path(env)
pwfilename = os.path.join('testdir', 'passwords')
pwfile = open(pwfilename, 'w')
pwfile.write('user: %s\nadmin: %s\n' % (password('<PASSWORD>'), password('<PASSWORD>')))
pwfile.close()
print
print 'Realm files are in %s' % realm.testdir
print 'KRB5_CONFIG is %s' % env['KRB5_CONFIG']
print 'KRB5_KDC_PROFILE is %s' % env['KRB5_KDC_PROFILE']
print 'KRB5CCNAME is %s' % env['KRB5CCNAME']
print 'KRB5_KTNAME is %s' % env['KRB5_KTNAME']
print 'KRB5RCACHEDIR is %s' % env['KRB5RCACHEDIR']
print 'Password for user is %s (see also %s)' % (password('<PASSWORD>'), pwfilename)
print 'Password for admin is %s' % password('admin')
print
subprocess.call([os.getenv('SHELL')], env=env)
success('Create test krb5 realm.')
|
StarcoderdataPython
|
369872
|
<gh_stars>0
import requests
import contextlib
from .path import Path
from urllib.parse import urlparse
from tqdm import tqdm
from .pbar import file_proc_bar
def download_bar(iterable, chunk_size = None, total_size = None, exist_size = 0):
def bar():
with file_proc_bar(total=total_size) as pbar:
pbar.update(exist_size)
for x in iterable:
yield x
pbar.update(chunk_size)
return bar()
class URL:
def __init__(self, url):
self.url = url
self.o = urlparse(url)
def to_str(self, ts = str):
return ts(self.url)
def __str__(self):
return self.to_str(str)
def __repr__(self):
return self.to_str(repr)
@property
def path(self):
return self.o.path
@property
def name(self):
return self.path.split('/')[-1]
def download(self, save_path, continuous = False, enable_print = True, enable_bar = True, chunk_size = 4<<10):
url = self.url
save_path = Path(save_path)
r = requests.get(url, stream = True)
total_size = int(r.headers['Content-Length'] )
exist_size = 0
if continuous and save_path.exists():
assert(save_path.is_file())
exist_size = save_path.size
if exist_size:
r.close()
if exist_size == total_size: #has downloaded
if enable_print:
print(f'Downloading: {url}')
print(f'Save as: {save_path}')
if enable_bar:
with file_proc_bar(total=total_size) as pbar:
pbar.update(total_size)
return
#redo request
r = requests.get(url, stream = True, headers = {'Range': f'bytes={exist_size}-'})
fw = save_path.open('ab')
else: #none was downloaded
fw = save_path.open('wb')
else:
save_path.prnt.mkdir()
fw = save_path.open('wb')
it = r.iter_content(chunk_size=chunk_size)
if enable_print:
if enable_bar:
it = download_bar(it, chunk_size = chunk_size, total_size = total_size, exist_size = exist_size)
print(f'Downloading: {url}')
print(f'Save as: {save_path}')
for data in it:
fw.write(data)
fw.close()
r.close()
__all__ = ['URL']
|
StarcoderdataPython
|
5041137
|
import torch
import unittest
from magic_vnet.nestvnet import *
class NestVNetTest(unittest.TestCase):
data = torch.rand((1, 1, 32, 32, 32))
target_size = torch.Size([1, 1, 32, 32, 32])
def test_nestvnet(self):
model = NestVNet(1, 2)
out = model(self.data)
self.assertEqual(out.size(), self.target_size)
def test_nestvnet_cse(self):
model = NestVNet_CSE(1, 2)
out = model(self.data)
self.assertEqual(out.size(), self.target_size)
def test_nestvnet_sse(self):
model = NestVNet_SSE(1, 2)
out = model(self.data)
self.assertEqual(out.size(), self.target_size)
def test_nestvbnet(self):
model = NestVBNet(1, 2)
out = model(self.data)
self.assertEqual(out.size(), self.target_size)
def test_sknestvnet(self):
model = SK_NestVNet(1, 2, se_type=True)
out = model(self.data)
self.assertEqual(out.size(), self.target_size)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
5030351
|
import sys
import random
sys.path.append('../LU_model')
import db
sys.path.pop()
sys.path.append('../data_resource')
import CrawlerTimeTable
DB_IP = "172.16.17.32" # doctorbot GCP ip
DB_PORT = 27017 # default MongoDB port
DB_NAME = "doctorbot" # use the collection
class intent_slot_generator(object):
def __init__(self):
client = db.MongoClient(DB_IP, DB_PORT)
collection_division = client[DB_NAME]["division"]
collection_disease = client[DB_NAME]["disease"]
disease_list = [line.rstrip('\n') for line in open("../data_resource/disease_dict.txt", "r")]
#print(disease_list)
notfind = True
while notfind :
notfind = False
disease = disease_list[random.randint(0, len(disease_list)-1)]
#print(disease)
while collection_division.find({"disease": disease}).count() < 1:
disease = disease_list[random.randint(0, len(disease_list) - 1)]
#print(disease)
for collection in collection_disease.find({"disease_c": disease}):
division = collection['department'][0]
doctor_list = []
for collection in collection_division.find({"disease": disease}):
doctor_list.extend(collection['doctor'])
#print(doctor_list)
            if len(doctor_list) == 0:
notfind = True
elif len(doctor_list) > 1:
name = doctor_list[random.randint(0, len(doctor_list)-1)]
else:
name = doctor_list[0]
if not notfind:
time_list = CrawlerTimeTable.Timetable(name).get_time()
#print(time_list)
                if len(time_list) == 0:
notfind = True
elif len(time_list) > 1:
time = time_list[random.randint(0, len(time_list)-1)]
else:
time = time_list[0]
self.goal = {'intent': random.randint(1, 5),
'slot': {'disease': disease, 'division': division, 'doctor': name, 'time': time}}
#def main():
#print(intent_slot_generator().goal)
#if __name__ == '__main__':
# main()
|
StarcoderdataPython
|
5125788
|
# -*- coding: utf-8 -*-
import scrapy
import csv
class WikiSpider(scrapy.Spider):
name = 'wiki'
start_urls = ['https://en.wikipedia.org/wiki/List_of_countries_by_intentional_homicide_rate']
def parse(self, response):
country_names_pre = response.xpath('(//table)[2]//a/text()').getall()
country_names = country_names_pre[3:]
rank = []
for i in response.xpath('(//table)[2]//td[1]/text()').getall():
data = i.replace('\n','').replace(' ','')
if len(data) > 0 and int(data):
rank.append(data)
area = []
for i in response.xpath('(//table)[2]//td[2]/text()').getall():
data = i.replace('\n','')
if len(data) > 0:
area.append(data)
subarea = []
for i in response.xpath('(//table)[2]//td[3]/text()').getall():
data = i.replace('\n','')
if len(data) > 0:
subarea.append(data)
rate = []
for i in response.xpath('(//table)[2]//td[4]/text()').getall():
data = i.replace('\n','')
if len(data) > 0:
rate.append(data)
count = []
for i in response.xpath('(//table)[2]//td[5]/text()').getall():
data = i.replace('\n','')
if len(data) > 0:
count.append(data)
year = []
for i in response.xpath('(//table)[2]//td[6]/text()').getall():
data = i.replace('\n','')
if len(data) > 0:
year.append(data)
array = []
for key,name in enumerate(country_names):
buff = []
buff.append(name)
buff.append(rank[key])
buff.append(area[key])
buff.append(subarea[key])
buff.append(rate[key])
buff.append(count[key])
buff.append(year[key])
array.append(buff)
print(buff)
with open('intentional_homicide_rates.csv','w') as write:
writer = csv.writer(write)
for i in array:
writer.writerow(i)
|
StarcoderdataPython
|
3336545
|
import os
import torch
from torch.nn import functional as F
from stft_core.modeling.utils import cat
from stft_core.structures.bounding_box import BoxList #infer
from stft_core.structures.boxlist_ops import boxlist_iou
INF = 100000000
def get_num_gpus():
return int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
def reduce_sum(tensor):
if get_num_gpus() <= 1:
return tensor
import torch.distributed as dist
tensor = tensor.clone()
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
return tensor
def sigmoid_focal_loss(
inputs,
targets,
alpha: float = -1,
gamma: float = 2,
reduction: str = "none",
):
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(
inputs, targets, reduction="none"
)
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
if reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss
sigmoid_focal_loss_jit = torch.jit.script(
sigmoid_focal_loss
) # type: torch.jit.ScriptModule
def iou_loss(inputs, targets, weight=None, box_mode="xyxy", loss_type="iou", reduction="none"):
"""
Compute iou loss of type ['iou', 'giou', 'linear_iou']
Args:
inputs (tensor): pred values
targets (tensor): target values
weight (tensor): loss weight
box_mode (str): 'xyxy' or 'ltrb', 'ltrb' is currently supported.
loss_type (str): 'giou' or 'iou' or 'linear_iou'
reduction (str): reduction manner
Returns:
loss (tensor): computed iou loss.
"""
if box_mode == "ltrb":
inputs = torch.cat((-inputs[..., :2], inputs[..., 2:]), dim=-1)
targets = torch.cat((-targets[..., :2], targets[..., 2:]), dim=-1)
elif box_mode != "xyxy":
raise NotImplementedError
eps = torch.finfo(torch.float32).eps
inputs_area = (inputs[..., 2] - inputs[..., 0]).clamp_(min=0) \
* (inputs[..., 3] - inputs[..., 1]).clamp_(min=0)
targets_area = (targets[..., 2] - targets[..., 0]).clamp_(min=0) \
* (targets[..., 3] - targets[..., 1]).clamp_(min=0)
w_intersect = (torch.min(inputs[..., 2], targets[..., 2])
- torch.max(inputs[..., 0], targets[..., 0])).clamp_(min=0)
h_intersect = (torch.min(inputs[..., 3], targets[..., 3])
- torch.max(inputs[..., 1], targets[..., 1])).clamp_(min=0)
area_intersect = w_intersect * h_intersect
area_union = targets_area + inputs_area - area_intersect
ious = area_intersect / area_union.clamp(min=eps)
if loss_type == "iou":
loss = -ious.clamp(min=eps).log()
elif loss_type == "linear_iou":
loss = 1 - ious
elif loss_type == "giou":
g_w_intersect = torch.max(inputs[..., 2], targets[..., 2]) \
- torch.min(inputs[..., 0], targets[..., 0])
g_h_intersect = torch.max(inputs[..., 3], targets[..., 3]) \
- torch.min(inputs[..., 1], targets[..., 1])
ac_uion = g_w_intersect * g_h_intersect
gious = ious - (ac_uion - area_union) / ac_uion.clamp(min=eps)
loss = 1 - gious
else:
raise NotImplementedError
if weight is not None:
loss = loss * weight.view(loss.size())
if reduction == "mean":
loss = loss.sum() / max(weight.sum().item(), eps)
else:
if reduction == "mean":
loss = loss.mean()
if reduction == "sum":
loss = loss.sum()
return loss
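# Sanity-check sketch (illustrative, not part of the original module): for two
# identical boxes in "xyxy" mode the IoU is 1, so 'linear_iou' and 'giou' give a
# loss of 0 and 'iou' gives -log(1) = 0, e.g.
#   b = torch.tensor([[0., 0., 10., 10.]])
#   iou_loss(b, b, loss_type="giou")  # -> tensor([0.])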
def smooth_l1_loss(input,
target,
beta: float,
reduction: str = "none",
size_average=False):
"""
Smooth L1 loss defined in the Fast R-CNN paper as:
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
if beta < 1e-5:
# if beta == 0, then torch.where will result in nan gradients when
# the chain rule is applied due to pytorch implementation details
# (the False branch "0.5 * n ** 2 / 0" has an incoming gradient of
# zeros, rather than "no gradient"). To avoid this issue, we define
# small values of beta to be exactly l1 loss.
loss = torch.abs(input - target)
else:
n = torch.abs(input - target)
cond = n < beta
loss = torch.where(cond, 0.5 * n**2 / beta, n - 0.5 * beta)
if reduction == "mean" or size_average:
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss
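# Worked example (illustrative): with beta = 1.0, an error of |x| = 0.5 falls in
# the quadratic branch, 0.5 * 0.5**2 / 1.0 = 0.125, while |x| = 2.0 falls in the
# linear branch, 2.0 - 0.5 * 1.0 = 1.5.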
def permute_to_N_HWA_K(tensor, K):
"""
Transpose/reshape a tensor from (N, (A x K), H, W) to (N, (HxWxA), K)
"""
assert tensor.dim() == 4, tensor.shape
N, _, H, W = tensor.shape
tensor = tensor.view(N, -1, K, H, W)
tensor = tensor.permute(0, 3, 4, 1, 2)
tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)
return tensor
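# Shape example (illustrative): an input of shape (N=2, A*K=9*4, H=8, W=8) with
# K=4 becomes (2, 9, 4, 8, 8) after view, (2, 8, 8, 9, 4) after permute, and
# finally (2, 8*8*9, 4) = (2, 576, 4), i.e. one row of K values per anchor.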
def permute_all_cls_and_box_to_N_HWA_K_and_concat(
box_cls, box_delta, box_center, border_cls, border_delta, num_classes=2):
"""
Rearrange the tensor layout from the network output, i.e.:
list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi)
to per-image predictions, i.e.:
Tensor: of shape (N x sum(Hi x Wi x A), K)
"""
# for each feature level, permute the outputs to make them be in the
# same format as the labels. Note that the labels are computed for
# all feature levels concatenated, so we keep the same representation
# for the objectness, the box_delta and the centerness
box_cls_flattened = [permute_to_N_HWA_K(x, num_classes) for x in box_cls]
box_delta_flattened = [permute_to_N_HWA_K(x, 4) for x in box_delta]
box_center_flattened = [permute_to_N_HWA_K(x, 1) for x in box_center]
border_cls_flattened = [permute_to_N_HWA_K(x, num_classes) for x in border_cls]
border_delta_flattened = [permute_to_N_HWA_K(x, 4) for x in border_delta]
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
box_cls = cat(box_cls_flattened, dim=1).view(-1, num_classes)
box_delta = cat(box_delta_flattened, dim=1).view(-1, 4)
box_center = cat(box_center_flattened, dim=1).view(-1, 1)
border_cls = cat(border_cls_flattened, dim=1).view(-1, num_classes)
border_delta = cat(border_delta_flattened, dim=1).view(-1, 4)
return box_cls, box_delta, box_center, border_cls, border_delta
class Shift2BoxTransform(object):
def __init__(self, weights):
"""
Args:
weights (4-element tuple): Scaling factors that are applied to the
(dl, dt, dr, db) deltas.
"""
self.weights = weights
def get_deltas(self, shifts, boxes):
"""
Get box regression transformation deltas (dl, dt, dr, db) that can be used
to transform the `shifts` into the `boxes`. That is, the relation
``boxes == self.apply_deltas(deltas, shifts)`` is true.
Args:
shifts (Tensor): shifts, e.g., feature map coordinates
boxes (Tensor): target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(shifts, torch.Tensor), type(shifts)
assert isinstance(boxes, torch.Tensor), type(boxes)
deltas = torch.cat((shifts - boxes[..., :2], boxes[..., 2:] - shifts),
dim=-1) * shifts.new_tensor(self.weights)
return deltas
def apply_deltas(self, deltas, shifts):
"""
Apply transformation `deltas` (dl, dt, dr, db) to `shifts`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
deltas[i] represents k potentially different class-specific
box transformations for the single shift shifts[i].
shifts (Tensor): shifts to transform, of shape (N, 2)
"""
assert torch.isfinite(deltas).all().item()
shifts = shifts.to(deltas.dtype)
if deltas.numel() == 0:
return torch.empty_like(deltas)
deltas = deltas.view(deltas.size()[:-1] + (-1, 4)) / shifts.new_tensor(self.weights)
boxes = torch.cat((shifts.unsqueeze(-2) - deltas[..., :2],
shifts.unsqueeze(-2) + deltas[..., 2:]),
dim=-1).view(deltas.size()[:-2] + (-1, ))
return boxes
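# --- Added illustration (not part of the original code) ---
# Round-trip sketch for Shift2BoxTransform: the deltas computed by get_deltas map the
# shifts back onto the original boxes through apply_deltas. Example values only.
def _demo_shift2box_transform():
    transform = Shift2BoxTransform(weights=(1.0, 1.0, 1.0, 1.0))
    shifts = torch.tensor([[10.0, 10.0], [50.0, 60.0]])
    boxes = torch.tensor([[5.0, 4.0, 20.0, 30.0], [40.0, 50.0, 80.0, 90.0]])
    deltas = transform.get_deltas(shifts, boxes)
    recovered = transform.apply_deltas(deltas, shifts)
    assert torch.allclose(recovered, boxes)
    return deltas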
class BorderLossComputation(object):
"""
This class computes the FCOS losses.
"""
def __init__(self, cfg):
self.num_classes = cfg.MODEL.FCOS.NUM_CLASSES - 1
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
self.object_sizes_of_interest = [
[-1, 64],
[64, 128],
[128, 256],
[256, 512],
[512, INF],
]
self.shift2box_transform = Shift2BoxTransform(
weights=(1.0, 1.0, 1.0, 1.0))
self.focal_loss_alpha = cfg.MODEL.FCOS.LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.FCOS.LOSS_GAMMA
self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE
self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
self.border_iou_thresh = cfg.MODEL.BORDER.IOU_THRESH
self.border_bbox_std = cfg.MODEL.BORDER.BBOX_STD
@torch.no_grad()
def get_ground_truth(self, shifts, targets, pre_boxes_list):
"""
Args:
shifts (list[list[Tensor]]): a list of N=#image elements. Each is a
list of #feature level tensors. The tensors contains shifts of
this image on the specific feature level.
targets (list[Instances]): a list of N `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image. Specify `targets` during training only.
Returns:
gt_classes (Tensor):
An integer tensor of shape (N, R) storing ground-truth
labels for each shift.
R is the total number of shifts, i.e. the sum of Hi x Wi for all levels.
Shifts in the valid boxes are assigned their corresponding label in the
[0, K-1] range. Shifts in the background are assigned the label "K".
Shifts in the ignore areas are assigned a label "-1", i.e. ignore.
gt_shifts_deltas (Tensor):
Shape (N, R, 4).
The last dimension represents ground-truth shift2box transform
targets (dl, dt, dr, db) that map each shift to its matched ground-truth box.
The values in the tensor are meaningful only when the corresponding
shift is labeled as foreground.
gt_centerness (Tensor):
An float tensor (0, 1) of shape (N, R) whose values in [0, 1]
storing ground-truth centerness for each shift.
border_classes (Tensor):
An integer tensor of shape (N, R) storing ground-truth
labels for each shift.
R is the total number of shifts, i.e. the sum of Hi x Wi for all levels.
Shifts in the valid boxes are assigned their corresponding label in the
[0, K-1] range. Shifts in the background are assigned the label "K".
Shifts in the ignore areas are assigned a label "-1", i.e. ignore.
border_shifts_deltas (Tensor):
Shape (N, R, 4).
The last dimension represents ground-truth shift2box transform
targets (dl, dt, dr, db) that map each shift to its matched ground-truth box.
The values in the tensor are meaningful only when the corresponding
shift is labeled as foreground.
"""
gt_classes = []
gt_shifts_deltas = []
gt_centerness = []
border_classes = []
border_shifts_deltas = []
for shifts_per_image, targets_per_image, pre_boxes in zip(shifts, targets, pre_boxes_list):
object_sizes_of_interest = torch.cat([
shifts_i.new_tensor(size).unsqueeze(0).expand(
shifts_i.size(0), -1) for shifts_i, size in zip(
shifts_per_image, self.object_sizes_of_interest)
], dim=0)
shifts_over_all_feature_maps = torch.cat(shifts_per_image, dim=0)
gt_boxes = targets_per_image.bbox
area = targets_per_image.area()
center = targets_per_image.center()
deltas = self.shift2box_transform.get_deltas(
shifts_over_all_feature_maps, gt_boxes.unsqueeze(1))
if self.center_sampling_radius > 0:
centers = targets_per_image.center()
is_in_boxes = []
for stride, shifts_i in zip(self.fpn_strides, shifts_per_image):
radius = stride * self.center_sampling_radius
center_boxes = torch.cat((
torch.max(centers - radius, gt_boxes[:, :2]),
torch.min(centers + radius, gt_boxes[:, 2:]),
), dim=-1)
center_deltas = self.shift2box_transform.get_deltas(
shifts_i, center_boxes.unsqueeze(1))
is_in_boxes.append(center_deltas.min(dim=-1).values > 0)
is_in_boxes = torch.cat(is_in_boxes, dim=1)
else:
# no center sampling, it will use all the locations within a ground-truth box
is_in_boxes = deltas.min(dim=-1).values > 0
max_deltas = deltas.max(dim=-1).values
# limit the regression range for each location
is_cared_in_the_level = \
(max_deltas >= object_sizes_of_interest[None, :, 0]) & \
(max_deltas <= object_sizes_of_interest[None, :, 1])
gt_positions_area = targets_per_image.area().unsqueeze(1).repeat(
1, shifts_over_all_feature_maps.size(0))
gt_positions_area[~is_in_boxes] = INF
gt_positions_area[~is_cared_in_the_level] = INF
# if there are still more than one objects for a position,
# we choose the one with minimal area
positions_min_area, gt_matched_idxs = gt_positions_area.min(dim=0)
# ground truth box regression
gt_shifts_reg_deltas_i = self.shift2box_transform.get_deltas(
shifts_over_all_feature_maps, targets_per_image[gt_matched_idxs].bbox)
labels_per_im = targets_per_image.get_field("labels")
# ground truth classes
has_gt = len(targets_per_image) > 0
if has_gt:
gt_classes_i = labels_per_im[gt_matched_idxs]
# Shifts with area inf are treated as background.
                # positions without a matched ground-truth box get the background label (num_classes + 1)
                gt_classes_i[positions_min_area == INF] = self.num_classes + 1
            else:
                # no ground truth in the image: every position is background (num_classes + 1)
                gt_classes_i = torch.zeros_like(gt_matched_idxs) + self.num_classes + 1
# ground truth centerness
left_right = gt_shifts_reg_deltas_i[:, [0, 2]]
top_bottom = gt_shifts_reg_deltas_i[:, [1, 3]]
gt_centerness_i = torch.sqrt(
(left_right.min(dim=-1).values / left_right.max(dim=-1).values).clamp_(min=0)
* (top_bottom.min(dim=-1).values / top_bottom.max(dim=-1).values).clamp_(min=0)
)
gt_classes.append(gt_classes_i)
gt_shifts_deltas.append(gt_shifts_reg_deltas_i)
gt_centerness.append(gt_centerness_i)
# border
iou = boxlist_iou(BoxList(pre_boxes, targets_per_image.size, targets_per_image.mode), targets_per_image)
(max_iou, argmax_iou) = iou.max(dim=1)
invalid = max_iou < self.border_iou_thresh
gt_target = gt_boxes[argmax_iou]
border_cls_target = labels_per_im[argmax_iou]
border_cls_target[invalid] = self.num_classes+1
border_bbox_std = pre_boxes.new_tensor(self.border_bbox_std)
pre_boxes_wh = pre_boxes[:, 2:4] - pre_boxes[:, 0:2]
pre_boxes_wh = torch.cat([pre_boxes_wh, pre_boxes_wh], dim=1)
border_off_target = (gt_target - pre_boxes) / (pre_boxes_wh * border_bbox_std)
border_classes.append(border_cls_target)
border_shifts_deltas.append(border_off_target)
return (
torch.stack(gt_classes),
torch.stack(gt_shifts_deltas),
torch.stack(gt_centerness),
torch.stack(border_classes),
torch.stack(border_shifts_deltas),
)
def __call__(self, shifts, pred_class_logits, pred_shift_deltas, pred_centerness,
targets, bd_based_box, border_box_cls, border_bbox_reg):
(
gt_classes,
gt_shifts_deltas,
gt_centerness,
gt_classes_border,
gt_deltas_border,
) = self.get_ground_truth(shifts, targets, bd_based_box)
(
pred_class_logits,
pred_shift_deltas,
pred_centerness,
border_class_logits,
border_shift_deltas,
) = permute_all_cls_and_box_to_N_HWA_K_and_concat(
pred_class_logits, pred_shift_deltas, pred_centerness,
border_box_cls, border_bbox_reg, self.num_classes
) # Shapes: (N x R, K) and (N x R, 4), respectively.
# fcos
gt_classes = gt_classes.flatten().long()
gt_shifts_deltas = gt_shifts_deltas.view(-1, 4)
gt_centerness = gt_centerness.view(-1, 1)
valid_idxs = gt_classes >= 0
foreground_idxs = (gt_classes >= 0) & (gt_classes != (self.num_classes+1))
num_foreground = foreground_idxs.sum()
acc_centerness_num = gt_centerness[foreground_idxs].sum()
gt_classes_target = torch.zeros_like(pred_class_logits)
gt_classes_target[foreground_idxs, gt_classes[foreground_idxs]-1] = 1
num_gpus = get_num_gpus()
# sync num_pos from all gpus
num_foreground_avg_per_gpu = max(reduce_sum(num_foreground).item() / float(num_gpus), 1.0)
acc_centerness_num_avg_per_gpu = max(reduce_sum(acc_centerness_num).item() / float(num_gpus), 1.0)
# logits loss
loss_cls = sigmoid_focal_loss_jit(
pred_class_logits[valid_idxs],
gt_classes_target[valid_idxs],
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
) / num_foreground_avg_per_gpu
# regression loss
loss_box_reg = iou_loss(
pred_shift_deltas[foreground_idxs],
gt_shifts_deltas[foreground_idxs],
gt_centerness[foreground_idxs],
box_mode="ltrb",
loss_type=self.iou_loss_type,
reduction="sum",
) / acc_centerness_num_avg_per_gpu
# centerness loss
loss_centerness = F.binary_cross_entropy_with_logits(
pred_centerness[foreground_idxs],
gt_centerness[foreground_idxs],
reduction="sum",
) / num_foreground_avg_per_gpu
# borderdet
gt_classes_border = gt_classes_border.flatten().long()
gt_deltas_border = gt_deltas_border.view(-1, 4)
valid_idxs_border = gt_classes_border >= 0.
foreground_idxs_border = (gt_classes_border >= 0) & (gt_classes_border != (self.num_classes+1))
num_foreground_border = foreground_idxs_border.sum()
gt_classes_border_target = torch.zeros_like(border_class_logits)
gt_classes_border_target[
foreground_idxs_border, gt_classes_border[foreground_idxs_border]-1] = 1
num_foreground_border = max(reduce_sum(num_foreground_border).item() / float(num_gpus), 1.0)
# loss_border_cls
loss_border_cls = sigmoid_focal_loss_jit(
border_class_logits[valid_idxs_border],
gt_classes_border_target[valid_idxs_border],
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
) / num_foreground_border
if foreground_idxs_border.numel() > 0:
loss_border_reg = (
smooth_l1_loss(
border_shift_deltas[foreground_idxs_border],
gt_deltas_border[foreground_idxs_border],
beta=0,
reduction="sum"
) / num_foreground_border
)
else:
loss_border_reg = border_shift_deltas.sum()
return loss_cls, loss_box_reg, loss_centerness, loss_border_cls, loss_border_reg
def make_border_loss_evaluator(cfg):
loss_evaluator = BorderLossComputation(cfg)
return loss_evaluator
# ===== fill.py (from billyevans/prefix_storage) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import httplib2
import sys
import md5
if __name__ == '__main__':
count = 0
for line in sys.stdin:
conn = httplib2.HTTPConnectionWithTimeout("localhost", int(sys.argv[1]))
key = line.rstrip()
val = md5.md5(key)
conn.request("POST", "/" + key, val.hexdigest())
res = conn.getresponse()
if res.status != 200:
raise Exception("Wrong status - {0}".format(res.status))
count += 1
print("{0} keys written.".format(count))
# ===== APPRoot/loveword/middleware/meipai_parse.py =====
# -*- coding:utf-8 -*-
import base64
import requests
import json
import re
import execjs
"""
# Method 1: decode the video URL by running the site's JavaScript via execjs
class MeiPai(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
with open("static/loveword/js/meipai_encrypt.js", "r", encoding="utf-8") as f:
resource = f.read()
self.ctx = execjs.compile(resource)
def get_video(self):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
"Upgrade-Insecure-Requests": "1",
"Host": "www.meipai.com",
"Referer": "http://www.meipai.com/"
}
pattern = re.compile('data-video="(.*?)"', re.S)
pattern2 = re.compile('<meta name="description" content="(.*?)"', re.S)
try:
response = self.session.get(url=self.url, headers=headers, timeout=10)
if response.status_code == 200:
video_bs64 = re.findall(pattern, response.text)[0]
title = re.findall(pattern2, response.text)[0]
video_url = self.ctx.call("getmp4", video_bs64)
info = {
"title": title,
"video": "https:"+video_url
}
return json.dumps(info, ensure_ascii=False)
except Exception as e:
            return json.dumps({"info": "No relevant data available, please check the input: " + str(e)}, ensure_ascii=False)
"""
# Method 2: decode the video URL in pure Python
class MeiPai(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
def getHex(self, a):
hex_1 = a[:4][::-1]
str_1 = a[4:]
return str_1, hex_1
def getDec(self, a):
b = str(int(a, 16))
c = list(b[:2])
d = list(b[2:])
return c, d
def substr(self, a, b):
k = int(b[0])
c = a[:k]
d = a[k:k + int(b[1])]
temp = a[int(b[0]):].replace(d, "")
result = c + temp
return result
def getPos(self, a, b):
b[0] = len(a) - int(b[0]) - int(b[1])
return b
def get_video(self):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
"Upgrade-Insecure-Requests": "1",
"Host": "www.meipai.com",
"Referer": "http://www.meipai.com/"
}
pattern = re.compile('data-video="(.*?)"', re.S)
pattern2 = re.compile('<meta name="description" content="(.*?)"', re.S)
try:
response = self.session.get(url=self.url, headers=headers, timeout=10)
if response.status_code == 200:
video_bs64 = re.findall(pattern, response.text)[0]
title = re.findall(pattern2, response.text)[0]
str1, hex1 = self.getHex(video_bs64)
pre, tail = self.getDec(hex1)
d = self.substr(str1, pre)
kk = self.substr(d, self.getPos(d, tail))
a = base64.b64decode(kk)
info = {
"title": title,
"video": "https:"+a.decode(encoding='utf-8')
}
return json.dumps(info, ensure_ascii=False)
except Exception as e:
            return json.dumps({"info": "No relevant data available, please check the input: " + str(e)}, ensure_ascii=False)
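# --- Added usage sketch (not part of the original module) ---
# The URL below is a placeholder, not a real Meipai media id.
if __name__ == '__main__':
    demo_url = "http://www.meipai.com/media/0000000000"  # hypothetical
    print(MeiPai(demo_url).get_video())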
# ===== next file =====
from collections import defaultdict
from itertools import chain
from .graphutils import *
from .milltypes import (WordType, AtomicType, FunctorType, DiamondType, BoxType, EmptyType, ModalType,
invariance_check)
from functools import reduce
from .transformations import majority_vote, _cats_of_type, order_nodes, body_replacements
# # # Extraction variables # # #
# Mapping from phrasal categories and POS tags to Atomic Types
_CatDict = {'advp': 'ADV', 'ahi': 'AHI', 'ap': 'AP', 'cp': 'CP', 'detp': 'DETP', 'inf': 'INF', 'np': 'NP',
'oti': 'OTI', 'pp': 'PP', 'ppart': 'PPART', 'ppres': 'PPRES', 'rel': 'REL', 'smain': 'SMAIN',
'ssub': 'SSUB', 'sv1': 'SV1', 'svan': 'SVAN', 'ti': 'TI', 'whq': 'WHQ', 'whrel': 'WHREL',
'whsub': 'WHSUB'}
_PtDict = {x: x.upper() for x in {'adj', 'bw', 'let', 'lid', 'n', 'spec', 'tsw', 'tw', 'vg', 'vnw', 'vz', 'ww'}}
CatDict = {k: AtomicType(v) for k, v in _CatDict.items()}
PtDict = {k: AtomicType(v) for k, v in _PtDict.items()}
# Head and modifier dependencies
HeadDeps = frozenset(['hd', 'rhd', 'whd', 'cmp', 'crd', 'det'])
ModDeps = frozenset(['mod', 'predm', 'app'])
# Obliqueness Hierarchy
ObliquenessOrder = (
('mod', 'app', 'predm'), # modifiers
('body', 'relcl', 'whbody', 'cmpbody'), # clause bodies
('sup',), # preliminary subject
('su',), # primary subject
('pobj',), # preliminary object
('obj1',), # primary object
('predc', 'obj2', 'se', 'pc', 'hdf'), # verb secondary arguments
('ld', 'me', 'vc'), # verb complements
('obcomp',), # comparison complement
('svp',), # separable verb part
('det',), # NP head
)
ArgSeq = List[Tuple[WordType, Optional[str]]]
# Callable version
class ObliquenessSort(object):
def __init__(self, order: Iterable[Iterable[str]]):
order = {k: i for i, k in enumerate(reversed(list(chain.from_iterable(order))))}
self.order = defaultdict(lambda: -1, {**order, **{'cnj': -2}})
def __call__(self, argcolors: ArgSeq) -> ArgSeq:
return sorted(argcolors, key=lambda x: (self.order[snd(x)], str(fst(x))), reverse=True)
_obliqueness_sort = ObliquenessSort(ObliquenessOrder)
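# --- Added illustration (not part of the original module) ---
# A small, hedged example of the obliqueness sort: modifiers outrank core arguments
# and 'su' outranks 'obj1' in the hierarchy above, so the expected order of the
# returned pairs is the ADV modifier, then the subject, then the object.
def _demo_obliqueness_sort():
    argcolors = [(AtomicType('NP'), 'obj1'), (AtomicType('NP'), 'su'), (AtomicType('ADV'), 'mod')]
    return _obliqueness_sort(argcolors)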
class ExtractionError(AssertionError):
def __init__(self, message: str, meta: Any = None):
super().__init__(message)
self.message = message
self.meta = meta
def is_gap(dag: DAG, node: Node, head_deps: FrozenSet[str] = HeadDeps) -> bool:
incoming = set(map(lambda edge: edge.dep, dag.incoming(node)))
return len(incoming) > 1 and len(incoming.intersection(head_deps)) > 0
def is_copy(dag: DAG, node: Node) -> bool:
incoming = list(map(lambda edge: edge.dep, dag.incoming(node)))
return len(incoming) > 1 and any(map(lambda inc: len(list(filter(lambda other: other == inc, incoming))) > 1,
incoming))
def get_type_plain(dag: DAG, node: Node, type_dict: Dict[str, AtomicType], pos_set: str) -> AtomicType:
if pos_set in dag.attribs[node]:
return type_dict[dag.attribs[node][pos_set]]
else:
cat = dag.attribs[node]['cat']
if cat == 'conj':
return type_dict[majority_vote(dag, dag.successors(node), pos_set)]
else:
return type_dict[cat]
def make_functor(argument: WordType, result: WordType, dep: Optional[str]) -> Union[FunctorType, ModalType]:
if dep is None:
return FunctorType(argument=argument, result=result)
if dep in ModDeps:
return BoxType(FunctorType(argument=argument, result=result), dep)
if dep == 'np_hd':
return BoxType(FunctorType(argument=argument, result=result), 'det')
return FunctorType(DiamondType(argument, dep), result)
def make_ho_functor(argument: WordType, result: WordType, dep: Optional[str]) -> FunctorType:
if dep is None or dep in HeadDeps or dep == 'np_hd':
return make_functor(argument, result, None)
return make_functor(argument, result, dep)
def modifier_of(modified: WordType, dep: str) -> BoxType:
return BoxType(FunctorType(modified, modified), dep)
def get_argument(wordtype: WordType) -> WordType:
if isinstance(wordtype, FunctorType):
return wordtype.argument
if isinstance(wordtype, ModalType) and isinstance(wordtype.content, FunctorType):
return wordtype.content.argument
raise TypeError(f'Cannot extract argument from {wordtype} of type {type(wordtype)}')
def binarize(argcolors: List[Tuple[WordType, Optional[str]]], result: WordType,
sorting_fn: Callable[[ArgSeq], ArgSeq] = _obliqueness_sort) -> Union[FunctorType, ModalType]:
argcolors = sorting_fn(argcolors)
return reduce(lambda x, y:
make_functor(argument=y[0], result=x, dep=y[1]), argcolors, result)
def binarize_hots(argcolors: List[Tuple[WordType, Optional[str]]], result: WordType,
sorting_fn: Callable[[ArgSeq], ArgSeq] = _obliqueness_sort) -> WordType:
argcolors = sorting_fn(argcolors)
return reduce(lambda x, y:
make_ho_functor(argument=y[0], result=x, dep=y[1]), argcolors, result)
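# --- Added illustration (not part of the original module) ---
# Hedged sketch of binarize: the sorted argument/dependency pairs are folded into
# nested functors around the result type, so the object ends up as the outermost
# argument and the subject as the innermost one (the exact printed form depends on
# milltypes' __str__ implementation).
def _demo_binarize():
    return binarize([(AtomicType('NP'), 'su'), (AtomicType('NP'), 'obj1')], AtomicType('SMAIN'))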
def rebinarize(argcolors: List[Tuple[WordType, Optional[str]]], result: WordType,
sorting_fn: Callable[[ArgSeq], ArgSeq] = _obliqueness_sort) -> Union[ModalType, FunctorType]:
if not argcolors:
return result
arguments, colors = list(zip(*argcolors))
x = result
while isinstance(x, DiamondType):
arguments += (x.argument,)
colors += (x.diamond,)
x = x.result
return binarize(list(zip(arguments, colors)), result, sorting_fn)
def type_top(dag: DAG, type_dict: Dict[str, AtomicType], pos_set: str) -> None:
root = fst(list(dag.get_roots()))
root_type = get_type_plain(dag, root, type_dict, pos_set)
dag.attribs[root]['type'] = root_type
def type_bot(dag: DAG, type_dict: Dict[str, AtomicType], pos_set: str, hd_deps: FrozenSet[str] = HeadDeps,
mod_deps: FrozenSet[str] = ModDeps) -> bool:
heads = set(map(lambda edge: edge.target,
list(filter(lambda edge: edge.dep in hd_deps, dag.edges))))
heads = heads.difference(set(filter(lambda node: is_gap(dag, node, hd_deps), dag.nodes)))
typed = set(filter(lambda node: 'type' in dag.attribs[node].keys(), dag.nodes))
fringe = heads.union(typed)
changed = False
while True:
fringe, attribs = type_bot_step(dag, fringe, type_dict, pos_set, hd_deps, mod_deps)
if not attribs:
break
dag.attribs.update(attribs)
changed = True
return changed
def type_bot_step(dag: DAG[Node, str], fringe: Nodes, type_dict: Dict[str, AtomicType], pos_set: str,
hd_deps: FrozenSet[str] = HeadDeps, mod_deps: FrozenSet[str] = ModDeps) \
-> Tuple[Nodes, Dict[Node, Dict]]:
def is_fringe(_node: Node) -> bool:
is_fn = len(set(filter(lambda edge: edge.dep in mod_deps.union({'cnj'}), dag.incoming(_node))))
in_fringe = _node in fringe
is_leaf = dag.is_leaf(_node)
has_no_daughters = all(list(map(lambda out:
out.dep in mod_deps
or out.target in fringe
or dag.attribs[out.source]['cat'] == 'conj',
dag.outgoing(_node))))
return not is_fn and (is_leaf or has_no_daughters) and not in_fringe
def is_cnj_fringe(_node: Node) -> bool:
gap = is_gap(dag, _node, hd_deps)
typed = 'type' in dag.attribs[_node].keys()
single_inc = len(set(dag.incoming(_node))) == 1
is_cnj = single_inc and fst(list(dag.incoming(_node))).dep == 'cnj'
typed_parent = single_inc and 'type' in dag.attribs[fst(list(dag.incoming(_node))).source]
return not gap and not typed and is_cnj and typed_parent
new_fringe = set(filter(lambda n: is_fringe(n), dag.nodes))
new_cnj_fringe = set(filter(lambda n: is_cnj_fringe(n), dag.nodes))
if new_cnj_fringe.intersection(new_fringe):
raise ExtractionError('Fringes overlap', meta=dag.meta)
fringe_types = [(node, get_type_plain(dag, node, type_dict, pos_set)) for node in new_fringe] + \
[(node, dag.attribs[fst(list(dag.incoming(node))).source]['type']) for node in new_cnj_fringe]
return (new_cnj_fringe.union(new_fringe).union(fringe),
{**{node: {**dag.attribs[node], **{'type': _type}} for node, _type in fringe_types}})
def type_mods(dag: DAG, mod_deps: FrozenSet[str] = ModDeps) -> bool:
changed = False
while True:
temp = type_mods_step(dag, mod_deps)
if not temp:
break
changed = True
dag.attribs.update(temp)
return changed
def type_mods_step(dag: DAG[Node, str], mod_deps: FrozenSet[str]) -> Dict[Node, Dict]:
typed_nodes = set(filter(lambda node: 'type' in dag.attribs[node].keys(), dag.nodes))
modding_edges = list(filter(lambda edge:
edge.source in typed_nodes
and edge.dep in mod_deps
and edge.target not in typed_nodes,
dag.edges))
mod_types = [(edge.target, modifier_of(dag.attribs[edge.source]['type'], edge.dep)) for edge in modding_edges]
return {node: {**dag.attribs[node], **{'type': _type}} for node, _type in mod_types}
def type_heads_step(dag: DAG, head_deps: FrozenSet[str], mod_deps: FrozenSet[str]) -> Optional[Dict[str, Dict]]:
def make_hd_functor(result: WordType, argcs: Tuple[List[WordType], List[str]]) -> Union[FunctorType, ModalType]:
# noinspection PyTypeChecker
return rebinarize(list(zip(*argcs)), result) if argcs else result
_heading_edges: List[Edge] = list(filter(lambda edge:
edge.dep in head_deps
and 'type' in dag.attribs[edge.source].keys()
and 'type' not in dag.attribs[edge.target].keys()
and not is_gap(dag, edge.target, head_deps),
dag.edges))
heads_nodes = list(map(lambda edge:
(edge, order_nodes(dag, list(set(map(lambda e2: e2.target,
filter(lambda e1: e1.dep in head_deps,
dag.outgoing(edge.source))))))),
_heading_edges))
double_heads = list(map(lambda edge: edge.target,
map(fst, filter(lambda pair: fst(pair).target != fst(snd(pair)), heads_nodes))))
single_heads = list(map(fst, filter(lambda pair: fst(pair) not in double_heads, heads_nodes)))
heading_edges: List[Tuple[Edge, List[Edge]]] \
= list(filter(lambda pair: all(list(map(lambda out: 'type' in dag.attribs[out.target].keys(),
snd(pair)))),
map(lambda pair: (fst(pair),
list(filter(lambda edge:
edge.dep not in mod_deps
and edge != fst(pair)
and edge.target not in double_heads,
snd(pair)))),
map(lambda edge: (edge, dag.outgoing(edge.source)), single_heads))))
targets: List[str] = list(map(lambda pair: fst(pair).target, heading_edges))
types: List[WordType] = list(map(lambda pair: dag.attribs[fst(pair).source]['type'], heading_edges))
def extract_argcs(edges: List[Edge]) -> Tuple[List[WordType], List[str]]:
args = list(map(lambda edge: dag.attribs[edge.target]['type'], edges))
cs = list(map(lambda edge: edge.dep, edges))
return args, cs
argcolors: List[Tuple[List[WordType], List[str]]] = list(map(lambda pair:
extract_argcs(snd(pair)),
heading_edges))
head_types = [(node, make_hd_functor(res, argcs)) for node, res, argcs in zip(targets, types, argcolors)] + \
[(node, EmptyType()) for node in double_heads]
return {**{node: {**dag.attribs[node], **{'type': _type}} for node, _type in head_types}}
def type_heads(dag: DAG, head_deps: FrozenSet[str] = HeadDeps, mod_deps: FrozenSet[str] = ModDeps) -> bool:
changed = False
while True:
attribs = type_heads_step(dag, head_deps, mod_deps)
if not attribs:
break
changed = True
dag.attribs.update(attribs)
return changed
def type_core(dag: DAG, type_dict: Dict[str, AtomicType], pos_set: str, head_deps: FrozenSet[str],
mod_deps: FrozenSet[str]):
changed = True
while changed:
bot = type_bot(dag, type_dict, pos_set, head_deps, mod_deps)
mod = type_mods(dag, mod_deps)
head = type_heads(dag, head_deps, mod_deps)
changed = mod or head or bot
def type_gaps(dag: DAG, head_deps: FrozenSet[str] = HeadDeps):
def make_gap_functor(emb_type: WordType, interm: Tuple[WordType, str], top: Tuple[WordType, str]) -> FunctorType:
argument = FunctorType(argument=DiamondType(emb_type, modality=snd(interm)), result=fst(interm))
outer_dep = body_replacements[snd(top)] if snd(top) != 'hd' else 'hd'
return FunctorType(argument=DiamondType(argument, modality=outer_dep), result=fst(top))
def get_interm_top(gap: Node) -> Tuple[Tuple[WordType, str], Tuple[WordType, str]]:
incoming = dag.incoming(gap)
top_ = list(filter(lambda inc: inc.dep in head_deps, incoming))
if len(set(map(lambda edge: dag.attribs[edge.source]['type'], top_))) != 1:
raise ExtractionError('Multiple top types.')
top = fst(top_)
top_type: WordType = dag.attribs[top.source]['type']
top_dep: str = top.dep
interm = list(filter(lambda node: gap in dag.points_to(node), dag.successors(top.source)))
if len(interm) != 1:
raise ExtractionError('Multiple intermediate nodes.')
interm_type: WordType = dag.attribs[fst(interm)]['type']
interm_color_ = list(map(lambda edge: edge.dep, filter(lambda edge: edge.dep not in head_deps, incoming)))
if len(set(interm_color_)) > 1:
raise ExtractionError('Multiple intermediate colors.')
interm_color = fst(interm_color_)
return (interm_type, interm_color), (top_type, top_dep)
gap_nodes = list(filter(lambda node: is_gap(dag, node, head_deps), dag.nodes))
gap_nodes = list(filter(lambda node: '_gap_typed' not in dag.attribs[node].keys(), gap_nodes))
if not gap_nodes:
return None
if any(list(map(lambda node: 'type' not in dag.attribs[node].keys(), gap_nodes))):
raise ExtractionError('Untyped gap.')
emb_types = list(map(lambda node: dag.attribs[node]['type'], gap_nodes))
interms, tops = list(zip(*map(get_interm_top, gap_nodes)))
gap_types_ = list(map(make_gap_functor, emb_types, interms, tops))
gap_types = {node: {**dag.attribs[node], **{'type': gap_type, '_gap_typed': 1}}
for node, gap_type in zip(gap_nodes, gap_types_)}
dag.attribs.update(gap_types)
non_term_gaps = list(filter(lambda node: not dag.is_leaf(node), gap_nodes))
descendants = list(chain.from_iterable(map(dag.points_to, non_term_gaps)))
# clear type information from non-terminal gap descendants
descendant_attribs = {node: {k: v for k, v in dag.attribs[node].items()
if k != 'type' and k != '_gap_typed'} for node in descendants}
dag.attribs.update(descendant_attribs)
def type_copies(dag: DAG[Node, str], head_deps: FrozenSet[str] = HeadDeps, mod_deps: FrozenSet[str] = ModDeps):
def daughterhood_conditions(daughter: Edge[Node, str]) -> bool:
return daughter.dep not in head_deps.union(mod_deps)
def normalize_gap_copies(typecolors: List[Tuple[WordType, Set[str]]]) -> ArgSeq:
def normalize_gap_copy(tc: Tuple[WordType, Set[str]]) -> Tuple[WordType, str]:
if len(snd(tc)) == 1:
return fst(tc), fst(list(snd(tc)))
elif len(snd(tc)) == 2:
color = fst(list(filter(lambda c: c not in head_deps, snd(tc))))
return get_argument(get_argument(fst(tc))).content, color if color not in mod_deps else None
# return fst(tc).content.argument.content.argument, color if color not in mod_deps else None
else:
raise ExtractionError('Multi-colored copy.', meta=dag.meta)
return list(map(normalize_gap_copy, typecolors))
def make_polymorphic_x(initial: WordType, missing: ArgSeq) -> WordType:
# missing = list(map(lambda pair: (fst(pair), snd(pair) if snd(pair) not in mod_deps else 'embedded'),
# missing))
return binarize_hots(missing, initial)
def make_crd_type(poly_x: WordType, repeats: int) -> WordType:
ret = poly_x
while repeats:
ret = FunctorType(DiamondType(poly_x, 'cnj'), ret)
repeats -= 1
return ret
conjuncts = list(_cats_of_type(dag, 'conj'))
gap_conjuncts = list(filter(lambda node: is_gap(dag, node, head_deps), conjuncts))
if gap_conjuncts:
raise ExtractionError('Gap conjunction.')
# the edges coming out of each conjunct
conj_outgoing_edges: List[Edges] = list(map(lambda c: dag.outgoing(c), conjuncts))
# the list of coordinator edges coming out of each conjunct
crds = list(map(lambda cg:
list(filter(lambda edge: edge.dep == 'crd', cg)),
conj_outgoing_edges))
if any(list(map(lambda conj_group: len(conj_group) == 0, crds))):
raise ExtractionError('Headless conjunction.', meta={'dag': dag.meta})
# the list of non-excluded edges coming out of each conjunct
conj_outgoing_edges = list(map(lambda cg:
set(filter(daughterhood_conditions, cg)),
conj_outgoing_edges))
# the list of non-excluded nodes pointed by each conjunct
conj_targets: List[Nodes] = list(map(lambda cg:
set(map(lambda edge: edge.target, cg)),
conj_outgoing_edges))
# the list including only typed branches
conj_targets = list(filter(lambda cg:
all(list(map(lambda daughter: 'type' in dag.attribs[daughter].keys(), cg))),
conj_targets))
initial_typegroups: List[Set[WordType]] \
= list(map(lambda conj_group: set(map(lambda daughter: dag.attribs[daughter]['type'], conj_group)),
conj_targets))
if any(list(map(lambda conj_group: len(conj_group) != 1, initial_typegroups))):
raise ExtractionError('Non-polymorphic conjunction.', meta={'dag': dag.meta})
initial_types: List[WordType] = list(map(lambda conj_group: fst(list(conj_group)), initial_typegroups))
downsets: List[List[Nodes]] \
= list(map(lambda group_targets:
list(map(lambda daughter: dag.points_to(daughter).union({daughter}),
group_targets)),
conj_targets))
common_downsets: List[Nodes] = list(map(lambda downset: set.intersection(*downset), downsets))
minimal_downsets: List[Nodes] = list(map(lambda downset:
set(filter(lambda node:
len(dag.pointed_by(node).intersection(downset)) == 0,
downset)),
common_downsets))
accounted_copies = set.union(*minimal_downsets) if common_downsets else set()
all_copies = set(filter(lambda node: is_copy(dag, node), dag.nodes))
if accounted_copies != all_copies:
raise ExtractionError('Unaccounted copies.', meta=dag.meta)
if any(list(map(lambda acc: 'type' not in dag.attribs[acc].keys(), accounted_copies))):
raise ExtractionError('Untyped copies.', meta=dag.meta)
copy_colorsets = list(map(lambda downset: list(map(lambda node: (dag.attribs[node]['type'],
set(map(lambda edge: edge.dep,
dag.incoming(node)))),
downset)),
minimal_downsets))
copy_types_and_colors = list(map(normalize_gap_copies, copy_colorsets))
polymorphic_xs = list(map(make_polymorphic_x, initial_types, copy_types_and_colors))
crd_types = list(map(make_crd_type, polymorphic_xs, list(map(len, conj_targets))))
secondary_crds = list(map(lambda crd: crd.target,
chain.from_iterable(crd[1::] for crd in crds)))
primary_crds = list(map(lambda crd: crd.target, map(fst, crds)))
copy_types = {crd: {**dag.attribs[crd], **{'type': crd_type}} for crd, crd_type in zip(primary_crds, crd_types)}
dag.attribs.update(copy_types)
secondary_types = {crd: {**dag.attribs[crd], **{'type': EmptyType()}} for crd in secondary_crds}
dag.attribs.update(secondary_types)
def type_dag(dag: DAG[str, str], type_dict: Dict[str, AtomicType], pos_set: str, hd_deps: FrozenSet[str],
mod_deps: FrozenSet[str], check: bool = True) -> DAG:
def fully_typed(dag_: DAG) -> bool:
return all(list(map(lambda node: 'type' in dag.attribs[node].keys(), dag_.nodes)))
type_top(dag, type_dict, pos_set)
while not fully_typed(dag):
type_core(dag, type_dict, pos_set, hd_deps, mod_deps)
type_gaps(dag, hd_deps)
type_copies(dag, hd_deps, mod_deps)
if check:
try:
premises = list(map(lambda node: dag.attribs[node]['type'], dag.get_leaves()))
except KeyError:
raise ExtractionError('Untyped leaves.')
goal = dag.attribs[fst(list(dag.get_roots()))]['type']
if not invariance_check(premises, goal):
raise ExtractionError('Invariance check failed.', meta=dag.meta)
return dag
def untype_dag(dag: DAG) -> None:
for k in dag.attribs.keys():
if 'type' in dag.attribs[k].keys():
del dag.attribs[k]['type']
if '_gap_typed' in dag.attribs[k].keys():
del dag.attribs[k]['_gap_typed']
class Extraction:
def __init__(self, cat_dict: Dict[str, AtomicType], pos_dict: Dict[str, AtomicType], pos_set: str,
head_deps: FrozenSet[str], mod_deps: FrozenSet[str],
):
self.type_dict = {**cat_dict, **pos_dict}
self.pos_set = pos_set
self.head_deps = head_deps
self.mod_deps = mod_deps
def __call__(self, dag: DAG, raise_errors: bool = False) -> Optional[DAG]:
try:
return type_dag(dag, self.type_dict, self.pos_set, self.head_deps, self.mod_deps)
except ExtractionError as error:
if raise_errors:
raise error
else:
return None
extractor = Extraction(CatDict, PtDict, 'pt', HeadDeps, ModDeps)
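# --- Added usage sketch (not part of the original module) ---
# Given an already-built DAG (construction not shown here), the module-level
# `extractor` either returns the same DAG with 'type' attributes filled in, or
# None when extraction fails (raise_errors=False).
def _demo_extractor(dag: DAG):
    typed = extractor(dag, raise_errors=False)
    if typed is None:
        return None
    root = fst(list(typed.get_roots()))
    return typed.attribs[root]['type']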
# ===== next file =====
#
# Copyright 2020-2021 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script accepts an 8-bit, YUV420, pre-encoded file and will send the encoded output to a user-defined output path.
# The aspect ratio must be 16:9 and the resolution 720p or higher.
# The script starts by automatically detecting the number of devices available in the system and then determines how many
# jobs can be run on each device based on the resolution of the input. The input file is then split in as many segments
# of equal length. Parallel FFmpeg jobs are submited to transcode all the segments simultaneously. The '-xlnx_hwdev' option
# is used to dispatch each job on a specific device. Once all the segments have been processed, FFmpeg is used to concatenate
# the results and form the final output stream.
# Usage: python 13_ffmpeg_transcode_only_split_stitch.py [options]
# Options:
# -s INPUT_FILE, --sourcefile=INPUT_FILE
# input file to convert
# -d OUTPUT_FILE, --destinationfile=OUTPUT_FILE
# output file path
# -i INPUT_FORMAT, --icodec=INPUT_FORMAT
# input file algorithm standard <h264, hevc, h265>
# default: h264
# -o OUTPUT_FORMAT, --ocodec=OUTPUT_FORMAT
# output file algorithm standard <h264, hevc, h265>
# default: hevc
# -b BITRATE, --bitrate=BITRATE
# output bitrate in Mbit/s. Must be a float or integer value between 1.0 and 25.0
# default: 5.0
# (example: use -b 3 to specify an output bitrate of 3Mbits/sec)
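#
# Example invocation (hypothetical file names, shown for illustration only):
#   python 13_ffmpeg_transcode_only_split_stitch.py -s input_1080p.mp4 -d output_hevc.mp4 -i h264 -o hevc -b 5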
ASPECT_RATIO = (16.0 / 9.0)
import subprocess
from optparse import OptionParser
import time
from datetime import datetime
import json
import re
def count_substrings(string, substring):
string_size = len(string)
substring_size = len(substring)
count = 0
for i in range(0,string_size-substring_size+1):
if string[i:i+substring_size] == substring:
count+=1
return count
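# --- Added illustration (not part of the original script) ---
# count_substrings counts overlapping occurrences of `substring` inside `string`.
def _demo_count_substrings():
    assert count_substrings("ababa", "aba") == 2             # overlapping matches both count
    assert count_substrings("ffmpeg ffmpeg", "ffmpeg") == 2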
def main():
(filename, ofilename, input_encoder, output_encoder, bitrate) = parse_options()
output = subprocess.Popen("xbutil scan",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
result = outputS.find('Found total ')
if (result == -1):
print ("Can't determine number of U30s in the system, exiting ...")
raise SystemExit
num_devices = int(re.search(r'\d+', outputS).group())
print ("There are " + str(int(num_devices/2)) + " cards, " + str(num_devices) + " devices in the system")
if input_encoder == "h265":
input_encoder = "hevc"
if input_encoder != "hevc" and input_encoder != "h264":
print ("Input encoder needs to be h264, h265 or hevc")
raise SystemExit
if output_encoder == "h265":
output_encoder = "hevc"
if output_encoder != "hevc" and output_encoder != "h264":
print ("Output encoder needs to be h264, h265 or hevc")
raise SystemExit
if bitrate < 1.0 or bitrate > 25.0:
print ("Bitrate should be between 1.0 ... 25.0 Mbit/s")
raise SystemExit
br =str(bitrate)
if ofilename[-4:] != ".mp4":
print ("Only mp4 output file format supported")
raise SystemExit
if filename[-4:] != ".mp4" and filename[-4:] != ".mov" and filename[-4:] != ".mkv" and filename[-4:] != ".MOV":
print ("Only mp4 & mov & mkv input file format supported")
raise SystemExit
if filename == ofilename:
print ("Source and destination filename cannot be the same")
raise SystemExit
startSec = time.time()
#ffprobe -v error -select_streams v:0 -show_entries stream=width,height,duration,r_frame_rate -of default=nw=1
output = subprocess.Popen("ffprobe -v error -select_streams v:0 -show_entries stream=width -of default=nw=1 "+filename+" 2>&1",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
result = outputS.find('width=')
if (result == -1):
print ("Can't determine clip resolution, exiting ...")
raise SystemExit
xres = int(re.search(r'\d+', outputS).group())
output = subprocess.Popen("ffprobe -v error -select_streams v:0 -show_entries stream=height -of default=nw=1 "+filename+" 2>&1",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
result = outputS.find('height=')
if (result == -1):
print ("Can't determine clip resolution, exiting ...")
raise SystemExit
yres = int(re.search(r'\d+', outputS).group())
# find out length of the clip such that we can determine segments sizes
output = subprocess.Popen("ffprobe "+filename+" 2>&1",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
#extract the framerate from the string
result = outputS.find('fps, ')
if (result == -1):
print ("Can't determine framerate, exiting ...")
raise SystemExit
tmpS = outputS[result+5:result+14]
framerateS = tmpS.split()
framerate = float (framerateS[0])
print("")
#extract the video duration from the string
result = outputS.find('Duration: ')
if (result == -1):
print ("Can't determine video length, exiting ...")
raise SystemExit
video_lengthS = outputS[result+10:result+18]
try:
pt = datetime.strptime(video_lengthS,'%H:%M:%S')
video_length = pt.second + pt.minute*60 + pt.hour*3600
print("Video clip parameters:")
print (" length in seconds : "+str(video_length))
print (" length in hh:mm:ss: "+video_lengthS)
except ValueError:
print ("Can't determine video length, exiting ...")
raise SystemExit
print(" resolution: "+ str(xres)+"x"+str(yres))
print(" framerate: "+ str(framerate))
totFrames = video_length * framerate
if float((xres/yres)/(ASPECT_RATIO)) != 1.0 :
print ("Example script only supports 16:9 aspect ratios (e.g. 4k, 1080p, 720p)")
raise SystemExit
elif xres == 3840:
device_split_count = 1 * (int(60/framerate))
maxFPS=num_devices * 60
elif xres == 1920:
device_split_count = 4 * (int(60/framerate))
maxFPS=num_devices * 240
elif xres == 1280:
device_split_count = 9 * (int(60/framerate))
maxFPS=num_devices * 540
else:
print ("Resolutions lower than 720p not implemented, exiting!")
raise SystemExit
split_count = device_split_count * num_devices
framesinClip = framerate * video_length / split_count
split_length = int(video_length / split_count) + 1
print ("")
print ("Start splitting clip in " + str(split_count)+ " segments")
# creating cmd to be run for splitting into segments
if split_count != 1:
split_cmd = "ffmpeg -nostdin -loglevel info -vsync 0 -i " + filename + " -c copy -f segment -segment_time " \
+ str(split_length) + " -y tmpfile" + "%2d." + filename[-3:] + " > stdout.log 2>&1 \n"
else:
split_cmd = "cp " + filename + " tmpfile00." + filename[-3:]
# run the command in a blocking way
output = subprocess.Popen(split_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
# check if the number of segments written equals the desired split_count
output = subprocess.Popen("ls tmpfile* | wc -l", shell = True, stdout = subprocess.PIPE).stdout.read()
if int(output) < split_count:
print ("Video file may not be splittable ...")
print ("Only able to create " + str(int(output)) + " segments for parallel processing")
raise SystemExit
if int(output) > split_count:
print ("Too many tmpfiles; Please delete old tmpfiles ...")
raise SystemExit
print ("")
clipNum = 0
for n in range(0, num_devices):
for m in range(0, device_split_count):
transcode_cmd = "ffmpeg -loglevel info -xlnx_hwdev "+ str(n)+" -vsync 0 -c:v mpsoc_vcu_" + input_encoder + " -i tmpfile" + \
format(clipNum, '02d') + filename[-4:] + \
" -periodicity-idr 120 -b:v " + br + "M -max-bitrate " + \
br + "M -c:v mpsoc_vcu_" \
+ output_encoder + " -y tmpfileout" + \
format(clipNum, '02d') + ofilename[-4:] + " > stdout" +str(n)+".log 2>&1 & \n"
output = subprocess.Popen(transcode_cmd, shell = True)
time.sleep(0.1)
clipNum += 1
print ("Start transcoding segments")
# wait until all ffmpeg processes are done
pidsExist = True
tail_cmd = "tail -1 stdout0.log"
ps_cmd = "ps -ef | grep ffmpeg"
percentDone = 10
print("")
print(" 0 percent of transcoding completed")
while pidsExist:
time.sleep(0.1)
output = subprocess.Popen(ps_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
nr = count_substrings(str(output), "ffmpeg -loglevel info -xlnx_hwdev")
if nr == 0:
pidsExist = False
output = subprocess.Popen(tail_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
outputpartS = outputS[-150:]
result = outputpartS.find('frame=')
if result != -1:
frameS = outputpartS[result+6:result+20].split()
frame = int(frameS[0])
if int(100.0 * frame/framesinClip) > percentDone:
if percentDone > 95:
percentDone = 150
else:
print(" " + str(percentDone) + " percent of transcoding completed")
if percentDone > 89:
percentDone = percentDone + 5
else:
percentDone = percentDone + 10
print("100 percent of transcoding completed")
#start concatenating the transcoded files
print("")
print ("Start concatenating segments into final clip")
cmd = "printf \"file '%s'\\n\" tmpfileout* > mylist.txt"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm -f " + ofilename
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "ffmpeg -f concat -safe 0 -i mylist.txt -c copy " + ofilename + " > stdout.log 2>&1"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm tmpfile*"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm mylist.txt"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm stdout*.log"
# output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
endSec = time.time()
totSec = int(endSec-startSec)
print(" ")
if totSec > 119:
print("Time from start to completion : "+ str(totSec) + \
" seconds (" + str(int(totSec/60)) + " minutes and " + \
str(totSec - 60*(int(totSec/60))) + " seconds)")
elif totSec > 59:
print("Time from start to completion : "+ str(totSec) + \
" seconds (1 minute and " + \
str(totSec - 60) + " seconds)")
else:
print("Time from start to completion : "+ str(totSec) + \
" seconds")
print(" ")
print("This clip was processed "+str(round(1.0*video_length/totSec,1))+" times faster than realtime")
print(" ")
print("This clip was effectively processed at " + str(round(totFrames/totSec,2)) + " FPS")
print(" ")
print("Efficiency=" + str(round((totFrames/totSec)/maxFPS,2)*100) + "%")
def destroy():
# Release resource
print("Exiting ...")
def parse_options():
parser = OptionParser()
parser.add_option("-s", "--sourcefile",
dest = "ifilename",
help = "input file to convert",
type = "string",
action = "store"
)
parser.add_option("-d", "--destinationfile",
dest = "ofilename",
help = "output file",
type = "string",
action = "store"
)
parser.add_option("-i", "--icodec",
dest = "input_encoder",
help = "input encode standard <h264, hevc, h265> \
default h264",
type = "string",
action = "store", default = "h264"
)
parser.add_option("-o", "--ocodec",
dest = "output_encoder",
help = "output encode standard <h264, hevc, h265> \
default hevc",
type = "string",
action = "store", default = "hevc"
)
parser.add_option("-b", "--bitrate",
dest = "bitrate",
help = "output bitrate in Mbit/s. Must be a float or integer value between 1.0 and 25.0 (example: use -b 3 to specify an output bitrate of 3Mbits/sec) \
default 5.0",
type = "float",
action = "store", default = 5.0
)
(options, args) = parser.parse_args()
if options.ifilename and options.ofilename:
return (options.ifilename, options.ofilename, \
options.input_encoder, options.output_encoder,options.bitrate)
else:
parser.print_help()
raise SystemExit
if __name__ == '__main__':
try:
main()
# When 'Ctrl+C' is pressed, the child program
# destroy() will be executed.
except KeyboardInterrupt:
destroy()
# ===== next file =====
from marionette_driver import Wait, expected
from marionette_harness import MarionetteTestCase
class Test(MarionetteTestCase):
    # Starting with version 84, opening the security level panel by clicking
    # the button started to fail (the panel was not visible). Opening it via
    # JavaScript still works for both 84 and 78.
def open_security_level_panel(self):
m = self.marionette
with m.using_context('chrome'):
self.marionette.execute_script(
'document.getElementById("security-level-button").click();')
def test_security_level_ui(self):
custom_pref = 'javascript.options.wasm'
m = self.marionette
m.timeout.implicit = 5
with m.using_context('chrome'):
self.open_security_level_panel()
m.find_element(
'id', 'securityLevel-advancedSecuritySettings').click()
with m.using_context('content'):
# Test whether changing the security level value works
spotlight = m.find_element('class name', 'spotlight')
self.assertEqual(
spotlight.get_attribute("data-subcategory"), "securitylevel")
self.assertEqual(4, m.get_pref(
'extensions.torbutton.security_slider'))
m.find_element(
'css selector', '#securityLevel-vbox-safer radio').click()
self.assertEqual(2, m.get_pref(
'extensions.torbutton.security_slider'))
m.find_element(
'css selector', '#securityLevel-vbox-safest radio').click()
self.assertEqual(1, m.get_pref(
'extensions.torbutton.security_slider'))
m.find_element(
'css selector', '#securityLevel-vbox-standard radio').click()
self.assertEqual(4, m.get_pref(
'extensions.torbutton.security_slider'))
# Test custom security settings
elem = m.find_element('id', 'securityLevel-restoreDefaults')
self.assertEqual(elem.is_displayed(), False)
m.set_pref(custom_pref, False)
self.assertEqual(elem.is_displayed(), True)
elem.click()
self.assertEqual(True, m.get_pref(custom_pref))
# Test Learn More link
m.find_element('id', 'securityLevel-learnMore').click()
Wait(m, timeout=m.timeout.page_load).until(
lambda _: len(m.window_handles) > 1)
m.switch_to_window(m.window_handles[1])
Wait(m, timeout=m.timeout.page_load).until(
lambda _: m.get_url() != "about:blank")
self.assertTrue(
m.get_url() in ["https://tb-manual.torproject.org/en-US/security-settings/", "https://tb-manual.torproject.org/security-settings/"])
# Test Learn More link from panel
self.open_security_level_panel()
m.find_element('id', 'securityLevel-learnMore').click()
Wait(m, timeout=m.timeout.page_load).until(
lambda _: len(m.window_handles) > 2)
with m.using_context('content'):
m.switch_to_window(m.window_handles[2])
Wait(m, timeout=m.timeout.page_load).until(
lambda _: m.get_url() != "about:blank")
self.assertTrue(
m.get_url() in ["https://tb-manual.torproject.org/en-US/security-settings/", "https://tb-manual.torproject.org/security-settings/"])
# Test custom settings from panel
m.set_pref(custom_pref, False)
elem = m.find_element('id', 'securityLevel-restoreDefaults')
self.assertEqual(elem.is_displayed(), False)
self.open_security_level_panel()
self.assertEqual(elem.is_displayed(), True)
elem.click()
self.assertEqual(True, m.get_pref(custom_pref))
# ===== next file (from ikstream/Zeus-Scanner) =====
#!/usr/bin/env python
import io
import sys
import time
import shlex
import warnings
import subprocess
from var import blackwidow
from var.search import selenium_search
from var.auto_issue.github import request_issue_creation
from lib.header_check import main_header_check
from lib.core.parse import ZeusParser
from lib.core.errors import (
InvalidInputProvided,
InvalidProxyType,
ZeusArgumentException
)
from lib.core.common import (
start_up,
shutdown,
prompt
)
from lib.core.settings import (
setup,
logger,
set_color,
get_latest_log_file,
get_random_dork,
fix_log_file,
config_headers,
config_search_engine,
find_running_opts,
run_attacks,
CURRENT_LOG_FILE_PATH,
SPIDER_LOG_PATH,
URL_REGEX, URL_QUERY_REGEX,
URL_LOG_PATH,
BANNER
)
warnings.simplefilter("ignore")
if __name__ == "__main__":
# this will take care of most of the Unicode errors.
reload(sys)
sys.setdefaultencoding("utf-8")
sys.setrecursionlimit(1500)
opt = ZeusParser.cmd_parser()
ZeusParser().single_show_args(opt)
# verify all the arguments passed before we continue
# with the process
ZeusParser().verify_args()
# run the setup on the program
setup(verbose=opt.runInVerbose)
if not opt.hideBanner:
print(BANNER)
start_up()
if opt.runInVerbose:
being_run = find_running_opts(opt)
logger.debug(set_color(
"running with options '{}'".format(being_run), level=10
))
logger.info(set_color(
"log file being saved to '{}'".format(get_latest_log_file(CURRENT_LOG_FILE_PATH))
))
def __run_attacks_main(**kwargs):
"""
main method to run the attacks
"""
log_to_use = kwargs.get("log", None)
if log_to_use is None:
options = (opt.dorkToUse, opt.useRandomDork, opt.dorkFileToUse)
log_to_use = URL_LOG_PATH if any(o for o in options) else SPIDER_LOG_PATH
try:
urls_to_use = get_latest_log_file(log_to_use)
except TypeError:
urls_to_use = None
else:
urls_to_use = log_to_use
if urls_to_use is None:
logger.error(set_color(
"unable to run attacks appears that no file was created for the retrieved data", level=40
))
shutdown()
options = [
opt.runSqliScan, opt.runPortScan,
opt.adminPanelFinder, opt.runXssScan,
opt.performWhoisLookup, opt.performClickjackingScan,
opt.pgpLookup
]
if any(options):
with open(urls_to_use) as urls:
for i, url in enumerate(urls.readlines(), start=1):
current = i
if "webcache" in url:
logger.warning(set_color(
"ran into unexpected webcache URL skipping", level=30
))
current -= 1
else:
                        if url.strip() not in ("http://", "https://"):
logger.info(set_color(
"currently running on '{}' (target #{})".format(
url.strip(), current
), level=25
))
logger.info(set_color(
"fetching target meta-data"
))
identified = main_header_check(
url, verbose=opt.runInVerbose, agent=agent_to_use,
proxy=proxy_to_use, xforward=opt.forwardedForRandomIP,
identify_plugins=opt.identifyPlugin, identify_waf=opt.identifyProtection,
show_description=opt.showPluginDescription
)
if not identified:
logger.error(set_color(
"target is refusing to allow meta-data dumping, skipping", level=40
))
run_attacks(
url.strip(),
sqlmap=opt.runSqliScan, nmap=opt.runPortScan, pgp=opt.pgpLookup,
xss=opt.runXssScan, whois=opt.performWhoisLookup, admin=opt.adminPanelFinder,
clickjacking=opt.performClickjackingScan, github=opt.searchGithub,
verbose=opt.runInVerbose, batch=opt.runInBatch,
auto_start=opt.autoStartSqlmap, xforward=opt.forwardedForRandomIP,
sqlmap_args=opt.sqlmapArguments, nmap_args=opt.nmapArguments,
show_all=opt.showAllConnections, do_threading=opt.threadPanels,
tamper_script=opt.tamperXssPayloads, timeout=opt.controlTimeout,
proxy=proxy_to_use, agent=agent_to_use, conf_file=opt.sqlmapConfigFile,
threads=opt.amountOfThreads, force_ssl=opt.forceSSL
)
print("\n")
else:
logger.warning(set_color(
"malformed URL discovered, skipping", level=30
))
proxy_to_use, agent_to_use = config_headers(
proxy=opt.proxyConfig, proxy_file=opt.proxyFileRand,
p_agent=opt.usePersonalAgent, rand_agent=opt.useRandomAgent,
verbose=opt.runInVerbose
)
search_engine = config_search_engine(
verbose=opt.runInVerbose, ddg=opt.useDDG,
aol=opt.useAOL, bing=opt.useBing, enum=opt.fileToEnumerate
)
try:
# use a personal dork as the query
if opt.dorkToUse is not None and not opt.searchMultiplePages:
logger.info(set_color(
"starting dork scan with query '{}'".format(opt.dorkToUse)
))
try:
selenium_search.parse_search_results(
opt.dorkToUse, search_engine, verbose=opt.runInVerbose, proxy=proxy_to_use,
agent=agent_to_use, pull_all=opt.noExclude, parse_webcache=opt.parseWebcache,
forward_for=opt.forwardedForRandomIP, tor=opt.useTor, batch=opt.runInBatch,
show_success=opt.showSuccessRate
)
except InvalidProxyType:
supported_proxy_types = ("socks5", "socks4", "https", "http")
logger.fatal(set_color(
"the provided proxy is not valid, specify the protocol and try again, supported "
"proxy protocols are {} (IE socks5://127.0.0.1:9050)".format(
", ".join(list(supported_proxy_types))), level=50
))
except Exception as e:
if "Permission denied:" in str(e):
logger.fatal(set_color(
"your permissions are not allowing Zeus to run, "
"try running Zeus with sudo", level=50
))
shutdown()
else:
logger.exception(set_color(
"ran into exception '{}'".format(e), level=50
))
request_issue_creation()
pass
__run_attacks_main()
# search multiple pages of Google
elif opt.dorkToUse is not None or opt.useRandomDork and opt.searchMultiplePages:
if opt.dorkToUse is not None:
dork_to_use = opt.dorkToUse
elif opt.useRandomDork:
dork_to_use = get_random_dork()
else:
dork_to_use = None
if dork_to_use is None:
logger.warning(set_color(
"there has been no dork to specified to do the searching, defaulting to random dork", level=30
))
dork_to_use = get_random_dork()
dork_to_use = dork_to_use.strip()
if opt.amountToSearch is None:
logger.warning(set_color(
"did not specify amount of links to find defaulting to 75", level=30
))
link_amount_to_search = 75
else:
link_amount_to_search = opt.amountToSearch
logger.info(set_color(
"searching Google using dork '{}' for a total of {} links".format(
dork_to_use, link_amount_to_search
)
))
try:
selenium_search.search_multiple_pages(
dork_to_use, link_amount_to_search, proxy=proxy_to_use,
agent=agent_to_use, verbose=opt.runInVerbose,
xforward=opt.forwardedForRandomIP, batch=opt.runInBatch,
show_success=opt.showSuccessRate
)
except Exception as e:
if "Error 400" in str(e):
                    logger.fatal(set_color(
                        "failed to connect to search engine, error '{}'".format(e), level=50
                    ))
else:
logger.exception(set_color(
"failed with unexpected error '{}'".format(e), level=50
))
shutdown()
__run_attacks_main()
# use a file full of dorks as the queries
elif opt.dorkFileToUse is not None:
with io.open(opt.dorkFileToUse, encoding="utf-8") as dorks:
for dork in dorks.readlines():
dork = dork.strip()
logger.info(set_color(
"starting dork scan with query '{}'".format(dork)
))
try:
selenium_search.parse_search_results(
dork, search_engine, verbose=opt.runInVerbose, proxy=proxy_to_use,
agent=agent_to_use, pull_all=opt.noExclude, parse_webcache=opt.parseWebcache,
tor=opt.useTor, batch=opt.runInBatch
)
except Exception as e:
logger.exception(set_color(
"ran into exception '{}'".format(e), level=50
))
request_issue_creation()
pass
__run_attacks_main()
# use a random dork as the query
elif opt.useRandomDork:
random_dork = get_random_dork().strip()
if opt.runInVerbose:
logger.debug(set_color(
"choosing random dork from etc/dorks.txt", level=10
))
logger.info(set_color(
"using random dork '{}' as the search query".format(random_dork)
))
try:
selenium_search.parse_search_results(
random_dork, search_engine, verbose=opt.runInVerbose,
proxy=proxy_to_use, agent=agent_to_use, pull_all=opt.noExclude, parse_webcache=opt.parseWebcache,
tor=opt.useTor, batch=opt.runInBatch
)
__run_attacks_main()
except Exception as e:
logger.exception(set_color(
"ran into exception '{}' and cannot continue, saved to current log file".format(e),
level=50
))
request_issue_creation()
pass
# spider a given webpage for all available URL's
elif opt.spiderWebSite:
problem_identifiers = ["http://", "https://"]
if not URL_REGEX.match(opt.spiderWebSite):
err_msg = "URL did not match a true URL{}"
if not any(m in opt.spiderWebSite for m in problem_identifiers):
err_msg = err_msg.format(" issue seems to be that http:// "
"or https:// is not present in the URL")
else:
err_msg = err_msg.format("")
raise InvalidInputProvided(
err_msg
)
else:
if URL_QUERY_REGEX.match(opt.spiderWebSite):
question_msg = (
"it is recommended to not use a URL that has a GET(query) parameter in it, "
"would you like to continue"
)
if not opt.runInBatch:
is_sure = prompt(
question_msg, opts="yN"
)
else:
is_sure = prompt(
question_msg, opts="yN", default="y"
)
if is_sure.lower().startswith("y"):
pass
else:
shutdown()
blackwidow.blackwidow_main(opt.spiderWebSite, agent=agent_to_use, proxy=proxy_to_use,
verbose=opt.runInVerbose, forward=opt.forwardedForRandomIP)
__run_attacks_main()
# enumerate a file and run attacks on the URL's provided
elif opt.fileToEnumerate is not None:
logger.info(set_color(
"found a total of {} URL's to enumerate in given file".format(
len(open(opt.fileToEnumerate).readlines())
)
))
__run_attacks_main(log=opt.fileToEnumerate)
else:
logger.critical(set_color(
"failed to provide a mandatory argument, you will be redirected to the help menu", level=50
))
time.sleep(2)
zeus_help_menu_command = shlex.split("python zeus.py --help")
subprocess.call(zeus_help_menu_command)
except IOError as e:
if "Invalid URL" in str(e):
logger.exception(set_color(
"URL provided is not valid, schema appears to be missing", level=50
))
request_issue_creation()
shutdown()
elif "HTTP Error 429: Too Many Requests" in str(e):
logger.fatal(set_color(
"WhoIs doesn't like it when you send to many requests at one time, "
"try updating the timeout with the --time-sec flag (IE --time-sec 10)", level=50
))
shutdown()
elif "No such file or directory" in str(e):
logger.fatal(set_color(
"provided file does not exist, make sure you have the full path", level=50
))
shutdown()
else:
logger.exception(set_color(
"Zeus has hit an unexpected error and cannot continue, error code '{}'".format(e), level=50
))
request_issue_creation()
except KeyboardInterrupt:
logger.fatal(set_color(
"user aborted process", level=50
))
shutdown()
except UnboundLocalError:
logger.warning(set_color(
"do not interrupt the browser when selenium is running, "
"it will cause Zeus to crash", level=30
))
except ZeusArgumentException:
shutdown()
except Exception as e:
if "url did not match a true url" in str(e).lower():
logger.error(set_color(
"you did not provide a URL that is capable of being processed, "
"the URL provided to the spider needs to contain protocol as well "
"ie. 'http://google.com' (it is advised not to add the GET parameter), "
"fix the URL you want to scan and try again", level=40
))
shutdown()
elif "Service geckodriver unexpectedly exited" in str(e):
logger.fatal(set_color(
"it seems your firefox version is not compatible with the geckodriver version, "
"please re-install Zeus and try again", level=50
))
shutdown()
elif "Max retries exceeded with url" in str(e):
logger.fatal(set_color(
"you have hit the max retries, to continue using Zeus "
"it is recommended to use a proxy (--proxy/--proxy-file) "
"along with a new user-agent (--random-agent/--agent).", level=50
))
shutdown()
else:
logger.exception(set_color(
"ran into exception '{}' exception has been saved to log file".format(e), level=50
))
request_issue_creation()
    # fix the log file before shutting down in case you want to look at it
fix_log_file()
shutdown()
|
StarcoderdataPython
|
3260485
|
# from app import ssp_app, mail
from app.ssp_module.models import User
from app.app import create_app, db
from flask import render_template, request, flash, redirect, url_for
from flask_login import current_user, login_user, login_required, logout_user
from flask_mail import Message
from flask import Blueprint
from app.utils import get_reset_token, decode_user_token, send_mail
ssp_bp = Blueprint('ssp', __name__, url_prefix='/')
@ssp_bp.route('/')
@ssp_bp.route('/index')
def index():
return render_template('index.html')
@ssp_bp.route('/login_page')
def login_page():
return render_template('login.html')
@ssp_bp.route('/about')
def about():
return render_template('about.html')
@ssp_bp.route('/forgot')
def forgot_password():
return render_template('forgot_password.html')
@ssp_bp.route('/contact')
def contact_us():
return render_template('contact.html')
@ssp_bp.route('/desk')
@login_required
def desk():
return render_template('desk/dashboard.html')
@ssp_bp.route('/login', methods=['POST'])
def login():
try:
email, password = request.form.get('username'), request.form.get('password')
        user_ = User.query.filter_by(email=email).first()
        if not user_ or not user_.check_password(password):
            flash('Invalid username or password')
            return redirect(url_for('ssp.login_page'))
flash('Logged in successfully')
        login_user(user_, remember=True)
return redirect(url_for('ssp.desk'))
except Exception as e:
return redirect(url_for('ssp.login_page'))
@ssp_bp.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('ssp.login_page'))
@ssp_bp.route('/forgot_password', methods=['POST'])
def sendnew_password():
from app.app import mail, create_app
user = User.query.filter_by(email=request.form.get('username')).first()
if not user:
flash("Oops, it seems we dont have your account" , "danger")
return redirect(url_for('ssp.forgot_password'))
try:
token = request.url_root + "/verify/"+ token_details(create_app, request.form.get('username'))
send_mail(subject="Password Reset Instruction",
recipients=[request.form.get('username')], template = 'reset_password_mail.html',
            data={'user': user.full_name, 'token': token}, flash_msg="Password instruction sent successfully")
except Exception as e:
raise e
return redirect(url_for('ssp.forgot_password'))
def token_details(app, user):
token = {'data': user, 'key': app().secret_key}
return get_reset_token(token)
@ssp_bp.route('/verify/<token>', methods=['GET', 'POST'])
def verify_token(token):
try:
decode, app = decode_user_token(token), create_app()
if decode.get('identity').get('key') == app.secret_key and request.method == 'GET':
return render_template('reset_password.html')
elif decode.get('identity').get('key') == app.secret_key and request.method == 'POST':
user = decode.get('identity').get('data')
user = User.query.filter_by(email=user).first()
user.set_password(request.form.get('password'))
db.session.add(user)
db.session.commit()
flash("Password has been set successfully", "success")
return render_template('reset_password.html')
except Exception as e:
raise e
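

# Illustrative wiring sketch (assumption: create_app() is a standard Flask
# application factory and does not already register this blueprint itself):
#
#     app = create_app()
#     app.register_blueprint(ssp_bp)
#     app.run(debug=True)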
|
StarcoderdataPython
|
3510018
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pprint
from collections import Counter
from base import BaseObject
from datadict import LoadStopWords
from nlusvc import TextAPI
class AddLanguageVariability(BaseObject):
""" Service to Augment the Synonyms ('Language Variability') file with new entries
all output from this file represents net-new non-duplicated entries into KB
"""
_text_api = TextAPI(is_debug=False)
_stopwords = LoadStopWords(is_debug=True).load(refresh_cache=True)
def __init__(self,
is_debug: bool = True):
"""
Created:
25-Jul-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/493
Updated:
10-Aug-2019
<EMAIL>
            * completely rewritten in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/701
* Developer's Note:
it's likely that we'll end up with multiple recipes for language variability
each recipe will be a separate domain component and all will be orchestrated via a service
Updated:
13-Jan-2020
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
def _count_tokens(self,
terms: list) -> Counter:
"""
Purpose:
Tokenize input and count the tokens
:param terms:
a list of unstructured text
the list may be of any size
each item in the list may be of any size
:return:
a collection.Counter instance containing a count of tokens in the list
"""
c = Counter()
for term in terms:
[c.update({x: 1}) for x in term.lower().split(' ')
if x not in self._stopwords]
return c
@staticmethod
def _filter_tokens_by_count(c: Counter,
min_threshold: int = 1) -> Counter:
"""
Purpose:
filter a collection.Counter instance by count
:param c:
a collection.Counter instance
:param min_threshold:
            the minimum valid count
:return:
a new collection.Counter instance
"""
return Counter({x: c[x] for x in c if c[x] >= min_threshold})
@staticmethod
def _subsumed(c: Counter) -> set:
"""
Purpose:
Find Subsumed Tokens
Sample Input:
[ 'redhat_certified_system_administrator',
'open_stack',
'system_administrator',
'redhat' ]
Sample Output:
[ 'redhat_certified_system_administrator',
'system_administrator',
'redhat' ]
a 'subsumed' token is one that contains another known token as a sub-string
:param c:
a collection.Counter instance
:return:
a set of subsumed tokens
"""
subsumed = set()
for t1 in c:
for t2 in c:
if t1 == t2:
continue
if t1 in t2 or t2 in t1:
subsumed.add(t1)
subsumed.add(t2)
return subsumed
@staticmethod
def _patterns(delta: set,
subsumed: set) -> list:
"""
Purpose:
Create a list of patterns for token formation
:param delta:
a set of tokens that are not subsumed by any other token
Sample Input:
{ 'open_stack' }
:param subsumed:
a set of subsumed tokens (generated from the 'subsumed' function)
Sample Input:
{ 'redhat_certified_system_administrator',
'system_administrator',
'redhat' }
:return:
a list of candidate patterns
"""
s = set()
for t1 in subsumed:
for t2 in delta:
if t1 == t2:
continue
s.add(f"{t1}+{t2}")
s.add(f"{t1}_{t2}")
return sorted(s)
def process(self,
terms: list,
min_threshold: int = 2) -> list:
"""
Purpose:
Given a list of terms, create variations for the synonyms_kb.csv file
:param terms:
any list of terms or phrases
:param min_threshold:
the minimum token count that is acceptable
any token beneath this threshold is typically considered low-value
perhaps useful only for outlier and edge patterns
:return:
the variations
"""
c = self._count_tokens(terms)
if self.is_debug:
self.logger.debug('\n'.join([
"Token Count:",
pprint.pformat(c.most_common(25))]))
c = self._filter_tokens_by_count(c, min_threshold)
if self.is_debug:
self.logger.debug('\n'.join([
f"Token Filter (min-threshold={min_threshold}):",
pprint.pformat(c.most_common(25))]))
tokens = set([x for x in c])
subsumed = self._subsumed(c)
if self.is_debug:
self.logger.debug(f"Token Subsumption: {subsumed}")
delta = tokens.difference(subsumed)
if self.is_debug:
self.logger.debug(f"Token Delta: {delta}")
patterns = self._patterns(delta, subsumed)
if self.is_debug:
self.logger.debug(f"Pattern Generation: {patterns}")
return patterns
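

if __name__ == "__main__":
    # Illustrative usage sketch only: the job titles below are made-up sample
    # input, and a real run still needs the TextAPI / stopword resources that
    # this module loads at import time.
    sample_terms = [
        "linux admin",
        "linux administrator",
        "senior linux administrator",
        "senior admin",
    ]
    svc = AddLanguageVariability(is_debug=True)
    print(svc.process(sample_terms, min_threshold=2))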
|
StarcoderdataPython
|
8185575
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import tensorflow as tf
from tensorflow.contrib import rnn
class Decoder():
def __init__(self, vocab_size,
rnn_size, embeddings):
self.vocab_size = vocab_size
self.rnn_size = rnn_size
self.embeddings = embeddings
self.decoder_inputs = None
self.decoder_targets = None
self.decoder_outputs = None
self.decoder_state = None
def lstm_model(self, encoder_final_state):
self.decoder_inputs = tf.placeholder(shape=(None, None),
dtype=tf.int32,
name='decoder_input')
self.decoder_targets = tf.placeholder(shape=(None, None),
dtype=tf.int32,
name='decoder_targets')
decoder_inputs_embedded = tf.nn.embedding_lookup(self.embeddings, self.decoder_inputs)
decoder_cell = rnn.LSTMCell(self.rnn_size)
self.decoder_outputs, self.decoder_state = tf.nn.dynamic_rnn(decoder_cell, decoder_inputs_embedded,
initial_state=encoder_final_state,
dtype=tf.float32, time_major=True,
scope='plain_decoder')
return self.decoder_outputs, self.decoder_state
def optimization(self):
decoder_logits = tf.contrib.layers.linear(self.decoder_outputs, self.vocab_size)
decoder_prediction = tf.argmax(decoder_logits, 2)
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(self.decoder_targets,
depth=self.vocab_size,
dtype=tf.float32),
logits=decoder_logits)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
return train_op, loss, decoder_prediction
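

# Illustrative usage sketch (TF1.x graph mode, matching the contrib imports
# above; the sizes and the encoder state below are made-up examples, and the
# state would normally come from a matching encoder LSTM):
#
#     embeddings = tf.Variable(tf.random_uniform([10000, 128], -1.0, 1.0))
#     decoder = Decoder(vocab_size=10000, rnn_size=256, embeddings=embeddings)
#     enc_state = rnn.LSTMStateTuple(c=tf.placeholder(tf.float32, [None, 256]),
#                                    h=tf.placeholder(tf.float32, [None, 256]))
#     outputs, state = decoder.lstm_model(enc_state)
#     train_op, loss, prediction = decoder.optimization()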
|
StarcoderdataPython
|
9771399
|
# encoding: utf-8
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
__version__ = '0.0.6'
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ['-x', 'flask_accept/test']
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='flask_accept',
version=__version__,
description='Custom Accept header routing support for Flask',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Developers',
],
keywords='flask accept mimetype headers api versioning',
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/di/flask-accept',
long_description=readme(),
packages=find_packages(exclude=['examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=['flask'],
tests_require=[
'pytest',
'flake8',
'flask_restful',
'readme_renderer',
'flask_restplus'
],
cmdclass={'test': PyTest},
)
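
# Illustrative usage notes (standard setuptools workflow implied by the
# cmdclass above): `python setup.py test` runs the pytest suite, and
# `pip install .` installs the package locally.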
|
StarcoderdataPython
|
3258701
|
<filename>ppf/core/controller.py
class controller(object):
def __init__(self, trade, model, env, historical_df = 0):
self.__trade = trade
self.__model = model
self.__env = env
self.__historical_df = historical_df
self.__symbol_table = {}
self.__event = None
def get_trade(self):
return self.__trade
def get_model(self):
return self.__model
def get_environment(self):
return self.__env
def get_event(self):
return self.__event
def set_event(self, event):
self.__event = event
def get_adjuvant_table(self):
leg = self.__trade.legs()[self.__event.leg_id()]
adjuvant_table = None
if leg.has_adjuvant_table():
adjuvant_table = leg.adjuvant_table()
return adjuvant_table
def insert_symbol(self, name, at):
self.__symbol_table[name] = (at, self.__model.state().create_variable())
def update_symbol(self, name, symbol, at):
self.__symbol_table[name] = (at, symbol)
def retrieve_symbol(self, name):
if not self.__symbol_table.has_key(name):
raise RuntimeError, "name not found in symbol table"
return self.__symbol_table.get(name)[1]
def retrieve_symbol_update_time(self, name):
if not self.__symbol_table.has_key(name):
raise RuntimeError, "name not found in symbol table"
return self.__symbol_table.get(name)[0]
def retrieve_symbols_to_rollback(self, at):
symbols = []
for symbol in self.__symbol_table:
pair = self.__symbol_table.get(symbol)
if pair[0] > at:
symbols.append(symbol)
return symbols
def pay_df(self, t, state):
if t < 0:
historical_df = self.__model.state().create_variable()
historical_df = self.__historical_df
return historical_df
else:
flow = self.__event.flow()
fill = self.__model.fill()
requestor = self.__model.requestor()
T = self.__env.relative_date(flow.pay_date())/365.0
return fill.numeraire_rebased_bond(t, T, flow.pay_currency(), self.__env, requestor, state)
def libor(self, t, state):
flow = self.__event.flow()
id = self.__event.reset_id()
obs = flow.observables()[id]
if t < 0:
fix = obs.fix()
if fix.is_fixed():
fixing = self.__model.state().create_variable()
fixing = fix.value()
return fixing
else:
raise RuntimeError, 'libor in the past with no fixing'
else:
fill = self.__model.fill()
requestor = self.__model.requestor()
return fill.libor(t, obs, self.__env, requestor, state)
def swap(self, t, state):
    id = self.__event.reset_id()
    flow = self.__event.flow()
    obs = flow.observables()[id]
if t < 0:
fix = obs.fix()
if fix.is_fixed():
fixing = self.__model.state().create_variable()
fixing = fix.value()
return fixing
else:
        raise RuntimeError, 'swap in the past with no fixing'
else:
fill = self.__model.fill()
requestor = self.__model.requestor()
return fill.swap(t, obs, self.__env, requestor, state)
def rollback(self, T, t, symbol):
requestor = self.__model.requestor()
state = self.__model.state()
return self.__model.rollback().rollback(t, T, state, requestor, self.__env, symbol)
def rollback_max(self, T, t, symbol_one, symbol_two):
requestor = self.__model.requestor()
state = self.__model.state()
res1 = self.__model.rollback().rollback(t, T, state, requestor, self.__env, symbol_one)
res2 = self.__model.rollback().rollback_max(t, T, state, requestor, self.__env, symbol_two-symbol_one)
return res1+res2
def evolve(self, t, T):
requestor = self.__model.requestor()
state = self.__model.state()
self.__model.evolve().evolve(t, T, state, requestor, self.__env)
def numeraire(self, t):
if t < 0:
raise RuntimeError, "attempting to call 'numeraire' in the past"
fill = self.__model.fill()
requestor = self.__model.requestor()
state = self.__model.state().fill(t, requestor, self.__env)
return fill.numeraire(t, self.__event.pay_currency(), self.__env, requestor, state)
def explanatory_variables(self, t):
if t < 0:
raise RuntimeError, "attempting to call 'explanatory_variables' in the past"
fill = self.__model.fill()
requestor = self.__model.requestor()
state = self.__model.state().fill(t, requestor, self.__env)
exercise = self.__model.exercise()
return exercise(t, fill, state, requestor, self.__env)
def __call__(self, t):
leg = self.__trade.legs()[self.__event.leg_id()]
payoff = leg.payoff()
pay_rcv = leg.pay_receive()
return pay_rcv*payoff(t, self)
|
StarcoderdataPython
|
4951512
|
from django.test import TestCase
from .models import Location, Category, Image
from unittest import skip
class LocationTestClass(TestCase):
'''
Class that tests the location
'''
def setUp(self):
'''
Creates new instances before a test
'''
self.nairobi = Location(name = "nairobi")
self.nairobi.save_location()
def tearDown(self):
'''
Clears database after each test
'''
Location.objects.all().delete()
def test_location_instance(self):
'''
This will test whether the new location created is an instance of the Location class
'''
self.assertTrue(isinstance(self.nairobi, Location))
def test_save_location_method(self):
'''
        This tests whether a new location is added to the db
'''
self.nairobi.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations)> 0)
def test_delete_location(self):
'''
This tests whether location is deleted
'''
self.nairobi.save_location()
locations1= Location.objects.all()
self.assertEqual(len(locations1),1)
self.nairobi.delete_location()
locations2= Location.objects.all()
self.assertEqual(len(locations2),0)
def test_update_location(self):
'''
Tests whether the location name is updated
'''
self.nairobi.save_location()
self.nairobi.update_location(self.nairobi.id,'naivasha')
new_update = Location.objects.get(name = "naivasha")
self.assertEqual(new_update.name, 'naivasha')
def test_display_locations(self):
'''
This tests whether the display location function is getting the locations from the db
'''
self.nairobi.save_location()
self.assertEqual(len(Location.display_all_locations()), 1)
class CategoryTestClass(TestCase):
'''
Class that tests the category
'''
def setUp(self):
'''
Creates new instances before a test
'''
self.nature = Category(name = "nature")
self.nature.save_category()
def tearDown(self):
'''
Clears database after each test
'''
Category.objects.all().delete()
def test_category_instance(self):
'''
This will test whether the new location created is an instance of the Location class
'''
self.assertTrue(isinstance(self.nature, Category))
def test_save_category_method(self):
'''
        This tests whether a new category is added to the db
'''
self.nature.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories)> 0)
def test_delete_category(self):
'''
This tests whether category is deleted
'''
self.nature.save_category()
categories1 = Category.objects.all()
self.assertEqual(len(categories1),1)
self.nature.delete_category()
categories2 = Category.objects.all()
self.assertEqual(len(categories2),0)
def test_update_category(self):
'''
Tests whether the category name is updated
'''
self.nature.save_category()
self.nature.update_category(self.nature.id,'natural')
new_update = Category.objects.get(name = "natural")
self.assertEqual(new_update.name, 'natural')
class ImageTestClass(TestCase):
'''
Class that tests the images
'''
def setUp(self):
'''
Creates new instances before a test
'''
self.nature = Category( name= "nature")
self.nairobi = Location(name = "nairobi")
self.flower = Image(image = "image", name ='flower', description = 'beautiful', category= self.nature, location= self.nairobi)
self.nature.save_category()
self.nairobi.save_location()
self.flower.save_image()
def tearDown(self):
'''
Clears database after each test
'''
Image.objects.all().delete()
Category.objects.all().delete()
Location.objects.all().delete()
def test_image_instance(self):
'''
This will test whether the new image created is an instance of the Image class
'''
self.assertTrue(isinstance(self.flower, Image))
def test_save_image_method(self):
'''
This tests whether new image is added to the db
'''
self.flower.save_image()
images = Image.objects.all()
self.assertTrue(len(images)> 0)
def test_display_images(self):
'''
This tests whether the display image function is getting the images from the db
'''
self.flower.save_image()
self.assertEqual(len(Image.display_all_images()), 1)
def test_delete_images(self):
'''
This tests whether image is deleted
'''
self.flower.save_image()
images1 = Image.objects.all()
self.assertEqual(len(images1),1)
self.flower.delete_image()
images2 = Image.objects.all()
self.assertEqual(len(images2),0)
def test_update_image_description(self):
'''
Tests whether the image description is updated
'''
self.flower.save_image()
self.flower.update_image_description(self.flower.id,'pink')
new_update = Image.objects.get(name = "flower")
self.assertEqual(new_update.description, 'pink')
def test_get_image_by_id(self):
'''
Tests whether image is retrieved by id
'''
self.flower.save_image()
image = Image.get_image_by_id(self.flower.id)
self.assertEqual(image.name, self.flower.name)
def test_search_image(self):
'''
Tests whether image is retrieved by category
'''
self.nature.save_category()
self.flower.save_image()
images = Image.search_image("nature")
self.assertTrue(len(images) > 0)
def test_search_location(self):
'''
Tests whether image is retrieved by location
'''
self.nairobi.save_location()
self.flower.save_image()
images = Image.filter_by_location("nairobi")
self.assertTrue(len(images) > 0)
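
# These test cases are picked up by Django's test runner; with a standard
# project layout (a manage.py at the project root, which is an assumption here)
# they can be run with `python manage.py test`.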
|
StarcoderdataPython
|
5066663
|
#!/usr/bin/env python3
"""
Module to contain the runtime options for the CCPP Framework.
Function to parse arguments to the CCPP Framework and store them in an
object which allows various framework functions to access CCPP
Framework runtime information and parameter values.
"""
# Python library imports
import argparse
import os
_EPILOG = '''
'''
###############################################################################
class CCPPFrameworkEnv:
###############################################################################
"""Object and methods to hold the runtime environment and parameter
options for the CCPP Framework"""
def __init__(self, logger, ndict=None, verbose=0, clean=False,
host_files=None, scheme_files=None, suites=None,
preproc_directives=[], generate_docfiles=False, host_name='',
kind_types=[], use_error_obj=False, force_overwrite=False,
output_root=os.getcwd(), ccpp_datafile="datatable.xml"):
"""Initialize a new CCPPFrameworkEnv object from the input arguments.
<ndict> is a dict with the parsed command-line arguments (or a
dictionary created with the necessary arguments).
<logger> is a logger to be used by users of this object.
"""
emsg = ''
esep = ''
if ndict and ('verbose' in ndict):
self.__verbosity = ndict['verbose']
del ndict['verbose']
else:
self.__verbosity = verbose
# end if
if ndict and ('clean' in ndict):
self.__clean = ndict['clean']
del ndict['clean']
else:
self.__clean = clean
# end if
if ndict and ('host_files' in ndict):
self.__host_files = ndict['host_files']
del ndict['host_files']
if host_files and logger:
wmsg = "CCPPFrameworkEnv: Using ndict, ignoring 'host_files'"
logger.warning(wmsg)
# end if
elif host_files is None:
emsg += esep + "Error: 'host_files' list required"
esep = '\n'
else:
self.__host_files = host_files
# end if
if ndict and ('scheme_files' in ndict):
self.__scheme_files = ndict['scheme_files']
del ndict['scheme_files']
if scheme_files and logger:
wmsg = "CCPPFrameworkEnv: Using ndict, ignoring 'scheme_files'"
logger.warning(wmsg)
# end if
elif scheme_files is None:
emsg += esep + "Error: 'scheme_files' list required"
esep = '\n'
else:
self.__scheme_files = scheme_files
# end if
if ndict and ('suites' in ndict):
self.__suites = ndict['suites']
del ndict['suites']
if suites and logger:
wmsg = "CCPPFrameworkEnv: Using ndict, ignoring 'suites'"
logger.warning(wmsg)
# end if
elif suites is None:
emsg += esep + "Error: 'suites' list required"
esep = '\n'
else:
self.__suites = suites
# end if
if ndict and ('preproc_directives' in ndict):
preproc_defs = ndict['preproc_directives']
del ndict['preproc_directives']
else:
preproc_defs = preproc_directives
# end if
# Turn preproc_defs into a dictionary, start with a list to process
if isinstance(preproc_defs, list):
# Someone already handed us a list
preproc_list = preproc_defs
elif (not preproc_defs) or (preproc_defs == 'UNSET'):
# No preprocessor definitions
preproc_list = list()
elif ',' in preproc_defs:
# String of definitions, separated by commas
preproc_list = [x.strip() for x in preproc_defs.split(',')]
elif isinstance(preproc_defs, str):
# String of definitions, separated by spaces
preproc_list = [x.strip() for x in preproc_defs.split(' ') if x]
else:
wmsg = "Error: Bad preproc list type, '{}'"
emsg += esep + wmsg.format(type(preproc_defs))
esep = '\n'
# end if
# Turn the list into a dictionary
self.__preproc_defs = {}
for item in preproc_list:
tokens = [x.strip() for x in item.split('=', 1)]
if len(tokens) > 2:
emsg += esep + "Error: Bad preproc def, '{}'".format(item)
esep = '\n'
else:
key = tokens[0]
if key[0:2] == '-D':
key = key[2:]
# end if
if len(tokens) > 1:
value = tokens[1]
else:
value = None
# end if
self.__preproc_defs[key] = value
# end if
# end for
if ndict and ('generate_docfiles' in ndict):
self.__generate_docfiles = ndict['generate_docfiles']
del ndict['generate_docfiles']
else:
self.__generate_docfiles = generate_docfiles
# end if
if ndict and ('host_name' in ndict):
self.__host_name = ndict['host_name']
del ndict['host_name']
else:
self.__host_name = host_name
# end if
self.__generate_host_cap = self.host_name != ''
self.__kind_dict = {}
if ndict and ("kind_type" in ndict):
kind_list = ndict["kind_type"]
del ndict["kind_type"]
else:
kind_list = kind_types
# end if
# Note that the command line uses repeated calls to 'kind_type'
for kind in kind_list:
kargs = [x.strip() for x in kind.strip().split('=')]
if len(kargs) != 2:
emsg += esep
emsg += "Error: '{}' is not a valid kind specification "
emsg += "(should be of the form <kind_name>=<kind_spec>)"
emsg = emsg.format(kind)
esep = '\n'
else:
kind_name, kind_spec = kargs
# Do not worry about duplicates, just use last value
self.__kind_dict[kind_name] = kind_spec
# end if
# end for
# We always need a kind_phys so add a default if necessary
if "kind_phys" not in self.__kind_dict:
self.__kind_dict["kind_phys"] = "REAL64"
# end if
if ndict and ('use_error_obj' in ndict):
self.__use_error_obj = ndict['use_error_obj']
del ndict['use_error_obj']
else:
self.__use_error_obj = use_error_obj
# end if
if ndict and ('force_overwrite' in ndict):
self.__force_overwrite = ndict['force_overwrite']
del ndict['force_overwrite']
else:
self.__force_overwrite = force_overwrite
# end if
# Make sure we know where output is going
if ndict and ('output_root' in ndict):
self.__output_root = ndict['output_root']
del ndict['output_root']
else:
self.__output_root = output_root
# end if
self.__output_dir = os.path.abspath(self.output_root)
# Make sure we can create output database
if ndict and ('ccpp_datafile' in ndict):
self.__datatable_file = os.path.normpath(ndict['ccpp_datafile'])
del ndict['ccpp_datafile']
else:
self.__datatable_file = ccpp_datafile
# end if
if not os.path.isabs(self.datatable_file):
self.__datatable_file = os.path.join(self.output_dir,
self.datatable_file)
# end if
self.__logger = logger
## Check to see if anything is left in dictionary
if ndict:
for key in ndict:
emsg += esep + "Error: Unknown key in <ndict>, '{}'".format(key)
esep = '\n'
# end for
# end if
# Raise an exception if any errors were found
if emsg:
raise ValueError(emsg)
# end if
@property
def verbosity(self):
"""Return the <verbosity> property for this CCPPFrameworkEnv object."""
return self.__verbosity
@property
def clean(self):
"""Return the <clean> property for this CCPPFrameworkEnv object."""
return self.__clean
@property
def host_files(self):
"""Return the <host_files> property for this CCPPFrameworkEnv object."""
return self.__host_files
@property
def scheme_files(self):
"""Return the <scheme_files> property for this
CCPPFrameworkEnv object."""
return self.__scheme_files
@property
def suites(self):
"""Return the <suites> property for this
CCPPFrameworkEnv object."""
return self.__suites
@property
def preproc_defs(self):
"""Return the <preproc_defs> property for this
CCPPFrameworkEnv object."""
return self.__preproc_defs
@property
def generate_docfiles(self):
"""Return the <generate_docfiles> property for this
CCPPFrameworkEnv object."""
return self.__generate_docfiles
@property
def host_name(self):
"""Return the <host_name> property for this CCPPFrameworkEnv object."""
return self.__host_name
@property
def generate_host_cap(self):
"""Return the <generate_host_cap> property for this
CCPPFrameworkEnv object."""
return self.__generate_host_cap
def kind_spec(self, kind_type):
"""Return the kind specification for kind type, <kind_type>
for this CCPPFrameworkEnv object.
If there is no entry for <kind_type>, return None."""
kind_spec = None
if kind_type in self.__kind_dict:
kind_spec = self.__kind_dict[kind_type]
# end if
return kind_spec
def kind_types(self):
"""Return a list of all kind types defined in this
CCPPFrameworkEnv object."""
return self.__kind_dict.keys()
@property
def use_error_obj(self):
"""Return the <use_error_obj> property for this
CCPPFrameworkEnv object."""
return self.__use_error_obj
@property
def force_overwrite(self):
"""Return the <force_overwrite> property for this
CCPPFrameworkEnv object."""
return self.__force_overwrite
@property
def output_root(self):
"""Return the <output_root> property for this
CCPPFrameworkEnv object."""
return self.__output_root
@property
def output_dir(self):
"""Return the <output_dir> property for this CCPPFrameworkEnv object."""
return self.__output_dir
@property
def datatable_file(self):
"""Return the <datatable_file> property for this
CCPPFrameworkEnv object."""
return self.__datatable_file
@property
def logger(self):
"""Return the <logger> property for this CCPPFrameworkEnv object."""
return self.__logger
###############################################################################
def parse_command_line(args, description, logger=None):
###############################################################################
"""Create an ArgumentParser to parse and return a CCPPFrameworkEnv
object containing the command-line arguments and related quantities."""
ap_format = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(description=description,
formatter_class=ap_format, epilog=_EPILOG)
parser.add_argument("--host-files", metavar='<host files filename>',
type=str, required=True,
help="""Comma separated list of host filenames to process
Filenames with a '.meta' suffix are treated as host model metadata files
Filenames with a '.txt' suffix are treated as containing a list of .meta
filenames""")
parser.add_argument("--scheme-files", metavar='<scheme files filename>',
type=str, required=True,
help="""Comma separated list of scheme filenames to process
Filenames with a '.meta' suffix are treated as scheme metadata files
Filenames with a '.txt' suffix are treated as containing a list of .meta
filenames""")
parser.add_argument("--suites", metavar='<Suite definition file(s)>',
type=str, required=True,
help="""Comma separated list of suite definition filenames to process
Filenames with a '.xml' suffix are treated as suite definition XML files
Other filenames are treated as containing a list of .xml filenames""")
parser.add_argument("--preproc-directives",
metavar='VARDEF1[,VARDEF2 ...]', type=str, default='',
help="Proprocessor directives used to correctly parse source files")
parser.add_argument("--ccpp-datafile", type=str,
metavar='<data table XML filename>',
default="datatable.xml",
help="Filename for information on content generated by the CCPP Framework")
parser.add_argument("--output-root", type=str,
metavar='<directory for generated files>',
default=os.getcwd(),
help="directory for generated files")
parser.add_argument("--host-name", type=str, default='',
help='''Name of host model to use in CCPP API
If this option is passed, a host model cap is generated''')
parser.add_argument("--clean", action='store_true', default=False,
help='Remove files created by this script, then exit')
parser.add_argument("--kind-type", type=str, action='append',
metavar="kind_type", default=list(),
help="""Data size for real(<kind_type>) data.
Entry in the form of <kind_type>=<kind_val>
e.g., --kind-type "kind_phys=REAL64"
Enter more than one --kind-type entry to define multiple CCPP kinds.
<kind_val> SHOULD be a valid ISO_FORTRAN_ENV type""")
parser.add_argument("--generate-docfiles",
metavar='HTML | Latex | HTML,Latex', type=str,
help="Generate LaTeX and/or HTML documentation")
parser.add_argument("--use-error-obj", action='store_true', default=False,
help="""Host model and caps use an error object
instead of ccpp_error_message and ccpp_error_code.""")
parser.add_argument("--force-overwrite", action='store_true', default=False,
help="""Overwrite all CCPP-generated files, even
if unmodified""")
parser.add_argument("--verbose", action='count', default=0,
help="Log more activity, repeat for increased output")
pargs = parser.parse_args(args)
return CCPPFrameworkEnv(logger, vars(pargs))
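
# Illustrative usage sketch (the argument handling below is hypothetical; a
# real caller in the framework passes its own logger and argument list):
#
#     import logging, sys
#     run_env = parse_command_line(sys.argv[1:], __doc__,
#                                  logger=logging.getLogger(__name__))
#     print(run_env.suites, run_env.output_dir, run_env.kind_spec("kind_phys"))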
|
StarcoderdataPython
|
13125
|
<filename>asteroids/whatsobservable.py
import datetime
import ephem
import os.path
import os
import numpy as np
import pdb
from pandas import DataFrame
__version__ = '0.1.2'
class Error(Exception):
pass
def _convert_datetime_to_pyephem_date_string(in_datetime):
return in_datetime.strftime('%Y/%m/%d %H:%M:%S')
def _find_cached_file(filename):
base = os.path.expanduser('~/')
# Look in a few likely locations before doing a giant search
filenames_to_test = [os.path.join(base, filename),
os.path.join(base, 'refdata', filename),
os.path.join(base, 'Dropbox', filename),
os.path.join(base, 'Dropbox', 'refdata', filename)]
for cur_filename in filenames_to_test:
if os.path.isfile(cur_filename):
return cur_filename
# didn't find it, so do a giant search
for root, dirs, files in os.walk(base):
if filename in files:
return os.path.join(root, filename)
return "File Not Found"
def get_latlon_from_observatory_code(code):
if type(code) is int:
code = '%03i' % code
elif type(code) is str:
code = code[:3] # trim any remainder, like @399
try:
obscode_filename = _find_cached_file('ObsCodes.html')
# TODO: add a verbose option to print path to ObsCodes.html
obscodes = open(obscode_filename, 'r').read().splitlines()
except:
raise Error("Problem reading ObsCodes.html file from disk. \n"
"Most likely you need to go download a copy from: \n"
" http://www.minorplanetcenter.net/iau/lists/ObsCodes.html")
curobsline = [a for a in obscodes if a.startswith(code)][0]
output = {'obscode':curobsline[0:3],
'longitude':float(curobsline[4:13]),
'cos':float(curobsline[13:21]),
'sin':float(curobsline[21:30]),
'name':curobsline[30:].strip()}
# From the documentation:
# "The following list gives the observatory code, longitude (in degrees east of Greenwich) and the parallax
# constants (rho cos phi' and rho sin phi', where phi' is the geocentric latitude and rho is the geocentric
# distance in earth radii) for each observatory. It is updated nightly."
output['latitude'] = np.degrees(np.arctan2(output['sin'], output['cos']))
# Unsure where the following comment came from:
# geocentric distance in earth radii:
# output['sin']/np.sin(np.radians(output['latitude']))
# NOTE: while ObsCodes.html is clear about being geocentric, it is unclear what pyephem wants, so blaze ahead
# TODO: confirm whether pyephem wants geocentric
return output
def pack_mpc_date(in_datetime):
"""
Convert a datetime.date or datetime.datetime object into the MPC packed date format, as described at:
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
if in_datetime.year >= 1800 and in_datetime.year < 1900:
century = 'I'
elif in_datetime.year >= 1900 and in_datetime.year < 2000:
century = 'J'
elif in_datetime.year >= 2000 and in_datetime.year < 2100:
century = 'K'
else:
raise Error("Year is not within 1800-2099: " + in_datetime.isoformat())
year = in_datetime.strftime('%y')
translate = {}
for i in range(10):
translate[i] = str(i)
for i in range(10,32):
translate[i] = chr(ord('A') + i - 10)
month = translate[in_datetime.month]
day = translate[in_datetime.day]
try:
decimaldays = ('%7.5f' % ((in_datetime.hour + (in_datetime.minute / 60.) + (in_datetime.second / 3600.)) / 24.))[2:]
except:
decimaldays = ''
return century + year + month + day + decimaldays
def unpack_mpc_date(in_packed):
"""
Convert a MPC packed date format (as described below) to a datetime.date or datetime.datetime object
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
translate = {}
for i in range(10):
translate[str(i)] = i
for i in range(10,32):
translate[chr(ord('A') + i - 10)] = i
if in_packed[0] == 'I':
year = 1800
elif in_packed[0] == 'J':
year = 1900
elif in_packed[0] == 'K':
year = 2000
else:
raise Error('Unrecognized century code at start of: ' + in_packed)
year += int(in_packed[1:3])
month = translate[in_packed[3]]
day = translate[in_packed[4]]
if len(in_packed) == 5:
return datetime.date(year, month, day)
else:
decimaldays = float('0.' + in_packed[5:])
hour = int(decimaldays * 24.)
minute = int((decimaldays * 24. - hour) * 60.)
second = int(round(decimaldays * 24. * 60. * 60. - (hour * 3600.) - (minute * 60.)))
return datetime.datetime(year, month, day, hour, minute, second)
#TODO: clean up the following comments and incorporate into the code
# can get all numbered asteroids (and other junk) from minor planet center in MPCORB.DAT file:
# [MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
# [Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
# 944 Hidalgo line as of 2013-07-26 is:
#Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
#00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
# But, I want in xephem format, [described here](http://www.clearskyinstitute.com/xephem/help/xephem.html#mozTocId468501)
# and minor planet provides a subset in xephem format [here](http://www.minorplanetcenter.net/iau/Ephemerides/Bright/2013/Soft03Bright.txt):
# though to ensure I was comparing same exact orbit solutions, used 944 Hidalgo from
# http://www.minorplanetcenter.net/iau/Ephemerides/Distant/Soft03Distant.txt
# From MPO263352
#944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
# So, for my purposes, the xephem format, separated by commas is:
# NUMBER NAME - easy enough....
# e - for ecliptic elliptical orbit
# i = inclination, degrees (directly from MPCORB.DAT)
# O = longitude of ascending node, degrees (directly from MPCORB.DAT)
# o = argument of perihelion, degrees (directly from MPCORB.DAT)
# a = mean distance (aka semi-major axis), AU (directly from MPCORB.DAT)
# n = mean daily motion, degrees per day (computed from a**3/2 if omitted) (directly from MPCORB.DAT)
# e = eccentricity, must be < 1 (directly from MPCORB.DAT)
# M = mean anomaly, i.e., degrees from perihelion (directly from MPCORB.DAT)
# E = epoch date, i.e., time of M MM/DD.D/YYYY
# in MPCORB.DAT epoch date is packed according to rules:
# http://www.minorplanetcenter.net/iau/info/PackedDates.html
# Subfield 10A First date these elements are valid, optional
# SubField 10B Last date these elements are valid, optional
# D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT, so 2000
# First component of magnitude model, either g from (g,k) or H from (H,G). Specify which by preceding the number with a "g" or an "H". In absence of either specifier the default is (H,G) model. See Magnitude models.
# corresponds to H in MPCORB.DAT, just need to preface with an 'H'
# Second component of magnitude model, either k or G (directly from MPCORB.DAT)
# s = angular size at 1 AU, arc seconds, optional - I don't care, so skip....
def convert_mpcorb_to_xephem(input):
"""
convert from, e.g.:
[MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
[Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
# 944 Hidalgo line as of 2013-07-26 is:
00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
to
# From MPO263352
944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
input is a single line of text, output will include a newline character within it (but no newline at end)
"""
output = '# From ' + input[107:116] + '\n'
output += input[166:194].strip().replace('(','').replace(')','') + ','
output += 'e,'
output += input[59:68].strip() + ',' # i = inclination, degrees
output += input[48:57].strip() + ',' # O = longitude of ascending node, degrees
output += input[37:46].strip() + ',' # o = argument of perihelion, degrees
output += input[92:103].strip() + ',' # a = mean distance (aka semi-major axis), AU
output += input[80:91].strip() + ',' # n = mean daily motion, degrees per day (computed from a**3/2 if omitted)
output += input[70:79].strip() + ',' # e = eccentricity, must be < 1
output += input[26:35].strip() + ',' # M = mean anomaly, i.e., degrees from perihelion
output += unpack_mpc_date(input[20:25].strip()).strftime('%m/%d/%Y') + ',' # E = epoch date, i.e., time of M
output += '2000,' # D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT
output += 'H' + input[8:13].strip() + ',' # First component of magnitude model
output += input[14:19].strip() # Second component of magnitude model
return output
def minorplanets(in_datetime, observatory_code,
max_objects=None,
max_magnitude=None, require_magnitude=True,
max_zenithdistance_deg=90.0,
min_heliocentric_distance_AU=None, max_heliocentric_distance_AU=None,
min_topocentric_distance_AU=None, max_topocentric_distance_AU=None):
"""
in_datetime - datetime.datetime(), e.g. datetime.datetime.utcnow()
observatory_code - the Code of the observatory in
http://www.minorplanetcenter.net/iau/lists/ObsCodes.html
can be either string or integer.
max_objects - default is None, otherwise limits the return to this number
of observable objects
max_magnitude - default is None, otherwise limits return to objects
brighter than or equal to this magnitude
(as calculated by PyEphem from the MPC data)
(TODO: confirm whether this is V-band, R-band,
or other...)
require_magnitude - default is True. If False and max_magnitude is None,
then return all objects, whether PyEphem can calculate
a magnitude or not.
max_zenithdistance_deg - default is 90 degrees (horizon)
min/max_heliocentric_distance_AU - defaults are None
min/max_topocentric_distance_AU - defaults are None
"""
obs_info = get_latlon_from_observatory_code(observatory_code)
obs = ephem.Observer()
obs.lat = np.radians(obs_info['latitude'])
obs.lon = np.radians(obs_info['longitude'])
obs.date = _convert_datetime_to_pyephem_date_string(in_datetime)
mpc_filename = _find_cached_file('MPCORB.DAT')
if mpc_filename == 'File Not Found':
raise Error("Problem reading MPCORB.DAT file from disk. \n"
"Most likely you need to go download a copy from: \n"
" http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT")
if max_magnitude is not None:
require_magnitude = True
matching_objects = []
with open(mpc_filename) as f:
in_header = True
for line in f:
if in_header is False and len(line) > 1:
if (not require_magnitude) or (require_magnitude and (line[8:13] != ' ')):
eph = ephem.readdb(convert_mpcorb_to_xephem(line).splitlines()[1])
eph.compute(obs)
if (max_magnitude is None) or (eph.mag <= max_magnitude):
if ((max_zenithdistance_deg is None) or
(np.degrees(np.pi/2. - eph.alt) <= max_zenithdistance_deg)):
if ((min_heliocentric_distance_AU is None) or
(eph.sun_distance >= min_heliocentric_distance_AU)):
if ((max_heliocentric_distance_AU is None) or
(eph.sun_distance <= max_heliocentric_distance_AU)):
if ((min_topocentric_distance_AU is None) or
(eph.earth_distance >= min_topocentric_distance_AU)):
if ((max_topocentric_distance_AU is None) or
(eph.earth_distance <= max_topocentric_distance_AU)):
matching_objects.append(eph)
else:
if line.startswith('-------------------'):
in_header = False
if max_objects is not None:
if len(matching_objects) >= max_objects:
break
name = [a.name for a in matching_objects]
d = {}
d['rise_time'] = [a.rise_time.datetime() if a.rise_time is not None else np.nan for a in matching_objects]
d['transit_time'] = [a.transit_time.datetime() if a.transit_time is not None else np.nan for a in matching_objects]
d['set_time'] = [a.set_time.datetime() if a.set_time is not None else np.nan for a in matching_objects]
d['raJ2000_deg'] = [np.degrees(a.a_ra) for a in matching_objects]
d['decJ2000_deg'] = [np.degrees(a.a_dec) for a in matching_objects]
d['mag'] = [a.mag for a in matching_objects]
d['R_AU'] = [a.sun_distance for a in matching_objects]
d['delta_AU'] = [a.earth_distance for a in matching_objects]
moon = ephem.Moon()
moon.compute(obs.date)
d['O-E-M_deg'] = [np.degrees(ephem.separation(moon, a)) for a in matching_objects]
output = DataFrame(d, index=name)
output = output[['rise_time', 'transit_time', 'set_time', 'raJ2000_deg', 'decJ2000_deg',
'mag', 'R_AU', 'delta_AU', 'O-E-M_deg']] # re-order columns to something sensible
return output
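

if __name__ == '__main__':
    # Illustrative usage sketch: observatory code 568 (Mauna Kea) is just an
    # example, and the call needs local copies of ObsCodes.html and MPCORB.DAT
    # as explained in the error messages above.
    observable = minorplanets(datetime.datetime.utcnow(), 568,
                              max_objects=10, max_magnitude=15.0)
    print(observable)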
|
StarcoderdataPython
|
4994092
|
<reponame>axfontai/HermesServer
# required libraries: bokeh - pandas
from bokeh.io import curdoc, show
from bokeh.plotting import figure, output_file
from bokeh.transform import jitter, factor_cmap, dodge
from bokeh.models import (ColumnDataSource, Drag, PanTool, BoxZoomTool, LassoSelectTool,
WheelZoomTool, ResetTool, SaveTool, HoverTool, BoxSelectTool,
LinearColorMapper, BasicTicker, PrintfTickFormatter, ColorBar, )
from bokeh.models.widgets import MultiSelect, DateRangeSlider, Panel, Tabs, Button, Select
from bokeh.models.annotations import Span
from bokeh.palettes import Set3, Spectral6, RdYlGn4, Category10
from bokeh.layouts import column, row
from bokeh.models.glyphs import Text
from bokeh.tile_providers import STAMEN_TERRAIN
import pandas as pd
import sqlalchemy as sqlal
import numpy as np
# Use the command bokeh serve --show BokehServerHermes3.py to
# start the local server and take advantage of the callbacks
# directory in which the sqlite3 DBs are installed
SQLiteDir = "C:\\Users\\afontaine\\Documents\\Python\\SQLite\\"
HermesDB = "Hermes2018.db" #DB sqlite3 avec donnees des véhicules
MeldpuntenDB = "Meldpunten.db" #DB sqlite3 avec données th des pts d'emission
#SQLite => SQLAlchemy engine
hermes_engine = sqlal.create_engine('sqlite:///' + SQLiteDir + HermesDB)
meldpunten_engine = sqlal.create_engine('sqlite:///' + SQLiteDir + MeldpuntenDB)
# list of the b3s in the hermes database
B3S_list=sqlal.inspect(hermes_engine).get_table_names()
# intersection displayed first
CurrentB3S = 'B3S274'
StartDate = pd.Timestamp('2018-09-14 00:00:00')
FirstDate = '2017-01-01 00:00:00'
FirstDate = pd.Timestamp('2018-01-01 00:00:00')
def List_to_SQL(list):
first = True
sql_str = ''
for i in list:
if first:
sql_str = str(i) + ')'
first = False
else:
sql_str = str(i) + ',' + sql_str
sql_str = '(' + sql_str
return sql_str
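# Note: List_to_SQL([1, 2, 3]) returns '(3,2,1)'; the items come out reversed,
# which does not matter for the SQL "IN (...)" clauses built with it below.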
def list_kiko(b3s):
    # Extracts the kikos of the db as a list and as a str
ListKiKo = pd.read_sql_query('SELECT DISTINCT KiKo FROM ' + b3s + ';', hermes_engine)
ListKiKoStr = List_to_SQL(ListKiKo['KiKo'])
ListKiKo = ListKiKo['KiKo'].tolist()
ListKiKo = [str(i) for i in ListKiKo]
return ListKiKoStr, ListKiKo
def list_lignes(b3s):
    # Extracts the line numbers of the db as a list and as a str
ListLignes = pd.read_sql_query('SELECT DISTINCT NumLigne FROM ' + b3s + ';', hermes_engine)
ListLignesStr = List_to_SQL(ListLignes['NumLigne'])
ListLignes = ListLignes['NumLigne'].tolist()
ListLignes = [str(i) for i in ListLignes]
return ListLignesStr, ListLignes
def first_date(b3s):
    # Extracts the oldest date of the db as a Timestamp
FirstDate = pd.read_sql_query('SELECT min(Jour) FROM ' + b3s + ';', hermes_engine)
FirstDate = list(FirstDate['min(Jour)'])
# FirstDateStr = FirstDate[0]
FirstDate = pd.Timestamp(FirstDate[0])
return FirstDate
def last_date(b3s):
    # Extracts the most recent date of the db as a Timestamp
LastDate = pd.read_sql_query('SELECT max(Jour) FROM ' + b3s + ';', hermes_engine)
LastDate = list(LastDate['max(Jour)'])
LastDateStr = str(LastDate[0])
LastDateTimestamp = pd.Timestamp(LastDate[0])
return LastDateStr, LastDateTimestamp
def FormatDataFrame(DataFrame):
    # Formats the DF so that it is readable
DataFrame['JourDatetime'] = pd.to_datetime(DataFrame['Jour'], format='%Y-%m-%d %H:%M:%S')
DataFrame['H_VA'] = pd.to_datetime(DataFrame['H_VA'], format='%H:%M:%S')
DataFrame['H_AA'] = pd.to_datetime(DataFrame['H_AA'], format='%H:%M:%S')
DataFrame['H_LF'] = pd.to_datetime(DataFrame['H_LF'], format='%H:%M:%S')
DataFrame['H_AF'] = pd.to_datetime(DataFrame['H_AF'], format='%H:%M:%S')
DataFrame['TpsVA_LF'] = DataFrame['H_LF'].sub(DataFrame['H_VA'])
DataFrame['TpsVA_LF'] = DataFrame['TpsVA_LF'].dt.total_seconds()
DataFrame = DataFrame.replace(0.0, np.NaN)
return DataFrame
def SQLquery(b3s, KiKo, NumLigne, StartDate, EndDate):
    # Builds an SQL query readable by sqlite3
ColumnInDF = """ Jour, NumLigne, H_VA, H_AA, H_LF, H_AF, KiKo, NumParc, WeekDay,
TpsVA_AA, SpeedVA_AA, TpsAA_LF, SpeedAA_LF, TpsLF_AF, SpeedLF_AF, SpeedVA_AF,
X_VA_merc, Y_VA_merc, X_AA_merc, Y_AA_merc, X_AF_merc, Y_AF_merc """
RemoveFalseX = ' AND X_VA BETWEEN 4.2 AND 4.5 AND X_AA BETWEEN 4.2 AND 4.5 AND X_LF BETWEEN 4.2 AND 4.5 AND X_AF BETWEEN 4.2 AND 4.5'
RemoveFalseY = ' AND Y_VA BETWEEN 50.2 AND 51 AND Y_AA BETWEEN 50.2 AND 51 AND Y_LF BETWEEN 50.2 AND 51 AND Y_AF BETWEEN 50.2 AND 51'
RemoveNaN = ' AND SpeedVA_AF != 0.0 AND SpeedVA_AF < 80'
query = 'SELECT' + ColumnInDF + 'FROM ' + b3s + ' WHERE KiKo IN ' + KiKo + ' AND NumLigne IN ' + NumLigne + ' AND Jour BETWEEN ' + '"' + StartDate + '"' + ' AND ' + '"' + EndDate + '"' + RemoveFalseX + RemoveFalseY + RemoveNaN + ';'
DataFrame = pd.read_sql(query, hermes_engine)
DataFrame = FormatDataFrame(DataFrame)
return DataFrame
def Meldpuntenquery(b3s, KiKo, NumLigne):
    # Builds an SQL query to extract a DF from sql
ColumnInDF = """ KiKo, NumLigne, Type_Message, Priorite, Sens, Type_Gestion, Power, X_effectif_webmerc,
Y_effectif_webmerc, TpsTheoriques """
query = 'SELECT' + ColumnInDF + 'FROM ' + b3s + ' WHERE KiKo IN ' + KiKo + ' AND NumLigne IN ' + NumLigne + ';'
Dataframe = pd.read_sql_query(query, meldpunten_engine)
# Dataframe['TpsTheoriques'] = Dataframe.apply(lambda row: float(row['TpsTheorique']), axis=1)
return Dataframe
# Retrieves the base values to work on the db
ListKiKoStr, ListKiKo = list_kiko(CurrentB3S)
ListlignesStr, ListLignes = list_lignes(CurrentB3S)
LastDateStr, LastDateTimestamp = last_date(CurrentB3S)
# creates a Column Data Source readable by bokeh and used by the charts
StartDict = dict(Jour=[], NumLigne=[], H_VA=[], H_AA=[], H_LF=[], H_AF=[], KiKo=[], NumParc=[], WeekDay=[], TpsVA_LF=[],
TpsLF_AF=[], SpeedLF_AF=[], SpeedVA_AF=[], X_VA_merc=[], Y_VA_merc=[], X_AA_merc=[], Y_AA_merc=[]
, X_AF_merc=[], Y_AF_merc=[], JourDatetime=[])
events_sources = ColumnDataSource(data=StartDict)
StartDictMeldpunten = dict(KiKo=[], NumLigne=[], Type_Message=[], Priorite=[], Sens=[], Type_Gestion=[], Power=[],
X_effectif_webmerc=[], Y_effectif_webmerc=[], TpsTheorique=[])
meldpunten_source = ColumnDataSource(data=StartDictMeldpunten)
# creates the widgets used, with their default values
# B3S to select
B3SSelect = Select(title="Carrefour", value=CurrentB3S, options=B3S_list, width=200)
# Dates for the points:
DeltaDates = DateRangeSlider(title="Dates a prendre en compte", start=FirstDate, end=LastDateTimestamp,
value=(StartDate, LastDateTimestamp), step=1, width=450)
# KiKos
multi_select_KiKo = MultiSelect(title="KiKos a selectionner", value=ListKiKo, options=ListKiKo, width=200)
# Lines
multi_select_Lignes = MultiSelect(title="Lignes a selectionner", value=ListLignes, options=ListLignes, width=200)
# Button to trigger the update
button_update = Button(label="Changer les dates", width=100)
# DATA CHART
# creates a scatter chart with all the elements spread out by hour and day of the week
DAYS = ['Dim', 'Sam', 'Ven', 'Jeu', 'Mer', 'Mar', 'Lun']
hover = HoverTool(tooltips=[("Jour", "@Jour"), ("Ligne", "@NumLigne"), ("Véhicule", "@NumParc"), ("KiKo", "@KiKo"),
("Vitesse", "@SpeedVA_AF")])
TOOLS = [PanTool(), BoxZoomTool(), LassoSelectTool(), BoxSelectTool(), WheelZoomTool(), ResetTool(), SaveTool(),
hover]
Jitter = figure(plot_width=560, plot_height=370, y_range=DAYS, x_axis_type='datetime', tools=TOOLS,
output_backend="webgl")
Jitter.xaxis[0].formatter.days = ['%Hh']
Jitter.x_range.range_padding = 0
Jitter.ygrid.grid_line_color = None
tab_points = Panel(child=Jitter, title="Données")
# data of the scatter chart with all the elements spread out by hour and day of the week
CircleChart = Jitter.circle(x='H_VA', y=jitter('WeekDay', width=0.8, range=Jitter.y_range), size=3, legend="KiKo",
color=factor_cmap('KiKo', palette=Category10[10], factors=ListKiKo), source=events_sources,
alpha=0.8, hover_color='gold')
# AVERAGE SPEEDS CHART
# initializes the data
VitessesStartDict = dict(heure=[], Jour=[], rate=[])
VitesseSource = ColumnDataSource(data=VitessesStartDict)
# function to extract the speeds from the DataFrame
def vitesses_mediannes(df):
DAYS = ['Lun', 'Mar', 'Mer', 'Jeu', 'Ven', 'Sam', 'Dim']
d = {'heure': [], 'Lun': [], 'Mar': [], 'Mer': [], 'Jeu': [], 'Ven': [], 'Sam': [], 'Dim': []}
heure1 = pd.to_datetime('1900-01-01 00:00:00')
for i in range(48):
templist = []
heure2 = heure1 + pd.Timedelta(minutes=30)
for day in DAYS:
d[day].append(df['SpeedVA_AF'][(df['H_VA'] > heure1) & (df['H_VA'] < heure2) & (df['WeekDay'] == day)].mean(
skipna=True))
d['heure'].append(heure1 + pd.Timedelta(minutes=15))
heure1 = heure2
df = pd.DataFrame(data=d)
return df
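# Hypothetical vectorised variant of vitesses_mediannes() above (a sketch only,
# not wired into the dashboard; it assumes the same H_VA/WeekDay/SpeedVA_AF
# columns). It averages SpeedVA_AF per weekday in 30-minute bins with pandas
# resampling instead of the explicit double loop.
def vitesses_moyennes_groupby(df):
    grouped = (df.set_index('H_VA')
                 .groupby('WeekDay')['SpeedVA_AF']
                 .resample('30min')
                 .mean())
    return grouped.rename('rate').reset_index()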
def vitesse_df(Dataframe):
VitesseDF = vitesses_mediannes(Dataframe)
VitesseDF = VitesseDF.set_index('heure')
VitesseDF.columns.name = 'Jour'
VitesseSource = pd.DataFrame(VitesseDF.stack(), columns=['rate']).reset_index()
return VitesseSource
# Create a heatmap of the average speeds per half hour
colors = ["#550b1d", "#550b1d", "#933b41", "#cc7878", "#ddb7b1", "#dfccce", '#e2e2e2', "#c9d9d3", "#a5bab7",
"#75968f", "#577C74", "#577C74", '#33514B', '#33514B', "#1F3631", "#1F3631"]
mapper = LinearColorMapper(palette=colors, low=0, high=40)
TOOLS_vitesse = "hover,save,pan,box_zoom,reset,wheel_zoom"
Vitesses = figure(plot_width=597, plot_height=370, x_axis_type='datetime', y_range=DAYS, tools=TOOLS_vitesse)
Vitesses.grid.grid_line_color = None
Vitesses.axis.major_tick_line_color = None
Vitesses.xaxis[0].formatter.days = ['%Hh']
Vitesses.x_range.range_padding = 0
# Colour bar legend
bar = ColorBar(color_mapper=mapper, location=(0, -10), major_label_text_font_size="5pt", width=5, height=335)
Vitesses.add_layout(bar, 'right')
tab_vitesses = Panel(child=Vitesses, title="Vitesses Moyennes")
Vitesses.select_one(HoverTool).tooltips = [('vitesse moyenne', '@rate'), ]
# Data for the speed plot
VitessesChart = Vitesses.rect(x='heure', y='Jour', width=1800000, height=1, source=VitesseSource,
fill_color={'field': 'rate', 'transform': mapper}, line_color=None)
# MAP
# Create a map with the points
TOOLS3 = [PanTool(), BoxZoomTool(), LassoSelectTool(), BoxSelectTool(), WheelZoomTool(),
ResetTool(), SaveTool(), hover]
map = figure(plot_width=600, plot_height=600, tools=TOOLS3, match_aspect=True, aspect_scale=1.0)
map.add_tile(STAMEN_TERRAIN)
map.axis.visible = False
# Map data
MapAF = map.circle(x='X_AF_merc', y='Y_AF_merc', size=4, alpha=0.05, color='green', source=events_sources,
hover_color='gold')
MapVA = map.circle(x='X_VA_merc', y='Y_VA_merc', size=4, alpha=0.05, color='red', source=events_sources,
hover_color='gold')
MapAA = map.circle(x='X_AA_merc', y='Y_AA_merc', size=4, alpha=0.05, color='blue', source=events_sources,
hover_color='gold')
colormap = ['darkred', 'darkblue', 'darkgreen']
messages = ['VA', 'AA', 'AF']
MapTh = map.square(x='X_effectif_webmerc', y='Y_effectif_webmerc', size=10, source=meldpunten_source,
color=factor_cmap('Type_Message', palette=colormap, factors=messages))
# HISTOGRAMS
# Create a histogram of the travel-time values
TOOLS2 = [PanTool(), BoxZoomTool(), WheelZoomTool(), ResetTool(), SaveTool(), HoverTool()]
hist = figure(title="Distribution temps de parcours pour le " + CurrentB3S, plot_width=1200, plot_height=400,
tools=TOOLS2, x_range=(0, 120), y_range=(0,0.5))
hist.xaxis.axis_label = 'temps en secondes'
# Initialise the histogram data
histVA_LF, edgesVA_LF = np.histogram([0, 120], density=True, bins=range(0, 121))
histAA_LF, edgesAA_LF = np.histogram([0, 120], density=True, bins=range(0, 121))
histLF_AF, edgesLF_AF = np.histogram([0, 120], density=True, bins=range(0, 121))
HistLF_AF = hist.quad(top=histLF_AF, bottom=0, left=edgesLF_AF[:-1], right=edgesLF_AF[1:], fill_color='yellowgreen',
fill_alpha=0.5, line_color='darkgreen')
HistVA_LF = hist.quad(top=histVA_LF, bottom=0, left=edgesVA_LF[:-1], right=edgesVA_LF[1:], fill_color='coral',
fill_alpha=0.5, line_color='red')
HistAA_LF = hist.quad(top=histAA_LF, bottom=0, left=edgesAA_LF[:-1], right=edgesAA_LF[1:], fill_color='steelblue',
fill_alpha=0.5, line_color='navy')
# Theoretical values overlaid on the histogram
TempsTheoriques = dict(TpsTheoriques=[], TpsTheoriques2=[])
Spans = hist.quad(top=0.5, bottom=0, left='TpsTheorique', right='TpsTheorique', source=meldpunten_source,
color=factor_cmap('Type_Message', palette=colormap, factors=messages))
BoxTh = hist.quad(top=0.5, bottom=0.4, left='TpsTheorique', right='TpsTheorique', source=meldpunten_source,
color=factor_cmap('Type_Message', palette=colormap, factors=messages))
# Initialise the text table showing histogram statistics inside the travel-time histogram
GlyphSource = ColumnDataSource(dict(x=[60, 80, 90, 100], y=[0.1, 0.1, 0.1, 0.1],
text=["", "", "", ""], colors=['black', 'coral', 'steelblue', 'yellowgreen']))
glyph = Text(x='x', y='y', text='text', text_font_size='12pt', text_baseline='bottom', text_color='colors',
text_font_style='bold')
hist.add_glyph(GlyphSource, glyph)
def update_database():
    # Refresh the values and rebuild the DataFrame
    # Reset the query parameters
newb3s = B3SSelect.value
newKiKo = multi_select_KiKo.value
newKiKo = List_to_SQL(newKiKo)
newNumLigne = multi_select_Lignes.value
newNumLigne = List_to_SQL(newNumLigne)
newStartDate = DeltaDates.value_as_datetime[0].strftime('%Y-%m-%d %H:%M:%S')
newEndDate = DeltaDates.value_as_datetime[1].strftime('%Y-%m-%d %H:%M:%S')
    # Update the vehicle DataSource used by the map and the data plot
NewDataFrame = SQLquery(newb3s, newKiKo, newNumLigne, newStartDate, newEndDate)
events_sources.data = dict(Jour=NewDataFrame['Jour'], NumLigne=NewDataFrame['NumLigne'],
H_VA=NewDataFrame['H_VA'], H_AA=NewDataFrame['H_AA'],
H_LF=NewDataFrame['H_LF'], H_AF=NewDataFrame['H_AF'],
KiKo=NewDataFrame['KiKo'], NumParc=NewDataFrame['NumParc'],
WeekDay=NewDataFrame['WeekDay'],
TpsVA_LF=NewDataFrame['TpsVA_LF'], TpsLF_AF=NewDataFrame['TpsLF_AF'],
SpeedLF_AF=NewDataFrame['SpeedLF_AF'], SpeedVA_AF=NewDataFrame['SpeedVA_AF'],
X_VA_merc=NewDataFrame['X_VA_merc'], Y_VA_merc=NewDataFrame['Y_VA_merc'],
X_AA_merc=NewDataFrame['X_AA_merc'], Y_AA_merc=NewDataFrame['Y_AA_merc'],
X_AF_merc=NewDataFrame['X_AF_merc'], Y_AF_merc=NewDataFrame['Y_AF_merc'],
JourDatetime=NewDataFrame['JourDatetime'])
    # Update the theoretical-data DataSource used by the map
NewMeldpuntenDataFrame = Meldpuntenquery(newb3s, newKiKo, newNumLigne)
meldpunten_source.data = dict(KiKo=NewMeldpuntenDataFrame['KiKo'],
NumLigne=NewMeldpuntenDataFrame['NumLigne'],
Type_Message=NewMeldpuntenDataFrame['Type_Message'],
Priorite=NewMeldpuntenDataFrame['Priorite'],
Sens=NewMeldpuntenDataFrame['Sens'],
Type_Gestion=NewMeldpuntenDataFrame['Type_Gestion'],
Power=NewMeldpuntenDataFrame['Power'],
X_effectif_webmerc=NewMeldpuntenDataFrame['X_effectif_webmerc'],
Y_effectif_webmerc=NewMeldpuntenDataFrame['Y_effectif_webmerc'],
TpsTheorique=NewMeldpuntenDataFrame['TpsTheoriques'])
    # New source for the speed plot
vitesse_NewDF = vitesse_df(NewDataFrame)
VitesseSource.data = dict(heure=vitesse_NewDF['heure'],
Jour=vitesse_NewDF['Jour'],
rate=vitesse_NewDF['rate'])
    # Histogram data for the travel-time values
histVA_LF, edgesVA_LF = np.histogram(NewDataFrame['TpsVA_LF'], density=True, bins=range(0, 121))
histAA_LF, edgesAA_LF = np.histogram(NewDataFrame['TpsAA_LF'], density=True, bins=range(0, 121))
histLF_AF, edgesLF_AF = np.histogram(NewDataFrame['TpsLF_AF'], density=True, bins=range(0, 121))
    # Update the histograms
HistVA_LF.data_source.data["top"] = histVA_LF
HistAA_LF.data_source.data["top"] = histAA_LF
HistLF_AF.data_source.data["top"] = histLF_AF
    # Update the text table with the histogram statistics inside the travel-time histogram
label_text_lignes = ("Moyenne = \nMediane = \nEcart-type = ")
label_text_VA_LF = ("VA_LF \n" + str(NewDataFrame['TpsVA_LF'].mean())[:6] + '\n' + str(
NewDataFrame['TpsVA_LF'].median())[:6] + '\n' + str(NewDataFrame['TpsVA_LF'].std())[:6])
label_text_AA_LF = ("AA_LF \n" + str(NewDataFrame['TpsAA_LF'].mean())[:6] + '\n' + str(
NewDataFrame['TpsAA_LF'].median())[:6] + '\n' + str(NewDataFrame['TpsAA_LF'].std())[:6])
label_text_LF_AF = ("LF_AF \n" + str(NewDataFrame['TpsLF_AF'].mean())[:6] + '\n' + str(
NewDataFrame['TpsLF_AF'].median())[:6] + '\n' + str(NewDataFrame['TpsLF_AF'].std())[:6])
GlyphSource.data["text"] = [label_text_lignes, label_text_VA_LF, label_text_AA_LF, label_text_LF_AF]
#conn.commit()
def update_b3s():
    # Triggered when the selected B3S changes
newb3s = B3SSelect.value
    # Refresh the KiKo choices
NewListKiKoStr, NewListKiKo = list_kiko(newb3s)
multi_select_KiKo.value = NewListKiKo
multi_select_KiKo.options = NewListKiKo
    # Refresh the line choices
NewListlignesStr, NewListLignes = list_lignes(newb3s)
multi_select_Lignes.value = NewListLignes
multi_select_Lignes.options = NewListLignes
update_database()
controls = [multi_select_KiKo, multi_select_Lignes]
for control in controls:
control.on_change('value', lambda attr, old, new: update_database())
button_update.on_click(update_database)
B3SSelect.on_change('value', lambda attr, old, new: update_b3s())
# Initial layout
TabsDays = Tabs(tabs=[tab_vitesses, tab_points])
layout = column(
row(column(row(B3SSelect, multi_select_KiKo, multi_select_Lignes), row(DeltaDates, button_update), TabsDays),
map), row(hist))
# Initialise the data
update_database()
output_file('Hermes3.html')
curdoc().add_root(layout)
# show(layout)  # use this instead when not running under "bokeh serve"
|
StarcoderdataPython
|
6664400
|
import requests
from api import http
def test_adjust_paging_with_no_params():
target = http.Http(lambda: requests.Session())
url = "https://gitlab.com/api/v4/projects/14171783/jobs"
expected = "https://gitlab.com/api/v4/projects/14171783/jobs?per_page=20"
actual = target.__adjust_paging__(url, 20)
assert expected == actual
def test_adjust_paging_with_existing_params():
target = http.Http(lambda: requests.Session())
url = "https://gitlab.com/api/v4/projects/14171783/jobs?scope=success&scope=failed"
expected = "https://gitlab.com/api/v4/projects/14171783/jobs?scope=success&scope=failed&per_page=20"
actual = target.__adjust_paging__(url, 20)
assert expected == actual
def test_adjust_paging_with_existing_per_page_query_param():
target = http.Http(lambda: requests.Session())
url = "https://gitlab.com/api/v4/projects/14171783/jobs?scope=success&scope=failed&per_page=20"
expected = "https://gitlab.com/api/v4/projects/14171783/jobs?scope=success&scope=failed&per_page=10"
actual = target.__adjust_paging__(url, 10)
assert expected == actual
|
StarcoderdataPython
|
303701
|
<filename>solutions/week-1/ending.py<gh_stars>1-10
n = int(input())
if 0 <= n <= 1000:
if n == 0:
print(n, "программистов", sep=" ")
elif n % 100 >= 10 and n % 100 <= 20:
print(n, "программистов", sep=" ")
elif n % 10 == 1:
print(n, "программист", sep=" ")
elif n % 10 >= 2 and n % 10 <= 4:
print(n, "программиста", sep=" ")
else:
print(n, "программистов", sep=" ")
else:
print("Входные данные должны быть в интервале 0≤n≤1000")
|
StarcoderdataPython
|
9685058
|
<reponame>eBertolina/toradocu
import csv
import sys
# Check command line arguments.
if len(sys.argv) < 2 or len(sys.argv) > 3:
print("""\
This script must be invoked with the following arguments:
1. CSV file to parse;
2. (Optional) Number of missing translation to be considered to compute overall recall;
Output will be printed on the standard output.
""")
sys.exit(1)
COLUMNS = [
'CORRECT PARAM CONDITIONS',
'WRONG PARAM CONDITIONS',
'UNEXPECTED PARAM CONDITIONS',
'MISSING PARAM CONDITIONS',
'CORRECT THROWS CONDITIONS',
'WRONG THROWS CONDITIONS',
'UNEXPECTED THROWS CONDITIONS',
'MISSING THROWS CONDITIONS',
'CORRECT RETURN CONDITIONS',
'WRONG RETURN CONDITIONS',
'UNEXPECTED RETURN CONDITIONS',
'MISSING RETURN CONDITIONS',
]
# Load information from the input CSV file.
with open(sys.argv[1], 'r') as stats_file:
data = list(csv.DictReader(stats_file))
results = dict()
for column in COLUMNS:
results[column] = 0
for row in data:
for column in COLUMNS:
results[column] += int(row.get(column, 0))
correct_param = results.get('CORRECT PARAM CONDITIONS', 0)
wrong_param = results.get('WRONG PARAM CONDITIONS', 0)
unexpected_param = results.get('UNEXPECTED PARAM CONDITIONS', 0)
missing_param = results.get('MISSING PARAM CONDITIONS', 0)
correct_throws = results.get('CORRECT THROWS CONDITIONS', 0)
wrong_throws = results.get('WRONG THROWS CONDITIONS', 0)
unexpected_throws = results.get('UNEXPECTED THROWS CONDITIONS', 0)
missing_throws = results.get('MISSING THROWS CONDITIONS', 0)
correct_return = results.get('CORRECT RETURN CONDITIONS', 0)
wrong_return = results.get('WRONG RETURN CONDITIONS', 0)
unexpected_return = results.get('UNEXPECTED RETURN CONDITIONS', 0)
missing_return = results.get('MISSING RETURN CONDITIONS', 0)
param_precision = 0 if correct_param == 0 else float(correct_param) / (correct_param + wrong_param + unexpected_param)
param_recall = 0 if correct_param == 0 else float(correct_param) / (correct_param + wrong_param + missing_param)
return_precision = 0 if correct_return == 0 else float(correct_return) / (correct_return + wrong_return + unexpected_return)
return_recall = 0 if correct_return == 0 else float(correct_return) / (correct_return + wrong_return + missing_return)
throws_precision = 0 if correct_throws == 0 else float(correct_throws) / (correct_throws + wrong_throws + unexpected_throws)
throws_recall = 0 if correct_throws == 0 else float(correct_throws) / (correct_throws + wrong_throws + missing_throws)
overall_correct = correct_param + correct_return + correct_throws
additional_missing = 0 if len(sys.argv) == 2 else int(sys.argv[2])
overall_missing = missing_param + missing_return + missing_throws + additional_missing
overall_wrong = wrong_param + wrong_return + wrong_throws
overall_unexpected = unexpected_param + unexpected_return + unexpected_throws
overall_precision = 0 if overall_correct == 0 else float(overall_correct) / (overall_correct + overall_wrong + overall_unexpected)
overall_recall = 0 if overall_correct == 0 else float(overall_correct) / (overall_correct + overall_wrong + overall_missing)
fmeasure = 0 if overall_precision + overall_recall == 0 else (2 * overall_precision * overall_recall) / (overall_precision + overall_recall)
output = "{:.2f}".format(param_precision) if (correct_param + wrong_param) > 0 else "n.a." # param_precision
output += " & {:.2f} && ".format(param_recall) # param_recall
output += "{:.2f}".format(return_precision) if (correct_return + wrong_return) > 0 else "n.a." # return_precision
output += " & {:.2f} && ".format(return_recall) # return_recall
output += "{:.2f}".format(throws_precision) if (correct_throws + wrong_throws) > 0 else "n.a." # throws_precision
output += " & {:.2f} && ".format(throws_recall) # throws_recall
output += "{:.2f} & {:.2f} & {:.2f} \\\\".format(overall_precision, overall_recall, fmeasure) # overall precision, recall, and fmeasure
print(output)
|
StarcoderdataPython
|
5122743
|
<reponame>aletuf93/analogistics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def updateGlobalInventory(D_SKUs: pd.DataFrame, inventoryColumn: str):
"""
Update the global inventory of the warehouse
Args:
D_SKUs (pd.DataFrame): Input SKUs dataframe.
inventoryColumn (str): column name with the inventory.
Returns:
D_inventory (pd.DataFrame): Output DataFrame with inventory values.
"""
D_inventory = pd.DataFrame([], columns=['WH_INVENTORY_VOLUME', 'WH_INVENTORY_NORMALISED'])
givenVolumes = 0 # count the number of SKUs with a given volume
for i in range(0, len(D_SKUs)):
# i=33159
volume = D_SKUs.iloc[i]['VOLUME']
list_days = D_SKUs.iloc[i]['INVENTORY_DAYS']
# go on only if an inventory has been saved
if isinstance(list_days, list):
list_inventory = np.array(D_SKUs.iloc[i][inventoryColumn])
list_inventory = np.nan_to_num(list_inventory) # convert nan to 0
list_inventory_volume = list_inventory * volume
list_inventory_normalised = (list_inventory - min(list_inventory)) / (max(list_inventory) - min(list_inventory))
D_temp = pd.DataFrame(list_inventory_normalised, index=list_days, columns=['SKU_INVENTORY_NORMALISED'])
D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
D_inventory = D_inventory.fillna(0)
D_inventory['WH_INVENTORY_NORMALISED'] = D_inventory['WH_INVENTORY_NORMALISED'] + D_inventory['SKU_INVENTORY_NORMALISED']
D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_NORMALISED'])
if str(volume) != 'nan': # if volume is not nan
D_temp = pd.DataFrame(list_inventory_volume, index=list_days, columns=['SKU_INVENTORY_VOLUME'])
D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
D_inventory = D_inventory.fillna(0)
D_inventory['WH_INVENTORY_VOLUME'] = D_inventory['WH_INVENTORY_VOLUME'] + D_inventory['SKU_INVENTORY_VOLUME']
D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_VOLUME'])
givenVolumes = givenVolumes + 1
return D_inventory
def _cumulativeFunction(ser: pd.Series):
ser = ser.sort_values()
cum_dist = np.linspace(0., 1., len(ser))
ser_cdf = pd.Series(cum_dist, index=ser)
return ser_cdf
def inventoryAnalysis(D_global_inventory: pd.DataFrame):
"""
Perform analysis on the inventory values
Args:
D_global_inventory (pd.DataFrame): Input Dataframe with inventory values.
Returns:
dict: Output dict containing figures.
"""
output_figures = {}
# plot histogram
fig1 = plt.figure()
plt.hist(D_global_inventory['WH_INVENTORY_VOLUME'], color='orange')
plt.xlabel('Inventory values')
plt.ylabel('Frequency')
plt.title('Inventory histogram')
output_figures['INVENTORY_HIST'] = fig1
fig2 = plt.figure()
plt.hist(D_global_inventory['WH_INVENTORY_NORMALISED'], color='orange')
plt.xlabel('Normalised Inventory values')
plt.ylabel('Frequency')
plt.title('Normalised inventory histogram')
output_figures['INVENTORY_NORM_HIST'] = fig2
# plot trend
fig3 = plt.figure()
plt.plot(D_global_inventory.index, D_global_inventory['WH_INVENTORY_VOLUME'], color='orange')
plt.xlabel('Timeline')
plt.ylabel('Inventory values')
plt.title('Inventory time series')
plt.xticks(rotation=45)
output_figures['INVENTORY_TS'] = fig3
fig4 = plt.figure()
plt.plot(D_global_inventory.index, D_global_inventory['WH_INVENTORY_NORMALISED'], color='orange')
plt.xlabel('Timeline')
plt.ylabel('Normalised inventory values')
plt.title('Normalised inventory time series')
plt.xticks(rotation=45)
output_figures['INVENTORY_NORM_TS'] = fig4
# cumulative function
cdf_inventory = _cumulativeFunction(D_global_inventory['WH_INVENTORY_NORMALISED'])
fig5 = plt.figure()
cdf_inventory.plot(drawstyle='steps', color='orange')
plt.xlabel('Normalised inventory values')
plt.ylabel('Probability')
plt.title('Normalised inventory cumulative probability function')
output_figures['INVENTORY_NORM_CUM'] = fig5
cdf_inventory = _cumulativeFunction(D_global_inventory['WH_INVENTORY_VOLUME'])
fig6 = plt.figure()
cdf_inventory.plot(drawstyle='steps', color='orange')
plt.xlabel('Inventory values')
plt.ylabel('Probability')
plt.title('Inventory cumulative probability function')
output_figures['INVENTORY_CUM'] = fig6
return output_figures
def defineStockoutCurve(inventorySeries: pd.DataFrame):
"""
Define the stockout risk curve
Args:
inventorySeries (pd.DataFrame): Input Dataframe with inventory values.
Returns:
output_figure (TYPE): Output dict containing figures.
"""
output_figure = {}
# calculate the cumulative and the risk
cumulative = _cumulativeFunction(inventorySeries)
risk = 1 - cumulative
# plot the curve
fig1 = plt.figure()
plt.plot(cumulative.index, cumulative.values, drawstyle='steps', color='skyblue')
plt.plot(risk.index, risk.values, drawstyle='steps', color='orange')
plt.legend(['Cumulative distribution function', 'Stockout risk'])
plt.title("Stockout risk function")
plt.xlabel("Inventory value")
plt.ylabel("Risk or probability")
output_figure['stockout curve'] = fig1
return output_figure
|
StarcoderdataPython
|
3220581
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PROGRAMMER: <NAME>.
# DATE CREATED: 19 Nov 2020
# REVISED DATE: 13 dic 2020
# PURPOSE: A Class to analyze a dataset of numbers and apply Benford's Law, counting the frequency of the first digit
# from 1 - 9
#
# Usage:
# 1. Create a new object with the class Benford, with a numpy array or list, or
# 2. Load a dataset with the method: load_dataset(args) or
# 3. Load an image with the method: load_image(args)
# 4. Analyze the data with the method: benford_analysis(args)
# 5. Plot the results or export to a file
# 6. Export the results to a file
#
# Imports python modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
class Benford:
    ''' A Class to perform the Benford's Law analysis for the first leading digits [1-9]
    Attributes:
-----------------------
dataset : numpy array of integers with the numbers to analyzed
normalized : boolean, flag to indicate if the output is normalized with the total values or the digits frequency
digits: numpy array with digits 1 to 9
digits_count: numpy array to store the leading digits frequency
reference: The Benford´s theorical reference for the leading digits
Methods:
------------------------
__init__ : Method to initialize the attributes
load_daaset : Method to read a .csv file and load the dataset
load_image: Method to load an image and load the dataset
benford_analysis: Mehtod to analyze the leading digits using the Benford´s Law
plot : Method to plot the resuls in a bar chart
export_to_csv : Method to export the results to a .csv file
'''
def __init__(self, dataset=None, normalized=True):
"""
Function to initialize the class
Parameters:
-------------------------
dataset: numpy array, A dataset containing numbers
normalized: boolean
"""
self.dataset = dataset
self.normalized = normalized
self.digits = np.array(range(1,10))
self.digits_count = np.zeros(9)
self.reference = np.array([30.1,17.6,12.5,9.7,7.9,6.7,5.8,5.1,4.6])
return None
def load_dataset(self, dir, sep=','):
""" Function to read a dataset using pandas
Parameters:
-------------
dir: str, address of the dataset and name
Returns:
-------------
None
"""
try:
dataset = pd.read_csv(dir, sep=sep )
dataset = np.asarray(dataset)
self.dataset = dataset
except:
print('The dataset could not be read, please check the address')
#if not type(x) is int:
# raise TypeError("Only integers are allowed")
return None
def load_image(self, path):
""" Function to read an image using numpy
Parameters:
-------------
path: str, address of the image, a local file or a http address
Returns:
-------------
None
"""
try:
image = np.array(Image.open(path))
print('Image read')
self.dataset = image
except:
print('The image could not be read, please check the address')
return None
def benford_analysis(self):
""" Function to analyze a dataset using the Benford´s Law, counting the frequency of first digits
Parameters:
-------------
dataset: a numpy array containing integers
Returns:
-------------
self.digits_count: Updated with the digits frequency normalized
"""
self.dataset = np.abs(self.dataset) #Convert all values to positive
self.dataset = self.dataset[self.dataset > 0] #Remove the values with 0
numbers, counts = np.unique(self.dataset, return_counts = True) # Count the numbers in the dataset and their frequency
numerical_units = np.log10(numbers).astype(int) # Get the int part of log base 10 of the numbers to know the number of units (10s, 100s, 1000s)
first_digit = numbers//10**numerical_units # Get the first digit using numbers // 10^numerical_units
# Summarize the first digit counts
for digit in self.digits:
self.digits_count[digit-1] = counts[first_digit==digit].sum()
if self.normalized: # If normalized, get the digits probability
self.digits_count = self.digits_count/self.digits_count.sum()*100
return None
def plot(self, figsize=(12, 6)):
""" Function to plot the benford results
Parameters:
-------------
figsize = tuple, size of the figure to plot
self.digits
self.digits_count
Returns:
-------------
A bar chart display in the screen
"""
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
ax.set_title('Benford Law Analysis Results',fontsize=20)
ax.set_xlabel('Digits', fontsize=16)
if not self.normalized:
ax.set_ylabel('Freq', fontsize=16)
if self.normalized:
df = pd.DataFrame({'Benford Reference':[30.1,17.6,12.5,9.7,7.9,6.7,5.8,5.1,4.6],'P(D) Results':self.digits_count},index=self.digits)
df.plot.bar(ax=ax,grid=False, color=['teal','#5cb85c'], width=0.8)
for p in ax.patches:
value = '{:.01f}'.format(p.get_height()) # Get the value length
offset = (8 - len(value)) // 2 / 30 # Calculate the offset to center the label on the bar
ax.annotate('{:.01f}'.format(p.get_height()), (p.get_x()+offset, p.get_height()+0.1),fontsize=9, weight='bold')
else:
ax.bar(self.digits, self.digits_count, color='g')
ax.set_xticklabels(ax.get_xticklabels(), rotation=0, ha='right')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_yaxis().set_ticks([])
plt.show()
return None
def export_to_csv(self, path):
""" Function to export the benford analysis results to a file as a dataframe, including the benford reference values
Parameters:
-------------
path = str, path and filename where to save the results
Returns:
-------------
A saved .csv file
"""
df = pd.DataFrame({'Benford Reference':[30.1,17.6,12.5,9.7,7.9,6.7,5.8,5.1,4.6],'P(D) results':self.digits_count},index=self.digits)
df.index.rename('Digits',inplace=True)
try:
df.to_csv(path, header=True)
except:
print('File can not be saved, check the path')
return None
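# Minimal usage sketch following the steps listed in the header comment
# (the synthetic data and the commented-out output file name below are
# hypothetical, not part of the original module):
if __name__ == '__main__':
    analyzer = Benford(dataset=np.random.randint(1, 100000, size=5000))
    analyzer.benford_analysis()
    analyzer.plot()  # opens a matplotlib window
    # analyzer.export_to_csv('benford_results.csv')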
|
StarcoderdataPython
|
11243856
|
<gh_stars>0
import re
import time
from collections.abc import Mapping
from glob import glob
from os import makedirs
from os.path import dirname
from os.path import exists as p_exists, join as p_join, isfile as p_isfile, isdir as p_isdir, getctime
import numpy as np
from torch import load as torch_load, save as torch_save
import yaml
import logging
import logging.config
def is_file(path):
return p_exists(path) and p_isfile(path)
def is_directory(path):
return p_exists(path) and p_isdir(path)
def make_directories_for_file(path):
directory = dirname(path)
if not p_exists(directory):
makedirs(directory)
def get_htr_logger(name):
with open('../configs/logging_config.yaml', 'r') as f:
make_directories_for_file(p_join("../logs/info.log"))
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
return logging.getLogger(name)
logger = get_htr_logger(__name__)
class TimeMeasure(object):
def __init__(self, enter_msg="", exit_msg="{}.", writer=logger.debug, print_enabled=True):
self.__enter_msg = enter_msg
self.__exit_msg = exit_msg
self.__writer = writer
self.__time = None
self.__print_enabled = print_enabled
def __enter__(self):
self.__start = time.time()
if self.__print_enabled and self.__enter_msg:
self.__writer(self.__enter_msg)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.__print_enabled:
self.__writer(self.__exit_msg.format(pretty_time_interval(self.delta)))
@property
def delta(self):
delta = time.time() - self.__start
delta = int(delta * 1000)
return delta
def save_checkpoint(path, total_epochs, model, loss, environment):
make_directories_for_file(path)
dictionary = dict()
dictionary["total_epochs"] = total_epochs
dictionary["model_states"] = model.state_dict()
dictionary["loss"] = loss
dictionary["environment"] = environment.to_dict()
torch_save(dictionary, path)
logger.info(f"Saved checkpoint in epoch {total_epochs} to '{path}'.")
def load_checkpoint(path):
if is_checkpoint(path):
return torch_load(path)
else:
raise RuntimeError("Checkpoint at '{}' does not exist!".format(path))
def is_checkpoint(path: str):
return is_file(path) and path.endswith(".pt")
def load_latest_checkpoint(directory):
list_of_files = glob(p_join(directory, '*.pt'))
latest_file = max(list_of_files, key=getctime)
return load_checkpoint(latest_file)
class WordDeEnCoder(object):
def __init__(self, chars):
self.__chars = chars
self.__idx_to_char = {i: c for i, c in enumerate(chars)}
self.__char_to_idx = {v: k for k, v in self.__idx_to_char.items()}
@property
def idx_to_char(self):
return self.__idx_to_char
@property
def char_to_idx(self):
return self.__char_to_idx
def encode_words(self, words):
return [np.asarray([self.char_to_idx[letter] for letter in word]) for word in words]
def decode_word(self, encoded_word):
return "".join([self.idx_to_char[num] for num in encoded_word])
def inject(value, my_locals):
if type(value) == str and value.startswith("locals://"):
path = value.split("//")[1].split("/")
obj = my_locals[path[0]]
for i in range(1, len(path)):
obj = getattr(obj, path[i])
value = obj
return value
class FrozenDict(Mapping):
def __init__(self, *args, **kwargs):
self._d = dict(*args, **kwargs)
self._hash = None
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
def __getitem__(self, key):
return self._d[key]
def __hash__(self):
return hash(tuple(sorted(self._d.items())))
def pretty_time_interval(millis):
seconds, millis = divmod(millis, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
return f"{days}d {hours}h {minutes}min {seconds}sec {millis}ms"
def build_phrased_regex(left, right):
return re.compile(r"(?P<left>\|)?" +
r"(?P<leftCore>" + left + r")"
+ r"\|(?P<core>[a-zA-Z0-9.\s|]+)\|"
+ "(?P<rightCore>" + right + r")"
+ r"(?P<right>\|)?")
class Replacer(object):
def __init__(self):
phrased_regex = build_phrased_regex(left='"', right='"')
par_regex = build_phrased_regex(left=r'\(', right=r'\)')
self._regex_pipeline = (phrased_regex, par_regex)
self._abbreviation = re.compile(r"(\w+\.)\|(\w+)")
self._left_pipe = re.compile(r"(\|([\s!\\\"#&'()*+,\-./:;?]+))")
self._right_pipe = re.compile(r"(([\s!\\\"#&'()*+,\-./:;?]+)\|)")
def __call__(self, line):
result = line.replace("|,|", ", ")
for reg in self._regex_pipeline:
result = reg.sub(r'\g<left> \g<leftCore>\g<core>\g<rightCore>\g<right> ', result)
result = self._abbreviation.sub(r'\g<1> \g<2>', result)
result = self._abbreviation.sub(r'\g<1> \g<2>', result)
result = self._left_pipe.sub(r'\g<2>', result)
result = self._right_pipe.sub(r'\g<2>', result)
result = result.replace(") :", "):")
result = result.replace('|', ' ').replace(" ", " ")
#if result.count("-") > 1 and len(result) is 53:
# logger.error(f"{line} {result}")
return result
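# Small usage sketch (not part of the original module). Importing this module
# already requires ../configs/logging_config.yaml to exist, as in the original
# code; pretty_time_interval() itself is a pure function.
if __name__ == "__main__":
    print(pretty_time_interval(3723456))  # -> 0d 1h 2min 3sec 456ms
    with TimeMeasure(enter_msg="counting", exit_msg="done in {}.", writer=print):
        sum(range(1_000_000))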
|
StarcoderdataPython
|
3422122
|
# -*- coding: utf-8 -*-
"""
PDF document reader.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTTextLine, LTTextBox, LTFigure
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
import six
from ..doc.document import Document
from ..doc.text import Paragraph
from .base import BaseReader
from ..errors import ReaderError
class PdfReader(BaseReader):
""""""
def detect(self, fstring, fname=None):
""""""
if fname and not fname.endswith('.pdf'):
return False
return True
def _process_layout(self, layout):
"""Process an LTPage layout and return a list of elements."""
# Here we just group text into paragraphs
elements = []
for lt_obj in layout:
if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
elements.append(Paragraph(lt_obj.get_text().strip()))
elif isinstance(lt_obj, LTFigure):
# Recursive...
elements.extend(self._process_layout(lt_obj))
return elements
def parse(self, fstring):
try:
f = six.BytesIO(fstring)
parser = PDFParser(f)
document = PDFDocument(parser)
if not document.is_extractable:
raise ReaderError('PDF text extraction not allowed')
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
elements = []
for page in PDFPage.create_pages(document):
interpreter.process_page(page)
layout = device.get_result()
elements.extend(self._process_layout(layout))
return Document(*elements)
except Exception as e:
raise ReaderError(e)
# Functions to determine captions from layout analysis
#
# def get_element_type(l, el_type):
# """Return a flat list of all of one element type from a nested list of LT objects."""
# elements = []
# for el in l:
# if isinstance(el, el_type):
# elements.append(el)
# elif isinstance(el, collections.Iterable) and not isinstance(el, LTItem):
# elements.extend(get_element_type(el, el_type))
# return elements
#
#
# def pair_up(images, captions):
# """Pair up each caption with the image most likely to correspond to it."""
# pairs = []
# for cap in captions:
# possibles = []
# for im in images:
# if cap.bbox[3] < im.bbox[1] and cap.bbox[0] < im.bbox[2] and cap.bbox[2] > im.bbox[0]:
# possibles.append(im)
# if possibles:
# closest = possibles[0]
# for im in possibles:
# if get_distance(im, cap) < get_distance(closest, cap):
# closest = im
# pairs.append({'ltimage':closest,'ltcap':cap})
# return pairs
#
#
# def get_distance(fig, cap):
# """Return the distance between the top-centre of cap and the bottom-centre of fig."""
# figcen = [(fig.bbox[0]+fig.bbox[2])/2,fig.bbox[1]]
# capcen = [(cap.bbox[0]+cap.bbox[2])/2,cap.bbox[3]]
# distance = math.sqrt(pow(abs(figcen[0]-capcen[0]),2) + pow(abs(figcen[1]-capcen[1]),2))
# return distance
#
#
# def add_image_numbers(fig_caps, all_images):
# """Add the figure number and image number to each fig_cap."""
# for fig_cap in fig_caps:
# for i, im in enumerate(all_images):
# if fig_cap['ltimage'].name == im.name:
# fig_cap['imnum'] = i
# fig_num = fig_cap['ltcap'].get_text().split(None,2)[1]
# if not fig_num.isdigit():
# fig_num = re.split('\D', fig_num)[0]
# fig_cap['fignum'] = fig_num
# return fig_caps
#
#
# def remove_false_positives(fig_caps):
# """If two captions have the same figure number remove the one farthest from its image."""
# to_delete = []
# for i in range(len(fig_caps)):
# for j in range(i):
# if fig_caps[i]['fignum'] == fig_caps[j]['fignum']:
# # Check if fig_caps[i]['ltcap'] or fig_caps[j]['ltcap'] has "shows" or "illustrates"
# try:
# captext1 = fig_caps[i]['ltcap'].get_text().split(None,2)[2]
# captext2 = fig_caps[j]['ltcap'].get_text().split(None,2)[2]
# except IndexError:
# captext1 = fig_caps[i]['ltcap'].get_text()
# captext2 = fig_caps[j]['ltcap'].get_text()
# # reports, presents
# if captext1.startswith('shows') or captext1.startswith('illustrates') or captext1.startswith('displays') or captext1.startswith('reports') or captext1.startswith('presents'):
# to_delete.append(i)
# elif captext2.startswith('shows') or captext2.startswith('illustrates') or captext2.startswith('displays') or captext2.startswith('reports') or captext2.startswith('presents'):
# to_delete.append(j)
# else:
# dis1 = get_distance(fig_caps[i]['ltimage'], fig_caps[i]['ltcap'])
# dis2 = get_distance(fig_caps[j]['ltimage'], fig_caps[j]['ltcap'])
# if dis1 > dis2:
# to_delete.append(i)
# else:
# to_delete.append(j)
# fig_caps = [i for j, i in enumerate(fig_caps) if j not in to_delete]
# return fig_caps
|
StarcoderdataPython
|
122263
|
<filename>python/data_sutram/scraper/perform__.py<gh_stars>10-100
import json
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
options = webdriver.ChromeOptions()
#options.binary_location = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
chrome_driver_binary = "../chromedriver"
driver = webdriver.Chrome(chrome_driver_binary, chrome_options=options)
"""
caps = DesiredCapabilities.CHROME
caps['loggingPrefs'] = {'performance': 'ALL'}
driver = webdriver.Chrome(desired_capabilities=caps)
"""
driver.get('https://stackoverflow.com/questions/52633697/selenium-python-how-to-capture-network-traffics-response')
def process_browser_log_entry(entry):
response = json.loads(entry['message'])['message']
return response
browser_log = driver.get_log('performance')
events = [process_browser_log_entry(entry) for entry in browser_log]
events = [event for event in events if 'Network.response' in event['method']]
print(events)
|
StarcoderdataPython
|
8184965
|
<reponame>Belvarm/roguelike-tutorial
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
import graphic
from actions import Impossible
if TYPE_CHECKING:
from actions import ActionWithItem
from location import Location
from inventory import Inventory
class Item(graphic.Graphic):
render_order = 1
def __init__(self) -> None:
self.owner: Optional[Inventory] = None
self.location: Optional[Location] = None
def lift(self) -> None:
"""Remove this item from any of its containers."""
if self.owner:
self.owner.contents.remove(self)
self.owner = None
if self.location:
item_list = self.location.map.items[self.location.xy]
item_list.remove(self)
if not item_list:
del self.location.map.items[self.location.xy]
self.location = None
def place(self, location: Location) -> None:
"""Place this item on the floor at the given location."""
assert not self.location, "This item already has a location."
assert not self.owner, "Can't be placed because this item is currently owned."
self.location = location
items = location.map.items
try:
items[location.xy].append(self)
except KeyError:
items[location.xy] = [self]
def plan_activate(self, action: ActionWithItem) -> ActionWithItem:
"""Item activated as part of an action.
Assume that action has an actor which is holding this items entity.
"""
return action
def action_activate(self, action: ActionWithItem) -> None:
raise Impossible(f"You can do nothing with the {self.name}.")
def consume(self, action: ActionWithItem) -> None:
"""Remove this item from the actors inventory."""
assert action.item is self
action.item.lift()
def action_drink(self, action: ActionWithItem) -> None:
"""Drink this item."""
raise Impossible("You can't drink that.")
def action_eat(self, action: ActionWithItem) -> None:
"""Eat this item."""
raise Impossible("You can't eat that.")
|
StarcoderdataPython
|
366895
|
import pandas as pd
old = pd.read_csv('user_reviews.csv')
new = pd.read_csv('user_reviews.csv')
# if then elif else (new)
# create new column
new['qualitative_rating'] = ''
# assign 'qualitative_rating' based on 'grade' with .loc
new.loc[new.grade < 5, 'qualitative_rating'] = 'bad'
new.loc[new.grade == 5, 'qualitative_rating'] = 'ok'
new.loc[new.grade > 5, 'qualitative_rating'] = 'good'
# create column based on other column (old)
# create new column
old['len_text'] = ''
# calculate length of column value with loop
for index in old.index:
old.loc[index, 'len_text'] = len(old.loc[index, 'text'])
# create column based on other column (new)
# create new column
new['len_text'] = ''
# calculate length of column value by converting to str
new['len_text'] = new['text'].str.len()
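# A possible alternative for the qualitative_rating block above, using
# numpy.select instead of three .loc assignments (a sketch; it writes to a new
# column so the original result is left untouched):
import numpy as np
conditions = [new.grade < 5, new.grade == 5, new.grade > 5]
choices = ['bad', 'ok', 'good']
new['qualitative_rating_alt'] = np.select(conditions, choices, default='')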
|
StarcoderdataPython
|
294313
|
# -*- coding: utf-8 -*-
"""
Python Script
Created on Sunday August 2017 10:17:41
@author: <NAME>
[desc]
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
The advantages of support vector machines are:
- Effective in high dimensional spaces.
- Still effective in cases where number of dimensions is greater than the number of samples.
- Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient.
- Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.
The disadvantages of support vector machines include:
- If the number of features is much greater than the number of samples, the method is likely to give poor performances.
- SVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation.
The support vector machines in scikit-learn support both dense (numpy.ndarray and convertible to that by numpy.asarray) and sparse (any scipy.sparse) sample vectors as input. However, to use an SVM to make predictions for sparse data, it must have been fit on such data. For optimal performance, use C-ordered numpy.ndarray (dense) or scipy.sparse.csr_matrix (sparse) with dtype=float64.
[/desc]
ARGUMENTS:
----------
<inp> _data :
data </inp>
<inp>_target :
target </inp>
<inp>_features :
features </inp>
<inp>_predict :
predict </inp>
<inp>_algorithm : [optional] - [int] - [1]
Support Vector Machines Algorithm
Options:
1->Support Vector Classifier SVC
2->Linear Support Vector Classifier LinSVC
3->Nu Support Vector Classifier NuSVC
4->Support Vector Regression SVR
5->Linear Support Vector Regression LinSVR
6->Nu Support Vector Regression NuSVR
7->One Class Support Vector Machines
</inp>
<inp>_parameters_ : [optional]
Parameters of the Support Vector Machine Model, such as :
C, kernel, degree, gamma, coef0, probability, shrinking, tol, chach_size, class_weight, verbose, max_iter
,decision_function_shape and random_state</inp>
RETURN:
log_ = output log
output_ = predict Data
score_ score value
"""
import numpy as np
import pickle
_data = np.array(_data)
_target = np.array(_target)
def main( _data, _target, _features, _algorithm = 1):
''' this is the main function '''
allModels ={1:"SVC", 2:"LinearSVC", 3:"NuSVC",
4:"SVR", 5:"LinearSVR", 6:"NuSVR",
7:"OneClassSVM"}
if(_algorithm == None):
_algorithm =1
try:
exec("from sklearn.svm import "+ allModels[_algorithm]+"\n"
+ "log = "+ allModels[_algorithm]+".__doc__")
exec("model = "+allModels[_algorithm]+"()")
try:
model.fit(_data, _target)
prediction = model.predict(_predict)
try:
score = model.score(_data, _target)
except:
score = "Not defined"
except:
f1 = open(_data, 'r')
f2 = open(_target, 'r')
data1 = f1.read()
target1 = f2.read()
f1.close()
f2.close()
model.fit(pickle.loads(data1), pickle.loads(target1))
prediction = model.predict(_predict)
try:
score = model.score(pickle.loads(data1),pickle.loads(target1))
except:
score = "Not defined"
except Exception as e:
print str(e)
return [log, prediction, score]
if(_data != None and _target!= None and _predict!=None):
log_, output_, score_ = main( _data, _target, _features, _algorithm)
else:
print "Please Complete all the required data"
|
StarcoderdataPython
|
6626532
|
"""Module with all classes related to links.
Links are low level abstractions representing connections between two
interfaces.
"""
import hashlib
import json
import random
from kytos.core.common import GenericEntity
from kytos.core.exceptions import (KytosLinkCreationError,
KytosNoTagAvailableError)
from kytos.core.interface import TAGType
class Link(GenericEntity):
"""Define a link between two Endpoints."""
def __init__(self, endpoint_a, endpoint_b):
"""Create a Link instance and set its attributes.
Two kytos.core.interface.Interface are required as parameters.
"""
if endpoint_a is None:
raise KytosLinkCreationError("endpoint_a cannot be None")
if endpoint_b is None:
raise KytosLinkCreationError("endpoint_b cannot be None")
self.endpoint_a = endpoint_a
self.endpoint_b = endpoint_b
super().__init__()
def __hash__(self):
return hash(self.id)
def is_enabled(self):
"""Override the is_enabled method.
We consider a link enabled when all the interfaces are enabled.
Returns:
boolean: True if both interfaces are enabled, False otherwise.
"""
return (self._enabled and self.endpoint_a.is_enabled() and
self.endpoint_b.is_enabled())
def is_active(self):
"""Override the is_active method.
        We consider a link active when all the interfaces are active.
Returns:
            boolean: True if the interfaces are active, otherwise False.
"""
return (self._active and self.endpoint_a.is_active() and
self.endpoint_b.is_active())
def __eq__(self, other):
"""Check if two instances of Link are equal."""
return self.id == other.id
@property
def id(self): # pylint: disable=invalid-name
"""Return id from Link intance.
Returns:
string: link id.
"""
dpid_a = self.endpoint_a.switch.dpid
port_a = self.endpoint_a.port_number
dpid_b = self.endpoint_b.switch.dpid
port_b = self.endpoint_b.port_number
if dpid_a < dpid_b:
elements = (dpid_a, port_a, dpid_b, port_b)
elif dpid_a > dpid_b:
elements = (dpid_b, port_b, dpid_a, port_a)
elif port_a < port_b:
elements = (dpid_a, port_a, dpid_b, port_b)
else:
elements = (dpid_b, port_b, dpid_a, port_a)
str_id = "%s:%s:%s:%s" % elements
return hashlib.sha256(str_id.encode('utf-8')).hexdigest()
@property
def available_tags(self):
"""Return the available tags for the link.
Based on the endpoint tags.
"""
return [tag for tag in self.endpoint_a.available_tags if tag in
self.endpoint_b.available_tags]
def use_tag(self, tag):
"""Remove a specific tag from available_tags if it is there.
Deprecated: use only the get_next_available_tag method.
"""
if self.is_tag_available(tag):
self.endpoint_a.use_tag(tag)
self.endpoint_b.use_tag(tag)
return True
return False
def is_tag_available(self, tag):
"""Check if a tag is available."""
return (self.endpoint_a.is_tag_available(tag) and
self.endpoint_b.is_tag_available(tag))
def get_next_available_tag(self):
"""Return the next available tag if exists."""
# Copy the available tags because in case of error
# we will remove and add elements to the available_tags
available_tags_a = self.endpoint_a.available_tags.copy()
available_tags_b = self.endpoint_b.available_tags.copy()
random.shuffle(available_tags_a)
random.shuffle(available_tags_b)
for tag in available_tags_a:
# Tag does not exist in endpoint B. Try another tag.
if tag not in available_tags_b:
continue
# Tag already in use. Try another tag.
if not self.endpoint_a.use_tag(tag):
continue
# Tag already in use in B. Mark the tag as available again.
if not self.endpoint_b.use_tag(tag):
self.endpoint_a.make_tag_available(tag)
continue
# Tag used successfully by both endpoints. Returning.
return tag
raise KytosNoTagAvailableError(self)
def make_tag_available(self, tag):
"""Add a specific tag in available_tags."""
if not self.is_tag_available(tag):
self.endpoint_a.make_tag_available(tag)
self.endpoint_b.make_tag_available(tag)
return True
return False
def available_vlans(self):
"""Get all available vlans from each interface in the link."""
vlans_a = self._get_available_vlans(self.endpoint_a)
vlans_b = self._get_available_vlans(self.endpoint_b)
return [vlan for vlan in vlans_a if vlan in vlans_b]
@staticmethod
def _get_available_vlans(endpoint):
"""Return all vlans from endpoint."""
tags = endpoint.available_tags
return [tag for tag in tags if tag.tag_type == TAGType.VLAN]
def as_dict(self):
"""Return the Link as a dictionary."""
return {'id': self.id,
'endpoint_a': self.endpoint_a.as_dict(),
'endpoint_b': self.endpoint_b.as_dict(),
'metadata': self.get_metadata_as_dict(),
'active': self.is_active(),
'enabled': self.is_enabled()}
def as_json(self):
"""Return the Link as a JSON string."""
return json.dumps(self.as_dict())
|
StarcoderdataPython
|
12810237
|
import pytest
from flask_controller_bundle import Controller
from flask_controller_bundle.attr_constants import CONTROLLER_ROUTES_ATTR
from flask_controller_bundle.route import Route
class TestRoute:
def test_should_register_defaults_to_true(self):
route = Route('/path', lambda: 'view_func')
assert route.should_register(None) is True
def test_should_register_with_boolean(self):
route = Route('/path', lambda: 'view_func', only_if=True)
assert route.should_register(None) is True
def test_should_register_with_callable(self):
route = Route('/path', lambda: 'view_func', only_if=lambda x: x)
assert route.should_register(True) is True
assert route.should_register(False) is False
def test_full_rule_requires_rule_with_a_controller(self):
class SomeController(Controller):
def index(self):
pass
route = getattr(SomeController, CONTROLLER_ROUTES_ATTR)['index'][0]
with pytest.raises(Exception) as e:
fail = route.full_rule
assert 'not fully initialized' in str(e)
def test_full_name_with_controller(self):
class SomeController(Controller):
def index(self):
pass
route = getattr(SomeController, CONTROLLER_ROUTES_ATTR)['index'][0]
assert route.full_name == 'tests.test_route.SomeController.index'
def test_full_name_with_func(self):
def a_view():
pass
route = Route('/foo', a_view)
assert route.full_name == 'tests.test_route.a_view'
|
StarcoderdataPython
|
3261681
|
<filename>pysnmp/CISCO-SYSLOG-CAPABILITY.py
#
# PySNMP MIB module CISCO-SYSLOG-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SYSLOG-CAPABILITY
# Produced by pysmi-0.3.4 at Mon Apr 29 17:57:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
AgentCapabilities, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "ModuleCompliance", "NotificationGroup")
IpAddress, ModuleIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, MibIdentifier, iso, TimeTicks, Gauge32, Bits, ObjectIdentity, Unsigned32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "ModuleIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "MibIdentifier", "iso", "TimeTicks", "Gauge32", "Bits", "ObjectIdentity", "Unsigned32", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoSyslogCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 61))
ciscoSyslogCapability.setRevisions(('2010-01-22 14:32', '2008-08-11 00:00', '2008-06-08 00:00', '2006-10-26 00:00', '2006-05-25 00:00', '2004-02-03 00:00',))
if mibBuilder.loadTexts: ciscoSyslogCapability.setLastUpdated('201001221432Z')
if mibBuilder.loadTexts: ciscoSyslogCapability.setOrganization('Cisco Systems, Inc.')
ciscoSyslogCapCatOSV08R0101 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 61, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapCatOSV08R0101 = ciscoSyslogCapCatOSV08R0101.setProductRelease('Cisco CatOS 8.1(1).')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapCatOSV08R0101 = ciscoSyslogCapCatOSV08R0101.setStatus('current')
ciscoSyslogCapACSWV03R000 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 61, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapACSWV03R000 = ciscoSyslogCapACSWV03R000.setProductRelease('ACSW (Application Control Software) 3.0\n \n for Application Control Engine(ACE)\n\n Service Module.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapACSWV03R000 = ciscoSyslogCapACSWV03R000.setStatus('current')
ciscoSyslogCapCTSV100 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 61, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapCTSV100 = ciscoSyslogCapCTSV100.setProductRelease('Cisco TelePresence System (CTS) 1.0.0.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapCTSV100 = ciscoSyslogCapCTSV100.setStatus('current')
ciscoSyslogCapCTMV1000 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 61, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapCTMV1000 = ciscoSyslogCapCTMV1000.setProductRelease('Cisco TelePresence Manager (CTM) 1.0.0.0.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapCTMV1000 = ciscoSyslogCapCTMV1000.setStatus('current')
ciscoSyslogCapc4710aceVA1R70 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 61, 5))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapc4710aceVA1R70 = ciscoSyslogCapc4710aceVA1R70.setProductRelease('ACSW (Application Control Software) A1(7)\n for ACE 4710 Application Control Engine \n Appliance.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapc4710aceVA1R70 = ciscoSyslogCapc4710aceVA1R70.setStatus('current')
ciscoSyslogCapVqe = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 61, 6))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapVqe = ciscoSyslogCapVqe.setProductRelease('VQE 3.5 release.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSyslogCapVqe = ciscoSyslogCapVqe.setStatus('current')
mibBuilder.exportSymbols("CISCO-SYSLOG-CAPABILITY", ciscoSyslogCapCTSV100=ciscoSyslogCapCTSV100, ciscoSyslogCapability=ciscoSyslogCapability, ciscoSyslogCapACSWV03R000=ciscoSyslogCapACSWV03R000, PYSNMP_MODULE_ID=ciscoSyslogCapability, ciscoSyslogCapCTMV1000=ciscoSyslogCapCTMV1000, ciscoSyslogCapc4710aceVA1R70=ciscoSyslogCapc4710aceVA1R70, ciscoSyslogCapCatOSV08R0101=ciscoSyslogCapCatOSV08R0101, ciscoSyslogCapVqe=ciscoSyslogCapVqe)
|
StarcoderdataPython
|
9620701
|
<reponame>HereIsANiceNickname/CAS19
import logging
import os
import matplotlib.pyplot as plt
import networkx as nx
import yaml
from networkx import write_yaml
from actors import Actor
from fractories import Graphs, Actors, Distributions, Rules
log = logging.getLogger(__name__)
BASE_PATH_LAYOUT = "{}/{}.{}"
class Stats:
def __init__(self, name, path, config):
self.name = name
self.config = config
self.path = BASE_PATH_LAYOUT.format(path, name, "yml")
self.number_of_nodes = -1
self.number_of_edges = -1
self.average_orientation = -1
self.graph_density = -1
self.graph_transitivity = -1
def update(self, simulation):
actives = self.config["active"]
self.number_of_nodes = len(simulation.graph.nodes)
self.number_of_edges = len(simulation.graph.edges)
e = self.number_of_edges
n = self.number_of_nodes
if "average_orientation" in actives:
sum_of_orientation = 0
for node_a, node_b in simulation.graph.edges:
actor_a = simulation.graph.nodes[node_a]["actor"]
actor_b = simulation.graph.nodes[node_b]["actor"]
sum_of_orientation += actor_a.orientation(actor_b)
self.average_orientation = -1 if e == 0 else float((1 / e) * sum_of_orientation)
if "graph_density" in actives:
self.graph_density = nx.density(simulation.graph)
if "graph_transitivity" in actives:
self.graph_transitivity = nx.transitivity(simulation.graph)
def flush(self, n, ):
with open(self.path, "a+") as f:
data = {n: {k: self.__dict__[k] for k in self.__dict__ if k in self.config["active"]}}
yaml.dump(data=data, stream=f)
class Simulation:
def __init__(self, config):
self._config = config
self._name = self._config.get("name", "Not Defined")
self._paths = {"snapshot_root": "./snapshots/",
"picture_root": "./pictures/",
"stat_root": "./stats/"}
self._create_paths() # changes the _paths variable!!!
self._num_of_nodes = self._config.get("nodes")
self._snapshot_number = 0
self._draw_number = 0
self._stats = Stats(name=self._name, path=self._paths["stat_root"], config=self._config.get("stats"))
self.graph = self._create_graph()
self.n = 0
def _create_paths(self):
for key in self._paths:
cur_path = self._paths[key]
n = 0
if not os.path.exists(cur_path):
os.mkdir(cur_path)
test_path = "{root}{n}".format(root=cur_path, n=n)
while os.path.exists(test_path):
if not [s for s in os.listdir(test_path) if self._name in s]:
log.debug("Didn't found a File in the directory.")
break
n += 1
test_path = "{root}{n}".format(root=cur_path, n=n)
cur_path = test_path
if not os.path.exists(cur_path):
os.mkdir(cur_path)
self._paths.update({key: cur_path})
def update(self):
log.info("Update Simulation: {}".format(self._name))
actions = self._generate_action()
for action in actions:
for sub_action in action:
if sub_action["name"] is "remove_edge":
log.debug("Remove Edge")
self._remove_edge_(sub_action)
elif sub_action["name"] is "add_edge":
log.debug("Add Edge")
self._add_edge(sub_action)
elif sub_action["name"] is "add_node":
log.debug("Add Node")
self.graph.add_node()
elif sub_action["name"] is "remove_node":
log.debug("Remove Node")
self.graph.remove_node(sub_action["node_x"])
self.n += 1
def _remove_edge_(self, action):
node_x = action["node_x"]
node_z = action["node_z"]
if self.graph.has_edge(node_x, node_z):
self.graph.remove_edge(node_x, node_z)
def _add_edge(self, action):
node_x = action["node_x"]
node_z = action["node_z"]
if not self.graph.has_edge(node_x, node_z):
self.graph.add_edge(node_x, node_z)
def _generate_action(self):
result = []
for edge in self.graph.edges:
for conf in self._config.get("rules"):
result.append(Rules.create_actions_by_name(edge=edge, graph=self.graph, conf=conf))
return result
def stats(self):
log.info("Creates Stats: {}".format(self._name))
self._stats.update(self)
self._stats.flush(self.n)
def create_snapshot(self):
log.info("Creates Snapshot: {}_{}".format(self._name, self._snapshot_number))
write_yaml(self.graph, BASE_PATH_LAYOUT.format(self._paths["snapshot_root"], self._name, "yml"))
self._snapshot_number += 1
def draw(self):
log.info("Drawing Graph")
#plt.subplot(212)
#nx.draw(self.graph)
#plt.savefig(BASE_PATH_LAYOUT.format(self._paths["picture_root"],
# "_".join([self._name, str(self._draw_number)]),
# "pdf"))
self._draw_number += 1
def _create_graph(self):
return self._populate_graph(self._generate_graph())
def _generate_graph(self):
settings = self._config.get("graph").get("generator_function")
function_name = settings.get("name")
options = dict(settings.get("options", {}))
options.update({"n": self._num_of_nodes})
return Graphs.func_by_name(function_name)(**options)
def _populate_graph(self, graph):
actor_settings = self._config.get("actor")
gen_settings = actor_settings.get("generator_function")
dis_settings = actor_settings.get("distribution_function")
gen_function_name = gen_settings.get("name")
gen_options = gen_settings.get("options")
dis_function_name = dis_settings.get("name")
log.debug(dis_function_name)
dis_options = dict(dis_settings.get("options", {})) # Makes a copy because i don't want a mutable config.
dis_options.update({"graph": graph})
positions = Distributions.func_by_name(dis_function_name)(**dis_options)
actors = [Actors.func_by_name(gen_function_name)(**gen_options) for x in range(len(graph.nodes))]
log.info("Populate Graph with Actors.")
for pos, actor in zip(positions, actors):
actor = Actor(actor)
graph.nodes[pos]["actor"] = actor
return graph
|
StarcoderdataPython
|
3516271
|
EDA_URL = "https://eda.ru"
|
StarcoderdataPython
|
9663344
|
'''
Todo list schemas
'''
from pydantic import BaseModel
class ListInput(BaseModel):
'''
List model for input data
'''
list_name: str
def to_orm(self):
return dict(name=self.list_name)
class ListSchema(ListInput):
'''
List model to represent data from base
'''
list_id: int
@classmethod
def from_orm(cls, orm_obj):
return cls(list_id=orm_obj.id, list_name=orm_obj.name)
class ItemInput(BaseModel):
'''
Item model for input data
'''
todo_item_name: str
def to_orm(self):
return dict(name=self.todo_item_name)
class ItemSchema(ItemInput):
'''
Item model to represent data from base
'''
todo_item_id: int
@classmethod
def from_orm(cls, obj):
return cls(todo_item_id=obj.id, todo_item_name=obj.name)
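# Minimal usage sketch of the schemas above. `FakeListRow` is a hypothetical
# stand-in for an ORM row (any object exposing `id` and `name` works); it is
# not part of the real database layer.
if __name__ == "__main__":
    from collections import namedtuple
    payload = ListInput(list_name="groceries")
    print(payload.to_orm())  # {'name': 'groceries'}
    FakeListRow = namedtuple("FakeListRow", ["id", "name"])
    print(ListSchema.from_orm(FakeListRow(id=1, name="groceries")))  # list_id=1 list_name='groceries'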
|
StarcoderdataPython
|
1935040
|
"""This solves problem #387 of Project Euler (https://projecteuler.net).
Harshad Numbers
Problem 387
A Harshad or Niven number is a number that is divisible by the sum of its digits.
201 is a Harshad number because it is divisible by 3 (the sum of its digits.)
When we truncate the last digit from 201, we get 20, which is a Harshad number.
When we truncate the last digit from 20, we get 2, which is also a Harshad number.
Let's call a Harshad number that, while recursively truncating the last digit, always results
in a Harshad number a right truncatable Harshad number.
Also:
201/3=67 which is prime.
Let's call a Harshad number that, when divided by the sum of its digits, results in a prime a
strong Harshad number.
Now take the number 2011 which is prime.
When we truncate the last digit from it we get 201, a strong Harshad number that is also
right truncatable.
Let's call such primes strong, right truncatable Harshad primes.
You are given that the sum of the strong, right truncatable Harshad primes less than 10000 is
90619.
You are given that the sum of the strong, right truncatable Harshad primes less than 10^8 is
130459097.
Find the sum of the strong, right truncatable Harshad primes less than 10^14.
"""
from collections import defaultdict
from itertools import takewhile, count
from helpers import chronometric
from mathext import is_prime, digit_sum
EXPONENT = 14
LIMIT = 10 ** (EXPONENT - 1)
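# Worked example from the problem statement: digit_sum(201) = 3 and 201 / 3 = 67,
# which is prime, so 201 is a strong Harshad number; its truncations 20 and 2 are
# Harshad numbers too, so 201 is also right truncatable. Appending the digit 1
# gives the prime 2011, a strong, right truncatable Harshad prime that is counted
# by the search below.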
harshad_numbers = defaultdict(list)
harshad_numbers[0] += [1, 2, 3, 4, 5, 6, 7, 8, 9]
harshad_strong_right_truncatable_primes = []
def right_truncatable_harshad_numbers():
for ex in takewhile(lambda x: x <= EXPONENT, count(start=1)):
for h in harshad_numbers[ex - 1]:
k = 10 * h
for d in range(10):
candidate = k + d
if not (candidate % digit_sum(candidate)):
harshad_numbers[ex].append(candidate)
yield candidate
def is_strong(h):
    return is_prime(h // digit_sum(h))  # h is a Harshad number, so this division is exact
@chronometric
def sum_strong_right_truncatable_harshad_primes():
result = 0
for h in takewhile(lambda x: x < LIMIT, right_truncatable_harshad_numbers()):
if is_strong(h):
for d in range(1, 10, 2):
candidate = 10 * h + d
if is_prime(candidate):
result += candidate
return result
def run_application():
solution, elapsed = sum_strong_right_truncatable_harshad_primes()
print('Solution =', solution)
print('Runtime =', elapsed, 'seconds')
if __name__ == '__main__':
run_application()
# last line of code
|
StarcoderdataPython
|
6465264
|
<reponame>BrentG-1849260/PyVoxelizer
import sys
import os
import numpy as np
from ctypes import cdll, Structure, c_float
class Point3(Structure):
_fields_ = [
("x", c_float),
("y", c_float),
("z", c_float)
]
class Triangle3(Structure):
_fields_ = [
("v1", Point3),
("v2", Point3),
("v3", Point3)
]
triangle_lib = None
script_dir = os.path.dirname(os.path.realpath(__file__))
try:
if sys.platform.startswith('linux') and sys.maxsize == 9223372036854775807:
file_path_library = os.path.join(script_dir, 'triangleCube_linux64.so')
if os.path.exists(file_path_library):
triangle_lib = cdll.LoadLibrary(file_path_library)
elif sys.platform.startswith("win") and sys.maxsize == 2147483647:
file_path_library = os.path.join(script_dir, 'triangleCube_win32.so')
if os.path.exists(file_path_library):
triangle_lib = cdll.LoadLibrary(file_path_library)
except OSError:
triangle_lib = None
"""
Code conversion into python from:
'https://github.com/erich666/GraphicsGems/blob/master/gemsiii/triangleCube.c'
"""
INSIDE = 0
OUTSIDE = 1
EPS = 1e-5
# EPS = 0.0
# print(EPS)
def cross_product(a, b):
return (
a[1] * b[2] - a[2] * b[1],
-a[0] * b[2] + a[2] * b[0],
a[0] * b[1] - a[1] * b[0])
def sign3(point):
sign_code = 0
if point[0] < EPS:
sign_code |= 4
if point[0] > -EPS:
sign_code |= 32
if point[1] < EPS:
sign_code |= 2
if point[1] > -EPS:
sign_code |= 16
if point[2] < EPS:
sign_code |= 1
if point[2] > -EPS:
sign_code |= 8
return sign_code
def lerp(alpha, a, b):
return a + alpha * (b - a)
class Triangle(object):
"""
@type v1: numpy.ndarray
@type v2: numpy.ndarray
@type v3: numpy.ndarray
"""
def __init__(self):
"""
"""
self.v1 = 0
self.v2 = 0
self.v3 = 0
def set(self, vertex_1, vertex_2, vertex_3):
"""
@type vertex_1: numpy.ndarray
@type vertex_2: numpy.ndarray
@type vertex_3: numpy.ndarray
"""
self.v1 = vertex_1
self.v2 = vertex_2
self.v3 = vertex_3
def min(self, index):
if self.v1[index] < self.v2[index] and self.v1[index] < self.v3[index]:
return self.v1[index]
elif self.v2[index] < self.v3[index]:
return self.v2[index]
else:
return self.v3[index]
def max(self, index):
if self.v1[index] > self.v2[index] and self.v1[index] > self.v3[index]:
return self.v1[index]
elif self.v2[index] > self.v3[index]:
return self.v2[index]
else:
return self.v3[index]
def vertexes_to_c_triangle(vertex_1, vertex_2, vertex_3):
return Triangle3(
Point3(vertex_1[0], vertex_1[1], vertex_1[2]),
Point3(vertex_2[0], vertex_2[1], vertex_2[2]),
Point3(vertex_3[0], vertex_3[1], vertex_3[2])
)
def face_plane(point):
"""
Which of the six face-plane(s) is point P outside of?
@type point: numpy.ndarray | (float, float, float)
"""
face_plane_code = 0
if point[0] >= .5:
face_plane_code |= 0x01
if point[0] < -.5:
face_plane_code |= 0x02
if point[1] >= .5:
face_plane_code |= 0x04
if point[1] < -.5:
face_plane_code |= 0x08
if point[2] >= .5:
face_plane_code |= 0x10
if point[2] < -.5:
face_plane_code |= 0x20
return face_plane_code
def bevel_2d(point):
"""
Which of the twelve edge plane(s) is point P outside of?
"""
edge_plane_code = 0
if point[0] + point[1] >= 1.0:
edge_plane_code |= 0x001
if point[0] - point[1] >= 1.0:
edge_plane_code |= 0x002
if -point[0] + point[1] > 1.0:
edge_plane_code |= 0x004
if -point[0] - point[1] > 1.0:
edge_plane_code |= 0x008
if point[0] + point[2] >= 1.0:
edge_plane_code |= 0x010
if point[0] - point[2] >= 1.0:
edge_plane_code |= 0x020
if -point[0] + point[2] > 1.0:
edge_plane_code |= 0x040
if -point[0] - point[2] > 1.0:
edge_plane_code |= 0x080
if point[1] + point[2] >= 1.0:
edge_plane_code |= 0x100
if point[1] - point[2] >= 1.0:
edge_plane_code |= 0x200
if -point[1] + point[2] > 1.0:
edge_plane_code |= 0x400
if -point[1] - point[2] > 1.0:
edge_plane_code |= 0x800
return edge_plane_code
def bevel_3d(point):
"""
Which of the eight corner plane(s) is point P outside of?
"""
corner_plane_code = 0
if (point[0] + point[1] + point[2]) >= 1.5:
corner_plane_code |= 0x01
if (point[0] + point[1] - point[2]) >= 1.5:
corner_plane_code |= 0x02
if (point[0] - point[1] + point[2]) >= 1.5:
corner_plane_code |= 0x04
if (point[0] - point[1] - point[2]) >= 1.5:
corner_plane_code |= 0x08
if (-point[0] + point[1] + point[2]) > 1.5:
corner_plane_code |= 0x10
if (-point[0] + point[1] - point[2]) > 1.5:
corner_plane_code |= 0x20
if (-point[0] - point[1] + point[2]) > 1.5:
corner_plane_code |= 0x40
if (-point[0] - point[1] - point[2]) > 1.5:
corner_plane_code |= 0x80
return corner_plane_code
def check_point(point_a, point_b, alpha, mask):
"""
Test the point "alpha" of the way from P1 to P2
See if it is on a face of the cube
Consider only faces in "mask"
"""
plane_point_x = lerp(alpha, point_a[0], point_b[0])
plane_point_y = lerp(alpha, point_a[1], point_b[1])
plane_point_z = lerp(alpha, point_a[2], point_b[2])
plane_point = (plane_point_x, plane_point_y, plane_point_z)
return face_plane(plane_point) & mask
def check_line(point_a, point_b, outcode_diff):
"""
/* Compute intersection of P1 --> P2 line segment with face planes */
/* Then test intersection point to see if it is on cube face */
/* Consider only face planes in "outcode_diff" */
/* Note: Zero bits in "outcode_diff" means face line is outside of */
"""
if (0x01 & outcode_diff) != 0:
if check_point(point_a, point_b, (0.5 - point_a[0])/(point_b[0] - point_a[0]), 0x3e) == INSIDE:
return INSIDE
if (0x02 & outcode_diff) != 0:
if check_point(point_a, point_b, (-0.5 - point_a[0])/(point_b[0] - point_a[0]), 0x3d) == INSIDE:
return INSIDE
if (0x04 & outcode_diff) != 0:
if check_point(point_a, point_b, (0.5 - point_a[1])/(point_b[1] - point_a[1]), 0x3b) == INSIDE:
return INSIDE
if (0x08 & outcode_diff) != 0:
if check_point(point_a, point_b, (-0.5 - point_a[1])/(point_b[1] - point_a[1]), 0x37) == INSIDE:
return INSIDE
if (0x10 & outcode_diff) != 0:
if check_point(point_a, point_b, (0.5 - point_a[2])/(point_b[2] - point_a[2]), 0x2f) == INSIDE:
return INSIDE
if (0x20 & outcode_diff) != 0:
if check_point(point_a, point_b, (-0.5 - point_a[2])/(point_b[2] - point_a[2]), 0x1f) == INSIDE:
return INSIDE
return OUTSIDE
def point_triangle_intersection(p, t):
"""
Test if 3D point is inside 3D triangle
@type p: list[float]
@type t: Triangle
"""
# /* First, a quick bounding-box test: */
# /* If P is outside triangle bbox, there cannot be an intersection. */
# add/sub EPS as buffer to avoid an floating point issue
if p[0] > t.max(0) + EPS:
return OUTSIDE
if p[1] > t.max(1) + EPS:
return OUTSIDE
if p[2] > t.max(2) + EPS:
return OUTSIDE
if p[0] < t.min(0) - EPS:
return OUTSIDE
if p[1] < t.min(1) - EPS:
return OUTSIDE
if p[2] < t.min(2) - EPS:
return OUTSIDE
# /* For each triangle side, make a vector out of it by subtracting vertexes; */
# /* make another vector from one vertex to point P. */
# /* The crossproduct of these two vectors is orthogonal to both and the */
# /* signs of its X,Y,Z components indicate whether P was to the inside or */
# /* to the outside of this triangle side. */
vect12 = np.subtract(t.v1, t.v2)
vect1h = np.subtract(t.v1, p)
cross12_1p = cross_product(vect12, vect1h)
sign12 = sign3(cross12_1p) # /* Extract X,Y,Z signs as 0..7 or 0...63 integer */
vect23 = np.subtract(t.v2, t.v3)
vect2h = np.subtract(t.v2, p)
cross23_2p = cross_product(vect23, vect2h)
sign23 = sign3(cross23_2p)
vect31 = np.subtract(t.v3, t.v1)
vect3h = np.subtract(t.v3, p)
cross31_3p = cross_product(vect31, vect3h)
sign31 = sign3(cross31_3p)
# /* If all three cross product vectors agree in their component signs, */
# /* then the point must be inside all three. */
# /* P cannot be OUTSIDE all three sides simultaneously. */
if (sign12 & sign23 & sign31) == 0:
return OUTSIDE
return INSIDE
def t_c_intersection(triangle):
"""
/**********************************************/
/* This is the main algorithm procedure. */
/* Triangle t is compared with a unit cube, */
/* centered on the origin. */
/* It returns INSIDE (0) or OUTSIDE(1) if t */
/* intersects or does not intersect the cube. */
/**********************************************/
@type triangle: Triangle
"""
# long v1_test,v2_test,v3_test;
# float d,denom;
# Point3 vect12,vect13,norm;
# Point3 hitpp,hitpn,hitnp,hitnn;
# /* First compare all three vertexes with all six face-planes */
# /* If any vertex is inside the cube, return immediately! */
v1_test = face_plane(triangle.v1)
v2_test = face_plane(triangle.v2)
v3_test = face_plane(triangle.v3)
if v1_test == INSIDE:
return INSIDE
if v2_test == INSIDE:
return INSIDE
if v3_test == INSIDE:
return INSIDE
# /* If all three vertexes were outside of one or more face-planes, */
# /* return immediately with a trivial rejection! */
if (v1_test & v2_test & v3_test) != INSIDE:
return OUTSIDE
# /* Now do the same trivial rejection test for the 12 edge planes */
v1_test |= bevel_2d(triangle.v1) << 8
v2_test |= bevel_2d(triangle.v2) << 8
v3_test |= bevel_2d(triangle.v3) << 8
if (v1_test & v2_test & v3_test) != INSIDE:
return OUTSIDE
# /* Now do the same trivial rejection test for the 8 corner planes */
v1_test |= bevel_3d(triangle.v1) << 24
v2_test |= bevel_3d(triangle.v2) << 24
v3_test |= bevel_3d(triangle.v3) << 24
if (v1_test & v2_test & v3_test) != INSIDE:
return OUTSIDE
# /* If vertex 1 and 2, as a pair, cannot be trivially rejected */
# /* by the above tests, then see if the v1-->v2 triangle edge */
# /* intersects the cube. Do the same for v1-->v3 and v2-->v3. */
# /* Pass to the intersection algorithm the "OR" of the outcode */
# /* bits, so that only those cube faces which are spanned by */
# /* each triangle edge need be tested. */
if (v1_test & v2_test) == 0:
if check_line(triangle.v1, triangle.v2, v1_test | v2_test) == INSIDE:
return INSIDE
if (v1_test & v3_test) == 0:
if check_line(triangle.v1, triangle.v3, v1_test | v3_test) == INSIDE:
return INSIDE
if (v2_test & v3_test) == 0:
if check_line(triangle.v2, triangle.v3, v2_test | v3_test) == INSIDE:
return INSIDE
# /* By now, we know that the triangle is not off to any side, */
# /* and that its sides do not penetrate the cube. We must now */
# /* test for the cube intersecting the interior of the triangle. */
# /* We do this by looking for intersections between the cube */
# /* diagonals and the triangle...first finding the intersection */
# /* of the four diagonals with the plane of the triangle, and */
# /* then if that intersection is inside the cube, pursuing */
# /* whether the intersection point is inside the triangle itself. */
# /* To find plane of the triangle, first perform crossproduct on */
# /* two triangle side vectors to compute the normal vector. */
vect12 = np.subtract(triangle.v1, triangle.v2)
vect13 = np.subtract(triangle.v1, triangle.v3)
norm = cross_product(vect12, vect13)
# /* The normal vector "norm" X,Y,Z components are the coefficients */
# /* of the triangles AX + BY + CZ + D = 0 plane equation. If we */
# /* solve the plane equation for X=Y=Z (a diagonal), we get */
# /* -D/(A+B+C) as a metric of the distance from cube center to the */
# /* diagonal/plane intersection. If this is between -0.5 and 0.5, */
# /* the intersection is inside the cube. If so, we continue by */
# /* doing a point/triangle intersection. */
# /* Do this for all four diagonals. */
d = norm[0] * triangle.v1[0] + norm[1] * triangle.v1[1] + norm[2] * triangle.v1[2]
# /* if one of the diagonals is parallel to the plane, the other will intersect the plane */
denom = norm[0] + norm[1] + norm[2]
hitpp = [0.0, 0.0, 0.0]
if abs(denom) > EPS:
# /* skip parallel diagonals to the plane; division by 0 can occur */
hitpp[0] = hitpp[1] = hitpp[2] = d / denom
if abs(hitpp[0]) <= 0.5:
if point_triangle_intersection(hitpp, triangle) == INSIDE:
return INSIDE
denom = norm[0] + norm[1] - norm[2]
hitpn = [0.0, 0.0, 0.0]
if abs(denom) > EPS:
hitpn[0] = hitpn[1] = d / denom
hitpn[2] = -hitpn[0]
if abs(hitpn[0]) <= 0.5:
if point_triangle_intersection(hitpn, triangle) == INSIDE:
return INSIDE
denom = norm[0] - norm[1] + norm[2]
hitnp = [0.0, 0.0, 0.0]
if abs(denom) > EPS:
hitnp[0] = hitnp[2] = d / denom
hitnp[1] = -hitnp[0]
if abs(hitnp[0]) <= 0.5:
if point_triangle_intersection(hitnp, triangle) == INSIDE:
return INSIDE
denom = norm[0] - norm[1] - norm[2]
hitnn = [0.0, 0.0, 0.0]
if abs(denom) > EPS:
hitnn[0] = d / denom
hitnn[1] = hitnn[2] = -hitnn[0]
if abs(hitnn[0]) <= 0.5:
if point_triangle_intersection(hitnn, triangle) == INSIDE:
return INSIDE
# /* No edge touched the cube; no cube diagonal touched the triangle. */
# /* We're done...there was no intersection. */
return OUTSIDE
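# Small self-contained check of the pure-Python routines above (triangle_lib is
# not used). The vertices are illustrative: this triangle passes through the
# origin, so it should intersect the unit cube centred on the origin and
# t_c_intersection is expected to return INSIDE (0).
if __name__ == '__main__':
    _tri = Triangle()
    _tri.set(np.array([-1.0, -1.0, 0.0]),
             np.array([1.0, -1.0, 0.0]),
             np.array([0.0, 1.0, 0.0]))
    print(t_c_intersection(_tri))  # 0 == INSIDE, 1 == OUTSIDE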
|
StarcoderdataPython
|
11236434
|
def reverse_number(x):
"""
:type x: int
    :rtype: int, the digits of x in reversed order (assumes a non-negative x)
    """
    reverse = 0
    while x != 0:
        remainder = x % 10
        reverse = reverse * 10 + remainder
        x //= 10
    return reverse
print(f'The reversed number is: {reverse_number(123)}')
|
StarcoderdataPython
|
11306256
|
import unittest
from evo_tests.examples import ExampleLOC, ExampleLOTC, ExampleConfigurations, ExampleDealers
from evo_json.convert_py_json.convert_trait import convert_from_pj_loc
from evo_json.process_json.process_configuration import convert_config_to_dealer, convert_dealer_to_config
class TestConvert(unittest.TestCase):
def setUp(self):
self.ex_loc = ExampleLOC()
self.ex_lotc = ExampleLOTC()
self.ex_config = ExampleConfigurations()
self.ex_dealer = ExampleDealers()
def test_convert_loc(self):
self.assertEqual(convert_from_pj_loc(self.ex_loc.loc1), self.ex_lotc.lotc1)
def test_convert_to_config(self):
self.assertEqual(convert_dealer_to_config(self.ex_dealer.dealer_all_veg), self.ex_config.config)
def test_convert_from_config(self):
self.assertEqual(convert_config_to_dealer(self.ex_config.config), self.ex_dealer.dealer_all_veg)
|
StarcoderdataPython
|
12835310
|
#!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/nagi1995/sarcastic-comment-detection/blob/main/Sarcastic_Comments.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# In[1]:
from google.colab import drive
drive.mount('/content/drive')
# In[2]:
get_ipython().system('ln -s /content/drive/MyDrive /mygdrive')
# In[3]:
get_ipython().system('ls /mygdrive')
# In[4]:
get_ipython().system('cp /mygdrive/Sarcasm_Headlines_Dataset_v2.json ./')
get_ipython().system('cp /mygdrive/Sarcasm_Headlines_Dataset.json ./')
# # Import Libraries
# In[38]:
import pandas as pd
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import pyplot as plt
import seaborn as sns
import re
from collections import Counter
from wordcloud import WordCloud, STOPWORDS
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, confusion_matrix, auc, accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
import pickle
import cv2
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.callbacks import *
from tensorflow.keras import Model, Input, Sequential
from datetime import datetime
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import *
from tensorflow.keras.utils import plot_model
from google.colab.patches import cv2_imshow
from tqdm import tqdm
# In[1]:
from prettytable import PrettyTable
# In[6]:
tf.__version__, xgb.__version__, cv2.__version__, hub.__version__
# # Load data
# In[7]:
test = pd.read_json("Sarcasm_Headlines_Dataset.json", lines=True)
test.head()
# In[8]:
test.info()
# In[9]:
train = pd.read_json("Sarcasm_Headlines_Dataset_v2.json", lines=True)
train.head()
# In[10]:
train.info()
# In[11]:
plt.figure()
sns.countplot(data = train, x = "is_sarcastic")
plt.title("Class distribution")
plt.show()
# In[12]:
def length(phrase):
return len(phrase.split())
# In[13]:
train["length"] = train["headline"].apply(length)
train.head()
# In[14]:
plt.figure()
sns.displot(data = train, x = "length", kde = True)
plt.title("distribution of number of words in headlines")
plt.show()
# In[15]:
for i in [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]:
print("{0}th percentile is {1}".format(i, np.percentile(train["length"], i)))
print()
for i in [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]:
print("{0}th percentile is {1}".format(i, np.percentile(train["length"], i)))
print()
for i in [99, 99.10, 99.20, 99.30, 99.40, 99.50, 99.60, 99.70, 99.80, 99.90]:
print("{0}th percentile is {1}".format(i, np.percentile(train["length"], i)))
print()
# In[16]:
# Reference: https://stackoverflow.com/a/47091490/6645883
def decontracted(phrase):
# specific
phrase = re.sub(r"won\'t", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase.lower()
# In[17]:
train["headline"] = train["headline"].apply(decontracted)
test["headline"] = test["headline"].apply(decontracted)
# In[18]:
# Reference: # https://www.geeksforgeeks.org/generating-word-cloud-python/
def wordcloud_plot(df):
comment_words = ""
stopwords = set(STOPWORDS)
# iterate through the csv file
for val in df.headline:
# typecaste each val to string
val = str(val)
# split the value
tokens = val.split()
# Converts each token into lowercase
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
comment_words += " ".join(tokens)+" "
wordcloud = WordCloud(width = 800, height = 800,
background_color = "white",
stopwords = stopwords,
min_font_size = 10).generate(comment_words)
# plot the WordCloud image
plt.figure(figsize = (8, 8), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
# In[56]:
wordcloud_plot(train)
# In[57]:
wordcloud_plot(test)
# # BoW
# In[19]:
vectorizer = CountVectorizer(min_df = 10, max_df = 5000, ngram_range = (1, 3))
vectorizer.fit(train["headline"])
x_train = vectorizer.transform(train["headline"])
x_test = vectorizer.transform(test["headline"])
y_train = train["is_sarcastic"]
y_test = test["is_sarcastic"]
x_train.shape, x_test.shape
# ### Logistic Regression
# In[17]:
model = LogisticRegression(n_jobs = -1)
params = {"C" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[18]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_C'])
results.head()
# In[19]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_C']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[73]:
model = LogisticRegression(C = 1, max_iter = 200)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Naive Bayes
# In[83]:
model = MultinomialNB(class_prior = [.5, .5])
params = {"alpha" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[84]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_alpha'])
results.head()
# In[85]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_alpha']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("alpha: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[21]:
model = MultinomialNB(alpha = .033)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Random Forest
# In[94]:
get_ipython().run_cell_magic('time', '', '\nmodel = RandomForestClassifier()\nparams = {"n_estimators" : [10, 50, 100, 150]}\n\ngridsearch = GridSearchCV(model, params, \n cv = 5, scoring = "accuracy", \n return_train_score = True, \n verbose = 1, n_jobs = -1)\ngridsearch.fit(x_train, y_train)')
# In[96]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_n_estimators'])
results.head()
# In[98]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_n_estimators']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.legend()
plt.xlabel("number of trees: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[23]:
model = RandomForestClassifier(n_estimators = 50)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### GBDT
# In[131]:
model = xgb.XGBClassifier(verbosity = 1, use_label_encoder = False)
params = {"n_estimators" : [10, 50, 100, 150],
"max_depth" : [4, 8, 16, 32]}
gridsearch = GridSearchCV(model, params,
cv = 5, scoring = "accuracy",
return_train_score = True,
verbose = 1, n_jobs = -1)
gridsearch.fit(x_train, y_train)
# In[132]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results.head()
# In[133]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_train_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("train accuracy in heatmap")
plt.show()
# In[134]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_test_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("cv accuracy in heatmap")
plt.show()
# In[24]:
model = xgb.XGBClassifier(n_estimators = 150, max_depth = 32, verbosity = 1, use_label_encoder = False)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# # TFIDF
# In[25]:
vectorizer = TfidfVectorizer(min_df = 10, max_df = 5000, ngram_range = (1, 3))
vectorizer.fit(train["headline"])
x_train = vectorizer.transform(train["headline"])
x_test = vectorizer.transform(test["headline"])
y_train = train["is_sarcastic"]
y_test = test["is_sarcastic"]
x_train.shape, x_test.shape
# ### Logistic Regression
# In[109]:
model = LogisticRegression(n_jobs = -1)
params = {"C" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[110]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_C'])
results.head()
# In[111]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_C']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[26]:
model = LogisticRegression(C = 3.3, max_iter = 200)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Naive Bayes
# In[115]:
model = MultinomialNB(class_prior = [.5, .5])
params = {"alpha" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[116]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_alpha'])
results.head()
# In[117]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_alpha']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("alpha: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[27]:
model = MultinomialNB(alpha = .01)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Random Forest
# In[121]:
get_ipython().run_cell_magic('time', '', '\nmodel = RandomForestClassifier()\nparams = {"n_estimators" : [10, 50, 100, 150]}\n\ngridsearch = GridSearchCV(model, params, \n cv = 5, scoring = "accuracy", \n return_train_score = True, \n verbose = 1, n_jobs = -1)\ngridsearch.fit(x_train, y_train)')
# In[122]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_n_estimators'])
results.head()
# In[123]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_n_estimators']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.legend()
plt.xlabel("number of trees: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[28]:
model = RandomForestClassifier(n_estimators = 50)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### GBDT
# In[125]:
model = xgb.XGBClassifier(verbosity = 1, use_label_encoder = False)
params = {"n_estimators" : [10, 50, 100, 150],
"max_depth" : [4, 8, 16, 32]}
gridsearch = GridSearchCV(model, params,
cv = 5, scoring = "accuracy",
return_train_score = True,
verbose = 1, n_jobs = -1)
gridsearch.fit(x_train, y_train)
# In[126]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results.head()
# In[127]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_train_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("train accuracy in heatmap")
plt.show()
# In[128]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_test_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("cv accuracy in heatmap")
plt.show()
# In[29]:
model = xgb.XGBClassifier(n_estimators = 150, max_depth = 32, verbosity = 1, use_label_encoder = False)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# # Deep learning
# In[50]:
label_encoder = OneHotEncoder()
label_encoder.fit(np.array(train["is_sarcastic"]).reshape(-1, 1))
y_train_ohe = label_encoder.transform(np.array(train["is_sarcastic"]).reshape(-1, 1))
y_test_ohe = label_encoder.transform(np.array(test["is_sarcastic"]).reshape(-1, 1))
y_train_ohe.shape, y_test_ohe.shape
# In[51]:
with open("/mygdrive/glove_vectors", "rb") as fi:
glove_model = pickle.load(fi)
glove_words = set(glove_model.keys())
# In[52]:
t = Tokenizer()
t.fit_on_texts(train["headline"])
encoded_train = t.texts_to_sequences(train["headline"])
encoded_test = t.texts_to_sequences(test["headline"])
max_length = 25
padded_train = pad_sequences(encoded_train,
maxlen = max_length,
padding = "post",
truncating = "post")
padded_test = pad_sequences(encoded_test,
maxlen = max_length,
padding = "post",
truncating = "post")
print(padded_train.shape, padded_test.shape, type(padded_train))
vocab_size = len(t.word_index) + 1
vocab_size
# In[53]:
embedding_matrix = np.zeros((vocab_size, 300)) # vector len of each word is 300
for word, i in t.word_index.items():
if word in glove_words:
vec = glove_model[word]
embedding_matrix[i] = vec
embedding_matrix.shape
# ### callbacks
# In[26]:
get_ipython().run_line_magic('load_ext', 'tensorboard')
# In[54]:
def checkpoint_path():
return "./model/weights.{epoch:02d}-{val_accuracy:.4f}.hdf5"
def log_dir():
return "./logs/fit/" + datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
earlystop = EarlyStopping(monitor = "val_accuracy",
patience = 7,
verbose = 1,
restore_best_weights = True,
mode = 'max')
reduce_lr = ReduceLROnPlateau(monitor = "val_accuracy",
factor = .4642,
patience = 3,
verbose = 1,
min_delta = 0.001,
mode = 'max')
# ### model building
# In[55]:
tf.keras.backend.clear_session()
input = Input(shape = (max_length, ), name = "input")
embedding = Embedding(input_dim = vocab_size,
output_dim = 300, # glove vector size
weights = [embedding_matrix],
trainable = False)(input)
lstm = LSTM(32)(embedding)
flatten = Flatten()(lstm)
dense = Dense(16, activation = None,
kernel_initializer = "he_uniform")(flatten)
dropout = Dropout(.25)(dense)
activation = Activation("relu")(dropout)
output = Dense(2, activation = "softmax", name = "output")(activation)
model = Model(inputs = input, outputs = output)
model.compile(optimizer = "adam", loss = "sparse_categorical_crossentropy", metrics = ["accuracy"])
plot_model(model, to_file = "./model.png", show_shapes = True)
model.summary()
# In[56]:
cv2_imshow(cv2.imread("./model.png"))
# In[57]:
get_ipython().system('rm -rf ./logs/')
get_ipython().run_line_magic('tensorboard', '--logdir logs/fit')
# ### training model
# In[58]:
tensorboard_callback = TensorBoard(log_dir = log_dir(),
histogram_freq = 1,
write_images = True)
checkpoint = ModelCheckpoint(filepath = checkpoint_path(),
monitor='val_accuracy',
verbose = 1,
save_best_only = True,
mode = "max")
callbacks_list = [checkpoint, tensorboard_callback, earlystop, reduce_lr]
history = model.fit(padded_train, y_train,
validation_data = (padded_test, y_test),
epochs = 30,
batch_size = 32,
callbacks = callbacks_list)
# In[59]:
plt.figure()
L = len(history.history["loss"]) + 1
plt.plot(range(1, L), history.history["loss"], "bo-", label = "loss")
plt.plot(range(1, L), history.history["accuracy"], "g*-", label = "accuracy")
plt.plot(range(1, L), history.history["val_loss"], "y^-", label = "val_loss")
plt.plot(range(1, L), history.history["val_accuracy"], "ro-", label = "val_accuracy")
plt.legend()
plt.xlabel("epoch")
plt.grid()
plt.show()
# ### testing model
# In[60]:
y_pred_softmax = model.predict(padded_test)
y_pred = []
for i in range(len(y_pred_softmax)):
if y_pred_softmax[i][0] >= 0.5:
y_pred.append(0)
else:
y_pred.append(1)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# # BERT encodings
# ### creating BERT model
# In[20]:
max_length = 27
# In[21]:
tf.keras.backend.clear_session()
input_word_ids = Input(shape = (max_length,), dtype = tf.int32, name = "input_word_ids")
input_mask = Input(shape = (max_length,), dtype = tf.int32, name = "input_mask")
segment_ids = Input(shape = (max_length,), dtype = tf.int32, name = "segment_ids")
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1", trainable = False)
pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
bert_model = Model(inputs = [input_word_ids, input_mask, segment_ids], outputs = pooled_output)
# In[22]:
bert_model.summary()
# In[23]:
bert_model.output
# ### tokenization
# In[24]:
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
# In[25]:
get_ipython().system('pip install sentencepiece')
from tokenization import FullTokenizer
# In[26]:
tokenizer = FullTokenizer(vocab_file, do_lower_case)
# In[27]:
def my_tokens_util(series, max_length):
x_tokens = np.zeros((series.shape[0], max_length))
x_mask = np.ones((series.shape[0], max_length))
x_segment = np.zeros((series.shape[0], max_length))
for i in range(series.shape[0]):
        tokens = tokenizer.tokenize(series.values[i])  # tokenize the i-th entry of the series
if len(tokens) >= max_length - 2:
tokens = tokens[: (max_length - 2)]
tokens = ["[CLS]", *tokens, "[SEP]"]
pe_tokens = np.array(tokenizer.convert_tokens_to_ids(tokens))
length = len(tokens)
if length >= max_length:
x_tokens[i] = pe_tokens[:max_length]
else:
x_tokens[i, :length] = pe_tokens
x_mask[i, length:] = list(np.zeros(max_length - length))
return np.array(series), x_tokens, x_mask, x_segment
# In[28]:
X_train, X_train_tokens, X_train_mask, X_train_segment = my_tokens_util(train["headline"], max_length)
X_test, X_test_tokens, X_test_mask, X_test_segment = my_tokens_util(test["headline"], max_length)
# In[29]:
pickle.dump((X_train, X_train_tokens, X_train_mask, X_train_segment, y_train),open('/mygdrive/train_data.pkl','wb'))
pickle.dump((X_test, X_test_tokens, X_test_mask, X_test_segment, y_test),open('/mygdrive/test_data.pkl','wb'))
# In[30]:
X_train, X_train_tokens, X_train_mask, X_train_segment, y_train = pickle.load(open("/mygdrive/train_data.pkl", 'rb'))
X_test, X_test_tokens, X_test_mask, X_test_segment, y_test = pickle.load(open("/mygdrive/test_data.pkl", 'rb'))
# ### embeddings from BERT model
# In[31]:
X_train_pooled_output = bert_model.predict([X_train_tokens, X_train_mask, X_train_segment])
X_test_pooled_output = bert_model.predict([X_test_tokens, X_test_mask, X_test_segment])
# In[33]:
pickle.dump((X_train_pooled_output, X_test_pooled_output),open('/mygdrive/final_output.pkl','wb'))
# In[20]:
X_train_pooled_output, X_test_pooled_output = pickle.load(open('/mygdrive/final_output.pkl', 'rb'))
# In[21]:
X_train_pooled_output.shape, X_test_pooled_output.shape, y_train.shape, y_test.shape
# In[39]:
scaler = StandardScaler()
scaler.fit(X_train_pooled_output)
x_train = scaler.transform(X_train_pooled_output)
x_test = scaler.transform(X_test_pooled_output)
x_train.shape, x_test.shape
# ### training a NN with 768 features
# In[45]:
tf.keras.backend.clear_session()
model = Sequential()
model.add(Dense(128, activation = "relu", kernel_initializer = "he_uniform", input_shape = (768, )))
model.add(Dropout(.5))
model.add(Dense(32, activation = "relu", kernel_initializer = "he_uniform"))
model.add(Dropout(.5))
model.add(Dense(2, activation = "softmax"))
model.compile(loss = "sparse_categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
plot_model(model, to_file = "./model.png", show_shapes = True)
model.summary()
# In[46]:
cv2_imshow(cv2.imread("./model.png"))
# In[ ]:
get_ipython().run_line_magic('tensorboard', '--logdir logs/fit')
# In[47]:
tensorboard_callback = TensorBoard(log_dir = log_dir(),
histogram_freq = 1,
write_images = True)
checkpoint = ModelCheckpoint(filepath = checkpoint_path(),
monitor='val_accuracy',
verbose = 1,
save_best_only = True,
mode = "max")
callbacks_list = [checkpoint, tensorboard_callback, earlystop, reduce_lr]
history = model.fit(x_train, y_train,
validation_data = (x_test, y_test),
epochs = 30,
batch_size = 32,
callbacks = callbacks_list)
# In[48]:
plt.figure()
L = len(history.history["loss"]) + 1
plt.plot(range(1, L), history.history["loss"], "bo-", label = "loss")
plt.plot(range(1, L), history.history["accuracy"], "g*-", label = "accuracy")
plt.plot(range(1, L), history.history["val_loss"], "y^-", label = "val_loss")
plt.plot(range(1, L), history.history["val_accuracy"], "ro-", label = "val_accuracy")
plt.legend()
plt.xlabel("epoch")
plt.grid()
plt.show()
# ### testing model
# In[49]:
y_pred_softmax = model.predict(x_test)
y_pred = []
for i in range(len(y_pred_softmax)):
if y_pred_softmax[i][0] >= 0.5:
y_pred.append(0)
else:
y_pred.append(1)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# In[3]:
p = PrettyTable(["Model", "Test Accuracy"])
p.add_row(["BoW with Logistic Regression", "91.3287%"])
p.add_row(["BoW with Naive Bayes", "86.9557%"])
p.add_row(["BoW with Random Forest", "99.8951%"])
p.add_row(["BoW with XGBoost", "87.9815%"])
p.add_row(["TF-IDF with Logistic Regression", "90.7671%"])
p.add_row(["TF-IDF with Naive Bayes", "87.0979%"])
p.add_row(["TF-IDF with Random Forest", "99.9063%"])
p.add_row(["TF-IDF with XGBoost", "91.1752%"])
p.add_row(["Neural network with Glove Embeddings", "99.9925%"])
print(p)
# In[ ]:
|
StarcoderdataPython
|
9714724
|
<gh_stars>0
# Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
# For example, given n = 3, a solution set is:
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
# Backtracking - Problem solving approach that incrementally builds candidates to the solution, and
# abandons a candidate ("backtracks") as soon as it fails some constraint. It takes a
# complex problem with a wide decision space and narrows it down to the choices we make at
# each stack frame of our recursion.
class Solution:
def generateParenthesis(self, n: int) -> 'list[str]':
res = []
self.generate('', n, n, res)
return res
def generate(self, p, left, right, res):
if left:
self.generate(p + '(', left - 1, right, res)
if right > left: # can only close a bracket that has been opened, hence right > left constraint
self.generate(p + ')', left, right - 1, res)
if not right:
res.append(p)
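# Quick usage sketch: for n = 3 this should print the five well-formed
# combinations listed in the comment at the top of the file.
if __name__ == '__main__':
    print(Solution().generateParenthesis(3))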
|
StarcoderdataPython
|
8173614
|
from __future__ import print_function, absolute_import, division
from collections import abc
from jinja2 import Template
class VCSConfiguration(object):
def __init__(self, name, options, global_variables, special_variables,
commit_message=None, finish_release=True,
include_files=None, include_all_files=False):
self.name = name
if commit_message is None:
commit_message = \
"Version updated {{ current_version }} -> {{ new_version }}"
commit_message_template = Template(commit_message)
template_variables = {}
template_variables.update(global_variables)
template_variables.update(special_variables)
self.commit_message = commit_message_template.render(
**template_variables)
self.finish_release = finish_release
self.options = {}
for key, value in options.items():
if isinstance(value, abc.Sequence):
value_template = Template(value)
self.options[key] = value_template.render(**template_variables)
else:
self.options[key] = value
self.options.update(special_variables)
self.include_files = include_files or []
self.include_all_files = include_all_files
@classmethod
def from_dict(cls, vcs_configuration_dict,
global_variables, special_variables):
return VCSConfiguration(
vcs_configuration_dict['name'],
vcs_configuration_dict.get('options', {}),
global_variables,
special_variables,
vcs_configuration_dict.get(
'commit_message', None),
vcs_configuration_dict.get(
'finish_release', True
),
vcs_configuration_dict.get(
'include_files', None
),
vcs_configuration_dict.get(
'include_all_files', False
)
)
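# Usage sketch; every value below is illustrative and not taken from a real
# project configuration:
if __name__ == "__main__":
    cfg = VCSConfiguration.from_dict(
        {"name": "git", "options": {"branch": "release/{{ new_version }}"}},
        global_variables={"current_version": "1.2.0"},
        special_variables={"new_version": "1.3.0"},
    )
    print(cfg.commit_message)     # Version updated 1.2.0 -> 1.3.0
    print(cfg.options["branch"])  # release/1.3.0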
|
StarcoderdataPython
|
3351201
|
<filename>rosbag_decode/bag-decode.py
from rosbags.rosbag2 import Reader
from rosbags.serde import deserialize_cdr
from datetime import datetime
path = "rosbag_decode/test-logs/rosbag2_2021_06_01-19_24_43"
def list_topics_test():
with Reader(path) as reader:
# topic and msgtype information is available on .topics dict
for topic, msgtype in reader.topics.items():
print(topic, msgtype)
def deser_msg_test():
with Reader(path) as reader:
for topic, msgtype, timestamp, rawdata in reader.messages(['/waverunner/sys/ctrl/scenario_sys_time']):
msg = deserialize_cdr(rawdata, msgtype)
#decode from nanosecond timestamp
readable_timestamp = datetime.fromtimestamp(timestamp*1E-9)
print(readable_timestamp)
print(msg.data)
if __name__ == "__main__":
#deser_msg_test()
list_topics_test()
|
StarcoderdataPython
|
4924883
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given point clouds S and T with the same number of points and a known point-to-point
correspondence, estimate the translation, rotation, and scaling between them.
"""
from typing import Tuple
from dataclasses import dataclass
import numpy as np
__all__ = ["MatchingResult", "minL2"]
@dataclass
class MatchingResult:
cost: float
offsetX: float
offsetY: float
angle: float
scale: float
movingCenterX: float
movingCenterY: float
def minL2(S: np.ndarray, T: np.ndarray) -> MatchingResult:
r"""Find (s, R, t) \in Sim(2) which minimizes sum_i || sRS_i + t - T_i ||^2.
Parameters
==========
S: (N, 2) array_like
Moving pointcloud.
T: (N, 2) array_like
Reference pointcloud.
Returns
=======
result: MatchingResult
"""
Smean = np.mean(S, axis=0)
Tmean = np.mean(T, axis=0)
S_ = S - Smean
T_ = T - Tmean
S_F2 = (S_ ** 2).sum()
T_F2 = (T_ ** 2).sum()
offset = Tmean - Smean
    U, s, Vt = np.linalg.svd(S_.T @ T_)  # np.linalg.svd returns V already transposed
    rot = Vt.T @ U.T
angle = np.arctan2(rot[1,0], rot[0,0])
trS = np.sum(s)
scale = trS / S_F2
cost = T_F2 - trS ** 2 / S_F2
return MatchingResult(
cost, offset[0], offset[1], angle, scale,
Smean[0], Smean[1]
)
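# Minimal sanity check with illustrative values: for a pure translation the
# recovered angle should be ~0, the scale ~1, the cost ~0, and the offsets
# equal to the applied shift of (1.0, 2.0).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    S = rng.normal(size=(10, 2))
    T = S + np.array([1.0, 2.0])
    print(minL2(S, T))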
|
StarcoderdataPython
|
187497
|
<reponame>debprakash/emr-view<filename>emr_mine_python_scipts/pq_tree/common_intervals.py
'''
Created on Dec 29, 2010
@author: patnaik
'''
#def naive_common_interval(pi_AB, C_prev = None):
# n = len(pi_AB)
# C = set()
# for x in xrange(n-1):
# l = u = pi_AB[x]
# for y in xrange(x+1, n):
# l = min(l, pi_AB[y])
# u = max(u, pi_AB[y])
# if u - l - (y - x) == 0 and (C_prev == None or (l,u) in C_prev):
# C.add((l,u))
# return C
def naive_common_interval_order(pi_AB, pos_map, C_prev):
n = len(pi_AB)
C = set()
for x in xrange(n):
l = u = pi_AB[x]
for y in xrange(x, n):
l = min(l, pi_AB[y])
u = max(u, pi_AB[y])
if (u - l) - (y - x) == 0 and (C_prev == None or (l,u) in C_prev):
pos_map[(l,u)].append(x)
C.add((l,u))
return C
from pygraph.classes.digraph import digraph
from pygraph.algorithms.critical import transitive_edges
def reducegraph(edges):
g = digraph()
nodes = set()
for (x, y) in edges:
if x not in nodes:
nodes.add(x)
g.add_node(x)
if y not in nodes:
nodes.add(y)
g.add_node(y)
g.add_edge((x,y))
for (x,y) in transitive_edges(g):
if (x,y) in edges: edges.remove((x,y))
return edges
def make_tree(C, orders):
node_map = dict([(tuple(C[k]), k) for k in xrange(len(C))])
#print node_map
nodef = lambda x: node_map[C[x]]
edges = set()
for i in xrange(len(C)):
for j in xrange(len(C)):
if i == j: continue
if C[i][0] <= C[j][0] and C[i][1] >= C[j][1]:
edges.add((nodef(i), nodef(j)))
edges = reducegraph(edges)
edges = list(edges)
edges.sort()
for (i, j) in edges:
print C[i], "->", C[j]
def inv_map(seq_1, seq_2): #O(n)
m = {}
for i in xrange(len(seq_1)):
m[seq_1[i]] = i
return [m[x] for x in seq_2]
def common_intervals(sequences, pos_map):
C = None
for i in xrange(0,len(sequences)):
pi_AB = inv_map(sequences[0], sequences[i])
C = naive_common_interval_order(pi_AB, pos_map, C)
#C = naive_common_interval(pi_AB, C)
if C == None: C = []
else:
C = list(C)
C.sort()
return C
class Node(object):
def __init__(self):
self.parent = None
self.value = 0
self.type = 'L'
self.children = []
from collections import defaultdict
if __name__ == '__main__':
print 'Common interval'
sequences = [(1,2,3,4,5,6,7,8,9), (9,8,4,5,6,7,1,2,3), (1,2,3,8,7,4,5,6,9)]
#sequences = [(1,2,3,4,5,6), (3,1,2,4,5,6), (2,3,1,4,5,6)]
#sequences = [(1,2,3), (2,3,1), (3,2,1)]
#sequences = [(1,2,3,4), (2,1,3,4), (1,2,4,3), (2,1,4,3)]
#sequences = [(1,2,3,4,5), (1,2,3,4,5)]
pos_map = defaultdict(list)
C = common_intervals(sequences, pos_map)
for (l,u) in C:
print "(%d,%d)" % (l, u),
print " locations =", pos_map[(l,u)]
#make_tree(C, pos_map)
|
StarcoderdataPython
|
221007
|
import os
import sys
import tempfile
import subprocess
def compute_metrics_from_files(path_to_reference, path_to_candidate, trec_eval_bin_path):
trec_run_fd, trec_run_path = tempfile.mkstemp(text=True)
try:
with os.fdopen(trec_run_fd, 'w') as tmp:
for line in open(path_to_candidate):
qid, pid, rank = line.split()
rank = int(rank)
tmp.write(f"{qid} Q0 {pid} {rank} {1/rank} System\n")
result = subprocess.check_output([
trec_eval_bin_path, "-c", "-mndcg_cut.10", path_to_reference, trec_run_path])
print(result)
finally:
os.remove(trec_run_path)
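# Expected input format for the candidate file above: one whitespace-separated
# "<qid> <pid> <rank>" triple per line, e.g. "19335 8412684 1" (illustrative
# IDs). Each line is rewritten as a TREC run entry of the form
# "19335 Q0 8412684 1 1.0 System" before trec_eval is invoked with -mndcg_cut.10.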
def main():
"""Command line:
python test_trec_eval.py <path_to_reference_file> <path_to_candidate_file>
"""
print("Eval Started")
if len(sys.argv) == 3:
path_to_reference = sys.argv[1]
path_to_candidate = sys.argv[2]
trec_eval_bin_path = "./data/trec_eval"
assert os.path.exists(trec_eval_bin_path)
compute_metrics_from_files(path_to_reference, path_to_candidate, trec_eval_bin_path)
else:
print('Usage: test_trec_eval.py <reference ranking> <candidate ranking>')
exit()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9632094
|
import os
from werkzeug.utils import secure_filename
from flask import(
Flask,
jsonify,
send_from_directory,
request,
redirect,
url_for
)
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object("project.config.Config")
db = SQLAlchemy(app)
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True, nullable=False)
image = db.Column(db.String(128), nullable=False)
def __init__(self, id, name, image):
self.id = id
self.name = name
self.image = image
@app.route("/")
def hello_world():
return jsonify(hello="world")
@app.route("/static/<path:filename>")
def staticfiles(filename):
return send_from_directory(app.config["STATIC_FOLDER"], filename)
@app.route("/media/<path:filename>")
def mediafiles(filename):
return send_from_directory(app.config["MEDIA_FOLDER"], filename)
@app.route("/upload", methods=["GET", "POST"])
def user():
if ('image' not in request.files):
resp = jsonify({'message' : 'No file part in the request'})
resp.status_code = 400
return resp
else:
try:
id = request.values['id']
name = request.values['name']
image = request.files['image']
filename = secure_filename(image.filename)
if filename == '':
resp = jsonify({'message' : 'No file selected for uploading'})
resp.status_code = 400
return resp
else:
image.save(os.path.join(app.config["MEDIA_FOLDER"], filename))
user = User(id, name, filename)
db.session.add(user)
db.session.commit()
return jsonify({'id': id,'name': name, 'image': filename}), 201
except Exception as e:
            return jsonify({'message': str(e)}), 400
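# Example request against the /upload route above (host, port, and field values
# are illustrative placeholders):
#
#   curl -X POST http://localhost:5000/upload \
#        -F "id=1" -F "name=alice" -F "image=@avatar.png"
#
# On success the route saves the file into MEDIA_FOLDER, stores a User row, and
# responds with 201 and {"id": "1", "name": "alice", "image": "avatar.png"}.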
|
StarcoderdataPython
|
1701897
|
<gh_stars>1-10
"""
Real time AEmotion (env: 'torch')
"""
# %% Import libs
import pyaudio
import numpy as np
# import pickle
# import librosa
import keract
import sys
sys.path.append('..')
from src.modeling.tcn.tcn import TCN
import os
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
from tensorflow.keras.models import model_from_json
from sklearn.preprocessing import MinMaxScaler
# import matplotlib.pyplot as plt
# from IPython.display import clear_output
from datetime import datetime as dtime
import opensmile
import paho.mqtt.client as mqtt #import the client1
def process():
# %% Initialize MQTT
def on_message(client, userdata, message):
print("message received " ,str(message.payload.decode("utf-8")))
print("message topic=",message.topic)
# print("message qos=",message.qos)
# print("message retain flag=",message.retain)
def on_log(client, userdata, level, buf):
print("log: ",buf)
broker_address="172.16.31.10"
broker_port = 2494
keepalive = 60
print("Inicializando usuário MQTT: Labinter02")
client = mqtt.Client("Labinter02") #create new instance
client.on_message=on_message #attach function to callback
# client.on_log=on_log
client.username_pw_set("participants", "prp1nterac")
print("Conectando ao servidor ....")
client.connect(broker_address, broker_port, keepalive)
client.loop_start() #start the loop
# %% Define the trained model
# dataset = 'DEMOS'
dataset = 'RAVDESS'
# dataset = 'TESS'
# dataset = 'RAVDESS_TESS'
# dataset = 'AEMOTION'
print("Carregando rede neural ASEmotion")
# load model from file
with open('model/model_smile_' +dataset+ '.json', 'r') as json_file:
loaded_json = json_file.read()
model = model_from_json(loaded_json, custom_objects={'TCN': TCN})
# restore weights
model.load_weights('model/weights_smile_' +dataset+ '.h5')
# %% Pre-process input
# Config for opensmile feature set
smile = opensmile.Smile(
feature_set=opensmile.FeatureSet.eGeMAPSv02,
feature_level=opensmile.FeatureLevel.LowLevelDescriptors,
)
def input_prep(data, smile):
X_smile = np.empty(shape=(1, 296, 25))
df_x = smile.process_signal(data, 16000)
scaler = MinMaxScaler()
X_smile[0,:,:] = scaler.fit_transform(df_x.values)
return X_smile
    # %% List the system's audio devices -------------------------
# os.system('cls||clear')
print('Lista de dispositivos:')
print('')
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:
print(" Dispositivo de audio ID ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
# %% Time streaming #######################################################
RATE = 16000 # Sample rate
nn_time = 3 # signal length send to the network
CHUNK = round(RATE*nn_time) # Frame size
#input stream setup
print('')
print('')
device_ch = input('Digite o número correspondente a ID dispositivo de audio: ')
    # pyaudio.paInt16 : represents 16-bit resolution
stream=p.open(format = pyaudio.paFloat32,
rate=RATE,
channels=1,
input_device_index = int(device_ch),
input=True,
frames_per_buffer=CHUNK)
print('')
print('')
print('Inicializando modelo:')
print('')
print('....')
print('')
labels = ['Guilt', 'Disgust', 'Happy', 'Neutral', 'Anger', 'Surprise', 'Sad']
history_pred = []
hist_time = []
while True:
data = np.frombuffer(stream.read(CHUNK), dtype=np.float32)
x_infer = input_prep(data, smile)
pred = model.predict(x_infer)
predi = pred.argmax(axis=1)
# history_pred = np.append(history_pred, predi[0])
# hist_time = np.append(hist_time, dtime.now().strftime('%H:%M:%S'))
print("Classificação: " + labels[predi[0]] + " Volume do som: " + str(round(max(data), 2))+")")
# GET ACTIVATIONS
layername = 'activation'
if dataset == 'DEMOS':
layername = 'activation_1'
l_weights = keract.get_activations(model, x_infer, layer_names=layername)
w_values = np.squeeze(l_weights[layername])
# SEND TO MQTT BrOKER
client.publish('hiper/labinter99_', labels[predi[0]])
str_display = ""
for k in range(len(labels)):
topic_pub = "hiper/labinter_" + labels[k]
# client.subscribe(topic_pub)
client.publish(topic_pub, str(w_values[k]))
str_display = str_display + topic_pub + " "
# SEND TO MQTT BrOKER
# for k in range(len(labels)):
# mqtt_client.publish_single(float(w_values[k]), topic=labels[k])
# plot
# clear_output(wait=True)
# plt.plot(w_values, 'b-')
# plt.title(labels[predi[0]])
# plt.yticks(ticks=np.arange(0,1.1,0.1))
# plt.xticks(ticks=np.arange(0,7), labels=labels)
# plt.xlabel('Emotion')
# plt.ylabel('NN certainty')
# plt.grid()
# plt.show()
# %% Plot history
h=plt.figure()
plt.scatter(range(0,len(history_pred)), history_pred)
plt.yticks(range(0,7) , labels=labels)
# plt.xticks(range(0,len(history_pred)) , labels=hist_time, rotation=90)
plt.xlabel('Time (each dot represents a ' + str(nn_time)+ 's iteration)')
plt.ylabel('Emotion')
plt.title('AEmotion classification')
plt.grid()
plt.show()
h.savefig("hist.pdf", bbox_inches='tight')
if __name__ == "__main__":
process()
# %%
|
StarcoderdataPython
|
4953290
|
"""add credits billing fields
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2021-02-17 18:42:21.656755
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
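# upgrade() adds a PostgreSQL expression index over the boolean value of
# payload ->> 'fully_billed' on the jobs table; downgrade() drops the same index.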
def upgrade() -> None:
op.create_index(
"jobs_fully_billed_index",
"jobs",
[sa.text("(((payload ->> 'fully_billed'::text))::boolean)")],
)
def downgrade() -> None:
op.drop_index("jobs_fully_billed_index", table_name="jobs")
|
StarcoderdataPython
|
3384104
|
<filename>stats-backend/api2/migrations/0003_rename_offer_offer_properties.py
# Generated by Django 3.2.12 on 2022-04-14 11:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api2', '0002_auto_20220414_1143'),
]
operations = [
migrations.RenameField(
model_name='offer',
old_name='offer',
new_name='properties',
),
]
|
StarcoderdataPython
|
12855914
|
<gh_stars>1-10
from django.apps import AppConfig
class NativeShortuuidConfig(AppConfig):
name = 'native_shortuuid'
|
StarcoderdataPython
|
11300233
|
<filename>python codes/FizzBuzz.py
i=0
n = int(input("Enter the number of lines : "))
while i<n:i+=1;print('FizzBuzz'[i%~2&4:12&8+i%~4]or i)
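# How the slice works: the start index (i % ~2 & 4, i.e. i % -3 & 4) is 0 when i is a
# multiple of 3 and 4 otherwise; the stop index (12 & (8 + i % ~4)) is 8 when i is a
# multiple of 5 and 4 otherwise. The slice therefore yields 'FizzBuzz', 'Fizz', 'Buzz',
# or '' (falsy), and `or i` falls back to printing the number itself.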
|
StarcoderdataPython
|
8084800
|
<gh_stars>0
def main():
import interactive_plotter as ip
import itertools
import numpy as np
x = np.linspace(-1, 1, 31)
y = np.linspace(-1, 1, 29)
xc = x[:-1] + 0.5 * np.diff(x)
yc = y[:-1] + 0.5 * np.diff(y)
time_step = 0.03
delta_t = time_step
X, Y = np.meshgrid(x, y)
Xc, Yc = np.meshgrid(xc, yc)
def f(x, y):
return np.sin(x + 3 * y) * np.cos(2 * x - y)
def df_dx(x, y):
return np.cos(x + 3 * y) * np.cos(2 * x - y) - 2 * np.sin(x + 3 * y) * np.sin(2 * x - y)
def df_dy(x, y):
return 3 * np.cos(x + 3 * y) * np.cos(2 * x - y) + np.sin(x + 3 * y) * np.sin(2 * x - y)
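    # (df_dx, df_dy) is the analytic gradient of f, so the quiver arrows always point in
    # the direction of steepest ascent of the plotted scalar field.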
velocity_x = 1
velocity_y = 1
offset_x = 0
offset_y = 0
C=f(Xc + offset_x, Yc + offset_y)
U=df_dx(Xc + offset_x, Yc + offset_y)
V=df_dy(Xc + offset_x, Yc + offset_y)
iFig = ip.InteractiveFigure(figsize=(4,4))
iAx = iFig.get_interactive_axes()
iAx.axes.axis("off")
iQr = iAx.vector_field(Xc, Yc, U, V, color='black', scale=10)
iQm = iAx.scalar_field(X, Y, C)
iAx.axes.autoscale()
iAx.axes.set_aspect('equal')
iFig.render(pause=1)
index = 0
while True:
if offset_y > 2 * np.pi:
break
offset_x += velocity_x * delta_t
offset_y += velocity_y * delta_t
C=f(Xc + offset_x, Yc + offset_y)
U=df_dx(Xc + offset_x, Yc + offset_y)
V=df_dy(Xc + offset_x, Yc + offset_y)
iQm.plot(C=C)
iQr.plot(U=U, V=V)
iFig.render(pause=time_step)
iFig.savefig('fields.png', index=index, bbox_inches='tight', pad_inches = 0)
index +=1
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6627998
|
<filename>swift_undelete/tests/test_middleware.py
#!/usr/bin/env python
# Copyright (c) 2014 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common import swob
from swift_undelete import middleware as md
class FakeApp(object):
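    """
    Minimal WSGI-style stub: records every (method, path, headers) call it receives and
    replays the canned responses queued in ``self.responses``.
    """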
def __init__(self):
self.responses = [] # fill in later
self._calls = []
def __call__(self, env, start_response):
req = swob.Request(env)
self._calls.append((
req.method, req.path,
# mutable dict; keep a copy so subsequent calls can't change it
swob.HeaderKeyDict(req.headers)))
if len(self.responses) > 1:
resp = self.responses.pop(0)
else:
resp = self.responses[0]
status = resp['status']
headers = resp.get('headers', [])
body_iter = resp.get('body_iter', [])
start_response(status, headers)
return body_iter
@property
def calls(self):
"""
Returns the calls received by this application as a list of
(method, path) pairs.
"""
return [x[:2] for x in self._calls]
@property
def call_headers(self):
"""
Returns the list of headers received by this application as it was
called
"""
return [x[2] for x in self._calls]
@property
def calls_with_headers(self):
"""
Returns the calls received by this application as a list of
(method, path, headers) tuples.
"""
return self._calls
class TestConfigParsing(unittest.TestCase):
def test_defaults(self):
app = FakeApp()
undelete = md.filter_factory({})(app)
self.assertEqual(undelete.trash_prefix, ".trash-")
self.assertEqual(undelete.trash_lifetime, 86400 * 90)
self.assertFalse(undelete.block_trash_deletes)
def test_non_defaults(self):
app = FakeApp()
undelete = md.filter_factory({
'trash_prefix': '.heap__',
'trash_lifetime': '31536000',
'block_trash_deletes': 'on',
})(app)
self.assertEqual(undelete.trash_prefix, ".heap__")
self.assertEqual(undelete.trash_lifetime, 31536000)
self.assertTrue(undelete.block_trash_deletes)
class MiddlewareTestCase(unittest.TestCase):
"""
Just a base class for other test cases. Some setup, some utility methods.
Nothing too exciting.
"""
def setUp(self):
self.app = FakeApp()
self.undelete = md.filter_factory({})(self.app)
def call_mware(self, req, expect_exception=False):
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = self.undelete(req.environ, start_response)
body = ''
caught_exc = None
try:
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
headerdict = swob.HeaderKeyDict(headers[0])
if expect_exception:
return status[0], headerdict, body, caught_exc
else:
return status[0], headerdict, body
class TestPassthrough(MiddlewareTestCase):
def test_account_passthrough(self):
"""
Account requests are passed through unmodified.
"""
self.app.responses = [{'status': '200 OK'}]
req = swob.Request.blank('/v1/a')
req.method = 'DELETE'
status, _, _ = self.call_mware(req)
self.assertEqual(status, "200 OK")
self.assertEqual(self.app.calls, [('DELETE', '/v1/a')])
def test_container_passthrough(self):
"""
Container requests are passed through unmodified.
"""
self.app.responses = [{'status': '200 OK'}]
req = swob.Request.blank('/v1/a/c')
req.method = 'DELETE'
status, _, _ = self.call_mware(req)
self.assertEqual(status, "200 OK")
self.assertEqual(self.app.calls, [('DELETE', '/v1/a/c')])
class TestObjectDeletion(MiddlewareTestCase):
def test_deleting_nonexistent_object(self):
# If the object isn't there, ignore the 404 on COPY and pass the
# DELETE request through. It might be an expired object, in which case
# the object DELETE will actually get it out of the container listing
# and free up some space.
self.app.responses = [
# COPY request
{'status': '404 Not Found'},
# trash-versions container creation request
#
# Ideally we'd skip this stuff, but we can't tell the difference
# between object-not-found (404) and
# destination-container-not-found (also 404).
{'status': '202 Accepted'},
# trash container creation request
{'status': '202 Accepted'},
# second COPY attempt:
{'status': '404 Not Found'},
# DELETE request
{'status': '404 Not Found',
'headers': [('X-Exophagous', 'ungrassed')]}]
req = swob.Request.blank('/v1/a/elements/Cf')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "404 Not Found")
self.assertEqual(headers.get('X-Exophagous'), 'ungrassed')
self.assertEqual(self.app.calls,
[('COPY', '/v1/a/elements/Cf'),
('PUT', '/v1/a/.trash-elements-versions'),
('PUT', '/v1/a/.trash-elements'),
('COPY', '/v1/a/elements/Cf'),
('DELETE', '/v1/a/elements/Cf')])
def test_copy_to_existing_trash_container(self):
self.undelete.trash_lifetime = 1997339
self.app.responses = [
# COPY request
{'status': '201 Created',
'headers': [('X-Sir-Not-Appearing-In-This-Response', 'yup')]},
# DELETE request
{'status': '204 No Content',
'headers': [('X-Decadation', 'coprose')]}]
req = swob.Request.blank('/v1/MY_account/cats/kittens.jpg')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "204 No Content")
# the client gets whatever the DELETE coughed up
self.assertNotIn('X-Sir-Not-Appearing-In-This-Response', headers)
self.assertEqual(headers['X-Decadation'], 'coprose')
self.assertEqual(2, len(self.app.calls))
# First, we performed a COPY request to save the object into the trash.
method, path, headers = self.app.calls_with_headers[0]
self.assertEqual(method, 'COPY')
self.assertEqual(path, '/v1/MY_account/cats/kittens.jpg')
self.assertEqual(headers['Destination'], '.trash-cats/kittens.jpg')
self.assertEqual(headers['X-Delete-After'], str(1997339))
# Second, we actually perform the DELETE request (and send that
# response to the client unaltered)
method, path, headers = self.app.calls_with_headers[1]
self.assertEqual(method, 'DELETE')
self.assertEqual(path, '/v1/MY_account/cats/kittens.jpg')
def test_copy_to_existing_trash_container_no_expiration(self):
self.undelete.trash_lifetime = 0
self.app.responses = [
# COPY request
{'status': '201 Created'},
# DELETE request
{'status': '204 No Content'}]
req = swob.Request.blank('/v1/MY_account/cats/kittens.jpg')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "204 No Content")
self.assertEqual(2, len(self.app.calls))
method, path, headers = self.app.calls_with_headers[0]
self.assertEqual(method, 'COPY')
self.assertEqual(path, '/v1/MY_account/cats/kittens.jpg')
self.assertNotIn('X-Delete-After', headers)
def test_copy_to_missing_trash_container(self):
self.app.responses = [
# first COPY attempt: trash container doesn't exist
{'status': '404 Not Found'},
# trash-versions container creation request
{'status': '201 Created'},
# trash container creation request
{'status': '201 Created'},
# second COPY attempt:
{'status': '404 Not Found'},
# DELETE request
{'status': '204 No Content'}]
req = swob.Request.blank('/v1/a/elements/Lv')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "204 No Content")
self.assertEqual(self.app.calls,
[('COPY', '/v1/a/elements/Lv'),
('PUT', '/v1/a/.trash-elements-versions'),
('PUT', '/v1/a/.trash-elements'),
('COPY', '/v1/a/elements/Lv'),
('DELETE', '/v1/a/elements/Lv')])
def test_copy_error(self):
self.app.responses = [
# COPY attempt: some mysterious error with some headers
{'status': '503 Service Unavailable',
'headers': [('X-Scraggedness', 'Goclenian')],
'body_iter': ['dunno what happened boss']}]
req = swob.Request.blank('/v1/a/elements/Te')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "503 Service Unavailable")
self.assertEqual(headers.get('X-Scraggedness'), 'Goclenian')
self.assertIn('what happened', body)
self.assertEqual(self.app.calls, [('COPY', '/v1/a/elements/Te')])
def test_copy_missing_trash_container_error_creating_vrs_container(self):
self.app.responses = [
# first COPY attempt: trash container doesn't exist
{'status': '404 Not Found'},
# trash-versions container creation request: failure!
{'status': '403 Forbidden',
'headers': [('X-Pupillidae', 'Barry')],
'body_iter': ['oh hell no']}]
req = swob.Request.blank('/v1/a/elements/U')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "403 Forbidden")
self.assertEqual(headers.get('X-Pupillidae'), 'Barry')
self.assertIn('oh hell no', body)
self.assertEqual(self.app.calls,
[('COPY', '/v1/a/elements/U'),
('PUT', '/v1/a/.trash-elements-versions')])
def test_copy_missing_trash_container_error_creating_container(self):
self.app.responses = [
# first COPY attempt: trash container doesn't exist
{'status': '404 Not Found'},
# trash-versions container creation request
{'status': '201 Created'},
# trash container creation request: fails!
{'status': "418 I'm a teapot",
'headers': [('X-Body-Type', 'short and stout')],
'body_iter': ['here is my handle, here is my spout']}]
req = swob.Request.blank('/v1/a/elements/Mo')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "418 I'm a teapot")
self.assertEqual(headers.get('X-Body-Type'), 'short and stout')
self.assertIn('spout', body)
self.assertEqual(self.app.calls,
[('COPY', '/v1/a/elements/Mo'),
('PUT', '/v1/a/.trash-elements-versions'),
('PUT', '/v1/a/.trash-elements')])
def test_delete_from_trash(self):
"""
Objects in trash containers don't get saved.
"""
self.app.responses = [{'status': '204 No Content'}]
req = swob.Request.blank('/v1/a/.trash-borkbork/bork')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "204 No Content")
self.assertEqual(self.app.calls,
[('DELETE', '/v1/a/.trash-borkbork/bork')])
def test_delete_from_trash_blocked(self):
self.undelete.block_trash_deletes = True
req = swob.Request.blank('/v1/a/.trash-borkbork/bork')
req.method = 'DELETE'
status, headers, body = self.call_mware(req)
self.assertEqual(status, "405 Method Not Allowed")
self.assertEqual(self.app.calls, [])
|
StarcoderdataPython
|
263753
|
<gh_stars>1-10
# Generated by Django 2.0.2 on 2018-04-04 17:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('voter', '0018_convert_badlines'),
]
operations = [
migrations.DeleteModel(
name='BadLine',
),
migrations.AlterUniqueTogether(
name='badlinerange',
unique_together={('filename', 'first_line_no')},
),
]
|
StarcoderdataPython
|
5038721
|
<filename>src/modules/general.py<gh_stars>1-10
import datetime
import typing
import discord
from discord.ext import commands
import config
import database
import utils
class GeneralCommands(commands.Cog, name="General"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="donate")
async def donate(self, ctx):
"""
Donate to this bot UwU
"""
await ctx.send(
"Please go to this site to donate: https://www.patreon.com/RandomGhost (PayPal only).\n"
"I also accept Bitcoin and other crypto payments. Please contact me (RandomGhost#0666) "
f"or on the support server (`{ctx.prefix}support`) for other payment methods.\n"
"Thanks! <a:thanks:699004469610020964>"
)
@commands.command(name="invite", aliases=["invitelink"])
async def invite_bot(self, ctx):
"""
Get the invite link for the bot's support server
"""
await ctx.send(
"Bot invite link: https://top.gg/bot/506878658607054849 <:uwu:575372762583924757>"
)
@commands.command(name="creator", aliases=["support"])
async def creator(self, ctx):
"""
Get to know the creator of this bot,
so you can annoy him to fix the damn bugs!
"""
dev1 = ctx.bot.get_user(252297314394308608) or await ctx.bot.fetch_user(252297314394308608)
dev2 = ctx.bot.get_user(532123382280355860) or await ctx.bot.fetch_user(532123382280355860)
await ctx.send(
f"Bot created by {dev1} and co-developed by {dev2}.\n"
"Ask them for new features/bugs! <a:thanks:699004469610020964>\n"
"To join support server, use `=help` or go to https://support.pinocchiobot.xyz."
)
@commands.command(name="vote", enabled=(config.DBL_TOKEN is not None))
async def vote_bot(self, ctx):
"""
Vote for this bot! Isn't Pinocchio kawaii!?!
Vote for her and make her happy
"""
await ctx.send(
f"Vote for this bot and then claim your reward with `{ctx.prefix}claimreward`!\n"
"**Vote URL:** https://top.gg/bot/506878658607054849/vote\n"
"You can vote once every 12 hours. "
"You get 2x rewards for voting on weekends.\n"
)
@commands.command(name="poll")
async def poll(self, ctx, title: str, *options):
"""
Create a reaction poll
"""
if len(options) < 2:
return await ctx.send("Please add atleast 2 options!")
if len(options) > 10:
return await ctx.send("Max 10 options!")
desc = ""
for i, opt in enumerate(options):
desc += ":{0}: : {1}\n".format(utils.num_to_emote[i], opt)
embed = discord.Embed(title=title, color=ctx.author.color, description=desc)
embed.set_footer(
text=f"Poll made by: {ctx.author}",
icon_url=ctx.author.avatar_url,
)
msg = await ctx.send(embed=embed)
for i, _ in enumerate(options):
await msg.add_reaction(utils.num_to_uni_emote[i])
@commands.command(name="whois")
async def whois(self, ctx, user: typing.Optional[discord.Member]):
"""
Get information about a user
"""
user = user or ctx.author
embed = discord.Embed(title=f"{user.name}#{user.discriminator}", color=user.colour)
tdelta = datetime.datetime.now() - user.joined_at
embed.add_field(name="User ID", value=user.id)
if user.nick:
embed.add_field(name="Nickname", value=user.nick)
if user.top_role:
embed.add_field(name="Top Role", value=user.top_role)
embed.add_field(name="Status", value=user.status)
embed.add_field(name="Is Bot", value=user.bot)
_perms = user.guild_permissions
embed.add_field(name="Is Administrator", value=_perms.administrator)
roles = user.roles[1:]
if len(roles) > 0:
role_str = ", ".join([i.name for i in roles])
else:
role_str = "No roles set."
embed.add_field(name="Roles", inline=False, value=role_str)
embed.add_field(
name="Account Created On",
inline=False,
value=discord.utils.snowflake_time(user.id).strftime("%A, %d %B, %Y. %I:%M:%S %p"),
)
embed.add_field(
name="In Server For",
inline=False,
value=f"{tdelta.days} days, {tdelta.seconds//3600} hours",
)
perms_list = [
"kick_members",
"ban_members",
"manage_channels",
"manage_guild",
"add_reactions",
"view_audit_log",
"priority_speaker",
"send_messages",
"send_tts_messages",
"manage_messages",
"attach_files",
"read_message_history",
"mention_everyone",
"embed_links",
"external_emojis",
"connect",
"speak",
"mute_members",
"deafen_members",
"move_members",
"use_voice_activation",
"change_nickname",
"manage_nicknames",
"manage_roles",
"manage_webhooks",
"manage_emojis",
]
perms = []
for i in perms_list:
if getattr(_perms, i):
perms += [i.replace("_", " ").capitalize()]
if perms == []:
perms = ["No special permissions."]
perms_str = ", ".join(perms)
embed.add_field(name="Permissions", value=perms_str, inline=False)
embed.set_thumbnail(url=user.avatar_url)
await ctx.send(embed=embed)
@commands.command(name="say")
async def say(self, ctx, channel: typing.Optional[discord.TextChannel], *, text: str):
"""
Speak as Pinocchio.
"""
channel = channel or ctx.channel
if not channel.permissions_for(ctx.author).send_messages:
return await ctx.send(
"You don't have the permissions to send messages in that channel!"
)
if not channel.permissions_for(ctx.author).mention_everyone:
text = discord.utils.escape_mentions(text)
await channel.send(text)
@commands.command(name="worldleaderboard", aliases=["wlb"])
async def world_leaderboard(self, ctx):
"""
View the world's leaderboard
"""
engine = await database.prepare_engine()
query = """
SELECT id,M.member,tier,COALESCE(wsum,0) as waifu_sum,wallet,(COALESCE(wsum, 0)+wallet) as total
FROM members M
LEFT JOIN (select member_id, sum(purchased_for) as wsum from purchased_waifu group by member_id) PW
ON (M.id = PW.member_id)
WHERE wallet > 0 OR COALESCE(wsum, 0) > 0
ORDER BY total DESC LIMIT 50;
"""
results = await engine.fetch_all(query=query)
txt = generate_leaderboard_text(ctx.bot, results)
embed = discord.Embed(
title=":trophy: World Leaderboards",
colour=ctx.author.color,
description=txt,
)
top_user_name = ""
for result in results:
top_user = ctx.bot.get_user(result["member"])
if top_user is not None:
top_user_name = top_user.name
break
embed.set_footer(
text=f"Current World Champion is {top_user_name}.",
)
await ctx.send(embed=embed)
@commands.command(name="guildleaderboard", aliases=["glb"])
@utils.ensure_bot_ready()
async def guild_leaderboard(self, ctx):
"""
View this guild's leaderboard
"""
engine = await database.prepare_engine()
mlist = tuple([m.id for m in ctx.guild.members])
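        # Note: a guild with a single member interpolates a one-element tuple such as
        # "(123,)", whose trailing comma is not valid SQL IN-list syntax.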
query = f"""
SELECT id,M.member,tier,COALESCE(wsum,0) as waifu_sum,wallet,(COALESCE(wsum, 0)+wallet) as total
FROM members M LEFT JOIN (
SELECT member_id,sum(purchased_for) as wsum FROM purchased_waifu
WHERE guild = {ctx.guild.id} GROUP BY member_id) PW ON (M.id = PW.member_id)
WHERE (wallet > 0 OR COALESCE(wsum, 0) > 0) AND M.member in {mlist}
ORDER BY total DESC LIMIT 10;
"""
results = await engine.fetch_all(query=query)
txt = generate_leaderboard_text(ctx.bot, results)
embed = discord.Embed(
title=":trophy: Guild Leaderboards",
colour=ctx.author.color,
description=txt,
)
top_user_name = ""
for result in results:
top_user = ctx.bot.get_user(result["member"])
if top_user is not None:
top_user_name = top_user.name
break
embed.set_footer(
text=f"Current Guild Champion is {top_user_name}.",
)
await ctx.send(embed=embed)
def generate_leaderboard_text(client, results):
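    """
    Build the leaderboard description text: one line per member with wallet, waifu value
    and total, medal emotes for the top three, and at most 10 entries.
    """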
rtxt = []
i = 1
for j in results:
user = client.get_user(j["member"])
if user is None:
continue
if i <= 3:
medal = ""
if i == 1:
medal = ":first_place:"
elif i == 2:
medal = ":second_place:"
elif i == 3:
medal = ":third_place:"
rtxt.append(
f"**[{str(i).zfill(2)}] __{user.name}__ {medal}**\nWallet: "
f"{j['wallet']}, Waifu Value: {j['waifu_sum']}, **Total: {j['total']}**"
) # noqa
else:
rtxt.append(
f"**[{str(i).zfill(2)}] {user.name}**\nWallet: {j['wallet']}, "
f"Waifu Value: {j['waifu_sum']}, **Total: {j['total']}**"
) # noqa
i += 1
if i == 11:
break
return "\n".join(rtxt)
|
StarcoderdataPython
|
45449
|
<reponame>VictorAtPL/Pascal-VOC12_Class-segmentation_Tensorflow-2.0.0
from tensorflow import keras
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Activation
from AbstractModel import AbstractModel
from common import load_sets_count, get_input_fn_and_steps_per_epoch, pascal_voc2012_segmentation_annotated_parser, \
pascal_voc2012_segmentation_not_annotated_parser
from constants import CLASS_NO, INPUT_WIDTH, INPUT_HEIGHT, TFRECORDS_SAVE_PATH
from models import fcn8
class Model(AbstractModel):
sets_count = load_sets_count()
def get_model(self, **kwargs) -> keras.Model:
# img_input = Input(shape=(INPUT_HEIGHT, INPUT_WIDTH, 3))
# Encoder - Load VGG16
# input_tensor = img_input,
encoder = VGG16(weights='imagenet', input_shape=(INPUT_HEIGHT, INPUT_WIDTH, 3),
include_top=False, pooling=None)
# Encoder - Get intermediate VGG16 layers output
pool2 = encoder.get_layer('block2_pool').output
# Encoder
conv6 = (Conv2D(filters=1024, kernel_size=(7, 7), activation='relu', padding='same', name='block6_conv1'))(
pool2)
conv7 = (Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', padding='same', name='block7_conv1'))(
conv6)
# Decoder
fcn32 = Conv2DTranspose(CLASS_NO, kernel_size=(7, 7), strides=(4, 4), padding='same', name='block10_deconv3',
use_bias=False)(conv7)
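        # block2_pool sits at 1/4 of the input resolution, so a single stride-4 transposed
        # convolution brings the CLASS_NO-channel logits back to INPUT_HEIGHT x INPUT_WIDTH
        # before the softmax.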
output = Activation('softmax')(fcn32)
model = keras.Model(encoder.input, output)
return model
@classmethod
def get_input_fn_and_steps_per_epoch(cls, set_name, batch_size=None):
parser_fn = pascal_voc2012_segmentation_annotated_parser
if 'test' in set_name:
parser_fn = pascal_voc2012_segmentation_not_annotated_parser
return get_input_fn_and_steps_per_epoch(set_name, parser_fn, TFRECORDS_SAVE_PATH,
batch_size, cls.sets_count)
|
StarcoderdataPython
|
1776055
|
<filename>examples/scripts/location/create_location_object.py<gh_stars>0
import pyaurorax
def main():
loc = pyaurorax.Location(lat=51.0447, lon=-114.0719)
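    # lat 51.0447, lon -114.0719 is roughly Calgary, AB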
print(loc)
# ----------
if (__name__ == "__main__"):
main()
|
StarcoderdataPython
|
1949618
|
import random
import pygame
pygame.init()
screenx, screeny = 1000, 800
window = pygame.display.set_mode((screenx, screeny))
font = pygame.font.SysFont('Arial', 30)
pygame.display.set_caption("Pong")
# RGB
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
class Player:
def __init__(self, x, y, points):
self.x = x
self.y = y
self.points = points
self.rect = pygame.Rect(self.x, self.y, 25, 100)
def move(self, direction, speed):
self.y += direction * speed
if self.y <= 0:
self.y = 0
elif self.y + 100 >= screeny:
self.y = screeny - 100
self.rect = pygame.Rect(self.x, self.y, 25, 100)
class Ball:
def __init__(self, x, y, speed):
self.x = x
self.y = y
self.speed = speed
self.reflect_count = 0
self.x_direction = random.choice((-1, 1))
self.y_direction = random.choice((-1, 1))
self.rect = pygame.Rect(self.x, self.y, 25, 25)
def move(self):
self.x += self.x_direction * self.speed
self.y += self.y_direction * self.speed
self.rect = pygame.Rect(self.x, self.y, 25, 25)
def reflect(self, p1, bot):
if 0 >= self.y or self.y + 25 >= screeny:
self.y_direction *= -1
if self.rect.colliderect(p1.rect) or self.rect.colliderect(bot.rect):
self.x_direction *= -1
self.reflect_count += 1
if self.reflect_count >= 10 and self.speed < 10:
self.reflect_count = 0
self.speed += 1
def check_win(self, p1, bot):
reset = False
if self.x + 25 <= 0:
bot.points += 1
reset = True
elif self.x >= screenx:
p1.points += 1
reset = True
return reset
def redraw_game_window():
window.fill(black)
p_points = font.render(str(p1.points), True, red)
b_points = font.render(str(bot.points), True, red)
speed = font.render(f"Speed: {ball.speed}", True, red)
window.blit(p_points, (10, 10))
window.blit(b_points, (990 - len(str(bot.points)) * 15, 10))
window.blit(speed, (10, screeny - 35))
pygame.draw.rect(window, white, (ball.x, ball.y, 25, 25))
pygame.draw.rect(window, white, (p1.x, p1.y, 25, 100))
pygame.draw.rect(window, white, (bot.x, bot.y, 25, 100))
pygame.display.update()
run = True
ball = Ball(screenx // 2, screeny // 2 + 25, 5)
p1 = Player(50, screeny // 2, 0)
bot = Player(screenx - 75, screeny // 2, 0)
clock = pygame.time.Clock()
while run:
    clock.tick(60)  # reuse a single Clock so tick() can properly cap the frame rate at 60 FPS
keys = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if keys[pygame.K_UP]:
p1.move(-1, 8)
elif keys[pygame.K_DOWN]:
p1.move(1, 8)
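    # Simple opponent logic: the bot paddle just follows the ball's vertical direction,
    # capped at 8 px per frame even when the ball speeds up.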
if ball.speed <= 8:
bot_speed = ball.speed
else:
bot_speed = 8
bot.move(ball.y_direction, bot_speed)
if ball.check_win(p1, bot):
ball = Ball(screenx // 2, screeny // 2 + 25, 5)
p1 = Player(50, screeny // 2, p1.points)
bot = Player(screenx - 75, screeny // 2, bot.points)
ball.move()
ball.reflect(p1, bot)
redraw_game_window()
pygame.quit()
|
StarcoderdataPython
|
1748304
|
from hypothesis import given
from tests.utils import (BoundPortedEdgesPair,
equivalence)
from . import strategies
@given(strategies.edges_pairs, strategies.edges_pairs)
def test_basic(first_edges_pair: BoundPortedEdgesPair,
second_edges_pair: BoundPortedEdgesPair) -> None:
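    """The bound and ported edge implementations must agree on equality."""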
first_bound, first_ported = first_edges_pair
second_bound, second_ported = second_edges_pair
assert equivalence(first_bound == second_bound,
first_ported == second_ported)
|
StarcoderdataPython
|