Dataset schema (⌀ = nullable):

| column | type | lengths / values |
|---|---|---|
| hexsha | string | 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | 3–251 |
| max_stars_repo_name | string | 4–130 |
| max_stars_repo_head_hexsha | string | 40–78 |
| max_stars_repo_licenses | list | 1–10 |
| max_stars_count | int64 ⌀ | 1–191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | 24–24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | 24–24 |
| max_issues_repo_path | string | 3–251 |
| max_issues_repo_name | string | 4–130 |
| max_issues_repo_head_hexsha | string | 40–78 |
| max_issues_repo_licenses | list | 1–10 |
| max_issues_count | int64 ⌀ | 1–116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | 24–24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | 24–24 |
| max_forks_repo_path | string | 3–251 |
| max_forks_repo_name | string | 4–130 |
| max_forks_repo_head_hexsha | string | 40–78 |
| max_forks_repo_licenses | list | 1–10 |
| max_forks_count | int64 ⌀ | 1–105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | 24–24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | 24–24 |
| content | string | 1–1.05M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.04M |
| alphanum_fraction | float64 | 0–1 |

Each record below is one pipe-separated metadata row in the column order above, followed by the file `content` and a trailing `| avg_line_length | max_line_length | alphanum_fraction |` line.
1633a9fb3de8a2d02c1b973e0da5225da5fdee84 | 25,426 | py | Python | create_coherency_dataset.py | UKPLab/acl20-dialogue-coherence-assessment | 328b888855dc833b4b0c05c259ee7115f4219dbe | ["MIT"] | 12 | 2020-05-03T12:41:53.000Z | 2021-11-19T06:45:56.000Z | create_coherency_dataset.py | UKPLab/acl20-dialogue-coherence-assessment | 328b888855dc833b4b0c05c259ee7115f4219dbe | ["MIT"] | 2 | 2020-07-02T08:19:19.000Z | 2021-12-03T16:58:02.000Z | create_coherency_dataset.py | UKPLab/acl20-dialogue-coherence-assessment | 328b888855dc833b4b0c05c259ee7115f4219dbe | ["MIT"] | 4 | 2020-08-27T08:36:55.000Z | 2021-08-19T21:53:31.000Z |
import math
import os
from copy import deepcopy
from ast import literal_eval
import pandas as pd
from math import factorial
import random
from collections import Counter, defaultdict
import sys
from nltk import word_tokenize
from tqdm import tqdm, trange
import argparse
import numpy as np
import re
import csv
from sklearn.model_selection import train_test_split
from swda.swda import CorpusReader, Transcript, Utterance
act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"}
def permute(sents, sent_DAs, amount):
""" return a list of different! permuted sentences and their respective dialog acts """
""" if amount is greater than the possible amount of permutations, only the uniquely possible ones are returned """
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
if amount == 0:
return []
permutations = [list(range(len(sents)))]
amount = min(amount, factorial(len(sents))-1)
for i in range(amount):
permutation = np.random.permutation(len(sents))
while permutation.tolist() in permutations:
permutation = np.random.permutation(len(sents))
permutations.append(permutation.tolist())
    return permutations[1:] # drop the first entry: it is the original order, included above only so that it is never re-generated
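# Example (a hedged sketch, not part of the original file): three sentences admit
# 3! - 1 = 5 permutations besides the original order, so asking for two returns
# two distinct index lists, e.g.
#   permute(["A", "B", "C"], ["inform", "question", "inform"], amount=2)
#   -> [[2, 0, 1], [1, 2, 0]]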
def draw_rand_sent(act_utt_df, sent_len, amount):
""" df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance),
with act being a number from 1 to 4 and utt being a sentence """
permutations = []
for _ in range(amount):
(utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)
sent_insert_ix = random.randint(0, sent_len-1)
permutations.append((utt, da, name, ix, sent_insert_ix))
return permutations
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--datadir",
required=True,
type=str,
help="""The input directory where the files of the corpus
are located. """)
parser.add_argument("--corpus",
required=True,
type=str,
help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--amount',
type=int,
default=20,
help="random seed for initialization")
parser.add_argument('--word2id',
action='store_true',
help= "convert the words to ids")
parser.add_argument('--task',
required=True,
type=str,
default="up",
help="""for which task the dataset should be created.
alternatives: up (utterance permutation)
us (utterance sampling)
                                hup (half utterance perturbation)
ui (utterance insertion, nothing directly added!)""")
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
if args.word2id:
f = open(os.path.join(args.datadir, "itos.txt"), "r")
word2id_dict = dict()
for i, word in enumerate(f):
word2id_dict[word[:-1].lower()] = i
word2id = lambda x: [word2id_dict[y] for y in x] # don't convert words to ids (yet). It gets done in the glove wrapper of mtl_coherence.py
else:
word2id = lambda x: x
tokenizer = word_tokenize
if args.corpus == 'DailyDialog':
converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
converter.create_act_utt()
elif args.corpus == 'Switchboard':
converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)
converter.create_vocab()
converter.convert_dset(amounts=args.amount)
def getKeysByValue(dictOfElements, valueToFind):
listOfKeys = list()
for item in dictOfElements.items():
if item[1] == valueToFind:
listOfKeys.append(item[0])
return listOfKeys
def switchboard_da_mapping():
mapping_dict = dict({
"sd": 1,
"b": 2,
"sv": 3,
"aa": 4,
"%-": 5,
"ba": 6,
"qy": 7,
"x": 8,
"ny": 9,
"fc": 10,
"%": 11,
"qw": 12,
"nn": 13,
"bk": 14,
"h": 15,
"qy^d": 16,
"o": 17,
"bh": 18,
"^q": 19,
"bf": 20,
"na": 21,
"ny^e": 22,
"ad": 23,
"^2": 24,
"b^m": 25,
"qo": 26,
"qh": 27,
"^h": 28,
"ar": 29,
"ng": 30,
"nn^e": 31,
"br": 32,
"no": 33,
"fp": 34,
"qrr": 35,
"arp": 36,
"nd": 37,
"t3": 38,
"oo": 39,
"co": 40,
"cc": 41,
"t1": 42,
"bd": 43,
"aap": 44,
"am": 45,
"^g": 46,
"qw^d": 47,
"fa": 48,
"ft":49
})
d = defaultdict(lambda: 11)
for (k, v) in mapping_dict.items():
d[k] = v
return d
if __name__ == "__main__":
main()
| 39.977987 | 146 | 0.532801 |
163549f9139dc6999e9e0ca088584cc51b142caa | 12,432 | py | Python | tests/test_selections.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | ["MIT"] | null | null | null | tests/test_selections.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | ["MIT"] | null | null | null | tests/test_selections.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | ["MIT"] | null | null | null |
import typing
import pytest
from src import selections
| 24.617822 | 91 | 0.178571 |
1635645909c86684dc1d01665725f73b3baa25cb | 348 | py | Python | tests/utils/test_clean_accounting_column.py | richardqiu/pyjanitor | aa3150e7b8e2adc4733ea206ea9c3093e21d4025 | ["MIT"] | 2 | 2020-09-06T22:11:01.000Z | 2022-03-19T23:57:24.000Z | tests/utils/test_clean_accounting_column.py | richardqiu/pyjanitor | aa3150e7b8e2adc4733ea206ea9c3093e21d4025 | ["MIT"] | 1 | 2021-05-17T15:30:04.000Z | 2021-07-29T09:39:56.000Z | tests/utils/test_clean_accounting_column.py | richardqiu/pyjanitor | aa3150e7b8e2adc4733ea206ea9c3093e21d4025 | ["MIT"] | 1 | 2020-08-10T20:30:20.000Z | 2020-08-10T20:30:20.000Z |
import pytest
from janitor.utils import _clean_accounting_column
| 21.75 | 61 | 0.761494 |
16369f4689956af64363c246df723fffbf5f3a5e | 7,164 | py | Python | downloadParagraph.py | icadot86/bert | 42070209183dab3b5ff59b0dea1398a9538960f3 | ["Apache-2.0"] | null | null | null | downloadParagraph.py | icadot86/bert | 42070209183dab3b5ff59b0dea1398a9538960f3 | ["Apache-2.0"] | null | null | null | downloadParagraph.py | icadot86/bert | 42070209183dab3b5ff59b0dea1398a9538960f3 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
import sys, getopt
import urllib
import requests
import requests_cache
import re
import time
from bs4 import BeautifulSoup
from requests import Session
sys.path.append("/home/taejoon1kim/BERT/my_bert")
from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath
from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON
WIKI_URL = "wikipedia.org"
YOUTUBE_URL = "youtube.com/channel"
NO_RESULT = "no_result"
SEARCH_RESULT = {
    "WIKI" : {"title" : NO_RESULT, "link" : NO_RESULT},
    "FIRST" : {"title" : NO_RESULT, "link" : NO_RESULT},
    "YOUTUBE" : {"title" : NO_RESULT, "link" : NO_RESULT},
    "test_input.json" : NO_RESULT,
    "search_result.json" : NO_RESULT,
    "Q_TYPE" : NO_RESULT
}
if __name__ == "__main__":
main(sys.argv)
| 35.82 | 458 | 0.564768 |
1637357f64028a6c4c7d59c4294f21b8d56010e2 | 2,861 | py | Python | data_io.py | LucasChenLC/courseManager2 | 3f91ea72dbc0a3f3afcc88c7f0959edb6c33adf9 | ["MIT"] | null | null | null | data_io.py | LucasChenLC/courseManager2 | 3f91ea72dbc0a3f3afcc88c7f0959edb6c33adf9 | ["MIT"] | null | null | null | data_io.py | LucasChenLC/courseManager2 | 3f91ea72dbc0a3f3afcc88c7f0959edb6c33adf9 | ["MIT"] | null | null | null |
from xml.dom.minidom import Document, parse
'''
course_list = []
course_list.append(Course('Advance Math'))
course_list.append(Course('Linear Algebra'))
course_list.append(Course('Procedure Oriented Programming'))
course_list.append(Course('Object Oriented Programming'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('College Physics'))
course_list[-1].add_pre_course(course_list, ['Advance Math'])
course_list.append(Course('Digital Logic'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('Computer Organization'))
course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic'])
course_list.append(Course('Computer Architecture'))
course_list[-1].add_pre_course(course_list,
['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization'])
save_data_xml(course_list, 'resource/data/data.xml')
'''
| 37.644737 | 124 | 0.71828 |
163841fc5da39772ff971e9eff1ba89827ff6817 | 1,003 | py | Python | tests/rules/test_git_rm_local_modifications.py | jlandrum/theheck | d2c008b6ca14220504be95f887253ddd9f5e9f72 | ["MIT"] | null | null | null | tests/rules/test_git_rm_local_modifications.py | jlandrum/theheck | d2c008b6ca14220504be95f887253ddd9f5e9f72 | ["MIT"] | null | null | null | tests/rules/test_git_rm_local_modifications.py | jlandrum/theheck | d2c008b6ca14220504be95f887253ddd9f5e9f72 | ["MIT"] | null | null | null |
import pytest
from theheck.rules.git_rm_local_modifications import match, get_new_command
from theheck.types import Command
| 34.586207 | 81 | 0.67996 |
16384fd421a05dbe791af899ad03aaf8e20b6076 | 6,078 | py | Python | application.py | statisticsnorway/microdata-data-service | d477b7b75589d4c977771122558c948c040a1106 | ["Apache-2.0"] | null | null | null | application.py | statisticsnorway/microdata-data-service | d477b7b75589d4c977771122558c948c040a1106 | ["Apache-2.0"] | 7 | 2021-10-08T13:40:33.000Z | 2022-02-04T10:37:55.000Z | application.py | statisticsnorway/microdata-data-service | d477b7b75589d4c977771122558c948c040a1106 | ["Apache-2.0"] | null | null | null |
import logging
import json_logging
import tomlkit
import uvicorn
from fastapi import FastAPI, status
from fastapi.encoders import jsonable_encoder
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from starlette.responses import PlainTextResponse, Response
from data_service.api.data_api import data_router
from data_service.api.observability_api import observability_router
from data_service.config import config
from data_service.core.processor import NotFoundException
from data_service.core.filters import EmptyResultSetException
"""
Self-hosting JavaScript and CSS for docs
https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs
"""
data_service_app = FastAPI(docs_url=None, redoc_url=None)
data_service_app.mount("/static", StaticFiles(directory="static"), name="static")
data_service_app.include_router(data_router)
data_service_app.include_router(observability_router)
def _get_project_meta():
with open('./pyproject.toml') as pyproject:
file_contents = pyproject.read()
return tomlkit.parse(file_contents)['tool']['poetry']
pkg_meta = _get_project_meta()
if __name__ == "__main__":
uvicorn.run(data_service_app, host="0.0.0.0", port=8000)
| 33.766667 | 109 | 0.74054 |
16386e8f49ac83e2f9c436adbc056266858401ad | 18,764 | py | Python | graspologic/embed/n2v.py | dtborders/graspologic | 8ea9a47cabe35ad28ec9d381e525358c2027f619 | ["MIT"] | null | null | null | graspologic/embed/n2v.py | dtborders/graspologic | 8ea9a47cabe35ad28ec9d381e525358c2027f619 | ["MIT"] | null | null | null | graspologic/embed/n2v.py | dtborders/graspologic | 8ea9a47cabe35ad28ec9d381e525358c2027f619 | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import logging
import math
import time
from typing import Any, List, Optional, Tuple, Union
import networkx as nx
import numpy as np
from ..utils import remap_node_ids
def node2vec_embed(
graph: Union[nx.Graph, nx.DiGraph],
num_walks: int = 10,
walk_length: int = 80,
return_hyperparameter: float = 1.0,
inout_hyperparameter: float = 1.0,
dimensions: int = 128,
window_size: int = 10,
workers: int = 8,
iterations: int = 1,
interpolate_walk_lengths_by_node_degree: bool = True,
random_seed: Optional[int] = None,
) -> Tuple[np.array, List[Any]]:
"""
Generates a node2vec embedding from a given graph. Will follow the word2vec algorithm to create the embedding.
Parameters
----------
graph: Union[nx.Graph, nx.DiGraph]
A networkx graph or digraph. A multigraph should be turned into a non-multigraph so that the calling user
properly handles the multi-edges (i.e. aggregate weights or take last edge weight).
If the graph is unweighted, the weight of each edge will default to 1.
num_walks : int
Number of walks per source. Default is 10.
walk_length: int
Length of walk per source. Default is 80.
return_hyperparameter : float
Return hyperparameter (p). Default is 1.0
inout_hyperparameter : float
Inout hyperparameter (q). Default is 1.0
dimensions : int
Dimensionality of the word vectors. Default is 128.
window_size : int
Maximum distance between the current and predicted word within a sentence. Default is 10.
workers : int
Use these many worker threads to train the model. Default is 8.
iterations : int
Number of epochs in stochastic gradient descent (SGD)
interpolate_walk_lengths_by_node_degree : bool
Use a dynamic walk length that corresponds to each nodes
degree. If the node is in the bottom 20 percentile, default to a walk length of 1. If it is in the top 10
percentile, use ``walk_length``. If it is in the 20-80 percentiles, linearly interpolate between 1 and ``walk_length``.
This will reduce lower degree nodes from biasing your resulting embedding. If a low degree node has the same
number of walks as a high degree node (which it will if this setting is not on), then the lower degree nodes
will take a smaller breadth of random walks when compared to the high degree nodes. This will result in your
lower degree walks dominating your higher degree nodes.
random_seed : int
Seed to be used for reproducible results. Default is None and will produce a random output. Note that for a fully
deterministically-reproducible run, you must also limit to a single worker thread (`workers=1`), to eliminate
ordering jitter from OS thread scheduling. In addition the environment variable ``PYTHONHASHSEED`` must be set
to control hash randomization.
Returns
-------
Tuple[np.array, List[Any]]
A tuple containing a matrix, with each row index corresponding to the embedding for each node. The tuple
also contains a vector containing the corresponding vertex labels for each row in the matrix.
The matrix and vector are positionally correlated.
Notes
-----
The original reference implementation of node2vec comes from Aditya Grover from
https://github.com/aditya-grover/node2vec/.
Further details on the Alias Method used in this functionality can be found at
https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
References
----------
.. [1] Aditya Grover and Jure Leskovec "node2vec: Scalable Feature Learning for Networks."
Knowledge Discovery and Data Mining, 2016.
"""
_preconditions(
graph,
num_walks,
walk_length,
return_hyperparameter,
inout_hyperparameter,
dimensions,
window_size,
workers,
iterations,
interpolate_walk_lengths_by_node_degree,
)
random_state = np.random.RandomState(seed=random_seed)
node2vec_graph = _Node2VecGraph(
graph, return_hyperparameter, inout_hyperparameter, random_state
)
logging.info(
f"Starting preprocessing of transition probabilities on graph with {str(len(graph.nodes()))} nodes and "
f"{str(len(graph.edges()))} edges"
)
start = time.time()
logging.info(f"Starting at time {str(start)}")
node2vec_graph._preprocess_transition_probabilities()
logging.info(f"Simulating walks on graph at time {str(time.time())}")
walks = node2vec_graph._simulate_walks(
num_walks, walk_length, interpolate_walk_lengths_by_node_degree
)
logging.info(f"Learning embeddings at time {str(time.time())}")
model = _learn_embeddings(
walks, dimensions, window_size, workers, iterations, random_seed
)
end = time.time()
logging.info(
f"Completed. Ending time is {str(end)} Elapsed time is {str(start - end)}"
)
labels = node2vec_graph.original_graph.nodes()
remapped_labels = node2vec_graph.label_map_to_string
return (
np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]),
labels,
)
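# Usage sketch (an illustration, not part of the original file): embed a small
# built-in graph and check the output shapes. Assumes networkx and gensim 3.x
# are installed (note the `size=`/`iter=` kwargs used in _learn_embeddings below).
#   import networkx as nx
#   embedding, labels = node2vec_embed(
#       nx.karate_club_graph(), dimensions=32, workers=1, random_seed=42
#   )
#   embedding.shape  # (34, 32): one 32-dimensional row per node listed in `labels`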
def _learn_embeddings(
walks: List[Any],
dimensions: int,
window_size: int,
workers: int,
iterations: int,
random_seed: Optional[int],
):
"""
Learn embeddings by optimizing the skip-gram objective using SGD.
"""
from gensim.models import Word2Vec
walks = [list(map(str, walk)) for walk in walks]
# Documentation - https://radimrehurek.com/gensim/models/word2vec.html
model = Word2Vec(
walks,
size=dimensions,
window=window_size,
min_count=0,
sg=1, # Training algorithm: 1 for skip-gram; otherwise CBOW
workers=workers,
iter=iterations,
seed=random_seed,
)
return model
def _alias_setup(probabilities: List[float]):
"""
Compute utility lists for non-uniform sampling from discrete distributions.
Refer to
https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
for details
"""
number_of_outcomes = len(probabilities)
alias = np.zeros(number_of_outcomes)
sampled_probabilities = np.zeros(number_of_outcomes, dtype=int)
smaller = []
larger = []
for i, prob in enumerate(probabilities):
alias[i] = number_of_outcomes * prob
if alias[i] < 1.0:
smaller.append(i)
else:
larger.append(i)
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
sampled_probabilities[small] = large
alias[large] = alias[large] + alias[small] - 1.0
if alias[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
return sampled_probabilities, alias
def _alias_draw(
probabilities: List[float], alias: List[float], random_state: np.random.RandomState
):
"""
Draw sample from a non-uniform discrete distribution using alias sampling.
"""
number_of_outcomes = len(probabilities)
random_index = int(np.floor(random_state.rand() * number_of_outcomes))
if random_state.rand() < alias[random_index]:
return random_index
else:
return probabilities[random_index]
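# Usage sketch for the alias-method pair above (an illustration, not part of the
# original file). Note the naming: _alias_setup returns (alias_table, scaled_probs)
# and _alias_draw takes them in that same order.
#   rs = np.random.RandomState(0)
#   J, q = _alias_setup([0.1, 0.2, 0.7])
#   draws = [_alias_draw(J, q, rs) for _ in range(10_000)]
#   # np.bincount(draws) / 10_000 approximates [0.1, 0.2, 0.7]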
| 35.537879 | 127 | 0.627052 |
1638d587cabcf4138e331d614308389b13e85fb7 | 8,421 | py | Python | bot.py | NotBlizzard/blizzybot | 41a6f07e4d3bb97772b07aa9d6a3af935b78fb9a | ["MIT"] | null | null | null | bot.py | NotBlizzard/blizzybot | 41a6f07e4d3bb97772b07aa9d6a3af935b78fb9a | ["MIT"] | null | null | null | bot.py | NotBlizzard/blizzybot | 41a6f07e4d3bb97772b07aa9d6a3af935b78fb9a | ["MIT"] | null | null | null |
# bot.py
# TODO:
# organize imports
# organize
from websocket import create_connection
from threading import Thread
from battle import Battle
import commands
import traceback
import requests
import inspect
import json
from fractions import Fraction
import random
import time
import sys
import re
import os
from learn import Learn
| 36.141631 | 131 | 0.517278 |
16391df203c1efac2e1f8b82d3e69209d5e07f18 | 10,758 | py | Python | stRT/tdr/widgets/changes.py | Yao-14/stAnalysis | d08483ce581f5b03cfcad8be500aaa64b0293f74 | ["BSD-3-Clause"] | null | null | null | stRT/tdr/widgets/changes.py | Yao-14/stAnalysis | d08483ce581f5b03cfcad8be500aaa64b0293f74 | ["BSD-3-Clause"] | null | null | null | stRT/tdr/widgets/changes.py | Yao-14/stAnalysis | d08483ce581f5b03cfcad8be500aaa64b0293f74 | ["BSD-3-Clause"] | null | null | null |
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
import pyvista as pv
from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .ddrtree import DDRTree, cal_ncenter
from .slice import euclidean_distance, three_d_slice
####################################
# Changes along a vector direction #
####################################
#################################
# Changes along the model shape #
#################################
##############################
# Changes along the branches #
##############################
def ElPiGraph_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a principal elastic tree.
Reference: Albergante et al. (2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph.
Args:
X: DxN, data matrix list.
NumNodes: The number of nodes of the principal graph. Use a range of 10 to 100 for ElPiGraph approach.
**kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For details, please see:
https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import elpigraph
except ImportError:
raise ImportError(
"You need to install the package `elpigraph-python`."
"\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`."
)
ElPiGraph_kwargs = {
"alpha": 0.01,
"FinalEnergy": "Penalized",
"StoreGraphEvolution": True,
"GPU": False,
}
ElPiGraph_kwargs.update(kwargs)
if ElPiGraph_kwargs["GPU"] is True:
try:
import cupy
except ImportError:
raise ImportError(
"You need to install the package `cupy`."
"\nInstall cupy via `pip install cupy-cuda113`."
)
elpi_tree = elpigraph.computeElasticPrincipalTree(
X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs
)
nodes = elpi_tree[0]["NodePositions"] # ['AllNodePositions'][k]
matrix_edges_weights = elpi_tree[0]["ElasticMatrix"] # ['AllElasticMatrices'][k]
matrix_edges_weights = np.triu(matrix_edges_weights, 1)
edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose()
return nodes, edges
def SimplePPT_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a simple principal tree.
Reference: Mao et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference on Data Mining.
Args:
X: DxN, data matrix list.
NumNodes: The number of nodes of the principal graph. Use a range of 100 to 2000 for PPT approach.
**kwargs: Other parameters used in simpleppt.ppt. For details, please see:
https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import igraph
import simpleppt
except ImportError:
raise ImportError(
"You need to install the package `simpleppt` and `igraph`."
"\nInstall simpleppt via `pip install -U simpleppt`."
"\nInstall igraph via `pip install -U igraph`"
)
SimplePPT_kwargs = {
"seed": 1,
"lam": 10,
}
SimplePPT_kwargs.update(kwargs)
X = np.asarray(X)
ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs)
R = ppt_tree.R
nodes = (np.dot(X.T, R) / R.sum(axis=0)).T
B = ppt_tree.B
edges = np.array(
igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected").get_edgelist()
)
return nodes, edges
def map_points_to_branch(
model: Union[PolyData, UnstructuredGrid],
nodes: np.ndarray,
spatial_key: Optional[str] = None,
key_added: Optional[str] = "nodes",
inplace: bool = False,
**kwargs,
):
"""
Find the closest principal tree node to any point in the model through KDTree.
Args:
model: A reconstruct model.
nodes: The nodes in the principal tree.
spatial_key: The key that corresponds to the coordinates of the point in the model. If spatial_key is None,
the coordinates are model.points.
key_added: The key under which to add the nodes labels.
inplace: Updates model in-place.
kwargs: Other parameters used in scipy.spatial.KDTree.
Returns:
A model, which contains the following properties:
`model.point_data[key_added]`, the nodes labels array.
"""
from scipy.spatial import KDTree
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
nodes_kdtree = KDTree(np.asarray(nodes), **kwargs)
_, ii = nodes_kdtree.query(np.asarray(X), k=1)
model.point_data[key_added] = ii
return model if not inplace else None
def map_gene_to_branch(
model: Union[PolyData, UnstructuredGrid],
tree: PolyData,
key: Union[str, list],
nodes_key: Optional[str] = "nodes",
inplace: bool = False,
):
"""
Find the closest principal tree node to any point in the model through KDTree.
Args:
model: A reconstruct model contains the gene expression label.
tree: A three-dims principal tree model contains the nodes label.
key: The key that corresponds to the gene expression.
nodes_key: The key that corresponds to the coordinates of the nodes in the tree.
inplace: Updates tree model in-place.
Returns:
A tree, which contains the following properties:
`tree.point_data[key]`, the gene expression array.
"""
model = model.copy()
model_data = pd.DataFrame(model[nodes_key], columns=["nodes_id"])
key = [key] if isinstance(key, str) else key
for sub_key in key:
model_data[sub_key] = np.asarray(model[sub_key])
model_data = model_data.groupby(by="nodes_id").sum()
model_data["nodes_id"] = model_data.index
model_data.index = range(len(model_data.index))
tree = tree.copy() if not inplace else tree
tree_data = pd.DataFrame(tree[nodes_key], columns=["nodes_id"])
tree_data = pd.merge(tree_data, model_data, how="outer", on="nodes_id")
tree_data.fillna(value=0, inplace=True)
for sub_key in key:
tree.point_data[sub_key] = tree_data[sub_key].values
return tree if not inplace else None
def construct_tree_model(
nodes: np.ndarray,
edges: np.ndarray,
key_added: Optional[str] = "nodes",
) -> PolyData:
"""
Construct a principal tree model.
Args:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
key_added: The key under which to add the nodes labels.
Returns:
A three-dims principal tree model, which contains the following properties:
`tree_model.point_data[key_added]`, the nodes labels array.
"""
    padding = np.full(edges.shape[0], 2, dtype=int)
edges_w_padding = np.vstack((padding, edges.T)).T
tree_model = pv.PolyData(nodes, edges_w_padding)
tree_model.point_data[key_added] = np.arange(0, len(nodes), 1)
return tree_model
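# End-to-end sketch (an illustration, not part of the original file): learn a
# principal tree from a model's point cloud and map each point to its nearest node.
#   nodes, edges = ElPiGraph_tree(X=np.asarray(model.points), NumNodes=50)
#   tree = construct_tree_model(nodes, edges)
#   map_points_to_branch(model, nodes, inplace=True)  # adds model.point_data["nodes"]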
| 31.734513 | 125 | 0.635899 |
16394617ff3197501b57f08cd314d25d52093a16 | 842 | py | Python | test/test_add_group.py | nkoshkina/Python_Training3 | e917440d37883dbcaa527a0700bcfa1478a1c1ce | ["Apache-2.0"] | null | null | null | test/test_add_group.py | nkoshkina/Python_Training3 | e917440d37883dbcaa527a0700bcfa1478a1c1ce | ["Apache-2.0"] | null | null | null | test/test_add_group.py | nkoshkina/Python_Training3 | e917440d37883dbcaa527a0700bcfa1478a1c1ce | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from model.group import Group
import pytest
import allure_pytest
| 36.608696 | 93 | 0.693587 |
163995382115c67384ddb8a508342f8bf7650216 | 1,164 | py | Python | cyberbrain/frame_tree.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | ["MIT"] | null | null | null | cyberbrain/frame_tree.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | ["MIT"] | null | null | null | cyberbrain/frame_tree.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | ["MIT"] | null | null | null |
from __future__ import annotations
from .frame import Frame
from .generated.communication_pb2 import CursorPosition
| 28.390244 | 87 | 0.670103 |
163c66ec8f6a6a9ebf21f694414728829c5d030d | 7,851 | py | Python | src/otp_yubikey/models.py | moggers87/django-otp-yubikey | 2d7cf9dc91ba57b65aa62254532997cc1e6261dd | ["BSD-2-Clause"] | null | null | null | src/otp_yubikey/models.py | moggers87/django-otp-yubikey | 2d7cf9dc91ba57b65aa62254532997cc1e6261dd | ["BSD-2-Clause"] | null | null | null | src/otp_yubikey/models.py | moggers87/django-otp-yubikey | 2d7cf9dc91ba57b65aa62254532997cc1e6261dd | ["BSD-2-Clause"] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
from base64 import b64decode
from binascii import hexlify, unhexlify
from struct import pack
import six
from django.db import models
from django.utils.encoding import force_text
from django_otp.models import Device
from django_otp.util import hex_validator, random_hex
from yubiotp.client import YubiClient10, YubiClient11, YubiClient20
from yubiotp.modhex import modhex
from yubiotp.otp import decode_otp
| 27.644366 | 139 | 0.640683 |
163cbfb7a11f70465bec9d58e23cdc35d6fe4e2c | 5,976 | py | Python | v1/hsvfilter.py | gavinIRL/RHBot | 1e22ae5ca7b67ebd6a72c23d9f46d5a8eb6e99cf | ["MIT"] | null | null | null | v1/hsvfilter.py | gavinIRL/RHBot | 1e22ae5ca7b67ebd6a72c23d9f46d5a8eb6e99cf | ["MIT"] | 60 | 2021-03-29T14:29:49.000Z | 2021-05-03T06:06:19.000Z | v1/hsvfilter.py | gavinIRL/RHBot | 1e22ae5ca7b67ebd6a72c23d9f46d5a8eb6e99cf | ["MIT"] | null | null | null |
import typing
# custom data structure to hold the state of an HSV filter
# Putting this here out of the way as it's a chonk
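# The HsvFilter class itself is missing from this snippet. A minimal stand-in that
# matches the 10 positional arguments used below (an assumption, not the original
# definition): lower/upper HSV bounds plus saturation/value add/subtract tweaks.
class HsvFilter:
    def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None,
                 vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None):
        self.hMin, self.sMin, self.vMin = hMin, sMin, vMin
        self.hMax, self.sMax, self.vMax = hMax, sMax, vMax
        self.sAdd, self.sSub = sAdd, sSub
        self.vAdd, self.vSub = vAdd, vSub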
# For a given item string case it will return the optimal filter and the correct position to look
def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]:
if object_name is None:
#print("Using default filter")
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [3, 32, 1280, 794]
if object_name == "dungeon_check":
return HsvFilter(0, 73, 94, 106, 255, 255, 0, 0, 0, 0), [1083, 295, 1188, 368]
if object_name == "enemy_map_loc":
#print("Using enemy location filter")
if kwargs.get("big_map"):
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
if object_name == "player_map_loc":
if kwargs.get("big_map"):
return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0, 0), [485, 280, 900, 734]
return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0, 0), [1100, 50, 1260, 210]
if object_name == "other_player_map_loc":
if kwargs.get("big_map"):
return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70, 37), [485, 280, 900, 734]
return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70, 37), [1100, 50, 1260, 210]
if object_name == "loot_distant":
return HsvFilter(14, 116, 33, 32, 210, 59, 16, 0, 3, 0), [10, 145, 1084, 684]
if object_name == "loot_near":
return HsvFilter(0, 155, 135, 31, 240, 217, 0, 0, 0, 0), [460, 420, 855, 710]
if object_name == "prompt_press_x_pickup":
return HsvFilter(78, 110, 110, 97, 189, 255, 0, 0, 0, 0), [1080, 660, 1255, 725]
if object_name == "message_section_cleared":
return HsvFilter(0, 0, 214, 179, 65, 255, 0, 0, 0, 17), [464, 600, 855, 680]
if object_name == "message_go":
return HsvFilter(32, 114, 89, 58, 255, 255, 0, 12, 0, 0), [600, 222, 700, 275]
if object_name == "enemy_nametag":
return HsvFilter(49, 0, 139, 91, 30, 197, 0, 0, 40, 38), [10, 145, 1084, 684]
if object_name == "message_boss_encounter":
return HsvFilter(0, 92, 128, 13, 255, 255, 0, 0, 0, 0), [630, 520, 1120, 680]
if object_name == "display_boss_name_and_healthbar":
return HsvFilter(0, 92, 123, 29, 255, 255, 0, 0, 0, 20), [415, 533, 888, 700]
if object_name == "loot_chest_normal":
# This is a difficult one to separate
return HsvFilter(0, 34, 38, 28, 152, 124, 0, 0, 5, 12), [10, 145, 1084, 684]
if object_name == "map_outline":
if kwargs.get("big_map"):
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
if object_name == "gate_map_pos":
# This is a very difficult one to separate
if kwargs.get("big_map"):
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
if object_name == "prompt_move_reward_screen":
return HsvFilter(72, 98, 92, 105, 255, 225, 0, 54, 24, 38)
if object_name == "prompt_select_card":
return HsvFilter(79, 149, 140, 255, 255, 255, 0, 0, 0, 0)
if object_name == "event_chest_special_appear":
return HsvFilter(0, 124, 62, 88, 217, 246, 0, 0, 0, 0)
if object_name == "inventory_green_item":
return HsvFilter(37, 147, 0, 61, 255, 255, 0, 0, 0, 0)
if object_name == "inventory_blue_item":
return HsvFilter(79, 169, 0, 109, 246, 188, 0, 0, 0, 0)
if object_name == "inventory_yellow_item":
# This is a dangerous one as it can barely
# distinguish against green items and vice versa
return HsvFilter(19, 91, 107, 31, 168, 181, 0, 11, 32, 21)
if object_name == "inventory_purple_item":
return HsvFilter(126, 153, 0, 255, 255, 255, 0, 0, 0, 0)
if object_name == "button_repair":
return None, [208, 600]
    # These are all to be done later
if object_name == "event_card_trade":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0)
if object_name == "event_otherworld":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0)
if object_name == "loot_chest_special":
if kwargs.get("big_map"):
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
if object_name == "cards":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [735, 32, 1085, 100]
if object_name == "enemy_arrow":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
# Buttons for clicking, known positions
if object_name == "button_explore_again":
return None, []
if object_name == "button_choose_map":
return None, []
if object_name == "button_open_store":
return None, []
if object_name == "button_go_town":
return None, []
if object_name == "button_inv_equipment":
return None, []
if object_name == "button_inv_consume":
return None, []
if object_name == "button_inv_other":
return None, []
if object_name == "button_repair_confirm":
return None, []
if object_name == "inv_grid_location":
return None, [533+44*kwargs.get("col"), 277+44*kwargs.get("row")]
| 49.38843 | 97 | 0.593373 |
163d64f557e7427d0b9ba345ed63cc3b52a618e5 | 14,278 | py | Python | glue/core/tests/test_state_objects.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | ["BSD-3-Clause"] | null | null | null | glue/core/tests/test_state_objects.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | ["BSD-3-Clause"] | null | null | null | glue/core/tests/test_state_objects.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | ["BSD-3-Clause"] | null | null | null |
import numpy as np
from numpy.testing import assert_allclose
from echo import CallbackProperty, ListCallbackProperty
from glue.core import Data, DataCollection
from .test_state import clone
from ..state_objects import (State, StateAttributeLimitsHelper,
StateAttributeSingleValueHelper,
StateAttributeHistogramHelper)
EXPECTED_STR = """
a: 2
b: hello
flat: <CallbackList with 3 elements>
nested: <CallbackList with 3 elements>
"""
EXPECTED_REPR = """
<SimpleTestState
a: 2
b: hello
flat: <CallbackList with 3 elements>
nested: <CallbackList with 3 elements>
>
"""
def test_histogram_helper_common_n_bin():
data = Data(x=[-3.2, 4.3, 2.2],
y=['a', 'f', 'd'],
z=[1.1, 2.3, 1.2],
label='test_data')
state = SimpleState()
helper = StateAttributeHistogramHelper(state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin',
common_n_bin='common')
state.data = data
state.comp = data.id['x']
state.n_bin = 9
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 15
state.n_bin = 12
state.common = True
state.comp = data.id['x']
assert state.n_bin == 12
state.n_bin = 11
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 11
state.common = False
state.n_bin = 13
state.comp = data.id['x']
assert state.n_bin == 11
def test_histogram_helper_common_n_bin_active():
# Make sure that common_n_bin works as expected if True from start
data = Data(x=[-3.2, 4.3, 2.2],
y=['a', 'f', 'd'],
z=[1.1, 2.3, 1.2],
label='test_data')
state = SimpleState()
helper = StateAttributeHistogramHelper(state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin',
common_n_bin='common')
state.data = data
state.comp = data.id['x']
state.n_bin = 9
state.comp = data.id['z']
assert state.n_bin == 9
state.n_bin = 12
state.common = True
state.comp = data.id['x']
assert state.n_bin == 12
state.n_bin = 11
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 11
state.common = False
state.n_bin = 13
state.comp = data.id['x']
assert state.n_bin == 11
def test_limits_helper_initial_values():
# Regression test for a bug that occurred if the limits cache was empty
# but some attributes were set to values - in this case we don't want to
# override the existing values.
data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
state = SimpleState()
state.lower = 1
state.upper = 2
state.comp = data.id['x']
helper = StateAttributeLimitsHelper(state, attribute='comp',
lower='lower', upper='upper')
assert helper.lower == 1
assert helper.upper == 2
| 27.832359 | 96 | 0.588178 |
163d903313e3ca0e241b2c27dfd7fddcb15bbfdb | 287 | py | Python | ecommerce_api/core/cart/exceptions.py | victormartinez/ecommerceapi | a887d9e938050c15ebf52001f63d7aa7f33fa5ee | ["MIT"] | null | null | null | ecommerce_api/core/cart/exceptions.py | victormartinez/ecommerceapi | a887d9e938050c15ebf52001f63d7aa7f33fa5ee | ["MIT"] | null | null | null | ecommerce_api/core/cart/exceptions.py | victormartinez/ecommerceapi | a887d9e938050c15ebf52001f63d7aa7f33fa5ee | ["MIT"] | null | null | null |
from typing import Iterable, Optional
| 31.888889 | 68 | 0.700348 |
163dc7048c89ab3ce7a0707b33435bed5fbe6660 | 6,742 | py | Python | test/unit/test_record.py | jsoref/neo4j-python-driver | 32c130c9a975dbf8c0d345b362d096b5e1dd3e5b | ["Apache-2.0"] | null | null | null | test/unit/test_record.py | jsoref/neo4j-python-driver | 32c130c9a975dbf8c0d345b362d096b5e1dd3e5b | ["Apache-2.0"] | null | null | null | test/unit/test_record.py | jsoref/neo4j-python-driver | 32c130c9a975dbf8c0d345b362d096b5e1dd3e5b | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from neo4j.v1 import Record
| 43.496774 | 116 | 0.590923 |
163ee50e70aae9c38787e48d9c60c83c946fac91 | 9,923 | py | Python | tests/integration_tests/test_dashboards.py | hugocool/explainerdashboard | e725528c3d94a1a45b51bd9632686d0697274f54 | ["MIT"] | 1 | 2021-11-19T09:30:56.000Z | 2021-11-19T09:30:56.000Z | tests/integration_tests/test_dashboards.py | hugocool/explainerdashboard | e725528c3d94a1a45b51bd9632686d0697274f54 | ["MIT"] | null | null | null | tests/integration_tests/test_dashboards.py | hugocool/explainerdashboard | e725528c3d94a1a45b51bd9632686d0697274f54 | ["MIT"] | null | null | null |
import dash
from catboost import CatBoostClassifier, CatBoostRegressor
from xgboost import XGBClassifier, XGBRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer
from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names
from explainerdashboard.dashboards import ExplainerDashboard
| 44.698198 | 102 | 0.665121 |
163f5e0eb3de89d92ad7d61128630ed72fcd3690 | 1,079 | py | Python | code/scripts/GeneratePNG_Preview_AsIs.py | dgrechka/bengaliai-cv19 | 9ef15c5b140628337ae6efe0d76e7ec5d291dc17 | ["MIT"] | null | null | null | code/scripts/GeneratePNG_Preview_AsIs.py | dgrechka/bengaliai-cv19 | 9ef15c5b140628337ae6efe0d76e7ec5d291dc17 | ["MIT"] | null | null | null | code/scripts/GeneratePNG_Preview_AsIs.py | dgrechka/bengaliai-cv19 | 9ef15c5b140628337ae6efe0d76e7ec5d291dc17 | ["MIT"] | null | null | null |
import tensorflow as tf
import sys
import os
from glob import glob
import png
sys.path.append(os.path.join(__file__,'..','..'))
from tfDataIngest import tfDataSetParquet as tfDsParquet
inputDataDir = sys.argv[1]
outputDir = sys.argv[2]
# test app
if __name__ == "__main__":
files = glob(os.path.join(inputDataDir,"train*.parquet"))
print("Found {0} parquet files in input dir {1}".format(len(files),inputDataDir))
print("First is {0}".format(files[0]))
ds = tfDsParquet.create_parquet_dataset([files[0]])
for element in ds.as_numpy_iterator():
#print("Iterating...")
sampleId,pixels = element
sampleId = sampleId.decode("utf-8")
fileName = os.path.join(outputDir,"{0}.png".format(sampleId))
png.from_array(pixels, mode="L").save(fileName)
#print(element)
#print("sample name is {0}".format(sampleId))
#print(sampleIds.shape)
#print(pixels.shape)
# a += 1
# if a > 10:
# break
print("Done")
#print("{0} elements in the dataset".format(len(ds.)))
| 29.972222 | 85 | 0.636701 |
1640d2033b3fc61dda0183c87b5baa9f8cbed3bd | 2,763 | py | Python | widgets/datepicker_ctrl/codegen.py | RSabet/wxGlade | 8b62eb8397308e60977857455b2765727b1b940f | ["MIT"] | 225 | 2018-03-26T11:23:22.000Z | 2022-03-24T09:44:08.000Z | widgets/datepicker_ctrl/codegen.py | RSabet/wxGlade | 8b62eb8397308e60977857455b2765727b1b940f | ["MIT"] | 403 | 2018-01-03T19:47:28.000Z | 2018-03-23T17:43:39.000Z | widgets/datepicker_ctrl/codegen.py | DietmarSchwertberger/wxGlade | 8e78cdc509d458cc896d47315e19f3daa6c09213 | ["MIT"] | 47 | 2018-04-08T16:48:38.000Z | 2021-12-21T20:08:44.000Z |
"""\
Code generator functions for wxDatePickerCtrl objects
@copyright: 2002-2007 Alberto Griggio
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2016-2021 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common, compat
import wcodegen
def xrc_code_generator(obj):
xrcgen = common.code_writers['XRC']
return DatePickerCtrlXrcObject(obj)
def initialize():
klass = 'wxDatePickerCtrl'
common.class_names['EditDatePickerCtrl'] = klass
common.register('python', klass, PythonDatePickerCtrlGenerator(klass))
common.register('C++', klass, CppDatePickerCtrlGenerator(klass))
common.register('XRC', klass, xrc_code_generator)
| 33.695122 | 106 | 0.615635 |
1642121cd961a12c79b579c9fabd08e8a6ce9bc8 | 3,960 | py | Python | train.py | lck1201/simple-effective-3Dpose-baseline | 790a185b44e48a9cc619f52b6615aae729bff76b | ["MIT"] | 20 | 2019-03-29T12:20:10.000Z | 2021-02-07T08:32:18.000Z | train.py | motokimura/simple-effective-3Dpose-baseline | 790a185b44e48a9cc619f52b6615aae729bff76b | ["MIT"] | 10 | 2019-04-03T15:25:00.000Z | 2021-03-26T16:23:33.000Z | train.py | motokimura/simple-effective-3Dpose-baseline | 790a185b44e48a9cc619f52b6615aae729bff76b | ["MIT"] | 7 | 2019-06-02T13:25:27.000Z | 2020-12-17T06:07:17.000Z |
import pprint
import mxnet as mx
from mxnet import gluon
from mxnet import init
from lib.core.get_optimizer import *
from lib.core.metric import MPJPEMetric
from lib.core.loss import MeanSquareLoss
from lib.core.loader import JointsDataIter
from lib.network import get_net
from lib.net_module import *
from lib.utils import *
from lib.dataset.hm36 import hm36
from config import config, gen_config, update_config_from_args, s_args
config = update_config_from_args(config, s_args)
if __name__ == '__main__':
main()
| 41.684211 | 124 | 0.646212 |
1643d3915575e537c0423b05a3b3b1e3b7eb7865 | 6,789 | py | Python | FastLinear/generate_memory_bank.py | WangFeng18/dino | 1a4e49bd0e99d7e205338b14994a1d57c3084cfe | ["Apache-2.0"] | null | null | null | FastLinear/generate_memory_bank.py | WangFeng18/dino | 1a4e49bd0e99d7e205338b14994a1d57c3084cfe | ["Apache-2.0"] | null | null | null | FastLinear/generate_memory_bank.py | WangFeng18/dino | 1a4e49bd0e99d7e205338b14994a1d57c3084cfe | ["Apache-2.0"] | null | null | null |
import os
from tqdm import tqdm
import torch.backends.cudnn as cudnn
import torch
from datasets import ImageNetInstance, ImageNetInstanceLMDB
from torchvision import transforms
import argparse
from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network
from torch.utils.data import DataLoader
from PIL import ImageFile, Image
import torch.distributed as dist
from lars import *
ImageFile.LOAD_TRUNCATED_IMAGES = True
import warnings
warnings.filterwarnings('ignore')
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
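# Usage sketch (an illustration, not part of the original file), to be run inside
# an initialized torch.distributed process group:
#   local = torch.arange(4, device="cuda") + 4 * dist.get_rank()
#   gathered = concat_all_gather(local)  # shape: (4 * world_size,)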
if __name__ == '__main__':
main()
| 44.664474 | 174 | 0.705259 |
16447f2400735bc0538f6c77d41578715bdd08b9 | 2,489 | py | Python | tests/utils/test_mercator.py | anuragtr/fabric8-analytics-rudra | 13fb15539d195fcb89ced02b205d034ec0c18e00 | ["Apache-2.0"] | 1 | 2019-05-13T09:31:19.000Z | 2019-05-13T09:31:19.000Z | tests/utils/test_mercator.py | anuragtr/fabric8-analytics-rudra | 13fb15539d195fcb89ced02b205d034ec0c18e00 | ["Apache-2.0"] | null | null | null | tests/utils/test_mercator.py | anuragtr/fabric8-analytics-rudra | 13fb15539d195fcb89ced02b205d034ec0c18e00 | ["Apache-2.0"] | null | null | null |
import pytest
from rudra.utils.mercator import SimpleMercator
| 34.09589 | 82 | 0.526718 |
16449c2c8a80a3f0f14b7a2a74915dc78441651d | 139 | py | Python | tests/checks/run_performance_tests.py | stjordanis/mljar-supervised | 8c3f9d1ed527dfcfdaef91cf82e2779c5832e294 | ["MIT"] | 1,882 | 2018-11-05T13:20:54.000Z | 2022-03-31T14:31:46.000Z | tests/checks/run_performance_tests.py | stjordanis/mljar-supervised | 8c3f9d1ed527dfcfdaef91cf82e2779c5832e294 | ["MIT"] | 499 | 2019-03-14T09:57:51.000Z | 2022-03-30T06:00:43.000Z | tests/checks/run_performance_tests.py | stjordanis/mljar-supervised | 8c3f9d1ed527dfcfdaef91cf82e2779c5832e294 | ["MIT"] | 277 | 2019-02-08T21:32:13.000Z | 2022-03-29T03:26:05.000Z |
import os
import sys
import unittest
from tests.tests_bin_class.test_performance import *
if __name__ == "__main__":
unittest.main()
| 15.444444 | 52 | 0.769784 |
1645daef0bb42b38a2691d6bb4f86fefa0af94a5 | 283 | py | Python | task/CheckAllocations.py | wookiee2187/vc3-login-pod | 3c0f5490c094bf0b4587a743efac68d722ea5ee2 | ["MIT"] | 1 | 2019-07-17T19:01:34.000Z | 2019-07-17T19:01:34.000Z | task/CheckAllocations.py | wookiee2187/vc3-login-pod | 3c0f5490c094bf0b4587a743efac68d722ea5ee2 | ["MIT"] | null | null | null | task/CheckAllocations.py | wookiee2187/vc3-login-pod | 3c0f5490c094bf0b4587a743efac68d722ea5ee2 | ["MIT"] | null | null | null |
#!/usr/bin/env python
from vc3master.task import VC3Task
| 16.647059 | 58 | 0.590106 |
16477f8a306c6c85422ce092acee78844c0cd611 | 4,037 | py | Python | django_airbrake/utils/client.py | Captricity/airbrake-django | 2ea126653883732a13f1a80c9e567b7076601620 | ["BSD-3-Clause"] | null | null | null | django_airbrake/utils/client.py | Captricity/airbrake-django | 2ea126653883732a13f1a80c9e567b7076601620 | ["BSD-3-Clause"] | 2 | 2016-07-12T15:44:02.000Z | 2016-08-19T20:31:49.000Z | django_airbrake/utils/client.py | Captricity/airbrake-django | 2ea126653883732a13f1a80c9e567b7076601620 | ["BSD-3-Clause"] | null | null | null |
import sys
import traceback
from django.conf import settings
from django.urls import resolve
from lxml import etree
from six.moves.urllib.request import urlopen, Request
| 34.211864 | 107 | 0.566757 |
1648b2044844b3d9b645771b179a716a797264e9 | 599 | py | Python | src/spaceone/inventory/connector/snapshot.py | jean1042/plugin-azure-cloud-services | 3a75a516c9a4d1e8a4962988934ead3fd40e8494 | ["Apache-2.0"] | 1 | 2020-12-08T11:59:54.000Z | 2020-12-08T11:59:54.000Z | src/spaceone/inventory/connector/snapshot.py | jean1042/plugin-azure-cloud-services | 3a75a516c9a4d1e8a4962988934ead3fd40e8494 | ["Apache-2.0"] | 4 | 2021-01-26T10:43:37.000Z | 2021-12-17T10:13:33.000Z | src/spaceone/inventory/connector/snapshot.py | jean1042/plugin-azure-cloud-services | 3a75a516c9a4d1e8a4962988934ead3fd40e8494 | ["Apache-2.0"] | 2 | 2021-01-13T03:24:05.000Z | 2021-01-19T07:25:45.000Z |
import logging
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error import *
from spaceone.inventory.error.custom import *
__all__ = ['SnapshotConnector']
_LOGGER = logging.getLogger(__name__)
| 28.52381 | 69 | 0.721202 |
1649638736a414c6fde2874636d2e6f9fe9164e4 | 2,912 | py | Python | docs/tutorial/context/app.py | theasylum/wired | 6b6a3e83702b18ebb41ca1f94e957bdf7e44986d | ["MIT"] | 12 | 2018-07-22T15:40:35.000Z | 2020-12-27T21:39:18.000Z | docs/tutorial/context/app.py | theasylum/wired | 6b6a3e83702b18ebb41ca1f94e957bdf7e44986d | ["MIT"] | 36 | 2019-03-23T13:47:25.000Z | 2020-11-28T18:08:14.000Z | docs/tutorial/context/app.py | theasylum/wired | 6b6a3e83702b18ebb41ca1f94e957bdf7e44986d | ["MIT"] | 6 | 2019-03-23T20:08:57.000Z | 2021-06-03T16:52:06.000Z |
"""
A customer walks into a store. Do the steps to interact with them:
- Get *a* (not *the*) greeter
- Interact with them
Simple wired application:
- Settings that say what punctuation to use
- Registry
- Two factories that say hello, one for the FrenchCustomer context
- A default Customer and FrenchCustomer
"""
from dataclasses import dataclass
from wired import ServiceRegistry
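# The dataclasses and greeter factories referenced below are missing from this
# snippet. A minimal reconstruction consistent with the assertions in main()
# (an assumption, not the original code; the original likely built the factories
# inside setup() as closures over settings.punctuation):

@dataclass
class Customer:
    name: str

@dataclass
class FrenchCustomer(Customer):
    pass

@dataclass
class Settings:
    punctuation: str

@dataclass
class Greeter:
    punctuation: str
    greeting: str = 'Hello'

    def __call__(self, customer: Customer) -> str:
        return f'{self.greeting} {customer.name} {self.punctuation}'

@dataclass
class FrenchGreeter(Greeter):
    greeting: str = 'Bonjour'

# Hypothetical module-level default so the module-level factories below work:
_PUNCTUATION = '!!'

def default_greeter_factory(container) -> Greeter:
    return Greeter(punctuation=_PUNCTUATION)

def french_greeter_factory(container) -> Greeter:
    return FrenchGreeter(punctuation=_PUNCTUATION)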
def setup(settings: Settings) -> ServiceRegistry:
# Make the registry
registry = ServiceRegistry()
# Make the greeter factories, using punctuation from settings
punctuation = settings.punctuation
# First the default greeter, no context
# Register it as a factory using its class for the "key"
registry.register_factory(default_greeter_factory, Greeter)
# Now the French greeter, using context of FrenchCustomer
# Register it as a factory using its class for the "key", but
# this time register with a "context"
registry.register_factory(
french_greeter_factory, Greeter, context=FrenchCustomer
)
return registry
def greet_customer(registry: ServiceRegistry, customer: Customer) -> str:
# A customer comes in, handle the steps in the greeting
# as a container.
container = registry.create_container()
# Get a Greeter using the customer as context. Use the Customer when
# generating the greeting.
greeter: Greeter = container.get(Greeter, context=customer)
greeting = greeter(customer)
return greeting
def main():
settings = Settings(punctuation='!!')
registry = setup(settings)
# *** Default Customer
# Make a Customer, pass into the "greet_customer" interaction,
# then test the result.
customer = Customer(name='Mary')
assert 'Hello Mary !!' == greet_customer(registry, customer)
# *** French Customer
# Make a FrenchCustomer, pass into the "greet_customer" interaction,
# then test the result.
french_customer = FrenchCustomer(name='Henri')
assert 'Bonjour Henri !!' == greet_customer(registry, french_customer)
| 25.54386 | 74 | 0.712569 |
1649bff1d5c282f752cad12fddde82da77d3b6ea | 3,133 | py | Python | feast/DetectionModules/ldar_program.py | GeoSensorWebLab/FEAST_PtE | 63ff8b7925873d756666f3c0c4b9f0f84abd5eb2 | ["MIT"] | 10 | 2020-03-26T20:12:19.000Z | 2022-02-14T22:47:01.000Z | feast/DetectionModules/ldar_program.py | GeoSensorWebLab/FEAST_PtE | 63ff8b7925873d756666f3c0c4b9f0f84abd5eb2 | ["MIT"] | 1 | 2021-07-14T21:14:12.000Z | 2021-07-14T21:14:12.000Z | feast/DetectionModules/ldar_program.py | GeoSensorWebLab/FEAST_PtE | 63ff8b7925873d756666f3c0c4b9f0f84abd5eb2 | ["MIT"] | 9 | 2020-03-27T22:57:31.000Z | 2021-09-29T17:29:35.000Z |
"""
This module defines the LDARProgram class.
"""
import numpy as np
import copy
from .repair import Repair
from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous
| 48.2 | 120 | 0.679221 |
164cf23737de25e42e24acaa15cc12f759dc3323 | 12,783 | py | Python | src/CycleGAN.py | sjmoran/SIDGAN | 169bd69974bbb7f5760c28a00c231a856017e51c | ["0BSD"] | 25 | 2020-09-17T06:29:41.000Z | 2022-03-22T06:38:37.000Z | src/CycleGAN.py | sjmoran/SIDGAN | 169bd69974bbb7f5760c28a00c231a856017e51c | ["0BSD"] | 2 | 2021-05-30T09:00:46.000Z | 2021-11-24T08:34:26.000Z | src/CycleGAN.py | sjmoran/SIDGAN | 169bd69974bbb7f5760c28a00c231a856017e51c | ["0BSD"] | 5 | 2020-10-16T00:44:10.000Z | 2021-11-04T15:59:55.000Z |
#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 0-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the BSD 0-Clause License for more details.
from keras.optimizers import Adam
from models.ICCV_architectures import *
from models.unet import *
from keras.engine.topology import Network
import sys
import tensorflow as tf
from utilities.data_loader import *
| 46.824176 | 181 | 0.586013 |
164e763a74e067d7e8c03c1d5ec3635ec5b33a02 | 876 | py | Python | application/fastapi/main.py | edson-dev/neoway | f792e16c0f627e8b94b54f001e87e076f36311ab | ["MIT"] | null | null | null | application/fastapi/main.py | edson-dev/neoway | f792e16c0f627e8b94b54f001e87e076f36311ab | ["MIT"] | null | null | null | application/fastapi/main.py | edson-dev/neoway | f792e16c0f627e8b94b54f001e87e076f36311ab | ["MIT"] | null | null | null |
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from routes import doc, api
from fastapi.templating import Jinja2Templates
from starlette.requests import Request
# configure static files and Jinja2 templates
app = FastAPI(
    title="Technical Case",
    description="Endpoint for uploading spreadsheets into a relational Postgres database.",
    version="0.0.1",
    static_directory="static"
)
app.mount("/static", StaticFiles(directory="static"), name="static")
# import factory builders and initialize them
doc.init_app(app)
api.init_app(app, "/api")
templates = Jinja2Templates(directory="templates")
# views
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=8080)
| 28.258065 | 90 | 0.745434 |
164f24393208739c6bb0a99eb1b2e8ed9fcd90d3 | 58,056 | py | Python | civis/io/_tables.py | jsfalk/civis-python | 39b6498b2d67d838d720d9631d74f3d3d43f7c1a | ["BSD-3-Clause"] | null | null | null | civis/io/_tables.py | jsfalk/civis-python | 39b6498b2d67d838d720d9631d74f3d3d43f7c1a | ["BSD-3-Clause"] | null | null | null | civis/io/_tables.py | jsfalk/civis-python | 39b6498b2d67d838d720d9631d74f3d3d43f7c1a | ["BSD-3-Clause"] | null | null | null |
import json
import concurrent.futures
import csv
from os import path
import io
import logging
import os
import shutil
from tempfile import TemporaryDirectory
import warnings
import zlib
import gzip
import zipfile
from civis import APIClient
from civis._utils import maybe_get_random_name
from civis.base import EmptyResultError, CivisImportError
from civis.futures import CivisFuture
from civis.io import civis_to_file, file_to_civis, query_civis
from civis.utils import run_job
from civis._deprecation import deprecate_param
import requests
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
try:
import pandas as pd
NO_PANDAS = False
except ImportError:
NO_PANDAS = True
CHUNK_SIZE = 32 * 1024
log = logging.getLogger(__name__)
__all__ = ['read_civis', 'read_civis_sql', 'civis_to_csv',
'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis',
'civis_file_to_table', 'split_schema_tablename',
'export_to_civis_file']
DELIMITERS = {
',': 'comma',
'\t': 'tab',
'|': 'pipe',
}
def export_to_civis_file(sql, database, job_name=None, client=None,
credential_id=None, polling_interval=None,
hidden=True, csv_settings=None):
"""Store results of a query to a Civis file
Parameters
----------
sql : str
The SQL select string to be executed.
database : str or int
Execute the query against this database. Can be the database name
or ID.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
csv_settings : dict, optional
A dictionary of csv_settings to pass to
:func:`civis.APIClient.scripts.post_sql`.
Returns
-------
fut : :class:`~civis.futures.CivisFuture`
A future which returns the response from
:func:`civis.APIClient.scripts.get_sql_runs` after the sql query
has completed and the result has been stored as a Civis file.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> fut = export_to_civis_file(sql, "my_database")
>>> file_id = fut.result()['output'][0]["file_id"]
See Also
--------
civis.io.read_civis : Read directly into memory without SQL.
civis.io.read_civis_sql : Read results of a SQL query into memory.
civis.io.civis_to_csv : Write directly to a CSV file.
civis.io.civis_file_to_table : Upload a Civis file to a Civis table
"""
client = client or APIClient()
script_id, run_id = _sql_script(client=client,
sql=sql,
database=database,
job_name=job_name,
credential_id=credential_id,
csv_settings=csv_settings,
hidden=hidden)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
return fut
def _sql_script(client, sql, database, job_name, credential_id, hidden=False,
csv_settings=None):
job_name = maybe_get_random_name(job_name)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
csv_settings = csv_settings or {}
export_job = client.scripts.post_sql(job_name,
remote_host_id=db_id,
credential_id=credential_id,
sql=sql,
hidden=hidden,
csv_settings=csv_settings)
run_job = client.scripts.post_sql_runs(export_job.id)
log.debug('Started run %d of SQL script %d', run_job.id, export_job.id)
return export_job.id, run_job.id
def _get_sql_select(table, columns=None):
if columns and not isinstance(columns, (list, tuple)):
raise TypeError("columns must be a list, tuple or None")
select = ", ".join(columns) if columns is not None else "*"
sql = "select {} from {}".format(select, table)
return sql
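# Illustrative behaviour (examples assumed, not taken from the library docs):
# _get_sql_select("myschema.mytable", ["a", "b"])  ->  "select a, b from myschema.mytable"
# _get_sql_select("myschema.mytable")              ->  "select * from myschema.mytable"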
def _get_headers(client, sql, database, credential_id, polling_interval=None):
headers = None
try:
# use 'begin read only;' to ensure we can't change state
sql = 'begin read only; select * from ({}) limit 1'.format(sql)
fut = query_civis(sql, database, client=client,
credential_id=credential_id,
polling_interval=polling_interval)
headers = fut.result()['result_columns']
except Exception as exc: # NOQA
log.debug("Failed to retrieve headers due to %s", str(exc))
return headers
def _decompress_stream(response, buf, write_bytes=True):
# use response.raw for a more consistent approach
# if content-encoding is specified in the headers
# then response.iter_content will decompress the stream
# however, our use of content-encoding is inconsistent
chunk = response.raw.read(CHUNK_SIZE)
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
while chunk or d.unused_data:
if d.unused_data:
to_decompress = d.unused_data + chunk
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
else:
to_decompress = d.unconsumed_tail + chunk
if write_bytes:
buf.write(d.decompress(to_decompress))
else:
buf.write(d.decompress(to_decompress).decode('utf-8'))
chunk = response.raw.read(CHUNK_SIZE)
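# The MAX_WBITS | 32 window flag above lets zlib auto-detect zlib vs. gzip
# headers, which is why one loop serves both encodings. A self-contained
# sketch of that flag (illustrative only, not part of this module's API):
# d = zlib.decompressobj(zlib.MAX_WBITS | 32)
# assert d.decompress(gzip.compress(b"payload")) == b"payload"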
def split_schema_tablename(table):
"""Split a Redshift 'schema.tablename' string
Remember that special characters (such as '.') can only
be included in a schema or table name if delimited by double-quotes.
Parameters
----------
table: str
Either a Redshift schema and table name combined
with a ".", or else a single table name.
Returns
-------
schema, tablename
A 2-tuple of strings. The ``schema`` may be None if the input
is only a table name, but the ``tablename`` will always be filled.
Raises
------
ValueError
If the input ``table`` is not separable into a schema and
table name.
"""
reader = csv.reader(StringIO(str(table)),
delimiter=".",
doublequote=True,
quotechar='"')
schema_name_tup = next(reader)
if len(schema_name_tup) == 1:
schema_name_tup = (None, schema_name_tup[0])
if len(schema_name_tup) != 2:
raise ValueError("Cannot parse schema and table. "
"Does '{}' follow the pattern 'schema.table'?"
.format(table))
return tuple(schema_name_tup)
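# Illustrative examples (assumed, doctest-style):
# >>> split_schema_tablename('myschema.mytable')
# ('myschema', 'mytable')
# >>> split_schema_tablename('"my.schema".mytable')
# ('my.schema', 'mytable')
# >>> split_schema_tablename('mytable')
# (None, 'mytable')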
def _replace_null_column_names(column_list):
"""Replace null names in columns from file cleaning with an appropriately
blank column name.
Parameters
----------
column_list: list[dict]
the list of columns from file cleaning.
Returns
--------
column_list: list[dict]
"""
new_cols = []
for i, col in enumerate(column_list):
# Avoid mutating input arguments
new_col = dict(col)
if new_col.get('name') is None:
new_col['name'] = 'column_{}'.format(i)
new_cols.append(new_col)
return new_cols
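# Sketch of the renaming on a hypothetical input:
# _replace_null_column_names([{'name': None, 'sql_type': 'INT'}, {'name': 'id'}])
# -> [{'name': 'column_0', 'sql_type': 'INT'}, {'name': 'id'}]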
def _check_all_detected_info(detected_info, headers, delimiter,
compression, output_file_id):
"""Check a single round of cleaning results as compared to provided values.
Parameters
----------
detected_info: Dict[str, Any]
The detected info of the file as returned by the Civis API.
headers: bool
The provided value for whether or not the file contains errors.
delimiter: str
The provided value for the file delimiter.
compression: str
The provided value for the file compression.
output_file_id: int
The cleaned file's Civis ID. Used for debugging.
Raises
------
CivisImportError
If the values detected on the file do not match their expected
attributes.
"""
if headers != detected_info['includeHeader']:
raise CivisImportError('Mismatch between detected headers - '
'please ensure all imported files either '
'have a header or do not.')
if delimiter != detected_info['columnDelimiter']:
raise CivisImportError('Provided delimiter "{}" does not match '
'detected delimiter for {}: "{}"'.format(
delimiter,
output_file_id,
detected_info["columnDelimiter"])
)
if compression != detected_info['compression']:
raise CivisImportError('Mismatch between detected and provided '
'compressions - provided compression was {}'
' but detected compression {}. Please '
'ensure all imported files have the same '
'compression.'.format(
compression,
detected_info['compression'])
)
def _check_column_types(table_columns, file_columns, output_obj_id):
"""Check that base column types match those current defined for the table.
Parameters
----------
table_columns: List[Dict[str, str]]
The columns for the table to be created.
file_columns: List[Dict[str, str]]
The columns detected by the Civis API for the file.
output_obj_id: int
The file ID under consideration; used for error messaging.
Raises
------
CivisImportError
If the table columns and the file columns have a type mismatch, or
differ in count.
"""
if len(table_columns) != len(file_columns):
raise CivisImportError('All files should have the same number of '
'columns. Expected {} columns but file {} '
'has {} columns'.format(
len(table_columns),
output_obj_id,
len(file_columns))
)
error_msgs = []
for idx, (tcol, fcol) in enumerate(zip(table_columns, file_columns)):
# for the purposes of type checking, we care only that the types
# share a base type (e.g. INT, VARCHAR, DECIMAl) rather than that
# they have the same precision and length
# (e.g VARCHAR(42), DECIMAL(8, 10))
tcol_base_type = tcol['sql_type'].split('(', 1)[0]
fcol_base_type = fcol['sql_type'].split('(', 1)[0]
if tcol_base_type != fcol_base_type:
error_msgs.append(
'Column {}: File base type was {}, but expected {}'.format(
idx,
fcol_base_type,
tcol_base_type
)
)
if error_msgs:
raise CivisImportError(
'Encountered the following errors for file {}:\n\t{}'.format(
output_obj_id,
'\n\t'.join(error_msgs)
)
)
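# For instance (hypothetical columns): VARCHAR(42) vs. VARCHAR(256) share the
# base type VARCHAR and pass, whereas INT vs. DECIMAL(8, 10) differ at the
# base type and would be collected into the raised CivisImportError.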
| 40.798313 | 79 | 0.617111 |
164f6ae0c583900eea5f44762f6006a785208240
| 2,218 |
py
|
Python
|
tests/unit/small_text/integrations/pytorch/test_strategies.py
|
chschroeder/small-text
|
ef28e91ba0c94fe938dde4f16253aa8695ea13b7
|
[
"MIT"
] | 218 |
2021-05-26T16:38:53.000Z
|
2022-03-30T09:48:54.000Z
|
tests/unit/small_text/integrations/pytorch/test_strategies.py
|
chschroeder/small-text
|
ef28e91ba0c94fe938dde4f16253aa8695ea13b7
|
[
"MIT"
] | 9 |
2021-10-16T23:23:02.000Z
|
2022-02-22T15:23:11.000Z
|
tests/unit/small_text/integrations/pytorch/test_strategies.py
|
chschroeder/small-text
|
ef28e91ba0c94fe938dde4f16253aa8695ea13b7
|
[
"MIT"
] | 21 |
2021-06-24T11:19:44.000Z
|
2022-03-12T16:29:53.000Z
|
import unittest
import pytest
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
from small_text.integrations.pytorch.query_strategies import (
BADGE,
ExpectedGradientLength,
ExpectedGradientLengthMaxWord)
except PytorchNotFoundError:
pass
| 30.383562 | 94 | 0.712353 |
164ff194ddd6475fcc83a8af8f5b4d32701c55ea
| 886 |
py
|
Python
|
pymterm/colour/tango.py
|
stonewell/pymterm
|
af36656d5f7fb008533178d14b00d83d72ba00cf
|
[
"MIT"
] | 102 |
2016-07-21T06:39:02.000Z
|
2022-03-09T19:34:03.000Z
|
pymterm/colour/tango.py
|
stonewell/pymterm
|
af36656d5f7fb008533178d14b00d83d72ba00cf
|
[
"MIT"
] | 2 |
2017-01-11T13:43:34.000Z
|
2020-01-19T12:06:47.000Z
|
pymterm/colour/tango.py
|
stonewell/pymterm
|
af36656d5f7fb008533178d14b00d83d72ba00cf
|
[
"MIT"
] | 4 |
2020-03-22T04:08:35.000Z
|
2021-06-27T23:38:02.000Z
|
TANGO_PALLETE = [
'2e2e34343636',
'cccc00000000',
'4e4e9a9a0606',
'c4c4a0a00000',
'34346565a4a4',
'757550507b7b',
'060698989a9a',
'd3d3d7d7cfcf',
'555557575353',
'efef29292929',
'8a8ae2e23434',
'fcfce9e94f4f',
'72729f9fcfcf',
'adad7f7fa8a8',
'3434e2e2e2e2',
'eeeeeeeeecec',
]
| 24.611111 | 69 | 0.613995 |
16506683fe35155169d6fbcd3b4087bff7394386
| 22,681 |
py
|
Python
|
user_manager/oauth/oauth2.py
|
voegtlel/auth-manager-backend
|
20d40de0abc9deeb3fcddd892ffe2e635301917a
|
[
"MIT"
] | null | null | null |
user_manager/oauth/oauth2.py
|
voegtlel/auth-manager-backend
|
20d40de0abc9deeb3fcddd892ffe2e635301917a
|
[
"MIT"
] | null | null | null |
user_manager/oauth/oauth2.py
|
voegtlel/auth-manager-backend
|
20d40de0abc9deeb3fcddd892ffe2e635301917a
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from enum import Enum
from typing import List, Optional, Tuple, Dict, Any, Union
import time
from authlib.common.security import generate_token
from authlib.consts import default_json_headers
from authlib.oauth2 import (
OAuth2Request,
AuthorizationServer as _AuthorizationServer,
ResourceProtector as _ResourceProtector,
OAuth2Error,
HttpRequest,
)
from authlib.oauth2.rfc6749 import InvalidClientError
from authlib.oauth2.rfc6749.grants import (
AuthorizationCodeGrant as _AuthorizationCodeGrant,
RefreshTokenGrant as _RefreshTokenGrant,
BaseGrant,
)
from authlib.oauth2.rfc6749.grants import (
ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant,
)
from authlib.oauth2.rfc6749.util import scope_to_list
from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \
InsufficientScopeError
from authlib.oauth2.rfc8414 import AuthorizationServerMetadata
from authlib.oidc.core import UserInfo
from authlib.oidc.core.grants import (
OpenIDCode as _OpenIDCode,
OpenIDImplicitGrant as _OpenIDImplicitGrant,
OpenIDHybridGrant as _OpenIDHybridGrant,
)
from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token
from fastapi import HTTPException
from starlette.concurrency import run_in_threadpool
from starlette.responses import Response, JSONResponse
from user_manager.common.config import config
from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \
UserPropertyType
from user_manager.common.mongo import authorization_code_collection, token_collection, \
client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \
async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema
from . import oauth2_key
from .user_helper import UserWithRoles
USERS_SCOPE = '*users'
def save_authorization_code(code: str, request: TypedRequest):
nonce = request.data.get('nonce')
item = DbAuthorizationCode(
code=code,
client_id=request.client.id,
redirect_uri=request.redirect_uri,
scope=request.scope,
user_id=request.user.user.id,
nonce=nonce,
auth_time=int(time.time()),
expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code),
)
authorization_code_collection.insert_one(item.document())
return item
def save_token(token: Dict[str, Any], request: TypedRequest):
if request.user:
user_id = request.user.user.id
else:
user_id = None
now = int(time.time())
token_data = DbToken.validate_document({
'client_id': request.client.id,
'user_id': user_id,
'issued_at': now,
'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)),
'scope': request.scope,
'auth_time': request.credential.get_auth_time(),
**token
})
token_collection.insert_one(token_data.document())
return token_data
def query_client(client_id: str):
client_data = client_collection.find_one({'_id': client_id})
if client_data is None:
return None
return DbClient.validate_document(client_data)
authorization = AuthorizationServer(
query_client,
save_token,
BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator),
)
# support all openid grants
authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(OpenIDImplicitGrant)
authorization.register_grant(OpenIDHybridGrant)
authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(ResourceOwnerPasswordCredentialsGrant)
resource_protector = ResourceProtector()
resource_protector.register_token_validator(BearerTokenValidator())
user_introspection = UserIntrospection()
token_revocation = RevocationEndpoint()
request_origin_verifier = RequestOriginVerifier()
other_user_inspection = OtherUserInspection()
other_users_inspection = OtherUsersInspection()
| 40.501786 | 121 | 0.680217 |
16517f3c2ccf47bb7eb0759cee7e8d2e4ec1a86f
| 3,553 |
py
|
Python
|
src/adsb/sbs/server.py
|
claws/adsb
|
4a7d35880dece6baaf24370fab445e2571fc19e9
|
[
"MIT"
] | 7 |
2018-07-11T00:50:47.000Z
|
2021-09-29T10:36:44.000Z
|
src/adsb/sbs/server.py
|
claws/adsb
|
4a7d35880dece6baaf24370fab445e2571fc19e9
|
[
"MIT"
] | 3 |
2020-06-13T23:27:42.000Z
|
2020-07-22T03:06:16.000Z
|
src/adsb/sbs/server.py
|
claws/adsb
|
4a7d35880dece6baaf24370fab445e2571fc19e9
|
[
"MIT"
] | 3 |
2020-01-08T19:05:42.000Z
|
2022-02-11T02:22:23.000Z
|
import asyncio
import datetime
import logging
import socket
from . import protocol
from typing import Tuple
from asyncio import AbstractEventLoop
logger = logging.getLogger(__name__)
def deregister_protocol(self, peer: Tuple[str, int]) -> None:
""" De-register a protocol instance from the server.
This peer will no longer receive messages.
:param peer: Tuple of (host:str, port:int).
"""
del self.protocols[peer]
def send_message(self, msg: bytes, peer: Tuple[str, int] = None) -> None:
""" Send a message.
:param msg: A bytes object representing the SBS format message to
send to peers. The message is assumed to include the end of
message delimiter.
:param peer: A specific peer to send the message to. Peer is a
Tuple of (host:str, port:int). If not specified then the message
is broadcast to all peers.
"""
if self.protocols:
if peer:
prot = self.protocols.get(peer)
if prot:
prot.send_message(msg)
else:
                    raise Exception(
                        f"Server can't send msg to non-existent peer: {peer}"
                    )
else:
# broadcast message to all peers
for peer, prot in self.protocols.items():
prot.send_message(msg)
else:
raise Exception("Server can't send msg, no peers available")
| 32.59633 | 77 | 0.565156 |
1652c769892c847b99d4a49f23694f814ea670c4
| 2,803 |
py
|
Python
|
src/robusta/core/model/events.py
|
kandahk/robusta
|
61a2001cb1c4e90e8a74b810463ec99e6cb80787
|
[
"MIT"
] | null | null | null |
src/robusta/core/model/events.py
|
kandahk/robusta
|
61a2001cb1c4e90e8a74b810463ec99e6cb80787
|
[
"MIT"
] | null | null | null |
src/robusta/core/model/events.py
|
kandahk/robusta
|
61a2001cb1c4e90e8a74b810463ec99e6cb80787
|
[
"MIT"
] | null | null | null |
import logging
import uuid
from enum import Enum
from typing import List, Optional, Dict, Any
from dataclasses import dataclass, field
from pydantic import BaseModel
from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler
from ..reporting.base import Finding, BaseBlock
# Right now:
# 1. this is a dataclass but we need to make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/
# 2. this can't be a pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557)
# once the pydantic PR that addresses those issues is merged, this should be a pydantic class
# (note that we need to integrate with dataclasses because of hikaru)
| 35.481013 | 135 | 0.708883 |
1653cd2fffd32e2ad6ea59e14f67f33d48afc170
| 560 |
py
|
Python
|
examples/django_mongoengine/bike/models.py
|
pfrantz/graphene-mongo
|
f7d4f3e194ec41793e6da547934c34e11fd9ef51
|
[
"MIT"
] | 260 |
2018-02-03T01:00:42.000Z
|
2022-02-18T12:42:01.000Z
|
examples/django_mongoengine/bike/models.py
|
pfrantz/graphene-mongo
|
f7d4f3e194ec41793e6da547934c34e11fd9ef51
|
[
"MIT"
] | 159 |
2018-02-09T07:35:03.000Z
|
2022-03-20T03:43:23.000Z
|
examples/django_mongoengine/bike/models.py
|
pfrantz/graphene-mongo
|
f7d4f3e194ec41793e6da547934c34e11fd9ef51
|
[
"MIT"
] | 124 |
2018-02-04T20:19:01.000Z
|
2022-03-25T21:40:41.000Z
|
from mongoengine import Document
from mongoengine.fields import (
FloatField,
StringField,
ListField,
URLField,
ObjectIdField,
)
| 20 | 35 | 0.642857 |
1653e68a3494182dbc33ba8410b68bb9f85c16c2
| 97 |
py
|
Python
|
src/tensor/tensor/movement/__init__.py
|
jedhsu/tensor
|
3b2fe21029fa7c50b034190e77d79d1a94ea5e8f
|
[
"Apache-2.0"
] | null | null | null |
src/tensor/tensor/movement/__init__.py
|
jedhsu/tensor
|
3b2fe21029fa7c50b034190e77d79d1a94ea5e8f
|
[
"Apache-2.0"
] | null | null | null |
src/tensor/tensor/movement/__init__.py
|
jedhsu/tensor
|
3b2fe21029fa7c50b034190e77d79d1a94ea5e8f
|
[
"Apache-2.0"
] | null | null | null |
from ._movement import Movement
from .path import MovementPath
from .paths import MovementPaths
| 19.4 | 32 | 0.835052 |
1654499e8423c0c8a91eb13123406b32dfc847c1
| 8,988 |
py
|
Python
|
opticalmapping/standalone/om_augmenter.py
|
sauloal/ipython
|
35c24a10330da3e54b5ee29df54ee263f5268d18
|
[
"MIT"
] | null | null | null |
opticalmapping/standalone/om_augmenter.py
|
sauloal/ipython
|
35c24a10330da3e54b5ee29df54ee263f5268d18
|
[
"MIT"
] | null | null | null |
opticalmapping/standalone/om_augmenter.py
|
sauloal/ipython
|
35c24a10330da3e54b5ee29df54ee263f5268d18
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import os
import sys
from om_shared import *
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("no arguments given")
sys.exit(1)
args = parse_args(sys.argv[1:])
main(args)
"""
# $ cd D:\Plextor\data\Acquisitie\BioNanoGenomics\MyLycopersicumWorkspace_31022015\Imports; C:\Program Files\BioNano Genomics\RefAligner\WindowsRefAligner.exe -f -ref D:\Plextor\data\Acquisitie\BioNanoGenomics\MyLycopersicumWorkspace_31022015\Imports\S_lycopersicum_chromosomes.2.50.BspQI-BbvCI.cmap -i D:\Plextor\data\Acquisitie\BioNanoGenomics\MyLycopersicumWorkspace_31022015\Imports\EXP_REFINEFINAL1.cmap -o S_lycopersicum_chromosomes.2.50.BspQI-BbvCI_to_EXP_REFINEFINAL1 -endoutlier 1e-2 -outlier 1e-4 -extend 1 -FN 0.08 -FP 0.8 -sf 0.2 -sd 0 -sr 0.02 -res 2.9 -resSD 0.7 -mres 2.0 -A 5 -biaswt 0 -M 1 -Mfast 0 -maxmem 2 -T 1e-6 -stdout -stderr
# r3498 $Header: http://svn.bnm.local:81/svn/informatics/RefAligner/branches/3480/RefAligner.cpp 3470 2014-12-17 19:29:21Z tanantharaman $
# FLAGS: USE_SSE=0 USE_AVX=0 USE_MIC=0 USE_PFLOAT=1 USE_RFLOAT=1 DEBUG=1 VERB=1
# XMAP File Version: 0.2
# Label Channels: 1
# Reference Maps From: S_lycopersicum_chromosomes.2.50.BspQI-BbvCI_to_EXP_REFINEFINAL1_r.cmap
# Query Maps From: S_lycopersicum_chromosomes.2.50.BspQI-BbvCI_to_EXP_REFINEFINAL1_q.cmap
#h XmapEntryID QryContigID RefContigID QryStartPos QryEndPos RefStartPos RefEndPos Orientation Confidence HitEnum QryLen RefLen LabelChannel Alignment
#f int int int float float float float string float string float float int string
1 141 1 528400.6 571697.5 10672 54237.5 + 6.65 4M2D2M 1439123.5 21805821 1 "(1,34)(2,34)(3,35)(4,36)(5,37)(6,38)(8,38)(9,39)"
2 174 1 21236.5 1568390 10672 1553561 + 79.35 2M3D1M1D1M1D4M1I2M1D2M1D1M2I2D9M3I3M1D6M1D2M2D1M1D6M1D1M1D1M2D2M2D1M1I1D1M1D5M2D4M2D1M2D2M1D2M1D3M1D1M1D2M3I3D1M1D1M3D2M3D1M2I1D1M2D1M1D1M1I2D3M2I1M1D2M1D1M1D1M2I3D3M3D1M2D1M1D1M1D5M2D12M 1568410 21805821 1 "(1,2)(2,2)(3,3)(6,4)(7,4)(9,5)(11,6)(12,7)(13,8)(14,9)(15,11)(16,12)(18,13)(19,14)(20,15)(21,15)(24,18)(25,19)(26,20)(27,21)(28,22)(29,23)(30,24)(31,25)(32,26)(33,30)(34,31)(35,32)(37,33)(38,34)(39,35)(40,36)(41,37)(42,38)(44,39)(45,40)(47,41)(48,41)(50,42)(51,43)(52,44)(53,45)(54,46)(55,47)(57,48)(59,49)(60,50)(62,50)(63,51)(66,52)(68,54)(69,55)(70,55)(71,56)(72,57)(73,58)(74,59)(76,60)(77,60)(78,61)(79,62)(80,63)(82,64)(83,64)(86,65)(87,66)(89,67)(90,68)(92,69)(93,70)(94,71)(95,72)(96,72)(98,73)(99,74)(103,78)(105,79)(109,80)(110,81)(111,82)(114,82)(116,85)(119,86)(120,87)(121,87)(124,89)(125,90)(126,91)(127,94)(128,95)(129,95)(130,96)(132,97)(134,98)(138,101)(139,102)(140,103)(143,104)(144,104)(146,105)(147,105)(149,106)(151,107)(152,108)(153,109)(154,110)(155,111)(158,112)(159,113)(160,114)(161,115)(162,116)(163,117)(164,118)(165,119)(166,120)(167,121)(168,122)(169,123)"
"""
| 61.561644 | 1,184 | 0.595683 |
1654fce2866f6b2ef021c29092efa26419e5ba83
| 4,918 |
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/impairment/profile/fixedclassifier/fixedclassifier.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20 |
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/impairment/profile/fixedclassifier/fixedclassifier.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60 |
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/impairment/profile/fixedclassifier/fixedclassifier.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13 |
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
| 41.677966 | 187 | 0.700895 |
16557fb191c1ea62849d52d444fde47864d855b9
| 43,651 |
py
|
Python
|
lantz/drivers/sacher/Sacher_EPOS.py
|
mtsolmn/lantz-drivers
|
f48caf9000ddd08f2abb837d832e341410af4788
|
[
"BSD-3-Clause"
] | 4 |
2019-05-04T00:10:53.000Z
|
2020-10-22T18:08:40.000Z
|
lantz/drivers/sacher/Sacher_EPOS.py
|
mtsolmn/lantz-drivers
|
f48caf9000ddd08f2abb837d832e341410af4788
|
[
"BSD-3-Clause"
] | 3 |
2019-07-12T13:44:17.000Z
|
2020-10-22T19:32:08.000Z
|
lantz/drivers/sacher/Sacher_EPOS.py
|
mtsolmn/lantz-drivers
|
f48caf9000ddd08f2abb837d832e341410af4788
|
[
"BSD-3-Clause"
] | 9 |
2019-04-03T17:07:03.000Z
|
2021-02-15T21:53:55.000Z
|
# sacher_epos.py, python wrapper for sacher epos motor
# David Christle <[email protected]>, August 2014
#
"""
Possibly Maxon EPOS now
"""
"""
This is the actual version that works
But only in the lab32 virtual environment
"""
# from instrument import Instrument
# import qt
import ctypes
import ctypes.wintypes
import logging
import time
# from instrument import Instrument
from ctypes.wintypes import DWORD, WORD
import numpy as np
"""
okay so we import a bunch of random stuff
I always forget what ctypes is for but I'll worry about it later
"""
# from subprocess import Popen, PIPE
# from multiprocessing.managers import BaseManager
# import atexit
# import os
# python32_dir = "C:\\Users\\Alex\\Miniconda3\\envs\\lab32"
# assert os.path.isdir(python32_dir)
# os.chdir(python32_dir)
# derp = "C:\\Users\\Alex\\Documents\\wow_such_code"
# assert os.path.isdir(derp)
# os.chdir(derp)
# p = Popen([python32_dir + "\\python.exe", derp + "\\delegate.py"], stdout=PIPE, cwd=derp)
# atexit.register(p.terminate)
# port = int(p.stdout.readline())
# authkey = p.stdout.read()
# print(port, authkey)
# m = BaseManager(address=("localhost", port), authkey=authkey)
# m.connect()
# tell manager to expect an attribute called LibC
# m.register("SacherLasaTeknique")
# access and use libc
# libc = m.SacherLasaTeknique()
# print(libc.vcs())
# eposlib = ctypes.windll.eposcmd
eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll')
DeviceName = b'EPOS'
ProtocolStackName = b'MAXON_RS232'
InterfaceName = b'RS232'
"""
Max on
Max off
but anyway it looks like ctypes is the thing that's talking to the epos dll
"""
HISTCHAN = 65536
TTREADMAX = 131072
RANGES = 8
MODE_HIST = 0
MODE_T2 = 2
MODE_T3 = 3
FLAG_OVERFLOW = 0x0040
FLAG_FIFOFULL = 0x0003
# in mV
ZCMIN = 0
ZCMAX = 20
DISCRMIN = 0
DISCRMAX = 800
# in ps
OFFSETMIN = 0
OFFSETMAX = 1000000000
# in ms
ACQTMIN = 1
ACQTMAX = 10 * 60 * 60 * 1000
# in mV
PHR800LVMIN = -1600
PHR800LVMAX = 2400
"""
wooooooo a bunch a variables and none of them are explained
way to go dc you da real champ
"""
"""
Also we're done with the Sacher_EPOS() class at this point
"""
if __name__ == '__main__':
epos = Sacher_EPOS(None, b'COM3')
# epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860)
# epos.do_get_wavelength()
# print('#1 Motor current: {}'.format(epos.get_motor_current()))
# epos.do_get_wavelength()
# print('motor position is...')
# current_pos = epos.get_motor_position()
# print('current position is {}'.format(current_pos))
# new_pos = current_pos + 10000
# epos.set_target_position(new_pos, True, True)
# print(epos.get_motor_position())
# print('#2 Motor current: {}'.format(epos.get_motor_current()))
# epos.find_home()
# epos.restore()
# time.sleep(7)
epos.do_set_wavelength(1151.5)
# epos.do_get_wavelength()
print('Motor current: {}'.format(epos.get_motor_current()))
print('Motor position: {}'.format(epos.get_motor_position()))
"""
OTHER MISC. NOTES:
increasing wavelength:
causes the square to rotate left
causes base to move to the left when square is stuck in
causes screw to loosen
causes large gold base to tighten
decreasing wavelength:
there's an overshoot when lowering wavelength
causes the square to rotate right
causes base to move to the right when square is stuck in
causes screw to tighten
causes large gold base to loosen, and also unplug the motor
Also you don't need to explicitly run epos.initialize() because there's an __init__ function which contains epos.initialize()
"""
# womp the end
| 41.532826 | 147 | 0.625644 |
165616f6329f47d7fc22c8cc1eb0970f40d768d9
| 1,652 |
py
|
Python
|
tools/generate_lst.py
|
haotianliu001/HRNet-Lesion
|
9dae108879456e084b2200e39d7e58c1c08c2b16
|
[
"MIT"
] | null | null | null |
tools/generate_lst.py
|
haotianliu001/HRNet-Lesion
|
9dae108879456e084b2200e39d7e58c1c08c2b16
|
[
"MIT"
] | null | null | null |
tools/generate_lst.py
|
haotianliu001/HRNet-Lesion
|
9dae108879456e084b2200e39d7e58c1c08c2b16
|
[
"MIT"
] | null | null | null |
import argparse
import os
image_dir = 'image'
label_dir = 'label'
splits = ['train', 'val', 'test']
image_dirs = [
'image/{}',
'image/{}_crop'
]
label_dirs = [
'label/{}/annotations',
'label/{}/annotations_crop',
]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('root', type=str, help='path of dataset root')
args = parser.parse_args()
generate(args.root)
| 30.036364 | 116 | 0.579903 |
1658161ce6f6978b51d0a1fdd4a0ce93c2160124
| 897 |
py
|
Python
|
examples/example.py
|
f-dangel/unfoldNd
|
63e9abc4867d8678c2ac00da567dc106e9f6f2c7
|
[
"MIT"
] | 21 |
2021-03-04T04:56:20.000Z
|
2022-03-31T11:15:28.000Z
|
examples/example.py
|
f-dangel/unfoldNd
|
63e9abc4867d8678c2ac00da567dc106e9f6f2c7
|
[
"MIT"
] | 12 |
2021-02-16T16:16:23.000Z
|
2021-05-28T06:00:41.000Z
|
examples/example.py
|
f-dangel/unfoldNd
|
63e9abc4867d8678c2ac00da567dc106e9f6f2c7
|
[
"MIT"
] | 1 |
2021-11-04T12:52:19.000Z
|
2021-11-04T12:52:19.000Z
|
"""How to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``."""
# imports, make this example deterministic
import torch
import unfoldNd
torch.manual_seed(0)
# random batched RGB 32x32 image-shaped input tensor of batch size 64
inputs = torch.randn((64, 3, 32, 32))
# module hyperparameters
kernel_size = 3
dilation = 1
padding = 1
stride = 2
# both modules accept the same arguments and perform the same operation
torch_module = torch.nn.Unfold(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
lib_module = unfoldNd.UnfoldNd(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
# forward pass
torch_outputs = torch_module(inputs)
lib_outputs = lib_module(inputs)
# check
if torch.allclose(torch_outputs, lib_outputs):
print(" Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.")
else:
raise AssertionError(" Outputs don't match")
| 24.916667 | 71 | 0.753623 |
1658fa9a24f0d70843df0f950d0081f1ffadc11b
| 797 |
py
|
Python
|
src/pretix/helpers/escapejson.py
|
NicsTr/pretix
|
e6d2380d9ed1836cc64a688b2be20d00a8500eab
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2020-04-25T00:11:00.000Z
|
2020-04-25T00:11:00.000Z
|
src/pretix/helpers/escapejson.py
|
NicsTr/pretix
|
e6d2380d9ed1836cc64a688b2be20d00a8500eab
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/helpers/escapejson.py
|
NicsTr/pretix
|
e6d2380d9ed1836cc64a688b2be20d00a8500eab
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe
_json_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
_json_escapes_attr = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('"'): '"',
ord("'"): ''',
ord("="): '=',
}
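# The tables above are meant for str.translate; a minimal sketch of the
# helper this module presumably exposes (function name assumed from the
# imports and the module path):
# @keep_lazy(str, SafeText)
# def escapejson(value):
#     return mark_safe(force_str(value).translate(_json_escapes))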
| 25.709677 | 75 | 0.6399 |
1659ed45e2efb246708ee177c0a31eb71473cb9b
| 1,813 |
py
|
Python
|
pyxley/charts/plotly/base.py
|
snowind/pyxley
|
cff9e50b8d80b9794c6907355e541f166959cd6c
|
[
"MIT"
] | 2,536 |
2015-06-26T20:12:30.000Z
|
2022-03-01T07:26:44.000Z
|
pyxley/charts/plotly/base.py
|
zhiaozhou/pyxley
|
2dab00022d977d986169cd8a629b3a2f91be893f
|
[
"MIT"
] | 51 |
2015-07-17T14:16:43.000Z
|
2021-07-09T21:34:36.000Z
|
pyxley/charts/plotly/base.py
|
zhiaozhou/pyxley
|
2dab00022d977d986169cd8a629b3a2f91be893f
|
[
"MIT"
] | 335 |
2015-07-16T20:22:00.000Z
|
2022-02-25T07:18:15.000Z
|
from ..charts import Chart
from flask import jsonify, request
_BASE_CONFIG = {
"showLink": False,
"displaylogo": False,
"modeBarButtonsToRemove": ["sendDataToCloud"]
}
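# Each chart merges this dict into its own Plotly config so the hosted-chart
# link, the logo, and the send-to-cloud modebar button stay hidden. A sketch
# of the merge (an assumption; the Chart subclasses are truncated here):
# config = dict(_BASE_CONFIG, **chart_specific_config)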
| 27.059701 | 73 | 0.492554 |
165b5afa3e28ca226423cdaac8f6894170030430
| 576 |
py
|
Python
|
pyqt/getting_started/close_window.py
|
CospanDesign/python
|
9f911509aae7abd9237c14a4635294c7719c9129
|
[
"MIT"
] | 5 |
2015-12-12T20:16:45.000Z
|
2020-02-21T19:50:31.000Z
|
pyqt/getting_started/close_window.py
|
CospanDesign/python
|
9f911509aae7abd9237c14a4635294c7719c9129
|
[
"MIT"
] | null | null | null |
pyqt/getting_started/close_window.py
|
CospanDesign/python
|
9f911509aae7abd9237c14a4635294c7719c9129
|
[
"MIT"
] | 2 |
2020-06-01T06:27:06.000Z
|
2022-03-10T13:21:03.000Z
|
#!/usr/bin/python
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
if __name__ == "__main__":
main()
| 19.2 | 65 | 0.682292 |
165bd59707bf7d41b2fcb3dbf5d490a2e8660a09
| 732 |
py
|
Python
|
test/means/test_zero_mean.py
|
bdecost/gpytorch
|
a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a
|
[
"MIT"
] | null | null | null |
test/means/test_zero_mean.py
|
bdecost/gpytorch
|
a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a
|
[
"MIT"
] | null | null | null |
test/means/test_zero_mean.py
|
bdecost/gpytorch
|
a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a
|
[
"MIT"
] | 1 |
2018-11-15T10:03:40.000Z
|
2018-11-15T10:03:40.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from gpytorch.means import ZeroMean
| 28.153846 | 78 | 0.629781 |
165bdb25d95d9e2ecf502312358485ebe1274976
| 1,948 |
py
|
Python
|
generator/contact.py
|
rizzak/python_training
|
38bbe5d7e38892e8dcc28caeae1481b98cce7356
|
[
"Apache-2.0"
] | null | null | null |
generator/contact.py
|
rizzak/python_training
|
38bbe5d7e38892e8dcc28caeae1481b98cce7356
|
[
"Apache-2.0"
] | null | null | null |
generator/contact.py
|
rizzak/python_training
|
38bbe5d7e38892e8dcc28caeae1481b98cce7356
|
[
"Apache-2.0"
] | null | null | null |
import jsonpickle
import random
import string
from model.contact import Contact
import os.path
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
getopt.usage()
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
testdata = [Contact(first_name="", middle_name="", last_name="", nickname="", title="", company="", address="",
home_tel="", mobile_tel="", work_tel="", fax="", email="", homepage="", birthday="",
anniversary="", secondary_address="", secondary_tel="", notes="")] + [
Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10),
nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10),
address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10),
work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10),
homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10),
secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10))
    for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file , "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
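# Example invocation (output path assumed):
# python generator/contact.py -n 10 -f data/contacts.json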
| 40.583333 | 153 | 0.664271 |
165cb63df5c2c12565813006cb857ecc7266b584
| 9,952 |
py
|
Python
|
Lib/test/test_runpy.py
|
arvindm95/unladen-swallow
|
8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3
|
[
"PSF-2.0"
] | 2,293 |
2015-01-02T12:46:10.000Z
|
2022-03-29T09:45:43.000Z
|
python/src/Lib/test/test_runpy.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 315 |
2015-05-31T11:55:46.000Z
|
2022-01-12T08:36:37.000Z
|
python/src/Lib/test/test_runpy.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 1,033 |
2015-01-04T07:48:40.000Z
|
2022-03-24T09:34:37.000Z
|
# Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.test_support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
def test_main():
run_unittest(RunModuleCodeTest)
run_unittest(RunModuleTest)
if __name__ == "__main__":
test_main()
| 39.181102 | 82 | 0.60621 |
165d5b352de2106b373e88fa207e7c0361117e91
| 4,795 |
py
|
Python
|
experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py
|
RedisAI/benchmarks
|
65b8509b81795da73f25f51941c61fbd9765914c
|
[
"MIT"
] | 6 |
2019-04-18T10:17:52.000Z
|
2021-07-02T19:57:08.000Z
|
experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py
|
hhsecond/benchmarks
|
65b8509b81795da73f25f51941c61fbd9765914c
|
[
"MIT"
] | 1 |
2021-07-21T12:17:08.000Z
|
2021-07-21T12:17:08.000Z
|
experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py
|
hhsecond/benchmarks
|
65b8509b81795da73f25f51941c61fbd9765914c
|
[
"MIT"
] | 2 |
2020-03-15T00:37:57.000Z
|
2022-02-26T04:36:00.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: imagedata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='imagedata.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0fimagedata.proto\"H\n\tImageData\x12\r\n\x05image\x18\x01 \x01(\x0c\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\r\n\x05\x64type\x18\x04 \x01(\t\"!\n\x0fPredictionClass\x12\x0e\n\x06output\x18\x01 \x03(\x02\x32<\n\tPredictor\x12/\n\rGetPrediction\x12\n.ImageData\x1a\x10.PredictionClass\"\x00\x62\x06proto3')
)
_IMAGEDATA = _descriptor.Descriptor(
name='ImageData',
full_name='ImageData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='ImageData.image', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='ImageData.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='ImageData.width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='ImageData.dtype', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=91,
)
_PREDICTIONCLASS = _descriptor.Descriptor(
name='PredictionClass',
full_name='PredictionClass',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output', full_name='PredictionClass.output', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=126,
)
DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA
DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict(
DESCRIPTOR = _IMAGEDATA,
__module__ = 'imagedata_pb2'
# @@protoc_insertion_point(class_scope:ImageData)
))
_sym_db.RegisterMessage(ImageData)
PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict(
DESCRIPTOR = _PREDICTIONCLASS,
__module__ = 'imagedata_pb2'
# @@protoc_insertion_point(class_scope:PredictionClass)
))
_sym_db.RegisterMessage(PredictionClass)
_PREDICTOR = _descriptor.ServiceDescriptor(
name='Predictor',
full_name='Predictor',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=128,
serialized_end=188,
methods=[
_descriptor.MethodDescriptor(
name='GetPrediction',
full_name='Predictor.GetPrediction',
index=0,
containing_service=None,
input_type=_IMAGEDATA,
output_type=_PREDICTIONCLASS,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PREDICTOR)
DESCRIPTOR.services_by_name['Predictor'] = _PREDICTOR
# @@protoc_insertion_point(module_scope)
| 30.935484 | 365 | 0.740563 |
165e5478bb41b24d4a9ab5bce186c085b7367f24
| 4,937 |
py
|
Python
|
app/api/admin_sales/discounted.py
|
akashtalole/python-flask-restful-api
|
475d8fd7be1724183716a197aac4257f8fbbeac4
|
[
"MIT"
] | 3 |
2019-09-05T05:28:49.000Z
|
2020-06-10T09:03:37.000Z
|
app/api/admin_sales/discounted.py
|
akashtalole/python-flask-restful-api
|
475d8fd7be1724183716a197aac4257f8fbbeac4
|
[
"MIT"
] | null | null | null |
app/api/admin_sales/discounted.py
|
akashtalole/python-flask-restful-api
|
475d8fd7be1724183716a197aac4257f8fbbeac4
|
[
"MIT"
] | null | null | null |
from sqlalchemy import func
from flask_rest_jsonapi import ResourceList
from marshmallow_jsonapi import fields
from marshmallow_jsonapi.flask import Schema
from app.api.helpers.utilities import dasherize
from app.api.bootstrap import api
from app.models import db
from app.models.discount_code import DiscountCode
from app.models.event import Event
from app.models.order import Order, OrderTicket
from app.models.user import User
| 41.838983 | 102 | 0.552157 |
165e549759c53b8757e058aa4a4e0a0e6b69b060
| 407 |
py
|
Python
|
spacy/lang/sr/__init__.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 4 |
2021-08-11T05:46:23.000Z
|
2021-09-11T05:16:57.000Z
|
spacy/lang/sr/__init__.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 1 |
2021-03-01T19:01:37.000Z
|
2021-03-01T19:01:37.000Z
|
spacy/lang/sr/__init__.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 2 |
2021-01-26T17:29:02.000Z
|
2021-03-13T08:54:53.000Z
|
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from ...language import Language
__all__ = ["Serbian"]
| 21.421053 | 54 | 0.781327 |
165e63725354de429a448d866f665cccca991916
| 656 |
py
|
Python
|
mmdet/ops/dcn/__init__.py
|
TJUsym/TJU_Advanced_CV_Homework
|
2d85943390e9ba53b80988e0ab8d50aef0cd17da
|
[
"Apache-2.0"
] | 1,158 |
2019-04-26T01:08:32.000Z
|
2022-03-30T06:46:24.000Z
|
mmdet/ops/dcn/__init__.py
|
TJUsym/TJU_Advanced_CV_Homework
|
2d85943390e9ba53b80988e0ab8d50aef0cd17da
|
[
"Apache-2.0"
] | 148 |
2021-03-18T09:44:02.000Z
|
2022-03-31T06:01:39.000Z
|
mmdet/ops/dcn/__init__.py
|
TJUsym/TJU_Advanced_CV_Homework
|
2d85943390e9ba53b80988e0ab8d50aef0cd17da
|
[
"Apache-2.0"
] | 197 |
2020-01-29T09:58:27.000Z
|
2022-03-25T12:08:56.000Z
|
from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
DeformConvPack, ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
ModulatedDeformRoIPoolingPack)
__all__ = [
'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling'
]
| 46.857143 | 76 | 0.739329 |
165f2a4da2ed50464bfa13f0495fc689063e0199
| 1,189 |
py
|
Python
|
api/skill/serializer.py
|
zaubermaerchen/imas_cg_api
|
45ebdde8c47ff4fabbf58b75721721f142afb46b
|
[
"MIT"
] | 2 |
2016-02-01T21:03:53.000Z
|
2018-10-20T09:15:12.000Z
|
api/skill/serializer.py
|
zaubermaerchen/imas_cg_api
|
45ebdde8c47ff4fabbf58b75721721f142afb46b
|
[
"MIT"
] | 1 |
2020-01-05T12:50:35.000Z
|
2020-01-05T12:50:35.000Z
|
api/skill/serializer.py
|
zaubermaerchen/imas_cg_api
|
45ebdde8c47ff4fabbf58b75721721f142afb46b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from rest_framework import serializers
from data.models import Skill, SkillValue
| 26.422222 | 72 | 0.64508 |
1660d7a15a18998c6c8ae4f9e573b184061a0341
| 5,061 |
py
|
Python
|
Codes/Converting_RGB_to_GreyScale.py
|
sichkar-valentyn/Image_processing_in_Python
|
43d7c979bcd742cc202a28c2dea6ea5bc87562a2
|
[
"MIT"
] | 3 |
2018-12-02T03:59:51.000Z
|
2019-11-20T18:37:41.000Z
|
Codes/Converting_RGB_to_GreyScale.py
|
sichkar-valentyn/Image_processing_in_Python
|
43d7c979bcd742cc202a28c2dea6ea5bc87562a2
|
[
"MIT"
] | null | null | null |
Codes/Converting_RGB_to_GreyScale.py
|
sichkar-valentyn/Image_processing_in_Python
|
43d7c979bcd742cc202a28c2dea6ea5bc87562a2
|
[
"MIT"
] | 2 |
2018-10-18T07:01:26.000Z
|
2022-03-22T08:22:33.000Z
|
# File: Converting_RGB_to_GreyScale.py
# Description: Opening RGB image as array, converting to GreyScale and saving result into new file
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 Valentyn N Sichkar
# github.com/sichkar-valentyn
#
# Reference to:
# Valentyn N Sichkar. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603
# Opening RGB image as array, converting to GreyScale and saving result into new file
# Importing needed libraries
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage import color
from skimage import io
import scipy.misc
# Creating an array from image data
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
# Checking the type of the array
print(type(image_np)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_np.shape)
# Showing image with every channel separately
channel_R = image_np[:, :, 0]
channel_G = image_np[:, :, 1]
channel_B = image_np[:, :, 2]
# Creating a figure with subplots
f, ax = plt.subplots(nrows=2, ncols=2)
# ax is (2, 2) np array and to make it easier to read we use 'flatten' function
# Or we can call each time ax[0, 0]
ax0, ax1, ax2, ax3 = ax.flatten()
# Adjusting first subplot
ax0.imshow(channel_R, cmap='Reds')
ax0.set_xlabel('')
ax0.set_ylabel('')
ax0.set_title('Red channel')
# Adjusting second subplot
ax1.imshow(channel_G, cmap='Greens')
ax1.set_xlabel('')
ax1.set_ylabel('')
ax1.set_title('Green channel')
# Adjusting third subplot
ax2.imshow(channel_B, cmap='Blues')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax2.set_title('Blue channel')
# Adjusting fourth subplot
ax3.imshow(image_np)
ax3.set_xlabel('')
ax3.set_ylabel('')
ax3.set_title('Original image')
# Function to make distance between figures
plt.tight_layout()
# Giving the name to the window with figure
f.canvas.set_window_title('Eagle image in three channels R, G and B')
# Showing the plots
plt.show()
# Converting RGB image into GrayScale image
# Using formula:
# Y' = 0.299 R + 0.587 G + 0.114 B
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :, 2] * 0.114
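# Sanity check of the luma weights on one pixel: a pure-red (255, 0, 0) input
# gives 0.299*255 + 0.587*0 + 0.114*0 = 76.245, i.e. roughly 76 on the grey axis.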
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Preparing array for saving - creating three channels with the same data in each
# Firstly, creating array with zero elements
# And by 'image_GreyScale.shape + tuple([3])' we add one more element '3' to the tuple
# Now the shape will be (1080, 1920, 3) - which is tuple type
image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3]))
# Secondly, reshaping GreyScale image from 2D to 3D
x = image_GreyScale.reshape((1080, 1920, 1))
# Finally, writing all data in three channels
image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0]
# Saving image into a file from obtained 3D array
scipy.misc.imsave("images/result_1.jpg", image_GreyScale_with_3_channels)
# Checking that image was written with three channels and they are identical
result_1 = Image.open("images/result_1.jpg")
result_1_np = np.array(result_1)
print(result_1_np.shape)
print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1]))
print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2]))
# Showing saved resulted image
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Here we don't need to specify the map like cmap='Greys'
plt.imshow(result_1_np)
plt.show()
# Another way to convert RGB image into GreyScale image
image_RGB = io.imread("images/eagle.jpg")
image_GreyScale = color.rgb2gray(image_RGB)
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_2.jpg", image_GreyScale)
# One more way for converting
image_RGB_as_GreyScale = io.imread("images/eagle.jpg", as_gray=True)
# Checking the type of the array
print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_RGB_as_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_RGB_as_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_3.jpg", image_RGB_as_GreyScale)
| 33.966443 | 99 | 0.752223 |
1661f7c0c438355d7d875aa2c983973094881c84
| 3,193 |
py
|
Python
|
template_renderer.py
|
hamza-gheggad/gcp-iam-collector
|
02b46453b9ec23af07a0d81f7250f1de61e0ee23
|
[
"Apache-2.0"
] | null | null | null |
template_renderer.py
|
hamza-gheggad/gcp-iam-collector
|
02b46453b9ec23af07a0d81f7250f1de61e0ee23
|
[
"Apache-2.0"
] | null | null | null |
template_renderer.py
|
hamza-gheggad/gcp-iam-collector
|
02b46453b9ec23af07a0d81f7250f1de61e0ee23
|
[
"Apache-2.0"
] | null | null | null |
import colorsys
import json
from jinja2 import Environment, PackageLoader
import graph
| 31.303922 | 91 | 0.593173 |
166293ba707b563d24827825716e3e79a6848c40
| 13,007 |
py
|
Python
|
powerapi/cli/tools.py
|
danglotb/powerapi
|
67b2508588bfe1e20d90f9fe6bccda34d3455262
|
[
"BSD-3-Clause"
] | null | null | null |
powerapi/cli/tools.py
|
danglotb/powerapi
|
67b2508588bfe1e20d90f9fe6bccda34d3455262
|
[
"BSD-3-Clause"
] | null | null | null |
powerapi/cli/tools.py
|
danglotb/powerapi
|
67b2508588bfe1e20d90f9fe6bccda34d3455262
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2018, INRIA
# Copyright (c) 2018, University of Lille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import logging
from functools import reduce
from powerapi.exception import PowerAPIException
from powerapi.cli.parser import MainParser, ComponentSubParser
from powerapi.cli.parser import store_true
from powerapi.cli.parser import BadValueException, MissingValueException
from powerapi.cli.parser import BadTypeException, BadContextException
from powerapi.cli.parser import UnknowArgException
from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel
from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB
from powerapi.puller import PullerActor
from powerapi.pusher import PusherActor
| 49.268939 | 128 | 0.667948 |
1662a331dbe1e237d08e9e21a3e8d596bcbce6c4
| 2,477 |
py
|
Python
|
pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 27 |
2018-06-15T15:28:18.000Z
|
2022-03-10T12:23:50.000Z
|
pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 22 |
2018-06-14T08:29:16.000Z
|
2021-07-05T13:33:44.000Z
|
pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 8 |
2019-04-13T13:03:51.000Z
|
2021-06-19T09:29:11.000Z
|
# coding=UTF-8
# ex:ts=4:sw=4:et=on
#
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
from mvc.models.properties import StringProperty
from pyxrd.generic.io.custom_io import storables, Storable
from pyxrd.generic.models.base import DataModel
from pyxrd.refinement.refinables.mixins import RefinementGroup
| 34.402778 | 103 | 0.583771 |
16635cf724808862aeb33d75c907fed77d96d1fc | 857 | py | Python
| 1 plainProgrammingBug/start 1 plainProgrammingBug.py | vishalbelsare/SLAPP3 | da187b771831aaaabaee16a26ad341db2e968104 | ["CC0-1.0"] | 8 | 2017-10-18T05:19:17.000Z | 2020-03-24T21:23:52.000Z
| 1 plainProgrammingBug/start 1 plainProgrammingBug.py | vishalbelsare/SLAPP3 | da187b771831aaaabaee16a26ad341db2e968104 | ["CC0-1.0"] | null | null | null
| 1 plainProgrammingBug/start 1 plainProgrammingBug.py | vishalbelsare/SLAPP3 | da187b771831aaaabaee16a26ad341db2e968104 | ["CC0-1.0"] | 4 | 2017-10-25T09:07:49.000Z | 2019-08-18T09:17:58.000Z |
# start 1 plainProgrammingBug.py
import random
# returns -1, 0, 1 with equal probability
def randomMove():
    return random.randint(-1, 1)

def SimpleBug():
    # minimal stand-in for the elided walker: random-walk a bug on a grid,
    # using randomMove() for each coordinate as the docstring below explains
    xPos = yPos = 0
    for _ in range(10):
        xPos += randomMove()
        yPos += randomMove()
        print(xPos, yPos)

SimpleBug()
"""
you can eliminate the randomMove() function substituting
xPos += randomMove()
yPos += randomMove()
with
xPos += random.randint(-1, 1)
yPos += random.randint(-1, 1)
but the use of the function allows us to use here a self-explanatory
name
"""
| 19.930233 | 69 | 0.568261 |
166407e573ed13b6f495ddb118b6bb572fdf1148 | 423 | py | Python
| ba5a-min-coins/money_change.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | ["MIT"] | null | null | null
| ba5a-min-coins/money_change.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | ["MIT"] | null | null | null
| ba5a-min-coins/money_change.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | ["MIT"] | null | null | null |
# Minimum-coin change via dynamic programming:
# d[m] = 1 + min over coins c <= m of d[m - c]
money = 8074
#money = 18705
#coin_list = [24,23,21,5,3,1]
coin_list = [24,13,12,7,5,3,1]
#coin_list = list(map(int, open('dataset_71_8.txt').read().split(',')))
d = {0: 0}  # d[m] = fewest coins needed to make amount m
for m in range(1, money + 1):
    min_coin = 1000000  # sentinel larger than any achievable count
    for coin in coin_list:
        if m >= coin and d[m - coin] + 1 < min_coin:
            min_coin = d[m - coin] + 1
    d[m] = min_coin
#print(d)
print(d[money])
| 18.391304 | 66 | 0.51773 |
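
A reusable function form of the same recurrence (a sketch; the inputs mirror the script above):

def min_coins(money, coins):
    # d[m] = fewest coins summing to m; unreachable amounts stay infinite
    d = [0] + [float("inf")] * money
    for m in range(1, money + 1):
        d[m] = min((d[m - c] + 1 for c in coins if c <= m), default=float("inf"))
    return d[money]

print(min_coins(8074, [24, 13, 12, 7, 5, 3, 1]))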
1665579643c424a545b6a8b3af94a1a9e0f4f184 | 357 | py | Python
| examples/remove_comments.py | igordejanovic/textx-bibtex | b1374a39b96da9c1bc979c367b9ed3feb04f4f01 | ["MIT"] | 1 | 2020-06-17T21:51:33.000Z | 2020-06-17T21:51:33.000Z
| examples/remove_comments.py | igordejanovic/textx-bibtex | b1374a39b96da9c1bc979c367b9ed3feb04f4f01 | ["MIT"] | null | null | null
| examples/remove_comments.py | igordejanovic/textx-bibtex | b1374a39b96da9c1bc979c367b9ed3feb04f4f01 | ["MIT"] | null | null | null |
"""
Remove comments from bib file.
"""
from textx import metamodel_for_language
from txbibtex import bibentry_str
BIB_FILE = 'references.bib'
bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE)
# Drop line comments.
print('\n'.join([bibentry_str(e) for e in bibfile.entries
if e.__class__.__name__ != 'BibLineComment']))
| 27.461538 | 68 | 0.739496 |
1665f41d1c03f32167e2cea236d3cf7a022b6b61 | 3,202 | py | Python
| google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | ["Apache-2.0"] | null | null | null
| google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | ["Apache-2.0"] | 4 | 2020-07-21T12:51:46.000Z | 2022-01-22T10:29:25.000Z
| google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | ["Apache-2.0"] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z |
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create resource policy command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils as compute_api
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags
from googlecloudsdk.command_lib.compute.resource_policies import util
def _CommonArgs(parser, api_version):
"""A helper function to build args based on different API version."""
messages = apis.GetMessagesModule('compute', api_version)
flags.MakeResourcePolicyArg().AddArgument(parser)
flags.AddCommonArgs(parser)
flags.AddGroupPlacementArgs(parser, messages)
parser.display_info.AddCacheUpdater(None)
# The command class itself is elided in this extract; a minimal stub so the
# assignment below has a target (the real class also wires _CommonArgs into
# its Args() and implements Run() -- the 'v1' version here is an assumption):
class CreateGroupPlacement(base.CreateCommand):
  """Create a Google Compute Engine Group Placement Resource Policy."""

  @staticmethod
  def Args(parser):
    _CommonArgs(parser, api_version='v1')


CreateGroupPlacement.detailed_help = {
'DESCRIPTION':
"""\
Create a Google Compute Engine Group Placement Resource Policy.
""",
'EXAMPLES':
"""\
To create a Google Compute Engine Group Placement Resource policy with 2 VMs and 2 availability domains, run:
$ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2
"""
}
| 37.232558 | 109 | 0.777327 |
16661518293e1bbad26be3766a9addb9bc564758 | 629 | py | Python
| paperoni/io.py | notoraptor/paperoni | acdf2d3d790b98d6a171177ffd9d6342f86bc7ea | ["MIT"] | 88 | 2020-08-27T17:58:58.000Z | 2021-12-01T19:29:56.000Z
| paperoni/io.py | notoraptor/paperoni | acdf2d3d790b98d6a171177ffd9d6342f86bc7ea | ["MIT"] | 8 | 2020-08-27T02:54:11.000Z | 2022-02-01T13:35:41.000Z
| paperoni/io.py | notoraptor/paperoni | acdf2d3d790b98d6a171177ffd9d6342f86bc7ea | ["MIT"] | 6 | 2020-08-25T16:43:28.000Z | 2021-12-08T16:41:02.000Z |
import json
from .papers import Papers
from .researchers import Researchers
def ResearchersFile(filename):
"""Parse a file containing researchers."""
try:
with open(filename, "r") as file:
data = json.load(file)
except FileNotFoundError:
data = {}
return Researchers(data, filename=filename)
def PapersFile(filename, researchers=None):
"""Parse a file containing papers."""
try:
with open(filename, "r") as file:
data = json.load(file)
except FileNotFoundError:
data = {}
return Papers(data, filename=filename, researchers=researchers)
| 25.16 | 67 | 0.655008 |
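
Usage sketch for the two loaders above (the file names are hypothetical):

researchers = ResearchersFile("researchers.json")  # falls back to {} if the file is missing
papers = PapersFile("papers.json", researchers=researchers)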
16666943ca1f78d9acd45c2909883bd0b65b734d | 934 | py | Python
| src/lib/sd2/test_addresses.py | zachkont/sd2 | 92d8c55a8c7ac51c00ba514be01955aa7162e4ef | ["Apache-2.0"] | null | null | null
| src/lib/sd2/test_addresses.py | zachkont/sd2 | 92d8c55a8c7ac51c00ba514be01955aa7162e4ef | ["Apache-2.0"] | null | null | null
| src/lib/sd2/test_addresses.py | zachkont/sd2 | 92d8c55a8c7ac51c00ba514be01955aa7162e4ef | ["Apache-2.0"] | null | null | null |
#############################################################################
# Copyright (c) 2017 SiteWare Corp. All right reserved
#############################################################################
import logging
import pytest
from . import addresses
| 30.129032 | 77 | 0.626338 |
166739b28ed7ffa22c5f71499709f1fd302bd933 | 1,914 | py | Python
| config_model.py | Asha-ai/BERT_abstractive_proj | f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9 | ["Apache-2.0"] | 17 | 2020-01-11T15:15:21.000Z | 2021-12-08T10:03:36.000Z
| config_model.py | Asha-ai/BERT_abstractive_proj | f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9 | ["Apache-2.0"] | 6 | 2020-03-01T17:14:58.000Z | 2021-05-21T16:05:03.000Z
| config_model.py | Asha-ai/BERT_abstractive_proj | f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9 | ["Apache-2.0"] | 8 | 2020-05-11T21:24:51.000Z | 2021-07-23T09:18:46.000Z |
import texar.tf as tx
beam_width = 5
hidden_dim = 768
bert = {
'pretrained_model_name': 'bert-base-uncased'
}
# See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams
bert_encoder = {}
# From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45
# with adjustments for BERT
decoder = {
'dim': hidden_dim,
'num_blocks': 6,
'multihead_attention': {
'num_heads': 8,
'output_dim': hidden_dim
},
'initializer': {
'type': 'variance_scaling_initializer',
'kwargs': {
'scale': 1.0,
'mode': 'fan_avg',
'distribution': 'uniform',
},
},
'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim)
}
loss_label_confidence = 0.9
opt = {
'optimizer': {
'type': 'AdamOptimizer',
'kwargs': {
'beta1': 0.9,
'beta2': 0.997,
'epsilon': 1e-9
}
}
}
lr = {
# The 'learning_rate_schedule' can have the following 3 values:
# - 'static' -> A simple static learning rate, specified by 'static_lr'
# - 'aiayn' -> The learning rate used in the "Attention is all you need" paper.
# - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer example
'learning_rate_schedule': 'aiayn',
# The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
'lr_constant': 2 * (hidden_dim ** -0.5),
# The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
'warmup_steps': 4000,
# The static learning rate, when 'static' is used.
'static_lr': 1e-3,
# A multiplier that can be applied to the 'aiayn' learning rate.
'aiayn_multiplier': 0.2
}
| 31.377049 | 128 | 0.653083 |
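
The 'aiayn' schedule named above is the inverse-square-root warmup rule from
"Attention is all you need"; a sketch of its shape (the config scales it by
lr_constant and aiayn_multiplier):

def aiayn_lr(step, warmup_steps=4000):
    # rises linearly during warmup, then decays as step ** -0.5
    return min(step ** -0.5, step * warmup_steps ** -1.5)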
16677a6fe2ff1b1e4b01bda4446f100594d88c8e | 390 | py | Python
| wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | e35c59686e5ec81925c22353e269601f286634db | ["MIT"] | null | null | null
| wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | e35c59686e5ec81925c22353e269601f286634db | ["MIT"] | null | null | null
| wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | e35c59686e5ec81925c22353e269601f286634db | ["MIT"] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-29 09:04
from django.db import migrations, models
| 20.526316 | 55 | 0.594872 |
166802c5b61892041a13896dbed6ef514fd83df2 | 7,115 | py | Python
| undeployed/legacy/Landsat/DNtoReflectance.py | NASA-DEVELOP/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | ["NASA-1.3"] | 65 | 2015-09-10T12:59:56.000Z | 2022-02-27T22:09:03.000Z
| undeployed/legacy/Landsat/DNtoReflectance.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | ["NASA-1.3"] | 40 | 2015-04-08T19:23:30.000Z | 2015-08-04T15:53:11.000Z
| undeployed/legacy/Landsat/DNtoReflectance.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | ["NASA-1.3"] | 45 | 2015-08-14T19:09:38.000Z | 2022-02-15T18:53:16.000Z |
#-------------------------------------------------------------------------------
# Name: Landsat Digital Numbers to Radiance/Reflectance
# Purpose: To convert landsat 4,5, or 7 pixel values from digital numbers
# to Radiance, Reflectance, or Temperature
# Author: Quinten Geddes [email protected]
# NASA DEVELOP Program
# Created: 19/10/2012
#-------------------------------------------------------------------------------
import arcpy
import math
arcpy.CheckOutExtension("Spatial")
def DNtoReflectance(Lbands,MetaData,OutputType="Reflectance/Temperature",Save=False,OutputFolder=""):
"""This function is used to convert Landsat 4,5, or 7 pixel values from
digital numbers to Radiance, Reflectance, or Temperature (if using Band 6)
-----Inputs------
Lbands: GeoTIFF files containing individual bands of Landsat imagery. These
must have the original names as downloaded and must be from a single scene.
MetaData: The metadata text file that is downloaded with the Landsat Bands themselves.
This may be either the old or new MTL.txt file.
OutputType: Choose whether the output should be:
"Radiance"
"Reflectance/Temperature" - Calculates Reflectance for spectral bands
and Temperature in Kelvin for Thermal bands
    Save: Boolean value that indicates whether the output rasters will be saved permanently
          Each band will be saved as an individual GeoTIFF file and be named
          according to the original filename and the output pixel unit
*if this is true, then the OutputFolder variable must also be set
OutputFolder: Folder in which to save the output rasters
-----Outputs-----
A list of arcpy raster objects in a sequence that mirrors that of the input Lbands
"""
OutList=[]
#These lists will be used to parse the meta data text file and locate relevant information
#metadata format was changed August 29, 2012. This tool can process either the new or old format
newMeta=['LANDSAT_SCENE_ID = "','DATE_ACQUIRED = ',"SUN_ELEVATION = ",
"RADIANCE_MAXIMUM_BAND_{0} = ","RADIANCE_MINIMUM_BAND_{0} = ",
"QUANTIZE_CAL_MAX_BAND_{0} = ","QUANTIZE_CAL_MIN_BAND_{0} = "]
oldMeta=['BAND1_FILE_NAME = "',"ACQUISITION_DATE = ","SUN_ELEVATION = ",
"LMAX_BAND{0} = ","LMIN_BAND{0} = ",
"QCALMAX_BAND{0} = ","QCALMIN_BAND{0} = "]
f=open(MetaData)
MText=f.read()
#the presence of a PRODUCT_CREATION_TIME category is used to identify old metadata
#if this is not present, the meta data is considered new.
#Band6length refers to the length of the Band 6 name string. In the new metadata this string is longer
if "PRODUCT_CREATION_TIME" in MText:
Meta=oldMeta
Band6length=2
else:
Meta=newMeta
Band6length=8
    #The tile name is located using the newMeta/oldMeta indices and the date of capture is recorded
if Meta==newMeta:
TileName=MText.split(Meta[0])[1].split('"')[0]
year=TileName[9:13]
jday=TileName[13:16]
elif Meta==oldMeta:
TileName=MText.split(Meta[0])[1].split('"')[0]
year=TileName[13:17]
jday=TileName[17:20]
date=MText.split(Meta[1])[1].split('\n')[0]
    #the spacecraft from which the imagery was captured is identified
#this info determines the solar exoatmospheric irradiance (ESun) for each band
spacecraft=MText.split('SPACECRAFT_ID = "')[1].split('"')[0]
ThermBands=["6"]
if "7" in spacecraft:
ESun=(1969.0,1840.0,1551.0,1044.0,255.700,0. ,82.07,1368.00)
ThermBands=["B6_VCID_1","B6_VCID_2"]
elif "5" in spacecraft: ESun=(1957.0,1826.0,1554.0,1036.0,215.0 ,0. ,80.67)
elif "4" in spacecraft: ESun=(1957.0,1825.0,1557.0,1033.0,214.9 ,0. ,80.72)
elif "8" in spacecraft:
ESun=(1857.0,1996.0,1812.0,1516.0,983.3 ,251.8,85.24,0.0,389.3,0.,0.)
ThermBands=["10","11"]
else:
arcpy.AddError("This tool only works for Landsat 4, 5, 7 or 8 ")
raise arcpy.ExecuteError()
    #determining if the year is a leap year and setting the days in year accordingly
if float(year) % 4 ==0: DIY=366.
else:DIY=365.
    #using the date to determine the distance from the sun
theta =2*math.pi*float(jday)/DIY
dSun2 = (1.00011 + 0.034221*math.cos(theta) + 0.001280*math.sin(theta) +
0.000719*math.cos(2*theta)+ 0.000077*math.sin(2*theta) )
SZA=90.-float(MText.split(Meta[2])[1].split("\n")[0])
#Calculating values for each band
for pathname in Lbands:
try:
BandNum=pathname.split("\\")[-1].split("B")[1][0:2]
try: int(BandNum)
except: BandNum=pathname.split("\\")[-1].split("B")[1][0]
except:
            msg="Error reading Band from {0}. Bands must have original names as downloaded.".format(pathname)
arcpy.AddError(msg)
print msg
raise arcpy.ExecuteError
#changing Band 6 name to match metadata
if BandNum=="6" and spacecraft[8]=="7":
BandNum=pathname.split("\\")[-1].split("B")[1][0:Band6length]
print "Processing Band {0}".format(BandNum)
Oraster=arcpy.Raster(pathname)
        #using the oldMeta/newMeta indices to pull the min/max for radiance/digital numbers
LMax= float(MText.split(Meta[3].format(BandNum))[1].split("\n")[0])
LMin= float(MText.split(Meta[4].format(BandNum))[1].split("\n")[0])
QCalMax=float(MText.split(Meta[5].format(BandNum))[1].split("\n")[0])
QCalMin=float(MText.split(Meta[6].format(BandNum))[1].split("\n")[0])
Radraster=(((LMax - LMin)/(QCalMax-QCalMin)) * (Oraster - QCalMin)) +LMin
Oraster=0
if OutputType=="Radiance":
Radraster.save("{0}\\{1}_B{2}_Radiance.tif".format(OutputFolder,TileName,BandNum))
Radraster=0
elif OutputType=="Reflectance/Temperature":
#Calculating temperature for band 6 if present
if BandNum in ThermBands:
Refraster=1282.71/(arcpy.sa.Ln((666.09/Radraster)+1.0))
BandPath="{0}\\{1}_B{2}_Temperature.tif".format(OutputFolder,TileName,BandNum)
arcpy.AddMessage("Proceeded through if")
#Otherwise calculate reflectance
else:
Refraster=( math.pi * Radraster * dSun2) / (ESun[int(BandNum[0])-1] * math.cos(SZA*math.pi/180) )
BandPath="{0}\\{1}_B{2}_TOA_Reflectance.tif".format(OutputFolder,TileName,BandNum)
arcpy.AddMessage("Proceeded through else")
if Save==True:
Refraster.save(BandPath)
OutList.append(arcpy.Raster(BandPath))
else:
OutList.append(Refraster)
del Refraster,Radraster
arcpy.AddMessage( "Reflectance Calculated for Band {0}".format(BandNum))
print "Reflectance Calculated for Band {0}".format(BandNum)
f.close()
return OutList
| 42.100592 | 113 | 0.619115 |
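
The core DN-to-radiance rescaling used above, as a standalone sketch (the
calibration constants come from the MTL metadata file, as in the function):

def dn_to_radiance(dn, lmax, lmin, qcalmax, qcalmin):
    # linear rescale of quantized pixel values to at-sensor spectral radiance
    return ((lmax - lmin) / (qcalmax - qcalmin)) * (dn - qcalmin) + lmin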
1668b92419e5394d4eb735fba074c84b5eb16b19 | 1,396 | py | Python
| .modules/.theHarvester/discovery/twittersearch.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | ["Apache-2.0"] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z
| .modules/.theHarvester/discovery/twittersearch.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | ["Apache-2.0"] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z
| .modules/.theHarvester/discovery/twittersearch.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | ["Apache-2.0"] | 262 | 2017-09-16T22:15:50.000Z | 2022-03-31T00:38:42.000Z |
import string
import requests
import sys
import myparser
import re
| 32.465116 | 169 | 0.592407 |
166903b8515452d27e1a1b1b4a84d3d174d4f220 | 708 | py | Python
| scrap_instagram.py | genaforvena/nn_scrapper | 897766a52202aa056afd657995ed39b2b91e1fe2 | ["Apache-2.0"] | null | null | null
| scrap_instagram.py | genaforvena/nn_scrapper | 897766a52202aa056afd657995ed39b2b91e1fe2 | ["Apache-2.0"] | null | null | null
| scrap_instagram.py | genaforvena/nn_scrapper | 897766a52202aa056afd657995ed39b2b91e1fe2 | ["Apache-2.0"] | null | null | null |
import urllib.request
import json

access_token = "265791501.a4af066.f45a9f44719a4b2cb2d137118524e32b"
api_url = "https://api.instagram.com/v1"

def request(endpoint, params=""):
    # Minimal stand-in for the helper elided from this extract: call the API
    # endpoint with the access token and decode the JSON response.
    url = "{}{}?access_token={}&{}".format(api_url, endpoint, access_token, params)
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read().decode("utf-8"))

nn_lat = 56.296504
nn_lng = 43.936059
locations = request("/locations/search", "lat=" + str(nn_lat) + "&lng=" + str(nn_lng))["data"]
print(locations)
for location in locations:
location_id = location["id"]
location_media = request("/locations/" + str(location_id) + "/media/recent")
print(location_media)
| 29.5 | 94 | 0.706215 |
16693286bda8fc5cb36e02f9aa7765ff20fcfe4e | 7,066 | py | Python
| tests/unit/utils/test_validators.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | ["MIT"] | null | null | null
| tests/unit/utils/test_validators.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | ["MIT"] | null | null | null
| tests/unit/utils/test_validators.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | ["MIT"] | null | null | null |
"""Unit tests for app.validators. """
from wtforms import ValidationError
import flask
from pytest import raises
from app.utils.validators import password_rules, image_file, allowed_file


class DummyField:
    """Minimal WTForms field stand-in (the real test stub was elided here)."""
    data = None
    errors = ()

    def gettext(self, text):
        return text


class DummyForm(dict):
    """Minimal WTForms form stand-in (the real test stub was elided here)."""


def _run_validator_check(subtests, validator, valid, invalid):
    """Runs tests against the validator with valid and invalid inputs.

    Args:
        subtests: Subtests fixture.
        validator: Validator instance to run tests against.
        valid: List of valid inputs.
        invalid: List of invalid inputs.
    """
field = DummyField()
for item in valid:
field.data = item
with subtests.test(item=item):
validator(DummyForm(), field)
for item in invalid:
field.data = item
with subtests.test(item=item):
with raises(ValidationError):
validator(DummyForm(), field)
| 35.686869 | 78 | 0.644495 |
166add4d1cc09be73d6135b394a15f57ecfca1b9 | 615 | py | Python
| ts_eval/utils/nans.py | vshulyak/ts-eval | 2049b1268cf4272f5fa1471851523f8da14dd84c | ["MIT"] | 1 | 2021-07-12T08:58:07.000Z | 2021-07-12T08:58:07.000Z
| ts_eval/utils/nans.py | vshulyak/ts-eval | 2049b1268cf4272f5fa1471851523f8da14dd84c | ["MIT"] | null | null | null
| ts_eval/utils/nans.py | vshulyak/ts-eval | 2049b1268cf4272f5fa1471851523f8da14dd84c | ["MIT"] | null | null | null |
import warnings
import numpy as np
def nans_in_same_positions(*arrays):
"""
Compares all provided arrays to see if they have NaNs in the same positions.
"""
if len(arrays) == 0:
return True
for arr in arrays[1:]:
if not (np.isnan(arrays[0]) == np.isnan(arr)).all():
return False
return True
def nanmeanw(arr, axis=None):
"""
Computes nanmean without raising a warning in case of NaNs in the dataset
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
return np.nanmean(arr, axis=axis)
| 24.6 | 80 | 0.642276 |
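
Quick check of the two helpers above (a sketch):

import numpy as np

a = np.array([1.0, np.nan, 3.0])
b = np.array([9.0, np.nan, 7.0])
assert nans_in_same_positions(a, b)   # NaNs occupy the same slots
print(nanmeanw(a))                    # 2.0, with no RuntimeWarning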
166b671e9115e476c69bab6e6077599dd6b6cdea | 5,434 | py | Python
| tests/authorization/test_searches.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | ["MIT"] | 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z
| tests/authorization/test_searches.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | ["MIT"] | 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z
| tests/authorization/test_searches.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | ["MIT"] | 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z |
"""Unit tests of authorization searches."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
| 36.469799 | 176 | 0.749724 |
166ccaa355ece2f923c461999fa3eb16171b7163 | 350 | py | Python
| mechroutines/models/_flux.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | ["Apache-2.0"] | 1 | 2022-03-22T20:47:04.000Z | 2022-03-22T20:47:04.000Z
| mechroutines/models/_flux.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | ["Apache-2.0"] | 1 | 2021-02-12T21:11:16.000Z | 2021-12-07T21:32:14.000Z
| mechroutines/models/_flux.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | ["Apache-2.0"] | 8 | 2019-12-18T20:09:46.000Z | 2020-11-14T16:37:28.000Z |
"""
NEW: Handle flux files
"""
import autofile
def read_flux(ts_save_path, vrc_locs=(0,)):
""" Read the geometry from the filesys
"""
vrc_fs = autofile.fs.vrctst(ts_save_path)
if vrc_fs[-1].file.flux.exists(vrc_locs):
flux_str = vrc_fs[-1].file.flux.read(vrc_locs)
else:
flux_str = None
return flux_str
| 18.421053 | 54 | 0.64 |
166ddfdb964d4dc41f4f840af0cda8cfbfe5a687 | 4,990 | py | Python
| RandomForest/RandomForest.py | nachiket273/ML_Algo_Implemented | 74ae47fdf620545fdf8c934c5997784faadaebb7 | ["MIT"] | 7 | 2020-08-03T13:43:53.000Z | 2022-02-18T20:38:51.000Z
| RandomForest/RandomForest.py | nachiket273/ML_Algo_Implemented | 74ae47fdf620545fdf8c934c5997784faadaebb7 | ["MIT"] | null | null | null
| RandomForest/RandomForest.py | nachiket273/ML_Algo_Implemented | 74ae47fdf620545fdf8c934c5997784faadaebb7 | ["MIT"] | 2 | 2020-09-06T21:54:16.000Z | 2022-01-22T19:59:33.000Z |
import math
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import sys
import os
sys.path.append(os.path.abspath('../DecisionTree'))
from DecisionTree import DecisionTree
| 40.901639 | 93 | 0.546293 |
166e1671aebcb4e327d8e4f8b8b62dc58ec16062 | 556 | py | Python
| tests/basics/generator_pend_throw.py | iotctl/pycopy | eeb841afea61b19800d054b3b289729665fc9aa4 | ["MIT"] | 663 | 2018-12-30T00:17:59.000Z | 2022-03-14T05:03:41.000Z
| tests/basics/generator_pend_throw.py | iotctl/pycopy | eeb841afea61b19800d054b3b289729665fc9aa4 | ["MIT"] | 41 | 2019-06-06T08:31:19.000Z | 2022-02-13T16:53:41.000Z
| tests/basics/generator_pend_throw.py | iotctl/pycopy | eeb841afea61b19800d054b3b289729665fc9aa4 | ["MIT"] | 60 | 2019-06-01T04:25:00.000Z | 2022-02-25T01:47:31.000Z |
def gen():
    # Simple counting generator; a minimal stand-in for the elided original.
    i = 0
    while True:
        yield i
        i += 1

g = gen()
try:
g.pend_throw
except AttributeError:
print("SKIP")
raise SystemExit
print(next(g))
print(next(g))
g.pend_throw(ValueError())
v = None
try:
v = next(g)
except Exception as e:
print("raised", repr(e))
print("ret was:", v)
# It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g = gen()
g.pend_throw(ValueError())
try:
next(g)
except ValueError:
print("ValueError from just-started gen")
| 15.444444 | 73 | 0.624101 |
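
For contrast, a sketch of the eager core-Python equivalent: throw() raises at
the generator's current yield point immediately, rather than arming the
exception for the next resume the way pend_throw() does above.

g = gen()
next(g)
try:
    g.throw(ValueError())
except ValueError:
    print("ValueError from throw")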
166e4003ce5bc54874ebae493377303b4c270f29 | 4,511 | py | Python
| src/UnitTypes/ProjectileModule.py | USArmyResearchLab/ARL_Battlespace | 2f17a478f62c20a4db387d5d3e4bbeaa3197cd49 | ["MIT"] | 1 | 2022-03-31T19:15:04.000Z | 2022-03-31T19:15:04.000Z
| src/UnitTypes/ProjectileModule.py | USArmyResearchLab/ARL_Battlespace | 2f17a478f62c20a4db387d5d3e4bbeaa3197cd49 | ["MIT"] | null | null | null
| src/UnitTypes/ProjectileModule.py | USArmyResearchLab/ARL_Battlespace | 2f17a478f62c20a4db387d5d3e4bbeaa3197cd49 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 09:49:47 2020
@author: james.z.hare
"""
from src.UnitModule import UnitClass, advance
from copy import deepcopy
import math
# Will be used as the projectile for the missile launcher unit
| 32.221429 | 158 | 0.62137 |
166ed868a00e2876de6024b3dcf661e7d6afc455 | 216 | py | Python
| OOP_MiniQuiz/run_car_Level2.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | ["MIT"] | null | null | null
| OOP_MiniQuiz/run_car_Level2.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | ["MIT"] | null | null | null
| OOP_MiniQuiz/run_car_Level2.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | ["MIT"] | null | null | null |
from car import *
car1 = Car("Nissan","Tiida",450000)
car2 = Car("Toyota","Vios",400000)
car3 = Car("BMW","X3",3400000)
compare(car3,car1)
compare(car1,car2)
| 18 | 35 | 0.671296 |
166f10041a007d09adb3797f8fd4bf54942b5eeb | 1,513 | py | Python
| prelude/monads.py | michel-slm/python-prelude | b3ca89ff2bf150f772764f59d2796d2fcce1013d | ["MIT"] | 2 | 2015-05-12T16:12:56.000Z | 2020-08-26T20:52:47.000Z
| prelude/monads.py | michel-slm/python-prelude | b3ca89ff2bf150f772764f59d2796d2fcce1013d | ["MIT"] | null | null | null
| prelude/monads.py | michel-slm/python-prelude | b3ca89ff2bf150f772764f59d2796d2fcce1013d | ["MIT"] | null | null | null |
from abc import ABCMeta, abstractmethod
from prelude.typeclasses import Monad
from prelude.decorators import monad_eq, singleton
| 18.9125 | 50 | 0.613351 |
16715a2b77e2526acf8bf40591ec7bc531389bde | 848 | py | Python
| Deep Sort/src/imgconverter.py | JJavier98/TFG-Dron-de-Vigilancia | 7fd68a981854ac480ad2f0c936a0dd58d2a9f38b | ["MIT"] | null | null | null
| Deep Sort/src/imgconverter.py | JJavier98/TFG-Dron-de-Vigilancia | 7fd68a981854ac480ad2f0c936a0dd58d2a9f38b | ["MIT"] | null | null | null
| Deep Sort/src/imgconverter.py | JJavier98/TFG-Dron-de-Vigilancia | 7fd68a981854ac480ad2f0c936a0dd58d2a9f38b | ["MIT"] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('msgs_to_cv2')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
def main(args):
    # Minimal stand-in for the elided node body; the imports above suggest a
    # node that subscribes to an Image topic and converts frames via CvBridge.
    rospy.init_node('msgs_to_cv2', anonymous=True)
    rospy.spin()

if __name__ == '__main__':
    main(sys.argv)
| 20.190476 | 77 | 0.741745 |
16718d7813439bbbc33bc80e98b6e4741d2b5b6c | 261 | py | Python
| foodx_devops_tools/azure/__init__.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | ["MIT"] | 3 | 2021-06-23T20:53:43.000Z | 2022-01-26T14:19:43.000Z
| foodx_devops_tools/azure/__init__.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | ["MIT"] | 33 | 2021-08-09T15:44:51.000Z | 2022-03-03T18:28:02.000Z
| foodx_devops_tools/azure/__init__.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | ["MIT"] | 1 | 2021-06-23T20:53:52.000Z | 2021-06-23T20:53:52.000Z |
# Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Azure related utilities."""
| 29 | 73 | 0.731801 |
16725a52de27142aa18864c727dddea44204b666 | 5,940 | py | Python
| beartype/vale/__init__.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | ["MIT"] | null | null | null
| beartype/vale/__init__.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | ["MIT"] | null | null | null
| beartype/vale/__init__.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype validators.**
This submodule publishes a PEP-compliant hierarchy of subscriptable (indexable)
classes enabling callers to validate the internal structure of arbitrarily
complex scalars, data structures, and third-party objects. Like annotation
objects defined by the :mod:`typing` module (e.g., :attr:`typing.Union`), these
classes dynamically generate PEP-compliant type hints when subscripted
(indexed) and are thus intended to annotate callables and variables. Unlike
annotation objects defined by the :mod:`typing` module, these classes are *not*
explicitly covered by existing PEPs and thus *not* directly usable as
annotations.
Instead, callers are expected to (in order):
#. Annotate callable parameters and returns to be validated with
:pep:`593`-compliant :attr:`typing.Annotated` type hints.
#. Subscript those hints with (in order):
#. The type of those parameters and returns.
#. One or more subscriptions of classes declared by this submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.vale._is._valeis import _IsFactory
from beartype.vale._is._valeistype import (
_IsInstanceFactory,
_IsSubclassFactory,
)
from beartype.vale._is._valeisobj import _IsAttrFactory
from beartype.vale._is._valeisoper import _IsEqualFactory
# ....................{ SINGLETONS }....................
# Public factory singletons instantiating these private factory classes.
Is = _IsFactory(basename='Is')
IsAttr = _IsAttrFactory(basename='IsAttr')
IsEqual = _IsEqualFactory(basename='IsEqual')
IsInstance = _IsInstanceFactory(basename='IsInstance')
IsSubclass = _IsSubclassFactory(basename='IsSubclass')
# Delete all private factory classes imported above for safety.
del (
_IsFactory,
_IsAttrFactory,
_IsEqualFactory,
_IsInstanceFactory,
_IsSubclassFactory,
)
# ....................{ TODO }....................
#FIXME: As intelligently requested by @Saphyel at #32, add support for
#additional classes support constraints resembling:
#
#* String constraints:
# * Email.
# * Uuid.
# * Choice.
# * Language.
# * Locale.
# * Country.
# * Currency.
#* Comparison constraints
# * IdenticalTo.
# * NotIdenticalTo.
# * LessThan.
# * GreaterThan.
# * Range.
# * DivisibleBy.
#FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same
#signature and docstring as the existing CauseSleuth.get_cause_or_none()
#method. This new BeartypeValidator.get_cause_or_none() method should then be
#called by the "_peperrorannotated" submodule to generate human-readable
#exception messages. Note that this implies that:
#* The BeartypeValidator.__init__() method will need to additionally accept a new
# mandatory "get_cause_or_none: Callable[[], Optional[str]]" parameter, which
# that method should then localize to "self.get_cause_or_none".
#* Each __class_getitem__() dunder method of each "_BeartypeValidatorFactoryABC" subclass will need
# to additionally define and pass that callable when creating and returning
# its "BeartypeValidator" instance.
#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we can
#leverage all of our existing "beartype.is" infrastructure to dynamically
#synthesize PEP-compliant type hints that would then be implicitly supported by
#any runtime type checker. At present, subscriptions of "Is" (e.g.,
#"Annotated[str, Is[lambda text: bool(text)]]") are only supported by beartype
#itself. Of course, does anyone care? I mean, if you're using a runtime type
#checker, you're probably *ONLY* using beartype. Right? That said, this would
#technically improve portability by allowing users to switch between different
#checkers... except not really, since they'd still have to import beartype
#infrastructure to do so. So, this is probably actually useless.
#
#Nonetheless, the idea itself is trivial. We declare a new
#"beartype.is.Portable" singleton accessed in the same way: e.g.,
# from beartype import beartype
# from beartype.is import Portable
# NonEmptyStringTest = Is[lambda text: bool(text)]
# NonEmptyString = Portable[str, NonEmptyStringTest]
# @beartype
# def munge_it(text: NonEmptyString) -> str: ...
#
#So what's the difference between "typing.Annotated" and "beartype.is.Portable"
#then? Simple. The latter dynamically generates one new PEP 3119-compliant
#metaclass and associated class whenever subscripted. Clearly, this gets
#expensive in both space and time consumption fast -- which is why this won't
#be the default approach. For safety, this new class does *NOT* subclass the
#first subscripted class. Instead:
#* This new metaclass of this new class simply defines an __isinstancecheck__()
# dunder method. For the above example, this would be:
# class NonEmptyStringMetaclass(object):
# def __isinstancecheck__(cls, obj) -> bool:
# return isinstance(obj, str) and NonEmptyStringTest(obj)
#* This new class would then be entirely empty. For the above example, this
# would be:
# class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass):
# pass
#
#Well, so much for brilliant. It's slow and big, so it seems doubtful anyone
#would actually do that. Nonetheless, that's food for thought for you.
| 45.343511 | 99 | 0.711616 |
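
A usage sketch of the factories exported above, following the pattern the
module docstring describes (typing.Annotated needs Python 3.9+, or
typing_extensions on older interpreters):

from typing import Annotated
from beartype import beartype
from beartype.vale import Is

NonEmptyString = Annotated[str, Is[lambda text: bool(text)]]

@beartype
def munge_it(text: NonEmptyString) -> str:
    return text.strip()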
16730d6f4856a5911d4dfcf4a29a2f5449a0ddb0 | 3,536 | py | Python
| tests/test_authentication.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | ["MIT"] | null | null | null
| tests/test_authentication.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | ["MIT"] | 3 | 2015-01-31T14:53:06.000Z | 2015-02-01T19:04:30.000Z
| tests/test_authentication.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | ["MIT"] | 2 | 2015-01-31T14:54:28.000Z | 2018-03-05T17:33:42.000Z |
import unittest
from mock import Mock
import base64
from cellardoor import errors
from cellardoor.authentication import *
from cellardoor.authentication.basic import BasicAuthIdentifier
| 30.747826 | 95 | 0.756505 |
16731efe14cf79a4c56966e84b709e60bb9faf4f | 42 | py | Python
| src/styleaug/__init__.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | ["MIT"] | 69 | 2019-04-09T18:05:33.000Z | 2022-03-11T05:58:59.000Z
| src/styleaug/__init__.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | ["MIT"] | 6 | 2019-04-01T12:04:10.000Z | 2022-01-19T11:49:13.000Z
| src/styleaug/__init__.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | ["MIT"] | 13 | 2019-05-22T19:08:36.000Z | 2021-08-13T01:21:47.000Z |
from .styleAugmentor import StyleAugmentor
| 42 | 42 | 0.904762 |
167422ad1c22d904c1fb3127c28d48e06243100c | 2,698 | py | Python
| configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | ["Apache-2.0"] | 10 | 2021-12-30T10:22:27.000Z | 2022-03-30T02:31:38.000Z
| configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | ["Apache-2.0"] | 3 | 2022-01-20T21:02:48.000Z | 2022-03-19T13:49:45.000Z
| configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | ["Apache-2.0"] | null | null | null |
_base_ = [
'../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py',
'../../../_base_/default_runtime.py',
]
# model settings
model = dict(
type='MixUpClassification',
pretrained=None,
alpha=0.2,
mix_mode="cutmix",
mix_args=dict(
attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in this repo (use pre-trained)
automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock
fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False),
manifoldmix=dict(layer=(0, 3)),
puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size if CUDA out of memory
mp=None, block_num=4, # block_num<=4 and mp=2/4 for fast training
beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8),
resizemix=dict(scope=(0.1, 0.8), use_alpha=True),
samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock
),
backbone=dict(
type='ConvNeXt',
arch='tiny',
out_indices=(3,),
norm_cfg=dict(type='LN2d', eps=1e-6),
act_cfg=dict(type='GELU'),
drop_path_rate=0.1,
gap_before_final_norm=True,
),
head=dict(
type='ClsMixupHead', # mixup CE + label smooth
loss=dict(type='LabelSmoothLoss',
label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0),
with_avg_pool=False, # gap_before_final_norm is True
in_channels=768, num_classes=1000)
)
# interval for accumulate gradient
update_interval = 2 # total: 8 x bs256 x 2 accumulates = bs4096
# additional hooks
custom_hooks = [
dict(type='EMAHook', # EMA_W = (1 - m) * EMA_W + m * W
momentum=0.9999,
warmup='linear',
warmup_iters=20 * 626, warmup_ratio=0.9, # warmup 20 epochs.
update_interval=update_interval,
),
]
# optimizer
optimizer = dict(
type='AdamW',
lr=4e-3, # lr = 5e-4 * (256 * 4) * 4 accumulate / 1024 = 4e-3 / bs4096
weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999),
paramwise_options={
'(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
'bias': dict(weight_decay=0.),
})
# apex
use_fp16 = True
fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))
optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16)
# lr scheduler
lr_config = dict(
policy='CosineAnnealing',
by_epoch=False, min_lr=1e-5,
warmup='linear',
warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs.
warmup_ratio=1e-6,
)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=300)
| 34.151899 | 111 | 0.640474 |
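
The effective batch size and linearly scaled learning rate implied by the
comments above, spelled out (a sketch; 512 is the reference batch size the
lr comment assumes):

gpus, bs_per_gpu, accumulate = 8, 256, 2
effective_bs = gpus * bs_per_gpu * accumulate  # 4096
print(5e-4 * effective_bs / 512)               # 4e-3, matching lr above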
16748f009db0117be1d076ddc5a413db7e45e64c | 2,274 | py | Python
| mcstasscript/interface/reader.py | PaNOSC-ViNYL/McStasScript | bd94ebc6cac290c3c9662871df40d76edbe4a44e | ["BSD-3-Clause"] | 3 | 2019-08-29T14:15:06.000Z | 2021-03-04T12:08:48.000Z
| mcstasscript/interface/reader.py | PaNOSC-ViNYL/McStasScript | bd94ebc6cac290c3c9662871df40d76edbe4a44e | ["BSD-3-Clause"] | 37 | 2019-03-05T12:28:32.000Z | 2022-03-22T10:11:23.000Z
| mcstasscript/interface/reader.py | PaNOSC-ViNYL/McStasScript | bd94ebc6cac290c3c9662871df40d76edbe4a44e | ["BSD-3-Clause"] | 6 | 2019-10-21T20:19:10.000Z | 2022-03-09T10:12:16.000Z |
import os
from mcstasscript.instr_reader.control import InstrumentReader
from mcstasscript.interface.instr import McStas_instr
| 28.425 | 79 | 0.579595 |
1676599bdfdd4b081bb8bb20aa32589f69c604ef | 3,701 | py | Python
| src/regrtest.py | ucsd-progsys/csolve-bak | 89cfeb5403e617f45ece4bae9f88f8e6cd7ca934 | ["BSD-3-Clause"] | null | null | null
| src/regrtest.py | ucsd-progsys/csolve-bak | 89cfeb5403e617f45ece4bae9f88f8e6cd7ca934 | ["BSD-3-Clause"] | 1 | 2018-04-24T10:43:07.000Z | 2018-04-24T10:43:07.000Z
| src/regrtest.py | ucsd-progsys/csolve-bak | 89cfeb5403e617f45ece4bae9f88f8e6cd7ca934 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2009 The Regents of the University of California. All rights reserved.
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that the
# above copyright notice and the following two paragraphs appear in
# all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION
# TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
import time, subprocess, optparse, sys, socket, os
import misc.rtest as rtest
solve = "./csolve -c".split()
null = open("/dev/null", "w")
now = (time.asctime(time.localtime(time.time()))).replace(" ","_")
logfile = "../tests/logs/regrtest_results_%s_%s" % (socket.gethostname (), now)
argcomment = "//! run with "
#####################################################################################
#testdirs = [("../postests", 0)]
#testdirs = [("../negtests", 1)]
#testdirs = [("../slowtests", 1)]
#DEFAULT
testdirs = [("../tests/postests", 0), ("../tests/negtests", [1, 2])]
#testdirs = [("../tests/microtests", 0)]
parser = optparse.OptionParser()
parser.add_option("-t", "--threads", dest="threadcount", default=1, type=int, help="spawn n threads")
parser.add_option("-o", "--opts", dest="opts", default="", type=str, help="additional arguments to csolve")
parser.disable_interspersed_args()
options, args = parser.parse_args()
runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount))
exit (runner.run ())
| 38.154639 | 107 | 0.676574 |
16766ccc57f251df7ba9394a55b7eabdd7d12e46 | 2,925 | py | Python
| country_capital_guesser.py | NathanMH/ComputerClub | 197585c1a77f71ee363547740d6e09f945e7526f | ["MIT"] | null | null | null
| country_capital_guesser.py | NathanMH/ComputerClub | 197585c1a77f71ee363547740d6e09f945e7526f | ["MIT"] | null | null | null
| country_capital_guesser.py | NathanMH/ComputerClub | 197585c1a77f71ee363547740d6e09f945e7526f | ["MIT"] | null | null | null |
#! /usr/bin/env python3
#######################
"""####################
Index:
1. Imports and Readme
2. Functions
3. Main
4. Testing
####################"""
#######################
###################################################################
# 1. IMPORTS AND README
###################################################################
import easygui
import country_list_getter
###################################################################
# 2. FUNCTIONS
###################################################################
# Dictionary. It has keys (Canada, France etc...) and Values (Paris, Ottawa)
country_list_getter.main()
COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST
###################################################################
# 3. MAIN
###################################################################
###################################################################
# 4. TESTING
###################################################################
# COUNTRIES_CAPITALS = {"Canada": "Ottawa", "United States": "Washington", "France": "Paris"}
# ask_to_play()
# main_question_box("Canada")
funtime()
| 33.62069 | 160 | 0.494017 |
1676c1cee546273be3e4746fcf8ddcdf0ca583bb | 2,288 | py | Python
| data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py | aframires/freesound-loop-annotator | a24e0c23bfc671e41e8627150e7b9fcae5c8cb13 | ["Apache-2.0"] | 18 | 2020-01-22T14:58:18.000Z | 2022-02-21T12:07:51.000Z
| data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py | aframires/freesound-loop-annotator | a24e0c23bfc671e41e8627150e7b9fcae5c8cb13 | ["Apache-2.0"] | 2 | 2020-02-24T13:14:05.000Z | 2020-09-21T13:34:53.000Z
| data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py | aframires/freesound-loop-annotator | a24e0c23bfc671e41e8627150e7b9fcae5c8cb13 | ["Apache-2.0"] | 1 | 2020-01-22T14:55:36.000Z | 2020-01-22T14:55:36.000Z |
# Need this to import from parent directory when running outside pycharm
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from ac_utils.general import save_to_json, load_from_json
import click
import xml.etree.ElementTree
from urllib import unquote
if __name__ == '__main__':
rekordbox_file_to_analysis_file()
| 39.448276 | 119 | 0.660402 |
1676d72870f651008f4e3aca9c90ccf681a85a4a | 5,947 | py | Python
| inventree/part.py | SergeoLacruz/inventree-python | 94681428f61de4ca51171e685812ebc436b9be42 | ["MIT"] | null | null | null
| inventree/part.py | SergeoLacruz/inventree-python | 94681428f61de4ca51171e685812ebc436b9be42 | ["MIT"] | null | null | null
| inventree/part.py | SergeoLacruz/inventree-python | 94681428f61de4ca51171e685812ebc436b9be42 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import re
import inventree.base
import inventree.stock
import inventree.company
import inventree.build
logger = logging.getLogger('inventree')
| 27.920188 | 82 | 0.626534 |
167719b0cc59eef9b7fff6f4ce109cd0d2fe8bc1
| 12,932 |
py
|
Python
|
tests/test_web_urldispatcher.py
|
avstarkov/aiohttp
|
b0a03cffccf677bf316227522a9b841c15dcb869
|
[
"Apache-2.0"
] | null | null | null |
tests/test_web_urldispatcher.py
|
avstarkov/aiohttp
|
b0a03cffccf677bf316227522a9b841c15dcb869
|
[
"Apache-2.0"
] | null | null | null |
tests/test_web_urldispatcher.py
|
avstarkov/aiohttp
|
b0a03cffccf677bf316227522a9b841c15dcb869
|
[
"Apache-2.0"
] | null | null | null |
import functools
import os
import shutil
import tempfile
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import abc, web
from aiohttp.web_urldispatcher import SystemRoute
async def test_follow_symlink(tmp_dir_path, aiohttp_client):
"""
Tests the access to a symlink, in static folder
"""
data = 'hello world'
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
with open(my_file_path, 'w') as fw:
fw.write(data)
my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink')
os.symlink(my_dir_path, my_symlink_path)
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, follow_symlinks=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink/my_file_in_dir')
assert r.status == 200
assert (await r.text()) == data
async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client):
"""
Tests accessing non-existing resource
    Try to access a non-existing resource and make sure that a 404 HTTP status
    is returned.
"""
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/non_existing_resource')
assert r.status == 404
async def test_handler_metadata_persistence():
"""
Tests accessing metadata of a handler after registering it on the app
router.
"""
app = web.Application()
    async def async_handler(request):
        """Doc"""
        return web.Response()

    def sync_handler(request):
        """Doc"""
        return web.Response()

    app.router.add_get('/async', async_handler)
with pytest.warns(DeprecationWarning):
app.router.add_get('/sync', sync_handler)
for resource in app.router.resources():
for route in resource:
assert route.handler.__doc__ == 'Doc'
def test_system_route():
route = SystemRoute(web.HTTPCreated(reason='test'))
with pytest.raises(RuntimeError):
route.url_for()
assert route.name is None
assert route.resource is None
assert "<SystemRoute 201: test>" == repr(route)
assert 201 == route.status
assert 'test' == route.reason
def test_resource_raw_match():
    async def handler(request):
        return web.Response()

    app = web.Application()
    route = app.router.add_get("/a", handler, name="a")
assert route.resource.raw_match("/a")
route = app.router.add_get("/{b}", handler, name="b")
assert route.resource.raw_match("/{b}")
resource = app.router.add_static("/static", ".")
assert not resource.raw_match("/static")
| 27.514894 | 79 | 0.634009 |
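
The add_static() call these tests exercise, in a plain application (a sketch;
the paths are hypothetical):

from aiohttp import web

app = web.Application()
app.router.add_static('/files', '/srv/files', show_index=True)
web.run_app(app)  # serves directory listings under /files/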
1678ba6ffacdb3dc2a1730ee864aab5b2813d801 | 13,683 | py | Python
| R-GMM-VGAE/model_citeseer.py | nairouz/R-GAE | acc7bfe36153a4c7d6f68e21a557bb4d99dab639 | ["MIT"] | 26 | 2021-07-18T01:31:48.000Z | 2022-03-31T03:23:11.000Z
| R-GMM-VGAE/model_citeseer.py | Fawzidev/R-GAE | 80988ddf951f1723091a04b617ce4fc6d20ab9ce | ["MIT"] | 3 | 2021-10-01T07:24:42.000Z | 2021-11-03T14:25:55.000Z
| R-GMM-VGAE/model_citeseer.py | Fawzidev/R-GAE | 80988ddf951f1723091a04b617ce4fc6d20ab9ce | ["MIT"] | 7 | 2021-07-18T01:47:01.000Z | 2022-01-24T21:09:10.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Authors : Nairouz Mrabah ([email protected]) & Mohamed Fawzi Touati ([email protected])
# @Paper : Rethinking Graph Autoencoder Models for Attributed Graph Clustering
# @License : MIT License
import torch
import numpy as np
import torch.nn as nn
import scipy.sparse as sp
import torch.nn.functional as F
from tqdm import tqdm
from torch.optim import Adam
from sklearn.mixture import GaussianMixture
from torch.optim.lr_scheduler import StepLR
from preprocessing import sparse_to_tuple
from sklearn.neighbors import NearestNeighbors
from sklearn import metrics
from munkres import Munkres
| 46.699659 | 259 | 0.625448 |
16796b947c516147ed6529d69a08e17bbd4afe73 | 3,005 | py | Python
| odoo-13.0/addons/stock_account/models/account_chart_template.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | ["MIT"] | null | null | null
| odoo-13.0/addons/stock_account/models/account_chart_template.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | ["MIT"] | null | null | null
| odoo-13.0/addons/stock_account/models/account_chart_template.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
import logging
_logger = logging.getLogger(__name__)
| 47.698413 | 180 | 0.577704 |
167a0dd80799c1a419238ba6164d01472b85e5d4 | 6,094 | py | Python
| lib/roi_data/loader.py | BarneyQiao/pcl.pytorch | 4e0280e5e1470f705e620eda26f881d627c5016c | ["MIT"] | 233 | 2019-05-10T07:17:42.000Z | 2022-03-30T09:24:16.000Z
| lib/roi_data/loader.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | ["MIT"] | 78 | 2019-05-10T21:10:47.000Z | 2022-03-29T13:57:32.000Z
| lib/roi_data/loader.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | ["MIT"] | 57 | 2019-05-10T07:17:37.000Z | 2022-03-24T04:43:24.000Z |
import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
def cal_minibatch_ratio(ratio_list):
    """Given the ratio_list, we want to make the RATIO the same for each minibatch on each GPU.
    Note: this only works when 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
    and 2) cfg.TRAIN.SCALES contains a SINGLE scale.
    Since all prepared images will have the same min side length of cfg.TRAIN.SCALES[0], we can
    pad and batch images based on that.
    """
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
def collate_minibatch(list_of_blobs):
    """Stack samples separately and return a list of minibatches.
    A batch contains NUM_GPUS minibatches, and image sizes may differ between
    minibatches. Hence, we need to stack samples from each minibatch separately.
    """
    Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched
    # into a tensor. So we keep roidb as a "list of ndarray".
lists = []
for blobs in list_of_blobs:
lists.append({'data' : blobs.pop('data'),
'rois' : blobs.pop('rois'),
'labels' : blobs.pop('labels')})
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
minibatch = default_collate(mini_list)
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
| 38.56962 | 97 | 0.639317 |
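
A sketch of how the ratio grouping above behaves with two images per batch
(assumes cfg.TRAIN.IMS_PER_BATCH == 2 and the file's own imports):

ratios = np.array([0.5, 0.8, 1.2, 2.0])
print(cal_minibatch_ratio(ratios))  # -> [0.5, 0.5, 2.0, 2.0]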
167a8c5cf5187907cc0dbc578ad93057948ece69 | 28,272 | py | Python
| venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py | andywu113/fuhe_predict | 7fd816ae83467aa659d420545cd3e25a5e933d5f | ["MIT"] | 3 | 2019-06-05T12:11:20.000Z | 2022-01-17T13:53:06.000Z
| venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py | kevinten10/Clothing-Classification | 9aac6e339651137179f4e4da36fe7743cf4bdca4 | ["MIT"] | 3 | 2021-06-08T20:58:27.000Z | 2022-03-12T00:16:49.000Z
| venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py | kevinten10/Clothing-Classification | 9aac6e339651137179f4e4da36fe7743cf4bdca4 | ["MIT"] | 1 | 2019-02-11T22:36:12.000Z | 2019-02-11T22:36:12.000Z |
import warnings
from distutils.version import LooseVersion
import numpy as np
import pytest
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC
# TODO: use another dataset that has multiple drops
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
n_samples = y.size
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
rng = np.random.RandomState(0)
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert not np.isnan(coef_path_).any()
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = rng.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar')
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar', Gram=G)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', Gram=G, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
# Now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
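# A hedged sketch (not the upstream test) of how the maps above could drive
# the positive-option check: each estimator name is resolved from
# sklearn.linear_model, fitted with positive=True, and every fitted
# coefficient is expected to be non-negative.
def check_positive_option_sketch():
    for est_name, extra_params in estimator_parameter_map.items():
        params = default_parameter.copy()
        params.update(extra_params)
        estimator = getattr(linear_model, est_name)(positive=True, **params)
        estimator.fit(X, y)
        assert min(estimator.coef_) >= 0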
| 38.360923 | 79 | 0.616051 |
167b4e3bb5a00625d3f0b289e41e2bc170fabc61
| 3,128 |
py
|
Python
|
parser.py
|
FeroxTL/pynginxconfig-new
|
71cb78c635930b0a764d3274646d436e8d2f1c4d
|
[
"MIT"
] | 8 |
2016-03-25T04:22:39.000Z
|
2022-02-12T21:46:47.000Z
|
parser.py
|
Winnerer/pynginxconfig
|
71cb78c635930b0a764d3274646d436e8d2f1c4d
|
[
"MIT"
] | null | null | null |
parser.py
|
Winnerer/pynginxconfig
|
71cb78c635930b0a764d3274646d436e8d2f1c4d
|
[
"MIT"
] | 3 |
2019-01-26T15:54:54.000Z
|
2022-02-12T21:46:47.000Z
|
#coding: utf8
import copy
import re
from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location
qwe = EmptyBlock()
parse("""#{ asd #qweqeqwe{}
servername qweqweqweqweqwe; # comment {lalalal} #1
server {
listen
8080
tls;
root /data/up1;
location / {
l200;
}
location /qwe{
s 500;
}#123
}#qweqwe""", qwe)
print(qwe.render())
qwe = EmptyBlock()
parse(""" servername wqeqweqwe;
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
}#123123
""", qwe)
print(qwe.render())
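# A hedged round-trip check, assuming only what the demos above show: parse()
# fills a block from source text and render() emits the config back out.
roundtrip = EmptyBlock()
parse("gzip on;", roundtrip)
assert "gzip" in roundtrip.render()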
| 24.825397 | 113 | 0.545716 |
167b69684843eed85973a69dafe6205fbdff9406
| 845 |
py
|
Python
|
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py
|
triompha/EarthWarrior3D
|
d68a347902fa1ca1282df198860f5fb95f326797
|
[
"MIT"
] | null | null | null |
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py
|
triompha/EarthWarrior3D
|
d68a347902fa1ca1282df198860f5fb95f326797
|
[
"MIT"
] | null | null | null |
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py
|
triompha/EarthWarrior3D
|
d68a347902fa1ca1282df198860f5fb95f326797
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import sys
print 'Build Config:'
print ' Host:win7 x86'
print ' Branch:develop'
print ' Target:win32'
print ' "%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"'
if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False):
node_name = os.environ['NODE_NAME']
source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name
source_dir = source_dir.replace("/", os.sep)
os.system("xcopy " + source_dir + " . /E /Y /H")
os.system('git pull origin develop')
os.system('git submodule update --init --force')
ret = subprocess.call('"%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"', shell=True)
os.system('git clean -xdf -f')
print 'build exit'
print ret
if ret == 0:
exit(0)
else:
exit(1)
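# The same devenv invocation written for Python 3 (a sketch; raw strings avoid
# the ambiguous backslash escapes in the Python 2 literals above):
import subprocess
ret = subprocess.call(
    r'"%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"',
    shell=True)
print('build exit', ret)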
| 33.8 | 127 | 0.668639 |
167cfaccf65c4a217ee921178f5ab5094fc6d8a6
| 241 |
py
|
Python
|
iris_sdk/models/data/ord/rate_center_search_order.py
|
NumberAI/python-bandwidth-iris
|
0e05f79d68b244812afb97e00fd65b3f46d00aa3
|
[
"MIT"
] | 2 |
2020-04-13T13:47:59.000Z
|
2022-02-23T20:32:41.000Z
|
iris_sdk/models/data/ord/rate_center_search_order.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5 |
2020-09-18T20:59:24.000Z
|
2021-08-25T16:51:42.000Z
|
iris_sdk/models/data/ord/rate_center_search_order.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5 |
2018-12-12T14:39:50.000Z
|
2020-11-17T21:42:29.000Z
|
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.ord.rate_center_search_order import \
RateCenterSearchOrderMap
| 30.125 | 64 | 0.834025 |
167df72d7c85276ff20ea4552c3c38a522dba306
| 7,024 |
py
|
Python
|
optimizer.py
|
thanusha22/CEC-1
|
02ad9247b006a348cc871a5714cf5abfa4a516af
|
[
"MIT"
] | null | null | null |
optimizer.py
|
thanusha22/CEC-1
|
02ad9247b006a348cc871a5714cf5abfa4a516af
|
[
"MIT"
] | null | null | null |
optimizer.py
|
thanusha22/CEC-1
|
02ad9247b006a348cc871a5714cf5abfa4a516af
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import optimizers.PSO as pso
import optimizers.MVO as mvo
import optimizers.GWO as gwo
import optimizers.MFO as mfo
import optimizers.CS as cs
import optimizers.BAT as bat
import optimizers.WOA as woa
import optimizers.FFA as ffa
import optimizers.SSA as ssa
import optimizers.GA as ga
import optimizers.HHO as hho
import optimizers.SCA as sca
import optimizers.JAYA as jaya
import optimizers.HYBRID as hybrid
import benchmarks
import csv
import numpy
import time
import warnings
import os
import plot_convergence as conv_plot
import plot_boxplot as box_plot
warnings.simplefilter(action="ignore")
def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):
"""
It serves as the main interface of the framework for running the experiments.
Parameters
----------
optimizer : list
The list of optimizers names
objectivefunc : list
The list of benchmark functions
NumOfRuns : int
The number of independent runs
params : set
The set of parameters which are:
1. Size of population (PopulationSize)
2. The number of iterations (Iterations)
export_flags : set
The set of Boolean flags which are:
1. Export (Exporting the results in a file)
2. Export_details (Exporting the detailed results in files)
3. Export_convergence (Exporting the covergence plots)
4. Export_boxplot (Exporting the box plots)
Returns
-----------
N/A
"""
    # Select general parameters for all optimizers (population size, number of iterations)
PopulationSize = params["PopulationSize"]
Iterations = params["Iterations"]
# Export results ?
Export = export_flags["Export_avg"]
Export_details = export_flags["Export_details"]
Export_convergence = export_flags["Export_convergence"]
Export_boxplot = export_flags["Export_boxplot"]
Flag = False
Flag_details = False
    # CSV header for the convergence data
CnvgHeader = []
results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/"
Path(results_directory).mkdir(parents=True, exist_ok=True)
for l in range(0, Iterations):
CnvgHeader.append("Iter" + str(l + 1))
for i in range(0, len(optimizer)):
for j in range(0, len(objectivefunc)):
convergence = [0] * NumOfRuns
executionTime = [0] * NumOfRuns
for k in range(0, NumOfRuns):
func_details = benchmarks.getFunctionDetails(objectivefunc[j])
x = selector(optimizer[i], func_details, PopulationSize, Iterations)
convergence[k] = x.convergence
optimizerName = x.optimizer
objfname = x.objfname
if Export_details == True:
ExportToFile = results_directory + "experiment_details.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag_details == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag_details = True # at least one experiment
executionTime[k] = x.executionTime
a = numpy.concatenate(
[[x.optimizer, x.objfname, x.executionTime], x.convergence]
)
writer.writerow(a)
out.close()
if Export == True:
ExportToFile = results_directory + "experiment.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag = True
avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns))
avgConvergence = numpy.around(
numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2
).tolist()
a = numpy.concatenate(
[[optimizerName, objfname, avgExecutionTime], avgConvergence]
)
writer.writerow(a)
out.close()
if Export_convergence == True:
conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)
if Export_boxplot == True:
box_plot.run(results_directory, optimizer, objectivefunc, Iterations)
    if Flag == False:  # failed to run at least one experiment
        print(
            "No optimizer or cost function is selected. Check the lists of available optimizers and cost functions."
        )
print("Execution completed")
| 38.173913 | 111 | 0.58955 |
167e133f17b315eee99f736bb553b46a271cd9cc
| 1,614 |
py
|
Python
|
tests/fields/test_primitive_types.py
|
slawak/dataclasses-avroschema
|
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
|
[
"MIT"
] | null | null | null |
tests/fields/test_primitive_types.py
|
slawak/dataclasses-avroschema
|
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
|
[
"MIT"
] | null | null | null |
tests/fields/test_primitive_types.py
|
slawak/dataclasses-avroschema
|
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
|
[
"MIT"
] | null | null | null |
import dataclasses
import pytest
from dataclasses_avroschema import fields
from . import consts
| 34.340426 | 87 | 0.76456 |