| column | type / range |
|---|---|
| seq_id | stringlengths 7-11 |
| text | stringlengths 156-1.7M |
| repo_name | stringlengths 7-125 |
| sub_path | stringlengths 4-132 |
| file_name | stringlengths 4-77 |
| file_ext | stringclasses, 6 values |
| file_size_in_byte | int64 156-1.7M |
| program_lang | stringclasses, 1 value |
| lang | stringclasses, 38 values |
| doc_type | stringclasses, 1 value |
| stars | int64 0-24.2k ⌀ |
| dataset | stringclasses, 1 value |
| pt | stringclasses, 1 value |
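Below the schema, each record is flattened with its fields separated by `|` marker lines in the column order above. As a minimal, hypothetical sketch of how rows with this schema could be loaded and queried (the export path `rows.jsonl` is an assumption and does not appear on this page):

```python
import pandas as pd

# Hypothetical: assumes the rows have been exported to a local JSON Lines file,
# one record per line, with the column names from the schema table above.
df = pd.read_json("rows.jsonl", lines=True)

# Example queries: keep Python files under 10 kB, then list the most-starred
# repositories that contribute samples (the `stars` column may contain nulls).
small_py = df[(df["file_ext"] == "py") & (df["file_size_in_byte"] < 10_000)]
top_repos = (
    small_py.groupby("repo_name")["stars"]
    .max()
    .sort_values(ascending=False)
    .head(10)
)
print(len(small_py), "rows")
print(top_repos)
```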
37370950188
|
import argparse
import openai
import json
import time
from tqdm.auto import tqdm
from settings import settings
from textwrap import dedent
def evaluate(dataset: str, gold_path, log_file: str):
"""
Returns the average score for the dataset.
Args:
dataset: Path to the json dataset with the generated survey sections
gold_path: Path to the json dataset with the ground-truth (gold) sections
log_file: Path to save the evaluation results
Returns:
Average score for the dataset
"""
def _get_score(generated_content, ground_truth):
if ground_truth == "":
raise ValueError("Ground truth is empty")
prompt = (
f"{base_prompt}\nSurvey Name: {survey_name.strip()}\nSurvey Section: {survey_section.strip()}\nContent: {generated_content.strip()}\nGround Truth Text: {ground_truth}\nEvaluation Form (scores ONLY)\nScore:"
)
score = get_llm_score(prompt)
return score
with open(dataset, "r") as f:
data = json.load(f)
with open(gold_path, "r", encoding="utf8") as f:
gold_data = json.load(f)
with open(log_file, "w") as f:
all_scores = []
for survey_name in data:
for survey_section, content in tqdm(data[survey_name].items(), desc=f"Evaluating {survey_name}"):
if content.get("subsections"):
all_sub_scores = []
for sub_name, sub_content in tqdm(content.get("subsections").items(), desc=f"Subsections"):
generated_content = sub_content["content"]
ground_truth = gold_data[survey_name][survey_section]["subsections"][sub_name]["content"]
sub_score = _get_score(generated_content, ground_truth)
all_sub_scores.append(sub_score)
json.dump({"survey_name": survey_name, "survey_section": survey_section, "subsection": sub_name, "score": sub_score}, f)
f.write("\n")
score = sum(all_sub_scores)/len(all_sub_scores)
else:
generated_content = content["content"]
ground_truth = gold_data[survey_name][survey_section]["content"]
score = _get_score(generated_content, ground_truth)
all_scores.append(score)
json.dump({"survey_name": survey_name, "survey_section": survey_section, "content": generated_content, "score": score}, f)
f.write("\n")
return sum(all_scores)/len(all_scores)
def get_llm_score(prompt, tries=0):
system_prompt = dedent("""
You will be given a text written for a survey section and a ground truth section.
Your task is to rate the content of the survey section on one metric comparing this text with the ground truth which has the maximum score.
Please make sure you read and understand the instructions carefully.
Please keep the document open while reviewing, and refer to it as needed.""")
try:
response = openai.ChatCompletion.create(
model=settings.model,
messages=[
{"role": "system", "content": system_prompt.strip()},
{"role": "user", "content": prompt},
],
max_tokens=settings.max_tokens,
temperature=settings.temperature,
top_p=settings.top_p,
n=settings.n,
)
except Exception as e:
time.sleep(60 + 10*tries)
print(f"Retrying {tries+1} time")
if tries < 6:
return get_llm_score(prompt, tries+1)
else:
raise e
all_predictions = [int(item["message"]["content"]) for item in response.choices]
# Scores are the sum of probabilities for each class multiplied by the class value
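# (hypothetical example: with predictions [3, 4, 4, 5] the fractions are
#  0.25, 0.50 and 0.25 for classes 3, 4 and 5, so scores = 0.25*3 + 0.50*4 + 0.25*5 = 4.0,
#  i.e. the mean of the sampled predictions)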
scores = sum(all_predictions.count(i)/len(all_predictions) * i for i in range(1, 6))
return scores
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc1/proc1.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc1_eval.json
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc2/proc2.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc2_eval.json
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc3/proc3.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc3_eval.json
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc4/proc4.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc4_eval.json
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--dataset", type=str, required=True, help="Path to the json dataset")
argparser.add_argument("--gold", type=str, required=True, help="Path to the json dataset")
argparser.add_argument("--logs", type=str, default="evaluation_results.json", help="Path to save the evaluation results")
args = argparser.parse_args()
openai.api_key = settings.openai_key
base_prompt = dedent("""Evaluation Steps:
1 - Carefully read the content to identify the main topic and key points.
2 - Evaluate whether the content adequately addresses the main topic stated in the title and provides a comprehensive technical description of it.
3 - Assign a score to the text on a scale of 1 to 5, where 1 represents the lowest score and 5 represents the highest score, according to the Evaluation Criteria.""")
average_score = evaluate(args.dataset, args.gold, args.logs)
|
ZanezZephyrs/AutoSurvey
|
AutoSurvey/evaluation/evaluate.py
|
evaluate.py
|
py
| 5,555 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26803495013
|
import random
import copy
class Board:
def __init__(self, players: tuple, columns=64, rows=48):
"""
Initialize the game, and sets defaults setting.
"""
self.columns = columns
self.rows = rows
self.players = players
self.history_grid = [[-1 for i in range(self.columns)] for j in range(self.rows)]
self.neighbor_grid = [[[0 for p in range(len(self.players))] for i in range(self.columns)] for j in range(self.rows)]
self.current_grid = [[[0 for p in range(len(self.players))] for i in range(self.columns)] for j in range(self.rows)]
def randomize_grid(self):
"""
Inputs random alive cells.
"""
for r in range(self.rows):
for c in range(self.columns):
for p in range(len(self.players)):
if (random.randint(0, 9) == 0):
self.current_grid[r][c][p] = 1
def update_neighbors(self):
"""
Update the neighbors count.
"""
# Helper tables for continuous edges.
dx = [-1, -1, -1, 0, 0, 1, 1, 1]
dy = [-1, 0, 1, -1, 1, -1, 0, 1]
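# Neighbour indices wrap around via the modulo below, so the board behaves like a torus
# (e.g. the left neighbour of column 0 is the last column).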
# Count how many alive neighbors.
for r in range(self.rows):
for c in range(self.columns):
for p in range(len(self.players)):
alive = 0
for i in range(8):
nc = (c + dy[i]) % self.columns
nr = (r + dx[i]) % self.rows
if self.current_grid[nr][nc][p]:
alive += 1
self.neighbor_grid[r][c][p] = alive
def update_conflicting(self):
"""
Resolve status in cells with conflicts and set history value.
"""
# Count how many alive neighbors.
for r in range(self.rows):
for c in range(self.columns):
nc = [0 for p in range(len(self.players))]
for p in range(len(self.players)):
if self.current_grid[r][c][p]:
nc[p] = self.neighbor_grid[r][c][p]
if len(self.players) > nc.count(0):
max_nc = max(nc)
max_nc_c = nc.count(max_nc)
if 1 == max_nc_c:
pim = nc.index(max_nc)
for p in range(len(self.players)):
if pim == p:
self.current_grid[r][c][p] = 1
self.history_grid[r][c] = p
else:
self.current_grid[r][c][p] = 0
else:
self.history_grid[r][c] = -1
for p in range(len(self.players)):
if max_nc == nc[p]:
self.current_grid[r][c][p] = 1
else:
self.current_grid[r][c][p] = 0
def update_grids(self):
"""
Apply the rules on all grids.
"""
self.update_neighbors()
self.update_conflicting()
# Apply usual rules on a temporary copy.
tmp_grid = copy.deepcopy(self.current_grid)
for r in range(self.rows):
for c in range(self.columns):
for p in range(len(self.players)):
# Apply alive cell rules
alive = self.neighbor_grid[r][c][p]
if self.current_grid[r][c][p]:
if alive == 2 or alive == 3:
tmp_grid[r][c][p] = 1
else:
tmp_grid[r][c][p] = 0
# Apply dead Cell rules
else:
if alive == 3:
tmp_grid[r][c][p] = 1
else:
tmp_grid[r][c][p] = 0
# Copy new state by value.
self.current_grid = copy.deepcopy(tmp_grid)
|
vichango/infolipo
|
GameOfLife/src/board.py
|
board.py
|
py
| 4,078 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11107764647
|
aa=input()
aa2=list(map(int,input().split()))
c=d=0
for i in range(0, len(aa2)):  # iterate over valid indices only (the original +1 ran past the end of the list)
if(i==aa2[i]):
c=c+1
else:
d=d+1
break
if(d>=1):
print(d)
|
Ibarsjoel1234/Program-Python
|
lenasindex.py
|
lenasindex.py
|
py
| 176 |
python
|
fa
|
code
| 0 |
github-code
|
6
|
2705587377
|
from __future__ import annotations
import abc
from collections import ChainMap
from typing import Any, ClassVar, Optional, Type, TypeVar
import attr
from basic_notion import exc
from basic_notion.utils import set_to_dict, del_from_dict
def _get_attr_keys_for_cls(
members: dict[str, Any],
only_editable: bool = False,
only_derived: bool = False,
) -> dict[str, tuple[str, ...]]:
from basic_notion.attr import ItemAttrDescriptor
result: dict[str, tuple[str, ...]] = dict()
for name, prop in members.items():
if not isinstance(prop, ItemAttrDescriptor):
continue
if only_editable and not prop.editable:
continue
if only_derived and not prop.derived:
continue
attr_key: tuple[str, ...]
try:
attr_key = prop.key
except AttributeError:
attr_key = (name,)
result[name] = attr_key
return result
class NotionItemBaseMetaclass(abc.ABCMeta):
# abc.ABCMeta is needed here for the abc.ABC functionality
"""Metaclass that adds ``__notion_attr_keys__`` to all ``NotionItemBase`` subclasses"""
def __new__(cls, name: str, bases: tuple[type, ...], dct: dict):
attr_keys_name = '__notion_attr_keys__'
editable_keys_name = '__notion_editable_keys__'
derived_keys_name = '__notion_derived_keys__'
base_attr_key_maps = tuple(
getattr(base, attr_keys_name) # type: ignore
for base in bases if type(base) is cls
)
base_editable_key_maps = tuple(
getattr(base, editable_keys_name) # type: ignore
for base in bases if type(base) is cls
)
base_derived_key_maps = tuple(
getattr(base, derived_keys_name) # type: ignore
for base in bases if type(base) is cls
)
attr_keys = dict(ChainMap(_get_attr_keys_for_cls(dct), *base_attr_key_maps))
editable_keys = dict(ChainMap(_get_attr_keys_for_cls(dct, only_editable=True), *base_editable_key_maps))
derived_keys = dict(ChainMap(_get_attr_keys_for_cls(dct, only_derived=True), *base_derived_key_maps))
# Added to __dict__
dct[attr_keys_name] = attr_keys
dct[editable_keys_name] = editable_keys
dct[derived_keys_name] = derived_keys
new_cls = super().__new__(cls, name, bases, dct)
return new_cls
_ITEM_TV = TypeVar('_ITEM_TV', bound='NotionItemBase')
@attr.s(slots=True)
class NotionItemBase(metaclass=NotionItemBaseMetaclass):
__notion_attr_keys__: dict[str, tuple[str, ...]] = None # type: ignore # defined in metaclass
__notion_editable_keys__: dict[str, tuple[str, ...]] = None # type: ignore # defined in metaclass
__notion_derived_keys__: dict[str, tuple[str, ...]] = None # type: ignore # defined in metaclass
OBJECT_TYPE_KEY_STR: ClassVar[str] = ''
OBJECT_TYPE_STR: ClassVar[str] = ''
_data: Optional[dict[str, Any]] = attr.ib(kw_only=True, default=None)
@classmethod
@property
def attr_keys(cls) -> dict[str, tuple[str, ...]]:
return cls.__notion_attr_keys__
@classmethod
@property
def editable_keys(cls) -> dict[str, tuple[str, ...]]:
return cls.__notion_editable_keys__
@classmethod
@property
def derived_keys(cls) -> dict[str, tuple[str, ...]]:
return cls.__notion_derived_keys__
@property
def data(self) -> dict:
if self._data is None:
raise exc.ItemHasNoData(f'Object {type(self).__name__} has no data')
return self._data
@classmethod
def _make_inst_attr_dict(cls, kwargs: dict[str, Any]) -> dict:
data: dict[str, Any] = {}
for name, key in cls.editable_keys.items(): # type: ignore
if name not in kwargs:
continue
value = kwargs[name]
# Get attr descriptor and its `set_converter` callable
# (if it exists) to convert the value into its serializable form
prop = getattr(cls, name)
set_converter = prop.set_converter
if set_converter is not None:
value = set_converter(value)
set_to_dict(data, key, value)
return data
@classmethod
def _make_inst_dict(cls, kwargs: dict[str, Any]) -> dict:
data = {}
if cls.OBJECT_TYPE_KEY_STR and cls.OBJECT_TYPE_STR:
data[cls.OBJECT_TYPE_KEY_STR] = cls.OBJECT_TYPE_STR
data.update(cls._make_inst_attr_dict(kwargs))
return data
@classmethod
def make(cls: Type[_ITEM_TV], **kwargs: Any) -> _ITEM_TV:
"""Generate instance from attributes"""
data = cls._make_inst_dict(kwargs)
return cls(data=data)
def clear_derived_attrs(self) -> None:
for name, other_key in self.attr_keys.items():
if name in self.derived_keys:
# Is not editable
del_from_dict(self.data, other_key)
|
altvod/basic-notion
|
src/basic_notion/base.py
|
base.py
|
py
| 4,973 |
python
|
en
|
code
| 6 |
github-code
|
6
|
9836602574
|
import matplotlib.pyplot as plt
import numpy as np
k=9.0e9
q=1.9e-19
d=1.0e1
t=np.linspace(0,2*np.pi,10000)
i=1
V=(3*k*q*(d**2)/(2*(i**3)))*np.cos(2*t)
plt.plot(t,V,color='black')
plt.xlabel('theta')
plt.ylabel('Potential')
plt.show()
|
Rohan-Chakravarthy/Basic-Mathematics-Programs
|
quad alt.py
|
quad alt.py
|
py
| 247 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33022799224
|
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'yj.views.home'),
url(r'^api/', include('api.urls')),
# Include an application:
# url(r'^app_name/', include('app_name.urls', namespace="app_name")),
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += staticfiles_urlpatterns()
|
bob1b/yj
|
yj/urls.py
|
urls.py
|
py
| 477 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27984628392
|
from scipy.io.wavfile import read, write
import io
import matplotlib.pyplot as plt
## This may look a bit intricate/useless, considering that scipy's read() and write() functions already work with a
## numpy ndarray, but the BytesIO "hack" is useful in case you get the wav not through a file, but through some websocket or
## HTTP POST request. This should obviously work with any other sound format, as long as you have the proper decoding function
with open("gn.wav", "rb") as wavfile:
input_wav = wavfile.read()
# here, input_wav is a bytes object representing the wav object
rate, data = read(io.BytesIO(input_wav))
# data is a numpy ND array representing the audio data. Let's do some stuff with it
reversed_data = data[::-1] #reversing it
print(reversed_data.shape)
plt.plot(reversed_data)
plt.show()
#then, let's save it to a BytesIO object, which is a buffer for bytes object
bytes_wav = bytes()
byte_io = io.BytesIO(bytes_wav)
write(byte_io, rate, reversed_data)
byte_io.seek(0)  # rewind the buffer; otherwise read() would return empty bytes
output_wav = byte_io.read()
|
Hrithik0the0research/gan-discrimator
|
gan/synthetic-data-generator-main/audio_read.py
|
audio_read.py
|
py
| 1,017 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70789372028
|
# Counting element
# Given an integer array, count elements x such that x + 1 is also in the array. If there are duplicates in the array, count them separately.
# Example 1:
# Input: {1, 2, 3}
# Output: 2
# Explanation:
# First element is 1 + 1 = 2 (2 is present in an array)
# Second element is 2 + 1 = 3 (3 is present in an array)
# Third element is 3 + 1 = 4 (4 is not present in an array)
#
# Example 2:
# Input: {1, 1, 3, 3, 5, 5, 7, 7}
# Output: 0
#
# Example 3:
# Input: {1, 3, 2, 3, 5, 0}
# Output: 3
# Explanation:
# 1 + 1 = 2 (Exist)
# 3 + 1 = 4 (Not exist)
# 2 + 1 = 3 (Exist)
# 3 + 1 = 4 (Not exist)
# 5 + 1 = 6 (Not exist)
# 0 + 1 = 1 (Exist)
#
# Example 4:
# Input: {1, 1, 2, 2}
# Output: 2
from collections import defaultdict
class Solution(object):
def countElements(self, arr):
number_dictionary = defaultdict(int)
count = 0
for num in arr:
number_dictionary[num] += 1
for num in number_dictionary:
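# every occurrence of num - 1 is an element x whose successor x + 1 (= num) is present;
# e.g. for {1, 3, 2, 3, 5, 0} the keys 1, 2 and 3 each contribute one match, giving 3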
count += number_dictionary.get(num-1, 0)
return count
|
deepk777/leetcode
|
30-day-challenge-2020/April/week1/day7-counting-element.py
|
day7-counting-element.py
|
py
| 1,238 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40128549954
|
# included from libs/mincostflow.py
"""
Min Cost Flow
"""
# derived: https://atcoder.jp/contests/practice2/submissions/16726003
from heapq import heappush, heappop
class MinCostFlow():
def __init__(self, n):
self.n = n
self.graph = [[] for _ in range(n)]
self.pos = []
def add_edge(self, fr, to, cap, cost):
#assert 0 <= fr < self.n
#assert 0 <= to < self.n
m = len(self.pos)
self.pos.append((fr, len(self.graph[fr])))
self.graph[fr].append([to, len(self.graph[to]), cap, cost])
self.graph[to].append([fr, len(self.graph[fr]) - 1, 0, -cost])
return m
def get_edge(self, idx):
#assert 0 <= idx < len(self.pos)
to, rev, cap, cost = self.graph[self.pos[idx][0]][self.pos[idx][1]]
_rev_to, _rev_rev, rev_cap, _rev_cost = self.graph[to][rev]
return self.pos[idx][0], to, cap + rev_cap, rev_cap, cost
def edges(self):
for i in range(len(self.pos)):
yield self.get_edge(i)
def dual_ref(self, s, t):
dist = [2**63 - 1] * self.n
dist[s] = 0
vis = [0] * self.n
self.pv = [-1] * self.n
self.pe = [-1] * self.n
queue = []
heappush(queue, (0, s))
while queue:
k, v = heappop(queue)
if vis[v]:
continue
vis[v] = True
if v == t:
break
for i in range(len(self.graph[v])):
to, _rev, cap, cost = self.graph[v][i]
if vis[to] or cap == 0:
continue
cost += self.dual[v] - self.dual[to]
if dist[to] - dist[v] > cost:
dist[to] = dist[v] + cost
self.pv[to] = v
self.pe[to] = i
heappush(queue, (dist[to], to))
if not vis[t]:
return False
for v in range(self.n):
if not vis[v]:
continue
self.dual[v] -= dist[t] - dist[v]
return True
def flow(self, s, t):
return self.flow_with_limit(s, t, 2**63 - 1)
def flow_with_limit(self, s, t, limit):
return self.slope_with_limit(s, t, limit)[-1]
def slope(self, s, t):
return self.slope_with_limit(s, t, 2**63 - 1)
def slope_with_limit(self, s, t, limit):
#assert 0 <= s < self.n
#assert 0 <= t < self.n
#assert s != t
flow = 0
cost = 0
prev_cost = -1
res = [(flow, cost)]
self.dual = [0] * self.n
while flow < limit:
if not self.dual_ref(s, t):
break
c = limit - flow
v = t
while v != s:
c = min(c, self.graph[self.pv[v]][self.pe[v]][2])
v = self.pv[v]
v = t
while v != s:
_to, rev, _cap, _ = self.graph[self.pv[v]][self.pe[v]]
self.graph[self.pv[v]][self.pe[v]][2] -= c
self.graph[v][rev][2] += c
v = self.pv[v]
d = -self.dual[s]
flow += c
cost += c * d
if prev_cost == d:
res.pop()
res.append((flow, cost))
prev_cost = cost
return res
# end of libs/mincostflow.py
# included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(N, M, AS, BS, RS):
global mcf
INF = 10 ** 5
mcf = MinCostFlow(N + 5)
start = N
goal = N + 1
round = N + 2
for i in range(3):
mcf.add_edge(start, round + i, M, 0)
for i in range(3):
for j in range(N):
r = AS[j] * (BS[j] ** (i + 1)) % RS[i]
mcf.add_edge(round + i, j, 1, INF - r)
for j in range(N):
cs = [AS[j] * (BS[j] ** (k + 1)) for k in range(3)]
cs.append(0)
for k in range(3):
c = cs[k] - cs[k-1]
mcf.add_edge(j, goal, 1, c)
return INF * (3 * M) - mcf.flow(start, goal)[-1]
def main():
# parse input
N, M = map(int, input().split())
AS = list(map(int, input().split()))
BS = list(map(int, input().split()))
RS = list(map(int, input().split()))
print(solve(N, M, AS, BS, RS))
# tests
T1 = """
2 1
3 2
3 3
100000 100000 100000
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
81
"""
T2 = """
4 2
2 4 3 3
4 2 3 3
100000 100000 100000
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
210
"""
T3 = """
20 19
3 2 3 4 3 3 2 3 2 2 3 3 4 3 2 4 4 3 3 4
2 3 4 2 4 3 3 2 4 2 4 3 3 2 3 4 4 4 2 2
3 4 5
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
-1417
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
|
nishio/atcoder
|
PAST3/o.py
|
o.py
|
py
| 5,401 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33369908821
|
"""
A script to extract IuPS addresses from an RNC CMExport file
Works with Huawei RNC CMExport
By Tubagus Rizal
2017
"""
import xml.etree.ElementTree as ET
import glob
import pdb
def getRncInfo(xmlroot):
# get RNC info
rnc = {}
for rncInfo in xmlroot.findall(".//*[@className='BSC6900UMTSNE']/attr"):
if rncInfo.attrib["name"] == "fdn":
rnc["fdn"] = rncInfo.text
if rncInfo.attrib["name"] == "name":
rnc["name"] = rncInfo.text
if rncInfo.attrib["name"] == "neID":
rnc["neid"] = rncInfo.text
return rnc
def getIuPSIpAddress(xmlroot):
# get a list of IuPS interface
iupsIpAddr = []
for ipPath in xmlroot.findall(".//*[@className='BSC6900UMTSIPPATH']/attr"):
if ipPath.attrib["name"] == "IPADDR":
ipAddress = ipPath.text
if ipPath.attrib["name"] == "ITFT" and ipPath.text == "IUPS":
if not ipAddress in iupsIpAddr:
iupsIpAddr.append(ipAddress)
return iupsIpAddr
def main():
xmlFolder = "D:\\1000-MyDocuments\\100-Projects\\098-ProximusCFT\\TAADisttributor\\3G-OSS"
xmlFiles = [file for file in glob.glob(xmlFolder + "/**/*.xml", recursive=True)]
for xmlFile in xmlFiles:
tree = ET.parse(xmlFile)
root = tree.getroot()
#print result
rnc = getRncInfo(root)
for key, value in rnc.items():
print (key, value, ",", end=" ")
iupsIpAddr = getIuPSIpAddress(root)
for item in iupsIpAddr:
print ( "IuPS: ", item, end=" ")
print("\n")
if __name__ == "__main__":
main()
|
trizal/python-CMExportReader
|
getIuPS.py
|
getIuPS.py
|
py
| 1,817 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5606424198
|
chicken_orders = int(input())
fish_orders = int(input())
veg_orders = int(input())
delivery_fee = 2.50
chicken_meal = 10.35
fish_meal = 12.40
veg_meal = 8.15
food_price = (chicken_orders * chicken_meal) + (fish_orders * fish_meal) + (veg_orders * veg_meal)
dessert = food_price * 0.20
full_cost = food_price + dessert + delivery_fee
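# worked example (hypothetical order): 1 chicken + 1 fish + 1 veg
# food_price = 10.35 + 12.40 + 8.15 = 30.90
# dessert    = 0.20 * 30.90        =  6.18
# full_cost  = 30.90 + 6.18 + 2.50 = 39.58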
print(full_cost)
|
koleva26k/programming_basics
|
food_delivery.py
|
food_delivery.py
|
py
| 354 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24991661721
|
def ustr(value):
"""This method is similar to the builtin `str` method, except
it will return Unicode string.
@param value: the value to convert
@rtype: unicode
@return: unicode string
"""
if isinstance(value, unicode):
return value
if hasattr(value, '__unicode__'):
return unicode(value)
if not isinstance(value, str):
value = str(value)
return unicode(value, 'utf-8')
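# e.g. ustr(5) -> u'5'; ustr('caf\xc3\xa9') -> u'caf\xe9' (a UTF-8 byte string decoded to unicode, Python 2)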
__builtins__['ustr'] = ustr
# vim: ts=4 sts=4 sw=4 si et
|
factorlibre/openerp-extra-6.1
|
comparison/website/erpComparator/erpcomparator/__init__.py
|
__init__.py
|
py
| 500 |
python
|
en
|
code
| 9 |
github-code
|
6
|
12388726621
|
import os, sys
import subprocess
import json
import uproot
import awkward as ak
from coffea import processor, util, hist
from coffea.nanoevents import NanoEventsFactory, NanoAODSchema
from boostedhiggs import HbbPlotProcessor
from distributed import Client
from lpcjobqueue import LPCCondorCluster
from dask.distributed import performance_report
from dask_jobqueue import HTCondorCluster, SLURMCluster
env_extra = [
f"export PYTHONPATH=$PYTHONPATH:{os.getcwd()}",
]
cluster = LPCCondorCluster(
transfer_input_files=["boostedhiggs"],
ship_env=True,
memory="8GB",
image="coffeateam/coffea-dask:0.7.11-fastjet-3.3.4.0rc9-ga05a1f8"
)
cluster.adapt(minimum=1, maximum=50)
client = Client(cluster)
print("Waiting for at least one worker...") # noqa
client.wait_for_workers(1)
year = sys.argv[1]
with performance_report(filename="dask-report.html"):
# get list of input files
infiles = subprocess.getoutput("ls infiles/"+year+"*.json").split()
for this_file in infiles:
if "bsm" in this_file:
continue
index = this_file.split("_")[1].split(".json")[0]
print(this_file, index)
if "qcd" in index or "higgs" in index or "data" in index or 'top' in index:
continue
uproot.open.defaults["xrootd_handler"] = uproot.source.xrootd.MultithreadedXRootDSource
p = HbbPlotProcessor(year=year,jet_arbitration='ddb')
args = {'savemetrics':True, 'schema':NanoAODSchema}
output = processor.run_uproot_job(
this_file,
treename="Events",
processor_instance=p,
executor=processor.dask_executor,
executor_args={
"client": client,
# "skipbadfiles": args.skipbadfiles,
"schema": processor.NanoAODSchema,
"retries": 50,
},
chunksize=100000,
# maxchunks=args.max,
)
outfile = 'outfiles-plots/'+str(year)+'_dask_'+index+'.coffea'
util.save(output, outfile)
|
jennetd/hbb-coffea
|
vbf-scripts/submit-plots-dask.py
|
submit-plots-dask.py
|
py
| 2,206 |
python
|
en
|
code
| 4 |
github-code
|
6
|
29867399693
|
import os
import csv
import sqlite3
DATA_DIR="data"
DATABASE="database.db"
sensors = {
"dht22": {
"table": "temperaturUndLuftdruck",
"mapping": {
"sensor_id": "SensorID",
"timestamp": "datetime",
"humidity": "luftwert",
"temperature": "tempwert"
}
},
"sds011": {
"table": "feinstaubsensor",
"mapping": {
"sensor_id": "SensorID",
"timestamp": "datetime",
"P1": "p1wert",
"P2": "p2wert"
}
}
}
def main():
if not os.path.isdir(DATA_DIR):
print("Daten wurden nicht heruntergeladen")
return
count = 0
con = sqlite3.connect(DATABASE)
for sensor, conf in sensors.items():
table = conf["table"]
mapping = conf["mapping"]
mapping_count = len(mapping)
with open("schema/{}.sql".format(table), "r") as schema:
schema = schema.read()
cur = con.cursor()
try:
cur.executescript(schema)
except sqlite3.OperationalError:
pass
TABLE_PLACEHOLDERS = ", ".join(mapping.values())
VALUE_PLACEHOLDER = ", ".join([":{}".format(key) for key in mapping.keys()])
QUERY = """
INSERT OR IGNORE
INTO {0}({1})
VALUES ({2})
""".format(table, TABLE_PLACEHOLDERS, VALUE_PLACEHOLDER)
for root, dirs, files in os.walk("{}/{}".format(DATA_DIR, sensor)):
for name in files:
if not name.endswith(".csv"):
continue
full_name = "{}/{}".format(root, name)
with open(full_name, "r") as raw_data:
data = csv.DictReader(raw_data, delimiter=";")
data = list(data)
cur = con.cursor()
cur.executemany(QUERY, data)
con.commit()
con.close()
if __name__ == "__main__":
main()
|
Jan200101/feinstaub-projekt
|
import.py
|
import.py
|
py
| 1,971 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73252316348
|
""""
Для заданного набора N точек на плоскости найти прямоугольник минимальной площади,
содержащий все указанные точки.
Стороны прямоугольника не обязаны быть параллельными координатным осям
"""
# important functions: MinimumBoundingBox
from scipy.spatial import ConvexHull
from math import sqrt,atan2
import numpy as np
from math import atan2, cos, sin, pi
from collections import namedtuple
import matplotlib.pyplot as plt
from min_square import Point
def unit_vector(pt0, pt1):
dis_0_to_1 = sqrt((pt0[0] - pt1[0])**2 + (pt0[1] - pt1[1])**2)
return (pt1[0] - pt0[0]) / dis_0_to_1, \
(pt1[1] - pt0[1]) / dis_0_to_1
def orthogonal_vector(vector):
return -1 * vector[1], vector[0]
def bounding_area(index, hull):
unit_vector_p = unit_vector(hull[index], hull[index+1])
unit_vector_o = orthogonal_vector(unit_vector_p)
dis_p = tuple(np.dot(unit_vector_p, pt) for pt in hull)
dis_o = tuple(np.dot(unit_vector_o, pt) for pt in hull)
min_p = min(dis_p)
min_o = min(dis_o)
len_p = max(dis_p) - min_p
len_o = max(dis_o) - min_o
return {'area': len_p * len_o,
'length_parallel': len_p,
'length_orthogonal': len_o,
'rectangle_center': (min_p + len_p / 2, min_o + len_o / 2),
'unit_vector': unit_vector_p,
}
def to_xy_coordinates(unit_vector_angle, point):
angle_orthogonal = unit_vector_angle + pi / 2
return point[0] * cos(unit_vector_angle) + point[1] * cos(angle_orthogonal), \
point[0] * sin(unit_vector_angle) + point[1] * sin(angle_orthogonal)
def rotate_points(center_of_rotation, angle, points):
rot_points = []
ang = []
for pt in points:
diff = tuple([pt[d] - center_of_rotation[d] for d in range(2)])
diff_angle = atan2(diff[1], diff[0]) + angle
ang.append(diff_angle)
diff_length = sqrt(sum([d**2 for d in diff]))
rot_points.append((center_of_rotation[0] + diff_length * cos(diff_angle),
center_of_rotation[1] + diff_length * sin(diff_angle)))
return rot_points
def rectangle_corners(rectangle):
corner_points = []
for i1 in (.5, -.5):
for i2 in (i1, -1 * i1):
corner_points.append((rectangle['rectangle_center'][0] + i1 * rectangle['length_parallel'],
rectangle['rectangle_center'][1] + i2 * rectangle['length_orthogonal']))
return rotate_points(rectangle['rectangle_center'], rectangle['unit_vector_angle'], corner_points)
BoundingBox = namedtuple('BoundingBox', ('area',
'length_parallel',
'length_orthogonal',
'rectangle_center',
'unit_vector',
'unit_vector_angle',
'corner_points'
)
)
# use this function to find the listed properties of the minimum bounding box of a point cloud
def MinimumBoundingBox(points):
# Requires: points to be a list or tuple of 2D points. ex: ((5, 2), (3, 4), (6, 8))
# needs to be more than 2 points
# Effects: returns a namedtuple that contains:
# area: area of the rectangle
# length_parallel: length of the side that is parallel to unit_vector
# length_orthogonal: length of the side that is orthogonal to unit_vector
# rectangle_center: coordinates of the rectangle center
# (use rectangle_corners to get the corner points of the rectangle)
# unit_vector: direction of the length_parallel side. RADIANS
# (it's orthogonal vector can be found with the orthogonal_vector function
# unit_vector_angle: angle of the unit vector
# corner_points: set that contains the corners of the rectangle
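# (hypothetical sanity check: for the axis-aligned corners ((0, 0), (2, 0), (2, 1), (0, 1))
#  the returned area is 2.0 and corner_points recovers those four corners,
#  up to floating-point rounding)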
if len(points) <= 2: raise ValueError('More than two points required.')
hull_ordered = [points[index] for index in ConvexHull(points).vertices]
hull_ordered.append(hull_ordered[0])
hull_ordered = tuple(hull_ordered)
min_rectangle = bounding_area(0, hull_ordered)
for i in range(1, len(hull_ordered)-1):
rectangle = bounding_area(i, hull_ordered)
if rectangle['area'] < min_rectangle['area']:
min_rectangle = rectangle
min_rectangle['unit_vector_angle'] = atan2(min_rectangle['unit_vector'][1], min_rectangle['unit_vector'][0])
min_rectangle['rectangle_center'] = to_xy_coordinates(min_rectangle['unit_vector_angle'], min_rectangle['rectangle_center'])
# this is ugly but a quick hack and is being changed in the speedup branch
return BoundingBox(
area = min_rectangle['area'],
length_parallel = min_rectangle['length_parallel'],
length_orthogonal = min_rectangle['length_orthogonal'],
rectangle_center = min_rectangle['rectangle_center'],
unit_vector = min_rectangle['unit_vector'],
unit_vector_angle = min_rectangle['unit_vector_angle'],
corner_points = set(rectangle_corners(min_rectangle))
)
if __name__ =='__main__':
points = []
for _ in range(int(input('Count points\n'))):
# points.append(Point.create_point())  # for manual input of the polygon's points
points.append(Point().get_tuple_style())
rectangle = MinimumBoundingBox(points).corner_points
rectangle = sorted(rectangle, key=lambda p: atan2(p[1], p[0]))
print(rectangle)
print([Point(p[0], p[1]).__str__('blue') for p in points])
plt.fill(
[p[0] for p in rectangle],
[p[1] for p in rectangle],
fill=False
)
plt.show()
|
ded-evsey/TandACG
|
21.py
|
21.py
|
py
| 6,002 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29249896795
|
import matplotlib.pyplot as plt
import seaborn as sns
from table import create_table
import pandas as pd
import streamlit as st
import plotly.tools as tls
import plotly.figure_factory as ff
import numpy as np
import plotly.express as px
from download import report_downlaoder
import os
st.image('Somaiya Header.png',width=500)
st.title('Result analysis')
st.subheader('KJ somaiya institute of engineering and IT')
st.sidebar.title('Welcome to the result analyser App')
st.sidebar.markdown('<html><body style="background-color:yellow;"> You can do <b>Visual</b> or <b>Database</b> analysis: fill in the parameters, then select what you need. </body></html>'
,unsafe_allow_html=True)
Analyse_type=st.sidebar.radio('Analyser',('Visual','Database Analyser','Reports'))
filename = st.text_input('Enter a file path:','Sem4.pdf')
Semester=st.text_input("Enter the No of semester",4)
Subject_no =st.text_input("enter the number of subjects",5)
Labs_no =st.text_input("enter the number of Labs",5)
@st.cache(persist=True)
def load_data():
theory_df,pracs_df=create_table(filename,Subject_no,Labs_no)
return theory_df,pracs_df
cleandf=load_data()[0]
pracsdf=load_data()[1]
#pie chart
explode=[0.1,0]
colours=['lightgreen','Red']
fig=px.pie(cleandf,labels=['Pass','Fail'],names='Pass/fail',title='Passing and failing percentage')
if Analyse_type=='Visual':
#Pie chart
st.markdown('<html><h1><body style="background-color:orange;">Pie chart</body></h1></html>',unsafe_allow_html=True)
#fig=cleandf['Pass/fail'].value_counts().plot(kind='pie',labels=['pass','fail'],autopct='%1.1f%%',startangle=140,
# explode=explode,shadow=True,colors=colours,figsize=(5,5))
st.plotly_chart(fig)
#Bar chart
st.markdown('<html><h1><body style="background-color:pink;">Bar charts</body></h1></html>',unsafe_allow_html=True)
plt.style.use('bmh')
colors=['green','slateblue','mediumorchid','gold','darkorange','coral','yellow']
k=1
for i in range(int(Subject_no)):
fig=plt.figure()
#cleandf.iloc[:,k].plot(kind='hist',bins=3,color=colors[k])
sns.distplot(cleandf.iloc[:,k],color=colors[k],norm_hist=True)
plt.xlabel(f'Marks in {cleandf.columns[k]}')
plt.ylabel('No of students')
try:
plotly_fig = tls.mpl_to_plotly(fig)
except:
subject=cleandf.columns[k]
plotly_fig=px.histogram(cleandf,x=subject,histnorm='probability density',opacity=0.8,title=f'Marks in {cleandf.columns[k]}',color_discrete_sequence=['indianred'] )
st.plotly_chart(plotly_fig)
k=k+1
if(k>int(Subject_no)):
break
#Bar chart Pracicals
st.markdown('<html><h1><body style="background-color:cyan;">Bar charts for practicals</body></h1></html>',unsafe_allow_html=True)
plt.style.use('bmh')
colors=['green','slateblue','mediumorchid','gold','darkorange','coral','yellow']
k=1
for i in range(int(Subject_no)):
fig=plt.figure()
#cleandf.iloc[:,k].plot(kind='hist',bins=3,color=colors[k])
sns.distplot(pracsdf.iloc[:,k],color=colors[k],norm_hist=True)
plt.xlabel(f'Marks in {pracsdf.columns[k]}')
plt.ylabel('No of students')
try:
plotly_fig = tls.mpl_to_plotly(fig)
except:
subject=pracsdf.columns[k]
plotly_fig=px.histogram(pracsdf,x=subject,histnorm='probability density',opacity=0.8,title=f'Marks in {pracsdf.columns[k]}',color_discrete_sequence=['indianred'] )
st.plotly_chart(plotly_fig)
k=k+1
if(k>int(Subject_no)):
break
#Database
if Analyse_type=='Database Analyser':
st.markdown('<html><h1><body style="background-color:Grey;">Database Analysis</body></h1></html>',
unsafe_allow_html=True)
from database import create_database,query_execute
create_database(cleandf,pracsdf,Semester)
st.subheader(f'SQL Theory table for sem {Semester}' )
query=st.text_input("enter a query for the sql databse",f'SELECT * FROM Sem_{Semester}_theory_results')
#query=f'SELECT * FROM Sem_{Semester}_theory_results
output=query_execute(query)
st.dataframe(output)
st.subheader(f'SQL practical table for sem {Semester}')
query=st.text_input("enter a query for the sql databse",f'SELECT * FROM Sem_{Semester}_pracs_results')
#query=f'SELECT * FROM Sem_{Semester}_pracs_results'
output_pracs=query_execute(query)
st.dataframe(output_pracs)
if Analyse_type=='Reports':
#First class
st.markdown('<html><h1><body style="background-color:cyan;">First class students</body></h1></html>',
unsafe_allow_html=True)
FC=cleandf[cleandf['CGPA']>=7.75]
fc_students=FC.shape[0]
st.dataframe(FC)
st.write(f' There are {fc_students} students in first class')
#Second class
st.markdown('<html><h1><body style="background-color:cyan;">Second class students</body></h1></html>',
unsafe_allow_html=True)
SC=cleandf[(cleandf['CGPA']>=6.75) & (cleandf['CGPA']<=7.74)]
st.dataframe(SC)
sc_students=SC.shape[0]
st.write(f' There are {sc_students} students in second class')
#pass class
st.markdown('<html><h1><body style="background-color:cyan;">pass class students</body></h1></html>',
unsafe_allow_html=True)
PC=cleandf[(cleandf['CGPA']>=4.00) & (cleandf['CGPA']<=5.74)]
st.dataframe(PC)
pc_students=PC.shape[0]
st.write(f' There are {pc_students} students in pass class')
#Top 5 scorers
st.markdown('<html><h1><body style="background-color:blue;">Toppers</body></h1></html>',
unsafe_allow_html=True)
no_students = st.number_input('Number of students ', 6)
column = 'CGPA'
column=st.selectbox('select an attribute',
tuple(cleandf.columns[1:])
)
bottom = False
toppers = cleandf[column].sort_values(ascending=bottom).values
toppers_report = cleandf[cleandf[column].isin(toppers)].sort_values(by=[column], ascending=False)
st.dataframe(toppers_report)
report = report_downlaoder(FC,SC,PC,toppers_report,fig)
st.sidebar.subheader('Click On reports to generate a report and get an option to download one')
st.sidebar.markdown(report,unsafe_allow_html=True)
|
rahulthaker/Result-analysis
|
Analysis.py
|
Analysis.py
|
py
| 6,347 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39943332700
|
from decouple import config
from logic import bet
My_Money = int(config('MY_MONEY'))
while True:
print('you have ' + str(My_Money))
print('do you wanna play? (yes or no)')
a = input('')
if a.strip() == 'no':
print('you are out of the game')
break
elif a.strip() == 'yes':
b = int(input('guess the number from 1 to 30 '))
g = int(input('your bet '))
My_Money -= g
My_Money += bet(b, g)
#cvbnjmk
else:
print('yes or no')
|
aliiiiaa/hw5
|
25-2_Aliia_Abyllkasymova_hw_5.py
|
25-2_Aliia_Abyllkasymova_hw_5.py
|
py
| 509 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30906506351
|
import mailchimp_marketing as MailchimpMarketing
from mailchimp_marketing.api_client import ApiClientError
def survey_monkey_distribute_daily(**kwargs):
api_key = kwargs['api_key']
server = kwargs['server']
try:
client = MailchimpMarketing.Client()
client.set_config({
"api_key": api_key,
"server": server
})
response = client.ping.get()
print(response)
except ApiClientError as error:
print(error)
x = client.campaigns.replicate('df4d22a9b2')['id']
client.campaigns.send(x)
|
GregorMonsonFD/holmly_sourcing_legacy
|
scripts/python/survey_monkey_distribute_daily.py
|
survey_monkey_distribute_daily.py
|
py
| 525 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26986931486
|
# -*- coding: utf-8 -*-
import itertools
import struct
import pytest
from mock import Mock, call, patch
from nameko_grpc.errors import GrpcError
from nameko_grpc.streams import (
STREAM_END,
ByteBuffer,
ReceiveStream,
SendStream,
StreamBase,
)
class TestByteBuffer:
def test_peek(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.peek(slice(0, 1)) == b"a"
assert buffer.peek(slice(3, 6)) == b"def"
assert buffer.peek(slice(-2, -1)) == b"h"
assert buffer.read() == b"abcdefghi"
def test_peek_all(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.peek() == b"abcdefghi"
assert buffer.read() == b"abcdefghi"
def test_discard(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.discard(3) is None
assert buffer.read() == b"defghi"
def test_discard_all(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.discard() is None
assert buffer.read() == b""
def test_read(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.read(3) == b"abc"
assert buffer.read() == b"defghi"
def test_read_all(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.read() == b"abcdefghi"
assert buffer.read() == b""
def test_write(self):
buffer = ByteBuffer()
buffer.write(b"abc")
assert buffer.peek() == b"abc"
buffer.write(b"def")
assert buffer.peek() == b"abcdef"
def test_empty(self):
buffer = ByteBuffer()
assert buffer.empty() is True
buffer.write(b"abc")
assert buffer.empty() is False
buffer.discard()
assert buffer.empty() is True
def test_len(self):
buffer = ByteBuffer()
assert len(buffer) == 0
buffer.write(b"abc")
assert len(buffer) == 3
class TestStreamBase:
def test_exhausted(self):
stream = StreamBase(1)
stream.buffer.write(b"abc")
assert not stream.exhausted
stream.close()
assert stream.closed
assert not stream.exhausted
stream.queue.get()
assert stream.queue.empty()
assert not stream.exhausted
stream.buffer.discard()
assert stream.buffer.empty()
assert stream.exhausted
def test_close(self):
stream = StreamBase(1)
stream.close()
assert stream.closed
assert stream.queue.get() == STREAM_END
def test_close_with_error(self):
stream = StreamBase(1)
error = GrpcError("boom", "details")
stream.close(error)
assert stream.closed
assert stream.queue.get() == error
def test_close_with_non_error(self):
stream = StreamBase(1)
error = Exception("boom")
with pytest.raises(AssertionError):
stream.close(error)
class TestReceiveStream:
def test_write_to_closed_stream(self):
stream = ReceiveStream(1)
assert stream.buffer.empty()
stream.close()
stream.write(b"\x00\x00\x00")
assert stream.buffer.empty()
def test_write_less_bytes_than_header(self):
stream = ReceiveStream(1)
stream.write(b"\x00\x00\x00")
assert stream.queue.empty()
assert stream.buffer.peek() == b"\x00\x00\x00"
def test_write_less_bytes_than_one_message(self):
stream = ReceiveStream(1)
stream.write(b"\x00\x00\x00\x01\x00\xff\xff\xff")
assert stream.queue.empty()
assert stream.buffer.peek() == b"\x00\x00\x00\x01\x00\xff\xff\xff"
def test_write_more_bytes_than_one_message(self):
stream = ReceiveStream(1)
# uncompressed single-byte message, followed by two more bytes of \xff
stream.write(b"\x00\x00\x00\x00\x01\xff\xff\xff")
# single byte message is queued
assert stream.queue.get() == (False, b"\xff")
# following two bytes remain in the buffer
assert stream.buffer.peek() == b"\xff\xff"
def test_write_multiple_messages(self):
stream = ReceiveStream(1)
for _ in range(10):
stream.write(b"\x00\x00\x00\x00\x01\xff") # 10 single byte messages
assert stream.queue.qsize() == 10
assert len(stream.buffer) == 0
def test_consume_grpc_error(self):
stream = ReceiveStream(1)
error = GrpcError("boom", "details")
stream.queue.put(error)
message_type = Mock()
with pytest.raises(GrpcError):
next(stream.consume(message_type))
def test_consume_end_of_stream(self):
stream = ReceiveStream(1)
stream.close()
message_type = Mock()
assert list(stream.consume(message_type)) == []
def test_consume_uncompressed_message(self):
stream = ReceiveStream(1)
message_data = b"x"
message_type = Mock()
message = message_type()
stream.queue.put((False, message_data))
stream.close() # close stream so that consume exits
assert list(stream.consume(message_type)) == [message]
assert message.ParseFromString.call_args_list == [call(message_data)]
@patch("nameko_grpc.streams.decompress")
def test_consume_compressed_message(self, decompress):
stream = ReceiveStream(1)
message_data = b"x"
message_type = Mock()
message = message_type()
stream.queue.put((True, message_data))
stream.close() # close stream so that consume exits
assert list(stream.consume(message_type)) == [message]
assert message.ParseFromString.call_args_list == [
call(decompress(message_data))
]
@patch("nameko_grpc.streams.decompress")
def test_consume_multiple_messages(self, decompress):
stream = ReceiveStream(1)
message_data = b"x"
message_type = Mock()
message = message_type()
stream.queue.put((False, message_data))
stream.queue.put((True, message_data))
stream.close() # close stream so that consume exits
assert list(stream.consume(message_type)) == [message, message]
assert message.ParseFromString.call_args_list == [
call(message_data),
call(decompress(message_data)),
]
class TestSendStream:
def test_populate(self):
stream = SendStream(1)
stream.populate(range(10))
assert stream.closed
assert stream.queue.qsize() == 11
def test_populate_closed_stream(self):
stream = SendStream(1)
stream.close()
assert stream.closed
stream.populate(range(10))
assert stream.queue.qsize() == 1
class TestSendStreamHeadersToSend:
def test_no_headers(self):
stream = SendStream(1)
assert len(stream.headers) == 0
assert stream.headers_to_send(False) is False
def test_empty_queue(self):
stream = SendStream(1)
stream.headers.set(("foo", "bar"))
assert stream.queue.qsize() == 0
assert stream.headers_to_send(True) is False
assert stream.headers_to_send(False) == [(b"foo", b"bar")]
def test_mark_as_sent(self):
stream = SendStream(1)
stream.headers.set(("foo", "bar"))
assert stream.headers_to_send(False) == [(b"foo", b"bar")] # marks as sent
assert stream.headers_to_send(False) is False # previously sent
def test_defer_until_data(self):
stream = SendStream(1)
stream.headers.set(("foo", "bar"))
assert stream.headers_to_send(True) is False # defer until data
stream.queue.put(Mock())
assert stream.queue.qsize() == 1
assert stream.headers_to_send(True) == [(b"foo", b"bar")]
class TestSendStreamTrailersToSend:
def test_no_trailers(self):
stream = SendStream(1)
assert len(stream.trailers) == 0
assert stream.trailers_to_send() is False
def test_send_trailers(self):
stream = SendStream(1)
stream.trailers.set(("foo", "bar"))
assert stream.trailers_to_send() == [(b"foo", b"bar")]
@pytest.fixture
def generate_messages():
with patch("nameko_grpc.streams.compress") as compress:
compress.side_effect = lambda body, _: (False, body)
def generate(count, length):
"""Generate a series of mock messages.
If `count` is 2 and `length` is 4, when passed to `stream.populate`,
two messages with the following payload will be added to the stream's
queue.
#1. b`\x00\x00\x00\x00`
#2. b`\x01\x01\x01\x01`
"""
messages = []
for index in range(count):
message = Mock()
message.SerializeToString.return_value = bytes([index] * length)
messages.append(message)
return messages
yield generate
class TestSendStreamFlushQueueToBuffer:
def test_empty_queue(self):
stream = SendStream(1)
assert stream.queue.qsize() == 0
stream.flush_queue_to_buffer()
assert stream.buffer.empty()
def test_messages_on_queue(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=2, length=20))
header = struct.pack("?", False) + struct.pack(">I", 20)
stream.flush_queue_to_buffer()
assert stream.buffer.peek() == header + b"\x00" * 20 + header + b"\x01" * 20
def test_stream_closed(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=2, length=20))
header = struct.pack("?", False) + struct.pack(">I", 20)
stream.flush_queue_to_buffer()
assert stream.buffer.peek() == header + b"\x00" * 20 + header + b"\x01" * 20
stream.flush_queue_to_buffer() # stream closed; no-op
assert stream.buffer.peek() == header + b"\x00" * 20 + header + b"\x01" * 20
def test_error_on_queue(self, generate_messages):
stream = SendStream(1)
error = GrpcError("boom", "details")
messages = itertools.chain(generate_messages(count=2, length=20), [error])
stream.populate(messages)
with pytest.raises(GrpcError):
stream.flush_queue_to_buffer()
class TestSendStreamRead:
def test_no_data(self):
stream = SendStream(1)
max_bytes = 10
chunk_size = 10
assert stream.buffer.empty()
assert list(stream.read(max_bytes, chunk_size)) == []
def test_less_than_one_chunk_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abc")
max_bytes = 10
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abc"]
assert stream.buffer.empty()
def test_more_than_one_chunk_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 10
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde", b"fghij"]
assert stream.buffer.peek() == b"klm"
def test_less_than_max_bytes_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 20
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde", b"fghij", b"klm"]
assert stream.buffer.empty()
def test_more_than_max_bytes_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 10
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde", b"fghij"]
assert stream.buffer.peek() == b"klm"
def test_chunk_greater_than_max_bytes(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 5
chunk_size = 10
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde"]
assert stream.buffer.peek() == b"fghijklm"
def test_stream_closed(self):
stream = SendStream(1)
max_bytes = 10
chunk_size = 5
stream.close()
assert list(stream.read(max_bytes, chunk_size)) == []
def test_stream_closed_with_error(self):
stream = SendStream(1)
error = GrpcError("boom", "details")
stream.close(error)
max_bytes = 10
chunk_size = 5
with pytest.raises(GrpcError):
next(stream.read(max_bytes, chunk_size))
def test_multiple_small_messages(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=100, length=1))
header = struct.pack("?", False) + struct.pack(">I", 1)
max_bytes = 20
chunk_size = 10
chunks = list(stream.read(max_bytes, chunk_size))
expected = [
# 5 bytes header + 1 byte payload + 4 bytes of next header
header + b"\x00" + header[:4],
# remaining 1 byte of header + 1 byte payload
# + 5 bytes header + 1 byte payload + 2 bytes of next header
header[4:] + b"\x01" + header + b"\x02" + header[:2],
]
assert chunks == expected
assert sum(map(len, chunks)) == max_bytes
# queue is emptied into buffer
assert len(stream.buffer) == 100 * (5 + 1) - max_bytes # 580 bytes left
assert stream.queue.qsize() == 0
def test_multiple_large_messages(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=100, length=200))
header = struct.pack("?", False) + struct.pack(">I", 200)
max_bytes = 50
chunk_size = 10
chunks = list(stream.read(max_bytes, chunk_size))
expected = [
header + b"\x00\x00\x00\x00\x00", # 5 bytes header + 5 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
]
assert chunks == expected
assert sum(map(len, chunks)) == max_bytes
# queue is emptied into buffer
assert len(stream.buffer) == 100 * (5 + 200) - max_bytes # 20450 bytes left
assert stream.queue.qsize() == 0
def test_data_in_buffer_and_messages_in_queue(self, generate_messages):
stream = SendStream(1)
stream.buffer.write(b"\xff\xff\xff\xff\xff")
stream.populate(generate_messages(count=10, length=10))
header = struct.pack("?", False) + struct.pack(">I", 10)
max_bytes = 10
chunk_size = 10
chunks = list(stream.read(max_bytes, chunk_size))
expected = [b"\xff\xff\xff\xff\xff" + header]
assert chunks == expected
assert sum(map(len, chunks)) == max_bytes
# queue is emptied into buffer
assert len(stream.buffer) == 5 + 10 * (5 + 10) - max_bytes # 145 bytes left
assert stream.queue.qsize() == 0
|
nameko/nameko-grpc
|
test/test_streams.py
|
test_streams.py
|
py
| 15,281 |
python
|
en
|
code
| 57 |
github-code
|
6
|
30386260395
|
#!/usr/bin/env python3
import os
import urllib
import requests
import config
def dump_stories():
new_stories = 0
num_stories = 0
r = requests.get(
"https://i.instagram.com/api/v1/feed/reels_tray/",
cookies=config.instagram_cookies, headers=config.instagram_headers).json()
for user in r['tray']:
user_dir = "./stories/{0!s}-{1!s}".format(user['user']['username'], user['id'])
if not os.path.exists(user_dir):
os.makedirs(user_dir)
print("[*] dumping " + user['user']['username'])
user_stories = requests.get(
"https://i.instagram.com/api/v1/feed/user/{0!s}/reel_media/".format(user['id']),
cookies=config.instagram_cookies, headers=config.instagram_headers).json()
for item in user_stories['items']:
num_stories += 1
if 'video_versions' in item:
url = item['video_versions'][0]['url']
else:
url = item['image_versions2']['candidates'][0]['url']
filename = url.split('/')[-1].split('?')[0]
file_path = user_dir + '/' + filename
if not os.path.isfile(file_path):
new_stories += 1
print(" + " + filename)
urllib.request.urlretrieve(url, file_path)
else:
print(" - " + filename)
return len(r['tray']), num_stories, new_stories
def send_notification(message):
requests.post(
"https://api.pushover.net/1/messages.json",
data={"token": config.pushover_app_token,
"user": config.pushover_user_token,
"title": "instadump",
"message": message})
if __name__ == "__main__":
num_users, num_stories, new_stories = dump_stories()
message = "{0!s} stories ({1!s} new)\n{2!s} users".format(num_stories, new_stories, num_users)
if config.pushover_app_token and config.pushover_user_token:
send_notification(message)
else:
print(message)
|
bl1nk/instadump
|
instadump.py
|
instadump.py
|
py
| 2,019 |
python
|
en
|
code
| 9 |
github-code
|
6
|
39495192143
|
n=int(input())
s=input()
li=s.split(" ")
for i in range(len(li)):
li[i]=int(li[i])
li.sort(reverse=True)
a=li[0]
li2=[]
i=0
while i<n:
if (a%li[i]==0) and (li[i] not in li2):
li2.append(li[i])
del li[i]
i-=1
n-=1
i+=1
print(li[0])
print(li2[0])
|
skshahriarahmedraka/codeforces-python
|
1108B.py
|
1108B.py
|
py
| 330 |
python
|
en
|
code
| 4 |
github-code
|
6
|
16172136194
|
import numpy as np
from my_function import smooth_curve
from my_cnet import SimpleConvNet
from mnist import load_mnist
from my_optimizer import Adam
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns
(x_train,t_train),(x_test,t_test) = load_mnist(flatten=False)
network = SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
"""
epoch 全覆盖次数
mini_batch_size 批处理数据数
train_size 训练数据数
iter_per_epoch 一次全覆盖批处理次数
max_iter 整个训练批处理次数
optimizer 梯度更新选择Adam算法
current_epoch 目前进行的epoch次数
"""
epoch = 20
mini_batch_size = 100
train_size = x_train.shape[0]
iter_per_epoch = max(train_size/mini_batch_size,1)
iter_per_epoch = int(iter_per_epoch)  # cast to an integer
max_iter = epoch*iter_per_epoch
optimizer = Adam(lr = 0.001)
current_epoch = 0
"""
画图参数
"""
train_loss_list = []
train_acc_list = []
test_acc_list = []
print("开始训练请等待...")
for i in range(max_iter):
batch_mask = np.random.choice(train_size,mini_batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
grads = network.gradient(x_batch,t_batch)
grads = optimizer.update(network.params,grads)
loss = network.loss(x_batch,t_batch)
train_loss_list.append(loss)
if i %iter_per_epoch==0 :
current_epoch += 1
# evaluate accuracy on 1,000 samples only (to save time)
x_train_simple,t_train_simple = x_train[:1000],t_train[:1000]
x_test_sample,t_test_sample = x_test[:1000],t_test[:1000]
train_acc = network.accuracy(x_train_simple,t_train_simple)
test_acc = network.accuracy(x_test_sample,t_test_sample)
if current_epoch == 20 :
cm = confusion_matrix(t_test_sample,np.argmax(network.predict(x_test_sample), axis=1))
cmn = cm.astype('float')/cm.sum(axis=1)[:,np.newaxis]
cmn = np.around(cmn,decimals=2)
plt.figure(figsize=(8, 8))
sns.heatmap(cmn, annot=True, cmap='Blues')
plt.ylim(0, 10)
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print("=== epoch : "+str(current_epoch)+", train acc:"+str(train_acc)+",test acc:"+str(test_acc)+" ===")
# network.save_parms("params.pkl")
print("训练结束,您的损失函数值已经降低到"+str(train_loss_list[-1])+"下面开始作图")
"""
画图
"""
plt.figure("loss")
x = np.arange(len(train_loss_list))
y = np.array(smooth_curve(train_loss_list))
plt.plot(x,y)
plt.xlabel("mini_batch")
plt.ylabel("loss")
plt.figure("accuracy")
x = np.arange(len(train_acc_list))
y1 = np.array(train_acc_list)
y2 = np.array(test_acc_list)
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.plot(x,y1,label="train_accuracy")
plt.plot(x,y2,label="test_accuracy")
plt.legend()
plt.show()
|
kang9kang/DL-learning
|
cnn/my_cnn_train.py
|
my_cnn_train.py
|
py
| 3,081 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23642650864
|
# -*- coding:utf-8 -*-
#@Time : 2020/4/27 16:05
#@Author: Triomphe
#@File : vulscan.py
import importlib
import os
import sys
from PyQt5.QtCore import QObject, pyqtSignal
from vulscan.port_scan import portscan
from modules.mod_get_rootPath import get_root_path
sys.path.append(os.path.abspath(
os.path.dirname(__file__))+'/vuldb')
# project root directory
ROOT_PATH =get_root_path()
class Vulscan(QObject):
_signal =pyqtSignal(dict)
_signal_finish=pyqtSignal()
script_plugin_list=[]
open_prot_list=[]
def __init__(self,target_ip):
super(Vulscan, self).__init__()
# file location (project root path)
self.root_path =get_root_path()
self.target_ip =target_ip
self.init()
def init(self):
file_list = os.listdir(self.root_path + '/vulscan/vuldb')
for filename in file_list:
try:
if filename.endswith('.py') and filename.split('.')[1] == 'py':
self.script_plugin_list.append(filename.split('.')[0])
except Exception as e:
print("error : "+str(e))
# declare the root directory for every plugin
# start the scan
def start_scan(self):
try:
self.open_prot_list=portscan(self.target_ip)
except Exception as e:
print(e)
self.open_prot_list=['80']
self.poc_check()
# vulnerability verification
def poc_check(self):
for plugin in self.script_plugin_list:
res=importlib.import_module(plugin)
setattr(res,"ROOT_PATH",ROOT_PATH)
# check the default port first; if the vulnerability is found there, skip the ports from the port scan
result_info=res.check(self.target_ip)
if result_info!=None:
text=res.get_plugin_info()
text['result_info']=result_info
self._signal.emit(text)
else:
# after the masscan scan, check every open port
for port in self.open_prot_list:
result_info=res.check(self.target_ip,port=port)
                    if result_info is not None and result_info != "":
text=res.get_plugin_info()
text['result_info']=result_info
self._signal.emit(text)
        #Signal that the scan has finished.
self._signal_finish.emit()
|
TriompheL/Ratel
|
vulscan/vulscan.py
|
vulscan.py
|
py
| 2,337 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26043642636
|
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import List, cast
from pants.backend.project_info import dependents
from pants.backend.project_info.dependents import Dependents, DependentsRequest
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import resolve_conflicting_options
from pants.engine.addresses import Address, Addresses
from pants.engine.collection import Collection
from pants.engine.internals.graph import Owners, OwnersRequest
from pants.engine.internals.mapper import SpecsFilter
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import UnexpandedTargets
from pants.option.option_types import EnumOption, StrOption
from pants.option.option_value_container import OptionValueContainer
from pants.option.subsystem import Subsystem
from pants.util.docutil import doc_url
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import help_text
from pants.vcs.git import GitWorktree
class DependentsOption(Enum):
NONE = "none"
DIRECT = "direct"
TRANSITIVE = "transitive"
@dataclass(frozen=True)
class ChangedRequest:
sources: tuple[str, ...]
dependents: DependentsOption
class ChangedAddresses(Collection[Address]):
pass
@rule
async def find_changed_owners(
request: ChangedRequest, specs_filter: SpecsFilter
) -> ChangedAddresses:
no_dependents = request.dependents == DependentsOption.NONE
owners = await Get(
Owners,
OwnersRequest(
request.sources,
# If `--changed-dependents` is used, we cannot eagerly filter out root targets. We
# need to first find their dependents, and only then should we filter. See
# https://github.com/pantsbuild/pants/issues/15544
filter_by_global_options=no_dependents,
# Changing a BUILD file might impact the targets it defines.
match_if_owning_build_file_included_in_sources=True,
),
)
if no_dependents:
return ChangedAddresses(owners)
# See https://github.com/pantsbuild/pants/issues/15313. We filter out target generators because
# they are not useful as aliases for their generated targets in the context of
# `--changed-since`. Including them makes it look like all sibling targets from the same
# target generator have also changed.
#
# However, we also must be careful to preserve if target generators are direct owners, which
# happens when a generated file is deleted.
owner_target_generators = FrozenOrderedSet(
addr.maybe_convert_to_target_generator() for addr in owners if addr.is_generated_target
)
dependents = await Get(
Dependents,
DependentsRequest(
owners,
transitive=request.dependents == DependentsOption.TRANSITIVE,
include_roots=False,
),
)
result = FrozenOrderedSet(owners) | (dependents - owner_target_generators)
if specs_filter.is_specified:
# Finally, we must now filter out the result to only include what matches our tags, as the
# last step of https://github.com/pantsbuild/pants/issues/15544.
#
# Note that we use `UnexpandedTargets` rather than `Targets` or `FilteredTargets` so that
# we preserve target generators.
result_as_tgts = await Get(UnexpandedTargets, Addresses(result))
result = FrozenOrderedSet(
tgt.address for tgt in result_as_tgts if specs_filter.matches(tgt)
)
return ChangedAddresses(result)
@dataclass(frozen=True)
class ChangedOptions:
"""A wrapper for the options from the `Changed` Subsystem.
This is necessary because parsing of these options happens before conventional subsystems are
configured, so the normal mechanisms like `Subsystem.rules()` would not work properly.
"""
since: str | None
diffspec: str | None
dependents: DependentsOption
@classmethod
def from_options(cls, options: OptionValueContainer) -> ChangedOptions:
dependents = resolve_conflicting_options(
old_option="dependees",
new_option="dependents",
old_scope=Changed.options_scope,
new_scope=Changed.options_scope,
old_container=options,
new_container=options,
)
return cls(options.since, options.diffspec, dependents)
@property
def provided(self) -> bool:
return bool(self.since) or bool(self.diffspec)
def changed_files(self, git_worktree: GitWorktree) -> list[str]:
"""Determines the files changed according to SCM/workspace and options."""
if self.diffspec:
return cast(
List[str], git_worktree.changes_in(self.diffspec, relative_to=get_buildroot())
)
changes_since = self.since or git_worktree.current_rev_identifier
return cast(
List[str],
git_worktree.changed_files(
from_commit=changes_since, include_untracked=True, relative_to=get_buildroot()
),
)
class Changed(Subsystem):
options_scope = "changed"
help = help_text(
f"""
Tell Pants to detect what files and targets have changed from Git.
See {doc_url('advanced-target-selection')}.
"""
)
since = StrOption(
default=None,
help="Calculate changes since this Git spec (commit range/SHA/ref).",
)
diffspec = StrOption(
default=None,
help="Calculate changes contained within a given Git spec (commit range/SHA/ref).",
)
dependents = EnumOption(
default=DependentsOption.NONE,
help="Include direct or transitive dependents of changed targets.",
)
dependees = EnumOption(
default=DependentsOption.NONE,
help="Include direct or transitive dependents of changed targets.",
removal_version="2.23.0.dev0",
removal_hint="Use --dependents instead",
)
def rules():
return [*collect_rules(), *dependents.rules()]
|
pantsbuild/pants
|
src/python/pants/vcs/changed.py
|
changed.py
|
py
| 6,103 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
74128328188
|
import sys
input = sys.stdin.readline
t = int(input())
order = [list(input().strip()) for i in range(t)]
for i in range(t-1):
for j in range(len(order[i])):
if order[i][j] != order[i+1][j]:
order[i+1][j] = '?'
print("".join(order[-1]))
|
Dayeon1351/TIL
|
BAEKJOON/1032.py
|
1032.py
|
py
| 273 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41682544130
|
"""add unique index for modalities
Revision ID: 3cccf6a0af7d
Revises: ba3bae2b5e27
Create Date: 2018-01-05 14:28:03.194013
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3cccf6a0af7d'
down_revision = 'ba3bae2b5e27'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_monday_modality_name'), 'modality', ['name'], unique=True, schema='monday')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_monday_modality_name'), table_name='modality', schema='monday')
# ### end Alembic commands ###
|
MondayHealth/provider-import
|
alembic/versions/3cccf6a0af7d_add_unique_index_for_modalities.py
|
3cccf6a0af7d_add_unique_index_for_modalities.py
|
py
| 749 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22762001346
|
print("Welcome to the GPA calculator.")
courses=int(input("Enter number of courses:"))
GpaList=[]
CreditList=[]
Markcount=0
Creditcount=0
print('Enter marks in percentage and then credit for that course in the next line')
for a in range(courses):
Markcount+=1
Creditcount+=1
mark=int(input('mark'+ str(Markcount)+':'))
credit=float(input('credit'+ str(Creditcount)+':'))
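    # Map the percentage mark onto a 4.0 grade-point scale, band by band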
    if 85 <= mark <= 100:
        gpa = 4.0
    elif 80 <= mark <= 84:
        gpa = 3.7
    elif 77 <= mark <= 79:
        gpa = 3.3
    elif 73 <= mark <= 76:
        gpa = 3.0
    elif 70 <= mark <= 72:
        gpa = 2.7
    elif 67 <= mark <= 69:
        gpa = 2.3
    elif 63 <= mark <= 66:
        gpa = 2.0
    elif 60 <= mark <= 62:
        gpa = 1.7
    elif 57 <= mark <= 59:
        gpa = 1.3
    elif 53 <= mark <= 56:
        gpa = 1.0
    elif 50 <= mark <= 52:
        gpa = 0.7
    else:
        gpa = 0.0
    weightGpa = gpa * credit
    GpaList.append(weightGpa)
    CreditList.append(credit)
totalSum=float(sum(GpaList))
totalCredit=float(sum(CreditList))
FinalGpa=totalSum/totalCredit
print(FinalGpa)
|
AkashMalhotra/GpaCalculator
|
GPACalculator/main.py
|
main.py
|
py
| 1,382 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38829812575
|
from django.shortcuts import render , get_object_or_404 , get_list_or_404
from django.contrib.auth.decorators import login_required
from .models import Members
# Create your views here.
@login_required(login_url="/")
def onemember_record(request , name):
objlist = get_list_or_404(Members , name = name)
objlist = objlist[::-1]
cd = totalcd(objlist)
next_pay = next_month_pay(objlist)
last_loanmonth = loan_last_month(objlist)
context = {
"objectlist" : objlist[1:],
"user":name,
"totalcd" : cd,
"nextpay" : next_pay,
"loan_end" : last_loanmonth
}
return render(request , "members/one_member_record.html" , context)
def totalcd(record):
total_cd = 0
for i in record[1:]:
total_cd += i.cd
return total_cd
def next_month_pay(record):
nextmonthpay = record[0].total
return nextmonthpay
def loan_last_month(record):
    bal = record[0].loan_bal
    install = record[0].installment
    month = record[0].month
    mon, yr = month.split("-")
    if bal == 0:
        return None
    no_of_months = round(bal / install)
    # Add the remaining instalment months to the current month, carrying whole years forward
    total_months = int(mon) + int(no_of_months)
    end_year = int(yr) + (total_months - 1) // 12
    end_mon = (total_months - 1) % 12 + 1
    end_month = f"{end_mon}-{end_year}"
    return end_month
|
hiteshkhatana/khatana-society-django
|
members/views.py
|
views.py
|
py
| 1,267 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11896448439
|
from unittest import mock
from django.http import HttpRequest
from google_optimize.utils import _parse_experiments, get_experiments_variants
def test_parses_single_experiment_cookie():
request = HttpRequest()
request.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
experiments = _parse_experiments(request)
assert experiments == dict(utSuKi3PRbmxeG08en8VNw=1)
def test_parses_multiple_experiment_cookies():
request = HttpRequest()
request.COOKIES[
"_gaexp"
] = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1!7IXTpXmLRzKwfU-Eilh_0Q.18166.0"
experiments = _parse_experiments(request)
assert experiments == {"7IXTpXmLRzKwfU-Eilh_0Q": 0, "3x8_BbSCREyqtWm1H1OUrQ": 1}
def test_parses_without_cookie():
request = HttpRequest()
experiments = _parse_experiments(request)
assert experiments is None
@mock.patch("logging.Logger.warning")
def test_logs_missing_gaexp_cookie(logger):
request = HttpRequest()
get_experiments_variants(request, [{"id": "abc"}])
logger.assert_called_with("Missing _ga_exp cookie")
@mock.patch("logging.Logger.error")
def test_logs_no_settings(logger):
request = HttpRequest()
request.COOKIES["_gaexp"] = "test"
get_experiments_variants(request, None)
logger.assert_called_with("Setting GOOGLE_OPTIMIZE_EXPERIMENTS not defined")
@mock.patch("logging.Logger.error")
def test_logs_failed_cookie_parsing(logger):
request = HttpRequest()
request.COOKIES["_gaexp"] = "test"
get_experiments_variants(request, [{"id": "abc"}])
logger.assert_called_with("Failed to parse _gaexp %s", "test")
@mock.patch("logging.Logger.warning")
def test_logs_settings_missing_experiment_id(logger):
request = HttpRequest()
request.COOKIES["_gaexp"] = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1"
get_experiments_variants(request, [{"test": "test"}])
logger.assert_called_with("experiment id not found in experiment settings")
@mock.patch("logging.Logger.warning")
def test_logs_experiment_id_not_in_cookies(logger):
request = HttpRequest()
gaexp = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1"
experiment_id = "test"
request.COOKIES["_gaexp"] = gaexp
get_experiments_variants(request, [{"id": experiment_id}])
logger.assert_called_with(
"experiment id %s not found in experiments cookie %s", experiment_id, gaexp
)
def test_parses_single_experiment():
request = HttpRequest()
request.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
experiments = [
{
"id": "utSuKi3PRbmxeG08en8VNw",
"alias": "redesign",
"variant_aliases": {0: "old_design", 1: "new_design"},
}
]
values = get_experiments_variants(request, experiments)
assert values == {"redesign": "new_design"}
def test_parses_multiple_experiments():
request = HttpRequest()
request.COOKIES[
"_gaexp"
] = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1!7IXTpXmLRzKwfU-Eilh_0Q.18166.0"
experiments = [
{
"id": "3x8_BbSCREyqtWm1H1OUrQ",
"alias": "redesign_page",
"variant_aliases": {0: "old_design", 1: "new_design"},
},
{
"id": "7IXTpXmLRzKwfU-Eilh_0Q",
"alias": "resign_header",
"variant_aliases": {0: "old_header", 1: "new_header"},
},
]
values = get_experiments_variants(request, experiments)
assert values == {"redesign_page": "new_design", "resign_header": "old_header"}
def test_parses_experiments_without_variant_aliases():
request = HttpRequest()
request.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
experiments = [{"id": "utSuKi3PRbmxeG08en8VNw", "alias": "redesign"}]
values = get_experiments_variants(request, experiments)
assert values == {"redesign": 1}
def test_parses_experiments_without_experiment_alias():
request = HttpRequest()
request.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
experiments = [{"id": "utSuKi3PRbmxeG08en8VNw"}]
values = get_experiments_variants(request, experiments)
assert values == {"utSuKi3PRbmxeG08en8VNw": 1}
|
danihodovic/django-google-optimize
|
tests/test_utils.py
|
test_utils.py
|
py
| 4,156 |
python
|
en
|
code
| null |
github-code
|
6
|
41682684008
|
import torch
#Linear regression for f(x) = 4x+3
X= torch.tensor([1,2,3,4,5,6,7,8,9,10], dtype=torch.float32)
Y=torch.tensor([7,11,15,19,23,27,31,35,39,43], dtype= torch.float32)
w= torch.tensor(0.0,dtype=torch.float32,requires_grad=True)
def forward(x):
return (w*x)+3
def loss(y,y_exp):
return ((y_exp-y)**2).mean()
testVal = 100
print(f'Prediction before training: f({testVal}) = {forward(testVal).item():.3f}')
learningRate = 0.01
numTrainings=25
for training in range(numTrainings):
y_exp=forward(X)
error = loss(Y,y_exp)
error.backward()
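    # Step the weight manually outside autograd tracking, then clear the accumulated gradient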
with torch.no_grad():
w -= learningRate* w.grad
w.grad.zero_()
print(f'training {training+1}: W = {w.item():.3f}, loss = {error.item():.3f}')
print(f'Prediction after all training of f({testVal}) = {forward(testVal).item():.3f}')
|
kylej21/PyTorchProjects
|
linearRegression/linearReg.py
|
linearReg.py
|
py
| 818 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32177625426
|
from unittest import mock
from itertools import product
import pytest
@pytest.mark.parametrize(
'user_agent, session',
product(
[None, mock.Mock()],
[None, mock.Mock()]
)
)
def test_init(user_agent, session):
with mock.patch('Raitonoberu.raitonoberu.aiohttp') as m_aio:
from Raitonoberu.raitonoberu import Raitonoberu
# run
obj = Raitonoberu(user_agent, session)
# test
        if user_agent is None:
            assert obj.headers == {"User-Agent": "Raitonoberu"}
        else:
            assert obj.headers == user_agent
        if session is None:
            assert obj.session == m_aio.ClientSession.return_value
            m_aio.ClientSession.assert_called_once_with(headers=obj.headers)
        else:
            assert obj.session == session
def test_del():
session = mock.Mock()
with mock.patch('Raitonoberu.raitonoberu.Raitonoberu.__init__', return_value=None):
from Raitonoberu.raitonoberu import Raitonoberu
obj = Raitonoberu()
obj.session = session
# run
del obj
# test
session.close.assert_called_once_with()
@pytest.mark.asyncio
@pytest.mark.parametrize('term', ['term'])
async def test_get_search_page(term):
from Raitonoberu.raitonoberu import Raitonoberu
obj = Raitonoberu()
# run
res = await obj.get_search_page(term=term)
# test
# the actual result with 'term' as input is
# 'http://www.novelupdates.com/series/the-last-apostle/'
assert res.startswith('http://www.novelupdates.com/series/')
@pytest.mark.asyncio
@pytest.mark.parametrize(
'term, exp_res',
[
(
'smiling proud wanderer',
{
'aliases': [
'Laughing in the Wind',
'State of Divinity',
'The Peerless Gallant Errant',
'The Proud and Gallant Wanderer',
'Xiao Ao Jiang Hu',
'笑傲江湖'
],
'artists': None,
'authors': ['Jin Yong'],
'completely_translated': True,
'cover': 'http://cdn.novelupdates.com/images/2017/02/IMG_2801.jpg',
'description': (
'The Smiling, Proud Wanderer is a wuxia novel by Jin Yong (Louis Cha). '
'It was first serialised in Hong Kong in the newspaper Ming Pao '
'from 20 April 1967 to 12 October 1969. The Chinese title of the novel, '
'Xiao Ao Jiang Hu, '
'literally means to live a carefree life in a mundane world of strife. '
'Alternate English translations of the title include '
'Laughing in the Wind, '
'The Peerless Gallant Errant, and The Proud and Gallant Wanderer. '
'Another alternative title, State of Divinity, '
'is used for some of the novel’s adaptations.'
),
'english_publisher': None,
'genre': ['Action', 'Adventure', 'Martial Arts', 'Wuxia'],
'language': 'Chinese',
'licensed': False,
'link': 'http://www.novelupdates.com/series/smiling-proud-wanderer/',
'novel_status': '4 Volumes (Completed)\n40 Chapters (Completed)',
'publisher': 'Ming Pao',
'related_series': None,
'tags': [
'Adapted To Drama', 'Adapted to Manhua', 'Adapted To Movie', 'Betrayal',
'Misunderstandings', 'Politics', 'Revenge', 'Special Abilities'
],
'title': 'Smiling Proud Wanderer',
'type': 'Chinese Novel',
'year': '1967'
}
)
]
)
async def test_get_first_search_result(term, exp_res):
from Raitonoberu.raitonoberu import Raitonoberu
obj = Raitonoberu()
# run
res = await obj.get_first_search_result(term=term)
# test
assert res == exp_res
@pytest.mark.asyncio
@pytest.mark.parametrize(
'term, exp_res',
[
(
'I shall seal the heavens',
[
'Xian Ni (Shared Universe)',
'Beseech The Devil (Shared Universe)',
'Against Heaven (Shared Universe)',
'A Will Eternal (Shared Universe)'
]
),
('Curing incurable diseases with semen', None),
(
'S.A.O.',
[
'Sword Art Online Alternative – Gun Gale Online (Spin-Off)',
'Sword Art Online – Progressive (Spin-Off)',
'Mahouka Koukou no Rettousei x Sword Art Online (Spin-Off)',
'Sword Art Online Alternative – Clover’s Regret (Spin-Off)',
]
),
]
)
async def test_related_series(term, exp_res):
"""test related series category."""
from Raitonoberu.raitonoberu import Raitonoberu
obj = Raitonoberu()
# run
res = await obj.get_first_search_result(term=term)
# test
    assert res['related_series'] == exp_res
@pytest.mark.asyncio
@pytest.mark.parametrize(
'term, exp_res',
[
['Curing incurable diseases with semen', None],
['S.A.O.', 'Yen Press'],
['I shall seal the heavens', None],
]
)
async def test_english_publisher(term, exp_res):
"""test related series category."""
from Raitonoberu.raitonoberu import Raitonoberu
obj = Raitonoberu()
# run
res = await obj.get_first_search_result(term=term)
# test
    assert res['english_publisher'] == exp_res
|
byronvanstien/Raitonoberu
|
tests/test_raitonoberu.py
|
test_raitonoberu.py
|
py
| 5,643 |
python
|
en
|
code
| 5 |
github-code
|
6
|
19972078722
|
from PIL import Image
from torchvision import transforms
import torch
import numpy as np
import pandas as pd
import sys
sys.path.append("d:\\Codes\\AI\\kaggle\\kaggle-CIFAR-10\\")
def loadImages():
# image list
images = np.zeros((300000, 3, 32, 32))
print("begining loading images")
i = 0
while True:
print(i)
try:
# open a image
imageLabel = i + 1
img_path = "datas/test/" + str(imageLabel) + ".png"
img = Image.open(img_path)
        except FileNotFoundError:  # image missing, or all images have been read
break
else:
# transfer image type into numpy
img = np.array(img)
img = torch.from_numpy(img)
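            # Rearrange from HWC (32, 32, 3) to CHW (3, 32, 32) to match the preallocated array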
img = img.transpose(0, 2)
img = img.transpose(1, 2)
images[i, :, :, :] = img
i += 1
return images
images = loadImages()
np.save("test_images.npy", images)
|
rowenci/kaggle-CIFAR-10
|
submission/testProcessing.py
|
testProcessing.py
|
py
| 947 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30793405343
|
import g2d
x, y, dx, dy = 240, 240, 5,5
s=4
ARENA_W, ARENA_H = 480, 360
image = g2d.load_image("ball.png")
def tick():
global x,y, dy, dx, s
g2d.clear_canvas() # Draw background
g2d.draw_image(image, (x, y)) # Draw foreground
if g2d.key_pressed("w"):
s=1
# y-=dy
elif g2d.key_pressed("a"):
s=2
# x-=dx
elif g2d.key_pressed("s"):
s=3
# y+=dy
elif g2d.key_pressed("d"):
s=4
# x+=dx
if s==1:
y-=dy
elif s==2:
x-=dx
elif s==3:
y+=dy
elif s==4:
x+=dx
def main():
g2d.init_canvas((ARENA_W, ARENA_H))
g2d.main_loop(tick) # call tick 30 times/second
main()
|
GiorCocc/python_project-unipr
|
Esercitazione2/controllo_da_tastiera.py
|
controllo_da_tastiera.py
|
py
| 752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11169927859
|
from rest_framework import serializers
from review_app.models import FarmersMarket, Vendor
class FarmersMarketSerializer(serializers.ModelSerializer):
rating = serializers.ReadOnlyField(source='get_rating')
class Meta:
model = FarmersMarket
fields = ['id', 'fm_name', 'fm_description', 'rating', 'fm_picture_url', 'fm_banner_picture_url',
'fm_contact_name', 'fm_contact_email', 'fm_website', 'fm_facility_type',
'fm_county', 'fm_address', 'fm_lat', 'fm_long', 'fm_programs_accepted',
'fm_phone', 'fm_seasons_of_operation', 'fm_handicap_accessible', 'fm_updated']
class VendorSerializer(serializers.ModelSerializer):
rating = serializers.ReadOnlyField(source='get_rating')
class Meta:
model = Vendor
fields = ['id', 'at_farmers_market', 'vendor_name', 'vendor_description', 'rating', 'vendor_contact_name',
'vendor_contact_email', 'vendor_website', 'vendor_phone', 'vendor_type',
'vendor_picture_url', 'vendor_banner_picture_url', 'vendor_updated']
|
dhcrain/FatHen
|
fm_api/serializers.py
|
serializers.py
|
py
| 1,092 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71811421308
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""code_info
@Time : 2020 2020/7/10 15:42
@Author : Blanc
@File : double_color_ball2.py
"""
import random
import time
'''
name = time.strftime('%Y-%m-%d', time.localtime())
print(name)
f = open(file=name + '.txt', mode='w', encoding='utf-8-sig')
a = list()
for i in range(0, 5):
b = random.randint(1, 64)
a.append(b)
a = str(a)
f.write('Double-color ball draw numbers: ' + a)
f.close()'''
list1 = random.sample(range(1, 65), 5)
list1 = str(list1)
name = time.strftime('%Y-%m-%d', time.localtime())
f = open(file=name + '.txt', mode='w', encoding='utf-8-sig')
f.write('Double-color ball draw numbers: ' + list1)
f.close()
|
Flynn-Lu/PythonCode
|
2020python实训/Day7/double_color_ball2.py
|
double_color_ball2.py
|
py
| 685 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71968210427
|
from django.conf.urls import url
from mainapp import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^category/(?P<category_name_slug>[\w\-]+)/$', views.view_category, name='category'),
url(r'^search_dictionary/$', views.search_dictionary, name="search_dictionary"),
url(r'^search/$', views.search, name='search'),
url(r'^dictionary/', views.dictionary, name='dictionary'),
url(r'^local_help/', views.local_help, name='local_help'),
url(r'^talk_to_someone/', views.talk_to_someone, name='talk_to_someone'),
url(r'^search_questions/$', views.search_questions, name="search_questions")
]
|
Gystark/Tech4Justice2016
|
mainapp/urls.py
|
urls.py
|
py
| 637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7092840234
|
#!/usr/bin/env python3
# from https://stackoverflow.com/a/55902915/5555077
# from contextlib import contextmanager
#
# @contextmanager
# def nullcontext(enter_result=None):
# yield enter_result
import contextlib
cm = contextlib.nullcontext()
if __name__ == "__main__":
with cm as context:
print("abc")
|
K-Wu/python_and_bash_playground
|
try_null_context_manager.py
|
try_null_context_manager.py
|
py
| 322 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8765175527
|
import bpy
import csv
import os
from bpy import context
import builtins as __builtin__
def console_print(*args, **kwargs):
for a in context.screen.areas:
if a.type == 'CONSOLE':
c = {}
c['area'] = a
c['space_data'] = a.spaces.active
c['region'] = a.regions[-1]
c['window'] = context.window
c['screen'] = context.screen
s = " ".join([str(arg) for arg in args])
for line in s.split("\n"):
bpy.ops.console.scrollback_append(c, text=line)
def print(*args, **kwargs):
"""Console print() function."""
console_print(*args, **kwargs) # to py consoles
__builtin__.print(*args, **kwargs) # to system console
def importLogos():
for i in range(1,22):
image_name = "Team{}.Logo1".format(i)
file_name = image_name + ".png"
bpy.ops.import_image.to_plane(files=[{"name":file_name, "name":file_name}], directory="Users/hyungsoobae/Desktop/K-League/image/")
models[image_name].location= (0,position1[i-1],1)
bpy.ops.object.editmode_toggle()
bpy.ops.transform.translate(value=(0,0,0.5), constraint_axis=(False,False,True), constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1)
bpy.ops.object.editmode_toggle()
models = bpy.data.objects
scn = bpy.context.scene
data_file_path = "/Users/hyungsoobae/Desktop/K-League/data"
position1 = [0.0, 1.1, 2.2, 3.3000000000000003, 4.4, 5.5, 6.6000000000000005, 7.700000000000001, 8.8, 9.9, 11.0, 12.100000000000001, 13.200000000000001, 14.3, 15.400000000000002, 16.5, 17.6, 18.700000000000003, 19.8, 20.900000000000002, 22.0, 23.1]
tposition1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
outlier = 27
def reset():
scn.frame_set(0)
for model in models:
if 'Year' in model.name:
continue
if 'Team' in model.name and 'Name' in model.name:
index = []
for i,action in enumerate(model.animation_data.action.fcurves):
if action.data_path == 'location':
index.append(i)
for i in range(len(index)):
print (index)
model.animation_data.action.fcurves.remove(model.animation_data.action.fcurves[index[i]])
index = list(map(lambda x: x-1, index))
print (index)
continue
model.animation_data_clear()
model.data.animation_data_clear()
for i in range(1,23):
#print (models['Team{}'.format(i)].location)
models['Team{}'.format(i)].location[1] = position1[i-1]
models['Team{}'.format(i)].scale[2] = 0
models['Team{}.Point'.format(i)].location[0] = 0.4
models['Team{}.Point'.format(i)].location[1] = position1[i-1]
models['Team{}.Point'.format(i)].location[2] = 0.4
models['Team{}.Point'.format(i)].data.text_counter_props.ifAnimated=True
models['Team{}.Point'.format(i)].data.text_counter_props.counter = 0
for j in range(1,6):
try:
models['Team{}.Name{}'.format(i,j)].location[1] = position1[i-1]
models['Team{}.Name{}'.format(i,j)].location[2] = 2.1
models['Team{}.Name{}'.format(i,j)].rotation_euler[1] = -0.872665
models['Team{}.Name{}'.format(i,j)].data.size = 0.3
except:
pass
for j in range(1,8):
try:
models['Team{}.Logo{}'.format(i, j)].location[1] = position1[i-1]
models['Team{}.Logo{}'.format(i, j)].location[2] = 1.0
except:
pass
def get_current_teams(frame=0):
result = []
scn.frame_set(frame)
for model in models:
if 'Team' in model.name and '.' not in model.name:
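            # Teams parked at y == outlier are off the table; only teams left of that slot count as current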
if (model.location[1]) < outlier:
result.append(model)
result.sort(key=lambda x : x.location[1])
result = list(map(lambda x: x.name, result))
return result
def setNameLocation(ffrom, frame, teamName, value):
for i in range(1,6):
try:
scn.frame_set(ffrom)
models['{}.Name{}'.format(teamName,i)].keyframe_insert(data_path='location')
scn.frame_set(ffrom+frame)
models['{}.Name{}'.format(teamName, i)].location[1] = value
models['{}.Name{}'.format(teamName, i)].location[2] = models[teamName].scale[2] * 2 + 2.1
models['{}.Name{}'.format(teamName, i)].keyframe_insert(data_path='location')
except:
pass
def setLogoLocation(ffrom, frame, teamName, value):
for i in range(1,8):
try:
scn.frame_set(ffrom)
models['{}.Logo{}'.format(teamName,i)].keyframe_insert(data_path='location')
scn.frame_set(ffrom+frame)
models['{}.Logo{}'.format(teamName, i)].location[1] = value
models['{}.Logo{}'.format(teamName, i)].location[2] = models[teamName].scale[2] * 2 + 1.0
models['{}.Logo{}'.format(teamName, i)].keyframe_insert(data_path='location')
except:
pass
def setPointLocation(ffrom, frame, teamName, value, point=None):
scn.frame_set(ffrom)
models['{}.Point'.format(teamName)].keyframe_insert(data_path='location')
if point is not None:
models['{}.Point'.format(teamName)].data.keyframe_insert(data_path='text_counter_props.counter')
scn.frame_set(ffrom+frame)
models['{}.Point'.format(teamName)].location[1] = value
if models[teamName].scale[2] > 0:
models['{}.Point'.format(teamName)].location[2] = models[teamName].scale[2] * 2 + 0.4
else:
models['{}.Point'.format(teamName)].location[2] = 0.4
if point is not None:
models['{}.Point'.format(teamName)].data.text_counter_props.counter = point
models['{}.Point'.format(teamName)].data.keyframe_insert(data_path='text_counter_props.counter')
models['{}.Point'.format(teamName)].keyframe_insert(data_path='location')
def transition(year, ffrom, frame):
new_teams = []
new_teams_set = set()
fp = data_file_path + "/" + year + ".csv"
with open(fp, 'r', encoding="utf-8") as csvfile:
rdr = csv.reader(csvfile)
for i,v in enumerate(rdr):
new_teams.append((i,v[2],int(v[1])))
new_teams_set.add(v[2])
current_teams = get_current_teams(ffrom)
print (current_teams)
#Remove all non participitating teams from the table
np_teams = set(current_teams) - new_teams_set
for team in np_teams:
#print (team)
scn.frame_set(ffrom)
models[team].keyframe_insert(data_path='location')
models[team].keyframe_insert(data_path='scale')
scn.frame_set(ffrom+frame)
models[team].location[1] = outlier
models[team].scale[2] = 0
models[team].keyframe_insert(data_path='location')
models[team].keyframe_insert(data_path='scale')
setNameLocation(ffrom, frame, team, outlier)
setLogoLocation(ffrom, frame, team, outlier)
setPointLocation(ffrom, frame, team, outlier, 0)
#Move the old teams in order
current_teams = list(filter(lambda x: x not in np_teams, current_teams))
current_number = len(current_teams)
for i,team in enumerate(current_teams):
scn.frame_set(ffrom)
models[team].keyframe_insert(data_path='location')
scn.frame_set(ffrom+frame)
models[team].location[1] = position1[i]
models[team].keyframe_insert(data_path='location')
setNameLocation(ffrom, frame, team, position1[i])
setLogoLocation(ffrom, frame, team, position1[i])
setPointLocation(ffrom, frame, team, position1[i])
#Add new teams
new_teams_set = new_teams_set - set(current_teams)
for i,team in enumerate(new_teams_set):
scn.frame_set(ffrom)
models[team].keyframe_insert(data_path='location')
scn.frame_set(ffrom+frame)
models[team].location[1] = position1[current_number+i]
models[team].keyframe_insert(data_path='location')
setNameLocation(ffrom, frame, team, position1[current_number+i])
setLogoLocation(ffrom, frame, team, position1[current_number+i])
setPointLocation(ffrom, frame, team, position1[current_number+i])
def league_type_1(year, ffrom, frame, scale=10):
new_teams = []
new_teams_set = set()
fp = data_file_path + "/" + year + ".csv"
with open(fp, 'r', encoding="utf-8") as csvfile:
rdr = csv.reader(csvfile)
for i,v in enumerate(rdr):
new_teams.append((i,v[2],int(v[1])))
new_teams_set.add(v[2])
#print (new_teams)
for team in new_teams:
scn.frame_set(ffrom)
models[team[1]].keyframe_insert(data_path='location')
models[team[1]].keyframe_insert(data_path='scale')
scn.frame_set(ffrom+frame)
models[team[1]].location[1] = position1[team[0]]
models[team[1]].scale[2] = team[2] / scale
models[team[1]].keyframe_insert(data_path='location')
models[team[1]].keyframe_insert(data_path='scale')
setNameLocation(ffrom, frame, team[1], position1[team[0]])
setLogoLocation(ffrom, frame, team[1], position1[team[0]])
setPointLocation(ffrom, frame, team[1], position1[team[0]], team[2])
def league_type_3(year, ffrom, frame, scale):
league_type_1(year,ffrom,frame, scale)
def league_type_4(year, ffrom, frame, scale):
league_type_1(year,ffrom,frame, scale)
def post_season(year, ffrom, frame, scale):
league_type_1(year+'p',ffrom,frame, scale)
def league_type_5(year, ffrom, frame, scale):
league_type_1(year,ffrom,frame, scale)
def split(year, ffrom, frame, gap=2, scale=10):
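    # Split rounds: Group A fills the top of the table and Group B is placed further down, offset by the gap parameter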
new_teams = []
new_teams_set = set()
#GROUP A
fp = data_file_path + "/" + year + "a.csv"
with open(fp, 'r', encoding="utf-8") as csvfile:
rdr = csv.reader(csvfile)
for i,v in enumerate(rdr):
new_teams.append((i,v[2],int(v[1])))
new_teams_set.add(v[2])
length = len(new_teams)
for team in new_teams:
scn.frame_set(ffrom)
models[team[1]].keyframe_insert(data_path='location')
models[team[1]].keyframe_insert(data_path='scale')
scn.frame_set(ffrom+frame)
models[team[1]].location[1] = position1[team[0]]
models[team[1]].scale[2] = team[2] / scale
models[team[1]].keyframe_insert(data_path='location')
models[team[1]].keyframe_insert(data_path='scale')
setNameLocation(ffrom, frame, team[1], position1[team[0]])
setLogoLocation(ffrom, frame, team[1], position1[team[0]])
setPointLocation(ffrom, frame, team[1], position1[team[0]], team[2])
#GROUP B
new_teams = []
new_teams_set = set()
fp = data_file_path + "/" + year + "b.csv"
with open(fp, 'r', encoding="utf-8") as csvfile:
rdr = csv.reader(csvfile)
for i,v in enumerate(rdr):
new_teams.append((i,v[2],int(v[1])))
new_teams_set.add(v[2])
length = len(new_teams)
for team in new_teams:
scn.frame_set(ffrom)
models[team[1]].keyframe_insert(data_path='location')
models[team[1]].keyframe_insert(data_path='scale')
scn.frame_set(ffrom+frame)
models[team[1]].location[1] = position1[length-1+gap+team[0]]
models[team[1]].scale[2] = team[2] / scale
models[team[1]].keyframe_insert(data_path='location')
models[team[1]].keyframe_insert(data_path='scale')
setNameLocation(ffrom, frame, team[1],position1[length-1+gap+team[0]])
setLogoLocation(ffrom, frame, team[1],position1[length-1+gap+team[0]])
setPointLocation(ffrom, frame, team[1],position1[length-1+gap+team[0]], team[2])
'''
reset()
transition("1983", 0, 5)
league_type_1("1983", 5, 50, 40)
transition("1984a", 105, 15)
league_type_1("1984a", 120, 50, 40)
league_type_1("1984b", 195, 50, 40)
league_type_1("1984c", 270, 25, 40)
transition("1985", 345, 15)
league_type_1("1985", 360, 50, 40)
transition("1986a", 460, 15)
league_type_1("1986a", 475, 50, 40)
league_type_1("1986b", 550, 50, 40)
league_type_1("1986c", 625, 25, 40)
transition("1987", 700, 15)
league_type_1("1987", 715, 50, 40)
transition("1988", 815, 15)
league_type_1("1988", 830, 50, 40)
transition("1989", 930, 15)
league_type_1("1989", 945, 50, 40)
transition("1990", 1045, 15)
league_type_1("1990", 1060, 50, 40)
transition("1991", 1160, 15)
league_type_1("1991", 1175, 50, 40)
transition("1992", 1275, 15)
league_type_1("1992", 1290, 50, 40)
transition("1993", 1390, 15)
league_type_1("1993", 1405, 50, 40)
transition("1994", 1505, 15)
league_type_1("1994", 1520, 50, 40)
transition("1995a", 1620, 15)
league_type_1("1995a", 1635, 50, 40)
league_type_1("1995b", 1710, 50, 40)
league_type_1("1995c", 1785, 25, 40)
transition("1996a", 1860, 15)
league_type_1("1996a", 1875, 50, 40)
league_type_1("1996b", 1950, 50, 40)
league_type_1("1996c", 2025, 25, 40)
transition("1997", 2100, 15)
league_type_1("1997", 2115, 50, 40)
transition("1998", 2215, 15)
league_type_1("1998", 2230, 50, 40)
post_season("1998", 2305, 25, 40)
transition("1999", 2380, 15)
league_type_1("1999", 2395, 50, 40)
post_season("1999", 2470, 25, 40)
transition("2000", 2545, 15)
league_type_1("2000", 2560, 50, 40)
post_season("2000", 2635, 25, 40)
transition("2001", 2710, 15)
league_type_1("2001", 2725, 50, 40)
transition("2002", 2825, 15)
league_type_1("2002", 2840, 50, 40)
transition("2003", 2940, 15)
league_type_1("2003", 2955, 50, 40)
transition("2004a", 3055, 15)
league_type_1("2004a", 3070, 50, 40)
league_type_1("2004b", 3145, 50, 40)
league_type_1("2004c", 3220, 40, 40)
league_type_1("2004d", 3285, 25, 40)
transition("2005a", 3360, 15)
league_type_1("2005a", 3375, 50, 40)
league_type_1("2005b", 3450, 50, 40)
league_type_1("2005c", 3525, 40, 40)
league_type_1("2005d", 3590, 25, 40)
transition("2006a", 3665, 15)
league_type_1("2006a", 3680, 50, 40)
league_type_1("2006b", 3680+50+25, 50, 40)
league_type_1("2006c", 3680+50+25+50+25, 40, 40)
league_type_1("2006d", 3695+50+25+50+25+25+25, 25, 40)
transition("2007a", 3970, 15)
league_type_1("2007a", 3985, 50, 40)
league_type_1("2007b", 3985+50+25, 25, 40)
transition("2008a", 4135, 15)
league_type_1("2008a", 4150, 50, 40)
league_type_1("2008b", 4150+50+25, 25, 40)
transition("2009a", 4300, 15)
league_type_1("2009a", 4315, 50, 40)
league_type_1("2009b", 4315+50+25, 25, 40)
transition("2010a", 4465, 15)
league_type_1("2010a", 4480, 50, 40)
league_type_1("2010b", 4480+50+25, 25, 40)
transition("2011a", 4630, 15)
league_type_1("2011a", 4645, 50, 40)
league_type_1("2011b", 4645+50+25, 25, 40)
transition("2012", 4795, 15)
league_type_1("2012", 4810, 50, 40)
split("2012", 4885, 40, 2, 40)
transition("2013", 4975, 15)
league_type_1("2013", 4990, 50, 40)
split("2013", 4990+50+25, 40, 2, 40)
transition("2014", 5155, 15)
league_type_1("2014", 5170, 50, 40)
split("2014", 5245, 40, 2, 40)
transition("2015", 5335, 15)
league_type_1("2015", 5350, 50, 40)
split("2015", 5350+50+25, 40, 2, 40)
transition("2016", 5515, 15)
league_type_1("2016", 5530, 50, 40)
split("2016", 5530+50+25, 40, 2, 40)
transition("2017", 5700, 15)
league_type_1("2017", 5715, 50, 40)
split("2017", 5715+50+25, 40, 2, 40)
transition("2018", 5880, 15)
league_type_1("2018", 5910-15, 50, 40)
split("2018", 5910+50+25-15, 40, 2, 40)
'''
|
baehs1989/blender-script
|
K_LEAGUE.py
|
K_LEAGUE.py
|
py
| 15,516 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37303345220
|
# Superposition of 2 spirals
import tkinter
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import matplotlib.animation as animation
import numpy as np
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
def change_k1(value):
global k1
k1 = float(value)
def change_k2(value):
global k2
k2 = float(value)
def change_o1(value):
global omega1
omega1 = float(value)
def change_o2(value):
global omega2
omega2 = float(value)
def change_a1(value):
global a1
a1 = float(value)
def change_a2(value):
global a2
a2 = float(value)
def reset_parameter():
global k1, k2, omega1, omega2, a1, a2
k1 = 1.
k2 = 1.
omega1 = omega_max
omega2 = omega_min
a1 = a_max
a2 = a_max
# Initial value of spinbox
var_a1.set(a1)
var_k1.set(k1)
var_o1.set(omega1)
var_a2.set(a2)
var_k2.set(k2)
var_o2.set(omega2)
def set_axis():
ax1.set_xlim(x_min, x_max)
ax1.set_ylim(y_min, y_max)
ax1.set_zlim(z_min, z_max)
ax1.set_title('2 spirals ')
ax1.set_xlabel('x * pi')
ax1.set_ylabel('y')
# ax1.set_zlabel('z * i')
ax1.grid()
ax2.set_xlim(x_min, x_max)
ax2.set_ylim(y_min, y_max)
ax2.set_zlim(z_min, z_max)
ax2.set_title('Superposed spiral')
ax2.set_xlabel('x * pi')
ax2.set_ylabel('y')
ax2.set_zlabel('z * i')
ax2.grid()
def update(f):
ax1.cla() # Clear ax
ax2.cla() # Clear ax
set_axis()
ax1.text(x_min, y_max, z_max * 1.3, "Step(as t)=" + str(f))
if k1 != 0:
vp1 = omega1 / k1
ax1.text(x_min, y_max, z_max * 1.0, "Phase velocity1(omega1/k1)=" + str(vp1))
if k2 != 0:
vp2 = omega2 / k2
ax1.text(x_min, y_max, z_max * 0.7, "Phase velocity2(omega2/k2)=" + str(vp2))
if (k1 - k2) != 0:
vg = (omega1 - omega2) / (k1 - k2)
ax2.text(x_min, y_max, z_max * 1.3, "Group velocity(d_omega/dk)")
ax2.text(x_min, y_max, z_max * 1.0, "=(omega1-omega2)/(k1-k2)=" + str(vg))
else:
ax2.text(x_min, y_max, z_max * 1.3, "None group velocity")
# Draw a circle
c1 = Circle((0, 0), 1, ec='gray', fill=False)
ax1.add_patch(c1)
art3d.pathpatch_2d_to_3d(c1, z=0, zdir="x")
c2 = Circle((0, 0), 1, ec='gray', fill=False)
ax2.add_patch(c2)
art3d.pathpatch_2d_to_3d(c2, z=0, zdir="x")
# Draw a center line
line1 = art3d.Line3D([x_min, x_max], [0, 0], [0, 0], color='gray', ls="-.", linewidth=1)
ax1.add_line(line1)
line2 = art3d.Line3D([x_min, x_max], [0, 0], [0, 0], color='gray', ls="-.", linewidth=1)
ax2.add_line(line2)
# Draw sine wave and superposed
y1 = a1 * np.sin((k1 * x - omega1 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
ax1.plot(x, y1, z_min, color='gray', ls="-", linewidth=1)
y2 = a2 * np.sin((k2 * x - omega2 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
ax1.plot(x, y2, z_min, color='gray', ls="--", linewidth=1)
ax2.plot(x, y1 + y2, z_min, color='gray', ls="-", linewidth=1)
# Draw cosine wave and superposed
y = x * 0. + y_max
z1 = a1 * np.cos((k1 * x - omega1 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
ax1.plot(x, y, z1, color='gray', ls="-", linewidth=1)
z2 = a2 * np.cos((k2 * x - omega2 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
ax1.plot(x, y, z2, color='gray', ls="--", linewidth=1)
ax2.plot(x, y, z1 + z2, color='gray', ls="-", linewidth=1)
# Draw additional lines
inter = abs(x_max - x_min) / num_additional_lines
for i in range(num_additional_lines):
xx = i * inter
yy1 = a1 * np.sin((k1 * xx - omega1 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
zz1 = a1 * np.cos((k1 * xx - omega1 * f) * np.pi)
yy2 = a2 * np.sin((k2 * xx - omega2 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
zz2 = a2 * np.cos((k2 * xx - omega2 * f) * np.pi)
line = art3d.Line3D([xx, xx], [0, yy1 + yy2], [0, zz1 + zz2], color='gray', ls="--", linewidth=1)
ax2.add_line(line)
# Draw spiral
y1 = a1 * np.sin((k1 * x - omega1 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
z1 = a1 * np.cos((k1 * x - omega1 * f) * np.pi)
ax1.plot(x, y1, z1, label="A1=" + str(a1) + ", k1=" + str(k1) + ", omega1=" + str(omega1))
y2 = a2 * np.sin((k2 * x - omega2 * f) * np.pi) # Note: math.pi for adjustment x axis as x * pi
z2 = a2 * np.cos((k2 * x - omega2 * f) * np.pi)
ax1.plot(x, y2, z2, label="A2=" + str(a2) + ", k2=" + str(k2) + ", omega2=" + str(omega2))
ax1.legend(prop={"size": 8}, loc="best")
# Draw superposed
ax2.plot(x, y1 + y2, z1 + z2)
# Global variables
x_min = 0.
x_max = 10.
y_min = -2.
y_max = 2.
z_min = -2.
z_max = 2.
num_additional_lines = 100
# Parameter of spiral
k1 = 1.
k2 = 1.
k_min = 0.
k_max = 20.
k_step = 1.
omega1 = 0.1
omega2 = - 0.1
omega_min = -0.25
omega_max = 0.25
omega_step = 0.01
a1 = 1.
a2 = 1.
a_min = 0.
a_max = 1.
a_step = 0.1
# Generate arrange
x = np.arange(x_min, x_max, 0.005)
# Generate tkinter
root = tkinter.Tk()
root.title("Spiral")
# Generate figure and axes
fig = Figure(figsize=(10, 6))
ax1 = fig.add_subplot(121, projection='3d')
ax1.set_box_aspect((2, 1, 1))
ax2 = fig.add_subplot(122, projection='3d')
ax2.set_box_aspect((2, 1, 1))
# Embed a figure in canvas
canvas = FigureCanvasTkAgg(fig, root)
canvas.get_tk_widget().pack()
# Animation
anim = animation.FuncAnimation(fig, update, interval=50)
# Toolbar
toolbar = NavigationToolbar2Tk(canvas, root)
canvas.get_tk_widget().pack()
# Label and spinbox for a1
label_a1 = tkinter.Label(root, text="A1")
label_a1.pack(side='left')
var_a1 = tkinter.StringVar(root) # variable for spinbox-value
var_a1.set(a1) # Initial value
s_a1 = tkinter.Spinbox(root, textvariable=var_a1, format="%.1f", from_=a_min, to=a_max, increment=a_step, command=lambda: change_a1(var_a1.get()), width=5)
s_a1.pack(side='left')
# Label and spinbox for k1
label_k1 = tkinter.Label(root, text="k1")
label_k1.pack(side='left')
var_k1 = tkinter.StringVar(root) # variable for spinbox-value
var_k1.set(k1) # Initial value
s_k1 = tkinter.Spinbox(root, textvariable=var_k1, format="%.1f", from_=k_min, to=k_max, increment=k_step, command=lambda: change_k1(var_k1.get()), width=5)
s_k1.pack(side='left')
# Label and spinbox for omega1
label_o1 = tkinter.Label(root, text="omega1")
label_o1.pack(side='left')
var_o1 = tkinter.StringVar(root) # variable for spinbox-value
var_o1.set(omega1) # Initial value
s_o1 = tkinter.Spinbox(root, textvariable=var_o1, format="%.2f", from_=omega_min, to=omega_max, increment=omega_step, command=lambda: change_o1(var_o1.get()), width=5)
s_o1.pack(side='left')
# Label and spinbox for a2
label_a2 = tkinter.Label(root, text=", A2")
label_a2.pack(side='left')
var_a2 = tkinter.StringVar(root) # variable for spinbox-value
var_a2.set(a2) # Initial value
s_a2 = tkinter.Spinbox(root, textvariable=var_a2, format="%.1f", from_=a_min, to=a_max, increment=a_step, command=lambda: change_a2(var_a2.get()), width=5)
s_a2.pack(side='left')
# Label and spinbox for k2
label_k2 = tkinter.Label(root, text="k2")
label_k2.pack(side='left')
var_k2 = tkinter.StringVar(root) # variable for spinbox-value
var_k2.set(k2) # Initial value
s_k2 = tkinter.Spinbox(root, textvariable=var_k2, format="%.1f", from_=k_min, to=k_max, increment=k_step, command=lambda: change_k2(var_k2.get()), width=5)
s_k2.pack(side='left')
# Label and spinbox for omega2
label_o2 = tkinter.Label(root, text="omega2")
label_o2.pack(side='left')
var_o2 = tkinter.StringVar(root) # variable for spinbox-value
var_o2.set(omega2) # Initial value
s_o2 = tkinter.Spinbox(root, textvariable=var_o2, format="%.2f", from_=omega_min, to=omega_max, increment=omega_step, command=lambda: change_o2(var_o2.get()), width=5)
s_o2.pack(side='left')
# Reset button
b_reset = tkinter.Button(root, text="Reset", command=reset_parameter)
b_reset.pack(side='left')
# main loop
set_axis()
tkinter.mainloop()
|
marukatsutech/superposition_of_2_spirals
|
superposition_of_2_spirals.py
|
superposition_of_2_spirals.py
|
py
| 8,121 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41236219255
|
"""myProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
urlpatterns = [
path('admin/', admin.site.urls),
path('students/', include('students.urls')),
path("sers/", include("sers.urls")),
path("model_serializer/", include("model_serializer.urls")),
path("school/", include("school.urls")),
path("req/", include("req.urls")),
path("demo/", include("view.urls")),
path("generic/", include("generic.urls")),
path("mixin/", include("mixin.urls")),
path("mixin-generic/", include("mixin_generic.urls")),
path("viewset/", include("viewset.urls")),
path("generic-viewset/", include("generic_viewset.urls")),
path("mixin-generic-viewset/", include("mixin_generic_viewset.urls")),
path("mixin-generic-viewset-router/", include("mixin_generic_viewset_router.urls")),
path("authenticate_permission/", include("authenticate_permission.urls")),
path("throttle_test/", include("throttle_test.urls")),
path("filter/", include("filter.urls")),
path("pagination/", include("pagination.urls")),
path("exception/", include("exception.urls")),
path("docs/", include_docs_urls(title="站点doc")),
path("docs-drf-yasg/", include("drf_yasg_doc.urls")),
]
|
beishangongzi/myProject
|
myProject/urls.py
|
urls.py
|
py
| 1,926 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36193733670
|
from graftm.graftm_package import GraftMPackage, GraftMPackageVersion3
import dendropy
import logging
import tempfile
from Bio import SeqIO
import extern
from .singlem_package import SingleMPackageVersion2
import shutil
import os
import tempdir
class PackageCreator:
def create(self, **kwargs):
input_graftm_package_path = kwargs.pop('input_graftm_package')
output_singlem_package_path = kwargs.pop('output_singlem_package')
hmm_position = kwargs.pop('hmm_position')
window_size = kwargs.pop('window_size')
force = kwargs.pop('force')
if len(kwargs) > 0:
raise Exception("Unexpected arguments detected: %s" % kwargs)
if force and os.path.exists(output_singlem_package_path):
shutil.rmtree(output_singlem_package_path)
# For protein packages, remove sequences from diamond database that are
# not in the tree so that hits can be mapped onto the tree and used for
# alpha and beta diversity metrics.
gpkg = GraftMPackage.acquire(input_graftm_package_path)
is_protein_package = SingleMPackageVersion2.graftm_package_is_protein(gpkg)
logging.info("Detected package type as %s" %
('protein' if is_protein_package else 'nucleotide'))
if is_protein_package:
tree_leaves = set()
for node in dendropy.Tree.get(
path=gpkg.reference_package_tree_path(),
schema='newick').leaf_node_iter():
# need to replace here because otherwise they don't line up with the
# diamond database IDs
node_name = node.taxon.label.replace(' ','_')
if node_name in tree_leaves:
raise Exception("Found duplicate tree leaf name in graftm package "
"tree. Currently this case is not handled, sorry")
tree_leaves.add(node_name)
for name in tree_leaves: #I don't think there is a 'peek' ?
eg_name = name
break
logging.info("Read in %i tree tip names e.g. %s" % (
len(tree_leaves), eg_name))
# Make a new fasta file of all the sequences that are leaves
found_sequence_names = set()
num_seqs_unaligned = 0
filtered_aligned_tempfile = tempfile.NamedTemporaryFile(
prefix='singlem_package_creator',
suffix='.fasta',
mode='w')
for s in SeqIO.parse(gpkg.unaligned_sequence_database_path(), "fasta"):
num_seqs_unaligned += 1
if s.id in tree_leaves:
if s.id in found_sequence_names:
raise Exception("Found duplicate sequence names in graftm unaligned"
" sequence fasta file. Currently this case is not handled,"
" sorry")
SeqIO.write([s], filtered_aligned_tempfile, "fasta")
found_sequence_names.add(s.id)
filtered_aligned_tempfile.flush()
if len(tree_leaves) != len(found_sequence_names):
for t in tree_leaves:
if t not in found_sequence_names:
raise Exception("Found some sequences that were in the tree but not the"
" unaligned sequences database e.g. %s. Something is"
" likely amiss with the input GraftM package" % t)
raise Exception("Programming error, shouldn't get here")
logging.info("All %i sequences found in tree extracted successfully from unaligned"
" sequences fasta file, which originally had %i sequences" % (
len(found_sequence_names), num_seqs_unaligned))
# Create a new diamond database
dmnd_tf = tempfile.NamedTemporaryFile(prefix='singlem_package_creator',suffix='.dmnd')
cmd = "diamond makedb --in '%s' -d '%s'" % (filtered_aligned_tempfile.name, dmnd_tf.name)
logging.info("Creating DIAMOND database")
extern.run(cmd)
# Compile the final graftm/singlem package
if len(gpkg.search_hmm_paths()) == 1 and \
gpkg.search_hmm_paths()[0] == gpkg.alignment_hmm_path():
search_hmms = None
else:
search_hmms = gpkg.search_hmm_paths()
with tempdir.TempDir() as tmpdir:
gpkg_name = os.path.join(
tmpdir,
os.path.basename(
os.path.abspath(input_graftm_package_path)).replace('.gpkg',''))
GraftMPackageVersion3.compile(gpkg_name,
gpkg.reference_package_path(),
gpkg.alignment_hmm_path(),
dmnd_tf.name if is_protein_package else None,
gpkg.maximum_range(),
filtered_aligned_tempfile.name if is_protein_package else \
gpkg.unaligned_sequence_database_path(),
gpkg.use_hmm_trusted_cutoff(),
search_hmms)
logging.debug("Finished creating GraftM package for conversion to SingleM package")
SingleMPackageVersion2.compile(output_singlem_package_path,
gpkg_name, hmm_position, window_size)
shutil.rmtree(gpkg_name)
if is_protein_package:
filtered_aligned_tempfile.close()
dmnd_tf.close()
logging.info("SingleM-compatible package creation finished")
|
ye00ye/singlem
|
singlem/package_creator.py
|
package_creator.py
|
py
| 5,879 |
python
|
en
|
code
| null |
github-code
|
6
|
28914738818
|
import os
import unittest
import json
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
from app import create_app
from models import setup_db, Movie, Actor
class CapstoneTestCase(unittest.TestCase):
"""This class represents the trivia test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_name = "capstone1"
self.database_path = "postgres://{}:{}@{}/{}".format(
'postgres',
'123456',
'localhost:5432',
self.database_name)
setup_db(self.app)
self.header = {'Content-Type': 'application/json', 'Authorization': "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6InRQVVJiUEVPYTBVZWt4YmE0MVhjayJ9.eyJpc3MiOiJodHRwczovL2Rldi11ZGFjaXR5LWNhcHN0b25lMjAyMS51cy5hdXRoMC5jb20vIiwic3ViIjoiYXV0aDB8NjEwMjBlMzZjNjFmZDcwMDc3ZDA1OWEzIiwiYXVkIjoiaHR0cDovL2xvY2FsaG9zdDo1MDAwL2FwaSIsImlhdCI6MTYzMDIwMTUyMCwiZXhwIjoxNjMwMjg3OTIwLCJhenAiOiJTZWxNZ3U5RUdWRVBjNzZCdW9DaWZ1cklkOGxkendFQiIsInNjb3BlIjoiIiwicGVybWlzc2lvbnMiOlsiZGVsZXRlOmFjdG9yIiwiZGVsZXRlOm1vdmllIiwiZ2V0OmFjdG9ycyIsImdldDptb3ZpZXMiLCJwYXRjaDphY3RvciIsInBhdGNoOm1vdmllIiwicG9zdDphY3RvciIsInBvc3Q6bW92aWUiXX0.NncE9PLAGT1t0hvoZTKqeKqEYwe8SgbV-5KN-D61CNMMt4k16Dkw-nVi_0V0VzxnI3dFgRzNFZ-XnbFeej_lV583pGURGYjr8n362NI7AeumnC8ONO7na0rAgSzx-IrQ-eE9ANcNjcvOCBq_S2e6KBHbDNJLQ19kC9AhHOA6QmVzg_fmyDUkWOiVybOzaj6Zn2UaDnviRYRINWaL_jR-_PqrNCP3k6XcxA5p38y73tXAqj2TWHqGw99oQLyRBrPH2n8PQc5HC3HSFn-ZEPJUYhK0gOnBApTqADVstGSrahkgKG3pVDiyI2hE2FSxB0h4jfxNAgUmcuweeJ8_ajVyhQ"}
# binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
def tearDown(self):
"""Executed after reach test"""
pass
#GET MOVIES
def test_get_movies(self):
res = self.client().get('http://localhost:5000/api/movies', headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(len(data['movies']))
self.assertTrue(data['total_movies'])
def test_404_if_movies_doesnt_exist(self):
res = self.client().get('http://localhost:5000/api/moviss', headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'resource not found')
# #POST MOVIE
def test_post_new_movie(self):
res = self.client().post(
'http://localhost:5000/api/movies/create',
json={
'title': 'Ocean Eyes',
'release_date': '2018-10-01'
},
headers=self.header)
movies = Movie.query.all()
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(len(movies))
def test_422_if_new_movie_is_unprocessable(self):
res = self.client().post(
'http://localhost:5000/api/movies/create',
json={'title': ""},
headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'unprocessable')
#DELETE MOVIES
def test_delete_movie(self):
res = self.client().delete('http://localhost:5000/api/movies/43', headers=self.header)
data = json.loads(res.data)
movie = Movie.query.filter(Movie.id == 43).one_or_none()
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(movie, None)
def test_404_if_movie_delete_doesnt_exist(self):
res = self.client().delete('http://localhost:5000/api/movies/1000', headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'resource not found')
#PATCH MOVIES
def test_patch_movie(self):
res = self.client().patch(
'http://localhost:5000/api/movies/11',
json={
'title': 'The Gifteddddddddddddddd',
'release_date': '2000-02-01'
},
headers=self.header)
movies = Movie.query.all()
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(len(movies))
def test_404_if_movie_patch_doesnt_exist(self):
res = self.client().patch(
'http://localhost:5000/api/movies/8000',
json={
'title': '',
'release_date':''
},
headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'resource not found')
def test_422_if_patch_movie_is_unprocessable(self):
res = self.client().patch(
'http://localhost:5000/api/movies/6',
json={'title': ""},
headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'unprocessable')
# #GET ACTORS
def test_get_actors(self):
res = self.client().get('/api/actors', headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(len(data['actors']))
self.assertTrue(data['total_actors'])
def test_404_if_actors_doesnt_exist(self):
res = self.client().get('http://localhost:5000/api/actores', headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'resource not found')
# #POST ACTOR
def test_post_new_actor(self):
res = self.client().post(
'http://localhost:5000/api/actors/create',
json={
'name': 'Viola Davis',
'age': 51,
'gender': 'F'
},
headers=self.header)
actors = Movie.query.all()
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(len(actors))
def test_422_if_new_actor_is_unprocessable(self):
res = self.client().post(
'http://localhost:5000/api/actors/create',
json={},
headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'unprocessable')
#DELETE ACTOR
def test_delete_actor(self):
res = self.client().delete('http://localhost:5000/api/actors/88', headers=self.header)
data = json.loads(res.data)
actor = Actor.query.filter(Actor.id == 88).one_or_none()
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(actor, None)
def test_404_if_actor_delete_doesnt_exist(self):
res = self.client().delete('http://localhost:5000/api/actors/10000', headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'resource not found')
#PATCH ACTOR
def test_patch_actor(self):
res = self.client().patch(
'http://localhost:5000/api/actors/19',
json={
'name': 'Steve Carrell',
'age': 58,
'gender': 'M'
},
headers=self.header)
actors = Actor.query.all()
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(len(actors))
def test_404_if_actor_patch_doesnt_exist(self):
res = self.client().patch(
'http://localhost:5000/api/actors/8000',
json={
'name': 'pepe grillo',
'age': '',
'gender': ''
},
headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'resource not found')
def test_422_if_patch_actor_is_unprocessable(self):
res = self.client().patch(
'http://localhost:5000/api/actors/8',
json={},
headers=self.header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'unprocessable')
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
|
steffaru/FSND_CaptionProject
|
test_flaskr.py
|
test_flaskr.py
|
py
| 9,453 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5092386647
|
S = input()
T = input()
N = len(S)
res = "No"
for i in range(N+1):
r = S[-1] + S[:N-1]
if r == T:
res = "Yes"
break
S = r
print(res)
|
sudiptob2/atcoder-training
|
Easy 100/60. String Rotation.py
|
60. String Rotation.py
|
py
| 163 |
python
|
en
|
code
| 2 |
github-code
|
6
|
41873337753
|
import sys
from firebaseConfig import firebase
from config import CLAN_CODE, CLAN_INFO_FILE_PATH, CLAN_QUEST_INFO_FILE_PATH
from clanDatabase import ClanDatabase
def main():
db = ClanDatabase(
CLAN_CODE,
CLAN_INFO_FILE_PATH,
CLAN_QUEST_INFO_FILE_PATH,
firebase.database()
)
process_command(db)
def process_command(db):
done = False
help = (
"Available commands:\n\n" +
"1. (initial_load): populates the database with clan info (note: no clan quests), if your clan is not in the database. Else, it fails and does nothing.\n\n" +
"2. (update_stats): updates all clan members' stats in the database (note: no clan quest damage).\n\n" +
"3. (update_damages): updates all clan members' damages in the database (adds damage to an array of existing damages).\n\n" +
"4. (weekly_reset): move current week's stats and damages to last week and reset current week.\n\n"
)
print(help)
while (not done):
try:
command = input()
if (command == "initial_load"):
db.initial_load_clan_info()
done = True
elif (command == "update_stats"):
db.update_everyone_stats()
done = True
elif (command == "update_damages"):
db.update_everyone_damage()
done = True
elif (command == "weekly_reset"):
db.weekly_reset()
done = True
else:
print("Sorry command not understood. Try Again.")
except Exception as error:
print(error)
if __name__ == "__main__":
main()
|
ygongdev/FishBotScripts
|
main.py
|
main.py
|
py
| 1,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30217328394
|
# Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#Import all datasets
df_customers=pd.read_csv('olist_customers_dataset.csv')
df_geolocation=pd.read_csv('olist_geolocation_dataset.csv')
df_order_items=pd.read_csv('olist_order_items_dataset.csv')
df_order_pay=pd.read_csv('olist_order_payments_dataset.csv')
df_order_reviews=pd.read_csv('olist_order_reviews_dataset.csv')
df_orders=pd.read_csv('olist_orders_dataset.csv')
df_products=pd.read_csv('olist_products_dataset.csv')
df_sellers=pd.read_csv('olist_sellers_dataset.csv')
df_product_cat=pd.read_csv('product_category_name_translation.csv')
## Merge datasets for one big with all informations
df=pd.merge(df_orders,df_order_items,on='order_id', how='right')
df=df.merge(df_products, on='product_id')
df=df.merge(df_order_reviews,on='order_id')
df=df.merge(df_sellers,on='seller_id')
df=df.merge(df_customers,on='customer_id')
df = df.rename(columns={'price':'product_price','order_item_id':'quantity'})
df = df.drop(['review_id', 'review_creation_date','review_answer_timestamp','review_comment_title','review_comment_message','customer_id'], axis=1)
print(df.groupby(by='order_status').count()) #Take look at the distribution of order status
df = df[df['order_status'] == 'delivered'] # just delivered products are relevant for rating_review
## Creating Features for Dataset: Product avg Score, Product Price avg, Seller Score avg
#Create product score and product avg price
product_scored=df.groupby(by='product_id')['review_score'].mean()
product_avg_price=df.groupby(by='product_id')['product_price'].mean()
df_product_calc=pd.concat([product_scored,product_avg_price],axis=1)
df_product_calc=df_product_calc.reset_index()
df_product_calc=df_product_calc.rename(columns={'review_score':'score_product_avg','product_price':'product_price_avg'})
#Create Seller Score
seller_scored=df.groupby(by='seller_id')['review_score'].mean()
df_seller_scored=pd.DataFrame(data=seller_scored)
df_seller_scored=df_seller_scored.reset_index()
df_seller_scored=df_seller_scored.rename(columns={'review_score':'seller_score_avg'})
#Merge new Features to major dataset
df=df.merge(df_product_calc, on='product_id')
df=df.merge(df_seller_scored, on='seller_id')
#Show all nan_values
#sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='terrain')
dfnull = df[df['product_name_lenght'].isna()] # rows with missing values; comparing to the string 'nan' would match nothing
## Dimensions Reduction - PCA and Feature Selection
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
X=df[['product_price','freight_value','product_name_lenght','product_description_lenght','product_photos_qty',
'product_weight_g','product_length_cm','product_height_cm','product_width_cm','score_product_avg','product_price_avg','seller_score_avg','review_score']]
X=X.dropna()
y = X['review_score']
X=X.drop(['review_score'],axis=1)
chi2_selector = SelectKBest(chi2, k=4).fit(X, y) # fit (not fit_transform) so scores_ is available; k=4 to match the feat1-feat4 columns below
scores = chi2_selector.scores_
X_new = chi2_selector.transform(X)
df_feat=pd.DataFrame(X_new)
df_feat['reviews'] = y
df_feat=df_feat.rename(columns= {0:'feat1',1:'feat2',2:'feat3',3:'feat4'})
#sns.lmplot(x='feat1',y='feat2',data=df_feat,hue='reviews',palette='cubehelix', #hue: splits the plot by a categorical parameter
# #markers=['o','v'], #changes the marker symbols
# scatter_kws={'s':10}) #scatter_kws: sets the size of the markers
## Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_feat[['feat1','feat2','feat3','feat4']], y, test_size=0.30, random_state=101) # test_size: 70% training / 30% test; random_state: seed for the random data split
#Decision Tree
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train,y_train)
predictions_trees = dtree.predict(X_test)
from sklearn.metrics import classification_report,confusion_matrix
matrix_trees= confusion_matrix(y_test,predictions_trees)
report_trees= classification_report(y_test,predictions_trees)
# Suport Vector Machine
from sklearn.svm import SVC
svc_model = SVC() # instantiate the algorithm
svc_model.fit(X_train,y_train) # training
predictions_svm = svc_model.predict(X_test) # prediction
# Gridsearch - see the SVM example
# Finding the right parameters (such as the C or gamma values) is a bit tricky.
# Fortunately, we can be a little "lazy", try a combination of different variants and see what works best.
# The idea of creating a "grid" of parameters and simply trying them out is called "gridsearch".
# Since this approach is quite common, scikit-learn offers a built-in function called GridSearchCV.
# The CV stands for "cross validation", which means GridSearchCV takes a dictionary describing the parameters to test and a model to train.
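# A minimal GridSearchCV sketch for the SVM above (the parameter grid is illustrative, not part of the original script):
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001], 'kernel': ['rbf']}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2) # tries every parameter combination with cross validation
grid.fit(X_train, y_train)
predictions_grid = grid.best_estimator_.predict(X_test) # predictions from the best parameter combination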
#Decision Tree
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train,y_train)
predictions_trees = dtree.predict(X_test)
#Random Forrest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=10)
rfc.fit(X_train, y_train)
predictions_r_forest = rfc.predict(X_test)
#KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=4,algorithm='auto',metric='euclidean') # model parameters: number of nearest neighbors, algorithm (auto, kd_tree, etc.), metric (euclidean, manhattan, etc.)
knn.fit(X_train,y_train)
predictions_knn =knn.predict(X_test)
## Evaluation
#Classification metrics
from sklearn.metrics import classification_report,confusion_matrix
matrix_svm= confusion_matrix(y_test,predictions_svm)
report_svm= classification_report(y_test,predictions_svm)
matrix_trees= confusion_matrix(y_test,predictions_trees)
report_trees= classification_report(y_test,predictions_trees)
matrix_r_forest= confusion_matrix(y_test,predictions_r_forest)
report_r_forest= classification_report(y_test,predictions_r_forest)
matrix_knn= confusion_matrix(y_test,predictions_knn)
report_knn= classification_report(y_test,predictions_knn)
|
ThePeziBear/MyPythonLibrary
|
Masterthesis/Olist/Data_Manipulation_V2.py
|
Data_Manipulation_V2.py
|
py
| 6,260 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3520676139
|
import json
import os
import time
import pytest
from src.rss_config_io import RssConfigIO
from src.rss_configuration import RssConfiguration
class TestRssConfiguration:
rss = "http://g1.globo.com/dynamo/rss2.xml"
timestamp = [2019, 8, 24, 2, 56, 52, 5, 236, 0]
temp = "missing_file.json"
base_content = json.dumps({rss: {RssConfiguration._timestamp_field: timestamp}})
@pytest.fixture(autouse=True)
def before_after_all(self):
try:
os.remove(self.temp)
except:
pass
yield
with open(RssConfigIO()._file, 'w') as f:
f.write(self.base_content)
def test_is_callable(self):
assert RssConfiguration(self.rss) is not None
def test_config_is_json(self):
json_config = RssConfiguration(self.rss)._config
assert (type(json_config) is dict)
assert json.dumps(json_config) is not None
def test_config_is_consistent(self):
json_config = str(RssConfiguration(self.rss)._config)
json_config2 = str(RssConfiguration(self.rss)._config)
assert (json_config == json_config2)
def test_update_on_destroy(self):
config = RssConfiguration(self.rss)
assert "x" not in config._future_config
config._future_config["x"] = "y"
del config
new_config = RssConfiguration(self.rss)._config
assert type(new_config) is dict
assert new_config["x"] == "y"
def test_default(self):
sample = RssConfiguration('b')._get_default()
assert 'timestamp' in sample
def test_default_values(self):
config = RssConfiguration('non_existing')._config
assert config['timestamp'] is not None
assert type(config['timestamp']) is list
assert config['timestamp'] == list(time.gmtime(1))
def test_get_timestamp(self):
timestamp = RssConfiguration(self.rss).get_timestamp()
assert timestamp is not None
def test_set_timestamp(self):
config = RssConfiguration(self.rss)
timestamp = [2022, 8, 24, 2, 56, 52, 5, 236, 0]
assert timestamp > config._future_config[config._timestamp_field]
config.set_timestamp(timestamp)
assert timestamp == config._future_config[config._timestamp_field]
def test_set_timestamp_no_downgrade(self):
config = RssConfiguration(self.rss)
old_timestamp = config.get_timestamp()
assert old_timestamp == self.timestamp
new_timestamp = list(time.gmtime(200))
assert old_timestamp > new_timestamp
config.set_timestamp(new_timestamp)
assert config.get_timestamp() == old_timestamp
|
maxpeixoto/rss_filterer
|
test/test_rss_configuration.py
|
test_rss_configuration.py
|
py
| 2,655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39028052059
|
import google_auth_oauthlib
import google_auth_oauthlib.flow
scopes = ["https://www.googleapis.com/auth/youtube.force-ssl"]
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = "client_secret.json"
def main():
# Get credentials and create an API client
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
client_secrets_file,
scopes
)
credentials = flow.run_local_server()
print(credentials.to_json())
if __name__ == "__main__":
main()
|
omirete/free-live-news
|
automations/get_youtube_bearer_token.py
|
get_youtube_bearer_token.py
|
py
| 521 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34508803750
|
# https://www.geeksforgeeks.org/next-higher-palindromic-number-using-set-digits/
# def next_palindrome(num):
# nums= [int(x) for x in num]
# if len(num) <= 3:
# return -1
# from collections import Counter
# t = Counter(nums)
# palins =[x for x,y in t.items() if y%2 ==0]
# palins=sorted(palins)
# palins.remove(int(num[0]))
# palins.append(int(num[0]))
# st=[None]*len(nums)
# palins.extend([x for x,y in t.items() if y%2 !=0])
# for i in range(len(palins)):
# # print(palins[i])
# st[i] = palins[i]
# st[-(i+1)] = palins[i]
# return st
# function to reverse the digits in the
# range i to j in 'num'
def reverse(num, i, j) :
while (i < j) :
temp = num[i]
num[i] = num[j]
num[j] = temp
i = i + 1
j = j - 1
# function to find next higher palindromic
# number using the same set of digits
def nextPalin(st) :
num = list(st)
# print(num)
n = len(st)
# if length of number is less than '3'
# then no higher palindromic number
# can be formed
if (n <= 3) :
# print "Not Possible"
return -1
# find the index of last digit
# in the 1st half of 'num'
mid = n // 2 - 1
# Start from the (mid-1)th digit and
# find the first digit that is
# smaller than the digit next to it.
i = mid - 1
# print(i)
while i >= 0 :
if (num[i] < num[i + 1]) :
break
i = i - 1
# If no such digit is found, then all
# digits are in descending order which
# means there cannot be a greater
# palindromic number with same set of
# digits
if (i < 0) :
# print "Not Possible"
return -1
# Find the smallest digit on right
# side of ith digit which is greater
# than num[i] up to index 'mid'
smallest = i + 1
j = i + 2
while j <= mid :
if (num[j] > num[i] and num[j] <
num[smallest]) :
smallest = j
j = j + 1
# swap num[i] with num[smallest]
temp = num[i]
num[i] = num[smallest]
num[smallest] = temp
# as the number is a palindrome,
# the same swap of digits should
# be performed in the 2nd half of
# 'num'
temp = num[n - i - 1]
num[n - i - 1] = num[n - smallest - 1]
num[n - smallest - 1] = temp
# reverse digits in the range (i+1)
# to mid
reverse(num, i + 1, mid)
# if n is even, then reverse
# digits in the range mid+1 to
# n-i-2
if (n % 2 == 0) :
reverse(num, mid + 1, n - i - 2)
# else if n is odd, then reverse
# digits in the range mid+2 to n-i-2
else :
reverse(num, mid + 2, n - i - 2)
# required next higher palindromic
# number
result = ''.join(num)
return result
if __name__ == "__main__":
num = "35453"
num = "4697557964"
num = '399993'
# print(list(map(int,num.split())))
print(nextPalin(num))
|
ved93/deliberate-practice-challenges
|
code-everyday-challenge/n204_next_palindrom.py
|
n204_next_palindrom.py
|
py
| 3,157 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8331020218
|
import os
import datetime
import mysql.connector
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
# pip install mysql-connector-python
# pip install reportlab
def get_current_date():
# Get the current date in "YYYY-MM-DD" format
return datetime.datetime.now().strftime("%Y-%m-%d")
def get_data_from_mysql():
# Connect to the MySQL database
connection = mysql.connector.connect(
host="localhost",
user="root",
password="root",
database="asistencia_del_cia"
)
cursor = connection.cursor()
# Get the current date in "YYYY-MM-DD" format
current_date = get_current_date()
# Get the data from the "registro" table for the current date
query = f"SELECT id, fecha, nombre, hora FROM registro WHERE fecha = '{current_date}'"
cursor.execute(query)
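# Note: a parameterized query, e.g. cursor.execute("SELECT id, fecha, nombre, hora FROM registro WHERE fecha = %s", (current_date,)),
# would be the safer pattern, although the date here is generated locally rather than taken from user input.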
data = cursor.fetchall()
# Close the connection
cursor.close()
connection.close()
return data
def create_pdf(data):
# Create the PDF file named "lista_asistencia_fecha.pdf"
filename = f"lista_asistencia_{get_current_date()}.pdf"
doc = SimpleDocTemplate(filename, pagesize=letter)
elements = []
# Add the title at the top of the document as a Flowable object
title = f"Attendance list for the day ({get_current_date()})"
title_style = getSampleStyleSheet()['Title']
title_paragraph = Paragraph(title, title_style)
elements.append(title_paragraph)
# Convert the data into a list for the table
data_table = [['ID', 'Date', 'Name', 'Time']] + data
# Create the table
table = Table(data_table)
# Table style
style = TableStyle([
# ... (your style code here, just like before)
])
table.setStyle(style)
# Add the table to the document
elements.append(table)
# Build the document
doc.build(elements)
if __name__ == "__main__":
# Get the data from MySQL
data = get_data_from_mysql()
# Create the PDF with the table data
create_pdf(data)
|
IgnaciodeJesusMedinaUrrunaga/Attendance-Registration-with-Facial-Recognition
|
Attendance_Registration_CIA/Attendance-list-with-facial-recognition-using-Python/transfer_today_records_to_pdf.py
|
transfer_today_records_to_pdf.py
|
py
| 2,145 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39484921730
|
class LCG(object):
def __init__(self, a, c, x):
self.a = a
self.c = c
self.x = x
self.mod = 1<<32
def next(self):
self.x *= self.a
self.x += self.c
self.x %= self.mod
return self.x >> 31
def keystream(k):
g1 = LCG(71664525, 1013904223, int(k[:3], 16))
g2 = LCG(22695477, 1010101011, int(k[3:6], 16))
g3 = LCG(11015245, 987654321, int(k[6:9], 16))
gens = g1, g2, g3
for _ in range(20):
for g in gens:
g.next()
while 1:
b = 0
for i in range(8):
bits = [g.next() for g in gens]
val = sum(bits) // 2
b |= val << i
yield b
def encrypt(data, key='000000000'):
return ''.join(chr(ord(x) ^ y) for x,y in zip(data, keystream(key)))
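# Example usage (hypothetical 9-hex-digit key and plaintext, not from the original challenge):
# ciphertext = encrypt("attack at dawn", "0ab1cd2ef")
# plaintext = encrypt(ciphertext, "0ab1cd2ef") # XOR keystream: encrypting twice with the same key decrypts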
|
Himanshukr000/CTF-DOCKERS
|
greyhatCrypto/challenges/correlation/correlation.py
|
correlation.py
|
py
| 840 |
python
|
en
|
code
| 25 |
github-code
|
6
|
3828632722
|
from collections import Counter
from data import SentimentDataset
import json
class Preprocessor:
def __init__(self, max_vocab):
self.max_vocab = max_vocab
self.vocab2enc = None
self.enc2vocab = None
self.max_len = 0
def fit(self, dataset):
words = list()
for i in range(len(dataset)):
item = dataset.getitem(i)
if item[1] is not None:
words.extend(item[1].split(' '))
vocab = Counter(words).most_common(self.max_vocab)
self.vocab2enc = {word: i+1 for i, (word, _) in enumerate(vocab)}
self.enc2vocab = {i+1: word for i, (word, _) in enumerate(vocab)}
self.enc2vocab[0] = ''
self.enc2vocab[self.max_vocab+2] = 'OOV'
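# Index 0 is reserved for padding and index max_vocab+2 for out-of-vocabulary words,
# matching the defaults used in encode() and pad() below.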
def encode(self, dataset):
encoded = list()
for i in range(len(dataset)):
item = dataset.getitem(i)
encoding = list()
for word in item[1].split(' '):
encoding.append(self.vocab2enc.get(word, self.max_vocab+2))
encoded.append(list([item[0], encoding]))
return SentimentDataset(data=encoded, data_from_file=False)
def decode(self, dataset):
encoded = list()
for i in range(len(dataset)):
item = dataset.getitem(i)
encoding = list()
for word in item[1]:
encoding.append(self.enc2vocab.get(word, 'NAN'))
encoded.append(list([item[0], ' '.join(encoding).strip()]))
return SentimentDataset(data=encoded, data_from_file=False)
def pad(self, dataset):
for i in range(len(dataset)):
item = dataset.getitem(i)
if len(item[1]) > self.max_len:
self.max_len = len(item[1])
padded_data = list()
for i in range(len(dataset)):
item = dataset.getitem(i)
item[1].extend([0 for _ in range(self.max_len-len(item[1]))]) # list.extend returns None, so pad in place first
padded_data.append([item[0], item[1]])
return SentimentDataset(data=padded_data, data_from_file=False)
def transform(self, dataset):
dataset = self.encode(dataset)
self.pad(dataset)
return dataset
def fit_transform(self, dataset):
self.fit(dataset)
return self.transform(dataset)
def save(self, file_name='./prepro_vocab.json'):
with open(file_name, 'w') as f_out:
json.dump({
'vocab2enc': self.vocab2enc,
'enc2vocab': self.enc2vocab,
'max_len': self.max_len,
}, f_out)
def load(self, file_name='./prepro_vocab.json'):
with open(file_name, 'r') as f_in:
data = json.load(f_in)
self.vocab2enc = data['vocab2enc']
self.enc2vocab = data['enc2vocab']
self.max_len = data['max_len']
# if __name__ == '__main__':
# p = Preprocessor(500)
# s = SentimentDataset(data='./train.csv')
# p.fit(s)
#
# s_e = p.encode(s)
# p.pad(s_e)
# s_d = p.decode(s_e)
#
# idx = 2
# print(s.getitem(idx))
# print(s_e.getitem(idx))
# print(s_d.getitem(idx))
|
yuvalofek/NLP
|
DeepLearning/prepro.py
|
prepro.py
|
py
| 3,090 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5024232926
|
from tfidf import *
zipfilename = sys.argv[1]
summarizefile = sys.argv[2]
def main():
files_dic = load_corpus(sys.argv[1])
tfidf = compute_tfidf(files_dic)
score_lst = summarize(tfidf, files_dic[sys.argv[2]], 20)
for i in range(len(score_lst)):
print(score_lst[i][0] + " " + str(round(score_lst[i][1], 3)))
if __name__ == "__main__":
main()
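# Example invocation (hypothetical file names): python summarize.py corpus.zip article.txt
# where the second argument is the name of a document inside the corpus zip.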
|
wangyuhsin/tfidf-text-summarization
|
summarize.py
|
summarize.py
|
py
| 374 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30728799570
|
import sys
# infile = open("a.in")
infile = sys.stdin
n = int(infile.readline())
for i in range(n):
c, s = [int(x) for x in infile.readline().split()]
k = s // c
m = s - k * c
q = 0
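# Split s into c parts as evenly as possible: m parts of size (k+1) and (c - m)
# parts of size k, then sum the squares of the part sizes.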
q+= ((k+1)**2) * m + (k**2)* (c-m)
print(q)
|
mdaw323/alg
|
codeforces/edu-77/a.py
|
a.py
|
py
| 260 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32617432475
|
# find maximum element in the list
def find_max(a_list):
max = None
if a_list == []:
return 0
for num in a_list:
if max == None:
max = num
elif num > max:
max = num
return max
user_input = input('Please input a list: ')
if user_input != '[]':
user_input_adjust = user_input.strip('[]').replace(' ', '').split(',') # remove '[]' and spaces from the string and convert it into a list
a_list = []
for i in user_input_adjust:
a_list.append(int(i))
else:
a_list = []
print('The list you input is', a_list)
print('The maximum number in the list is', find_max(a_list))
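# Example session (hypothetical input): entering "[3, 7, 2]" prints 7 as the maximum;
# entering "[]" prints 0, since find_max returns 0 for an empty list.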
|
TsungYuanHsu/find_max
|
find_max.py
|
find_max.py
|
py
| 653 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12634573747
|
import pygame
import sys
from snake_object import Snake
from setting import Setting
from apple import Apple
def press_keydown(snake, event):
if event.key == pygame.K_LEFT:
snake.go_x = -1
snake.go_y = 0
elif event.key == pygame.K_RIGHT:
snake.go_x = 1
snake.go_y = 0
elif event.key == pygame.K_UP:
snake.go_x = 0
snake.go_y = -1
elif event.key == pygame.K_DOWN:
snake.go_x = 0
snake.go_y = 1
elif event.key == pygame.K_q:
sys.exit()
elif event.key == pygame.K_SPACE:
snake.del_tail=False
def show_apple(sn_setting):
for apple in sn_setting.mass_apple:
apple.blitme()
def add_apple(sn_setting,screen):
sn_setting.new_apple += 1
if sn_setting.new_apple == sn_setting.speed_apple:
sn_setting.new_apple = 0
new_apple = Apple(sn_setting, screen)
sn_setting.mass_apple.append(new_apple)
def play_game():
pygame.init()
sn_setting = Setting()
screen = pygame.display.set_mode((sn_setting.screen_width, sn_setting.screen_heigth))
pygame.display.set_caption("Snake ))")
snake = Snake(sn_setting)
new_apple = Apple(sn_setting, screen)
sn_setting.mass_apple.append(new_apple)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
press_keydown(snake, event)
screen.fill(sn_setting.bg_color)
snake.update_snake()
add_apple(sn_setting,screen)
snake.test_tail()
snake.eat_apple()
snake.flip_tail(screen)
snake.flip_head(screen)
show_apple(sn_setting)
pygame.display.flip()
play_game()
|
BarSerhey/Python
|
snake/snake.py
|
snake.py
|
py
| 1,770 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30938963931
|
import pandas as pd
from config import CONFIG_DICT
import networkx as nx
import matplotlib.pyplot as plt
import random
import cv2
import numpy as np
import math
from MplCanvas import MplCanvas
import Equirec2Perspec as E2P
new_min = -100
new_max = 100
lat_min = 40.42524817 ## this is for first 500 in pittsburg need to generalize this for all places
lat_max = 40.44497464
long_min = -80.03468568
long_max = -79.98858816
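# The scaling helpers below min-max normalize raw latitude/longitude into the
# [new_min, new_max] range used for plotting:
# scaled = (x - min) / (max - min) * (new_max - new_min) + new_min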
class DataHelper():
def __init__(self):
self.G = nx.Graph()
self.end_points = []
# Create canvas for plot rendering:
self.canvas = MplCanvas(self, width=5, height=4, dpi=100)
self.bev_graph = MplCanvas(self, width=5, height=4, dpi=100)
self.xdata = []
self.ydata = []
# Set of visited locations.
self.visited_locations = set()
self.read_routes(CONFIG_DICT["csv_file"])
def new_lat_scale(self, x):
normalized_new_val = ((x - lat_min) / (lat_max - lat_min) * (new_max - new_min)) + new_min
return normalized_new_val
def new_long_scale(self, x):
normalized_new_val = ((x - long_min) / (long_max - long_min) * (new_max - new_min)) + new_min
return normalized_new_val
def image_name(self, pos):
return self.image_names[pos]
def panorama_split(self, theta, image, resolution = (720, 1080)):
print("\n")
print("Showing image: ", image + ".jpg")
print("Current viewing angle: ", theta)
print("\n")
equ = E2P.Equirectangular("data/Pittsburgh/"+image+".jpg") # Load equirectangular image
#
# FOV unit is degree
# theta is z-axis angle(right direction is positive, left direction is negative)
# phi is y-axis angle(up direction positive, down direction negative)
# height and width is output image dimension
#
img = equ.GetPerspective(90, -theta, 0, *resolution) # Specify parameters(FOV, theta, phi, height, width)
return img
def build_graph(self, data):
i = 0
prev_route = -1
prev_pos = (-1,-1)
prev_name = "abc"
x = []
y = []
print("\n")
print("Building graph. \n")
for index, row in data.iterrows():
route_no = row['route_no']
lat = row['latitude']
long = row['longitude']
scaled_lat = self.new_lat_scale(lat)
scaled_long = self.new_long_scale(long)
image_name = row['image_name']
current_node = (scaled_lat,scaled_long)
if (image_name not in self.G):
self.G.add_node((scaled_lat,scaled_long),image_name = image_name, latitude = lat, longitude=long, yaw =row['yaw'], ) # saves a node as image name
#print((scaled_lat,scaled_long), image_name)
if route_no == prev_route and prev_pos != (-1,-1): # Why is prev_pos only compared to one integer while it is a tuple?
# So the edges only connect nodes of the same route?
#print("adding edge")
x.append(scaled_lat) # What are these x, y lists for? Look at the elif below.
y.append(scaled_long)
self.G.add_edge(prev_pos, current_node) # need to add something like a direction on this edge like right left straight...
elif route_no != prev_route: ## going to a new route
plt.plot(x, y, '-o', linewidth=1, markersize=2) # x and y used to plot the previous route.
if(prev_pos!= (-1,-1)): # end_points mean the end point of each route.
self.end_points.append(prev_pos)
x=[]
y=[]
prev_pos = current_node
prev_route = route_no
#save the graph as a json?
self.image_names = nx.get_node_attributes(self.G, 'image_name')
plt.savefig("filename5.png")
def read_routes(self, csv_file = "data/pittsburg_500.csv" ):
data = pd.read_csv(csv_file, keep_default_na=False)
self.build_graph(data)
def find_adjacent(self, pos, action = "next"):
#print("Finding next position based on action/direction and position \n")
if action == "next":
#print(self.end_points)
#print("Current node: \n", pos)
#print("Adjacent nodes and edges: \n", (self.G.adj[pos])) # Finding adjacent nodes and edges to pos node.
#adj_nodes_list = [keys for keys,value in self.G.adj[pos].items()]
#print("Coordinate of the adjacent nodes: \n", adj_nodes_list)
return list([keys for keys,value in self.G[pos].items()]) # Return list of keys of nodes adjacent to node with key pos.
def reset(self):
# reset the position of the agent
print("Resets the position to a start \n")
#i = random.choice(range(len(self.end_points)))
i = 1000
return self.end_points[i]
def sample_location(self):
location_list = [keys for keys, values in self.G.nodes.items()]
location = random.choice(location_list)
return location
# Function to find the distances to adjacent nodes.
# This is used to check to see if the node found is actually the nearest node.
def find_distances(self, pos, adj_nodes_list):
distance_list = []
for node in adj_nodes_list:
distance_list.append(np.linalg.norm(np.array(pos) - np.array(node)))
return distance_list
def fix_angle(self, angle):
if angle < 0:
angle = 360 + angle
elif angle >= 360:
angle = angle - 360
return angle
# This function should also convert from triangular to absolute angle?
def get_angle(self, curr, prev):
if (curr[0] - prev[0]) != 0 :
slope = (curr[1] - prev[1]) / (curr[0] - prev[0])
else:
return 0
#print(slope)
angle = math.degrees(math.atan(slope))
# The direction is from the second to the fourth quadrant.
# So angle is negative.
if (curr[0] > prev[0] and curr[1] < prev[1]):
angle = 360 + angle
# Direction: from fourth to second quadrant.
# Angle is negative.
elif (curr[0] < prev[0] and curr[1] > prev[1]):
angle = 180 + angle
# Direction: from first to third.
# Angle is positive.
elif (curr[0] < prev[0] and curr[1] < prev[1]):
angle = 180 + angle
#angle = fix_angle(angle)
return angle
# Convention we are using: in the angle_range, the first value always represent the right boundary of the cone.
# While the second value represent the left boundary of the cone.
# This function return 1 if angle is in range, 0 if not.
def angle_in_range(self, angle, angle_range):
# This is the case where the fix_angle adjusted the angle to be only from 0 to 360.
if angle_range[0] > angle_range[1]:
if angle < angle_range[1] or angle > angle_range[0]:
return 1
else:
return 0
# This is the regular case:
else:
if angle > angle_range[0] and angle < angle_range[1]:
return 1
else:
return 0
# Note on the process of finding the nearest node:
# My speculation:
# 1. Find the current angle cone of the agent, which is where the agent is looking in absolute angles.
# 2. Then get the adjacent nodes' absolute angles. Note: adjacent is defined as being connected by an edge.
# 3. Filter the adjacent nodes by fov cone using the abosolute angles.
# 4. Move to the nearest node within the cone.
# Note: Process of graph creation: Dynamic_plot.py called build_graph. Build_graph go through every line
# of the csv file then add all the nodes. What about edges?
def find_nearest(self, curr_pos, prev_pos,curr_angle, direction):
print("\n")
# This is the view angle.
center_angle = self.fix_angle(curr_angle)
# The search angle is based on positions. Independent of viewing angle.
search_angle = self.get_angle(curr_pos, prev_pos)
left_bound = self.fix_angle(search_angle+90)
right_bound = self.fix_angle(search_angle-90)
# Check the current view angle against the search angle range:
if direction == "forward":
if self.angle_in_range(curr_angle, (right_bound, left_bound)) :
search_angle_range = (right_bound , left_bound)
else:
search_angle_range = (left_bound, right_bound)
elif direction == "backward":
if self.angle_in_range(curr_angle, (right_bound, left_bound)) :
search_angle_range = (left_bound , right_bound)
else:
search_angle_range = (right_bound, left_bound)
print("Current center angle: ", center_angle)
next_pos_list = self.find_adjacent(curr_pos) # This is a list of adjacent nodes to node agents_pos_1
decision = curr_pos
image_name = self.image_name(curr_pos)
print("Current node: ", curr_pos)
print("Possible next nodes: ", len(next_pos_list))
print("List of adjacent nodes: ", next_pos_list)
print("Distances from current node to the adjacent nodes: ", self.find_distances(curr_pos, next_pos_list))
print("Search angle range: ", search_angle_range)
filtered_pos_list = []
# Filtering the adjacent nodes by angle cone.
for pos in next_pos_list:
# Getting the angle between the current nodes and all adjacent nodes.
angle = self.get_angle(pos, curr_pos)
print("Angle from ", curr_pos,"to ", pos, "is ", angle)
if self.angle_in_range(angle, search_angle_range):
filtered_pos_list.append(pos)
print("Filtered adjacent nodes list: ", filtered_pos_list)
if (len(filtered_pos_list) == 0):
print("\n")
print("No nodes found. Agent standing still.")
else:
filtered_distances_list = self.find_distances(curr_pos, filtered_pos_list)
print("Distances from current node to the filtered adjacent nodes: ", filtered_distances_list)
print("Index of min value: ", (min(filtered_distances_list)))
decision = filtered_pos_list[filtered_distances_list.index(min(filtered_distances_list))]
print("The nearest node within the angle cone is: " , decision)
print("Found a node within the angle cone. New node position: ", decision)
image_name = self.image_name(decision)
print("Showing new node's image: ", image_name)
self.panorama_split(center_angle, image_name)
return decision, image_name, center_angle
# The next two functions help in the render method.
def draw_angle_cone(self, curr_pos, angle, color = 'm'):
x = curr_pos[0]
y = curr_pos[1]
angle_range = [self.fix_angle(angle - 45), self.fix_angle(angle + 45)]
line_length = 50
for angle in angle_range:
end_y = y + line_length * math.sin(math.radians(angle))
end_x = x + line_length * math.cos(math.radians(angle))
self.canvas.axes.plot([x, end_x], [y, end_y], ':' + color)
self.canvas.draw()
def update_plot(self, curr_pos, prev_pos, curr_angle):
y_prev = prev_pos[1]
x_prev = prev_pos[0]
y = curr_pos[1]
x = curr_pos[0]
self.ydata = self.ydata + [y]
self.xdata = self.xdata + [x]
self.canvas.axes.cla() # Clear the canvas.
self.canvas.axes.plot(self.xdata, self.ydata, '-ob')
adj_nodes_list = [keys for keys, values in self.G.adj[(x,y)].items()]
num_adj_nodes = len(adj_nodes_list)
adj_nodes_list = np.array( [[x_coor, y_coor] for x_coor, y_coor in adj_nodes_list])
x_pos_list = np.array([x] * num_adj_nodes)
y_pos_list = np.array([y] * num_adj_nodes)
self.canvas.axes.plot([x_pos_list,adj_nodes_list[:,0]], [y_pos_list, adj_nodes_list[:,1]], '--or')
self.canvas.axes.plot(x, y, color = 'green', marker = 'o')
self.canvas.axes.text(x, y, '({}, {})'.format(x, y))
self.canvas.axes.plot(x_prev, y_prev, color = 'purple', marker = 'o')
# Current view of the agent.
self.draw_angle_cone(curr_pos, curr_angle, color = 'g')
self.canvas.axes.set_xlim([new_min, new_max])
self.canvas.axes.set_ylim([new_min, new_max])
self.canvas.draw()
self.canvas.show()
def bird_eye_view(self, curr_pos, radius):
adjacent_pos_list = self.find_adjacent(curr_pos)
distances_list = self.find_distances(curr_pos, adjacent_pos_list)
in_range_nodes_list = []
for distance, pos in zip(distances_list, adjacent_pos_list):
if distance <= radius:
in_range_nodes_list.append(pos)
if len(in_range_nodes_list) == 0:
print("No nodes found in range for bird eye's view.")
return None
bird_eye_graph = self.G.subgraph(in_range_nodes_list)
return bird_eye_graph
def draw_bird_eye_view(self, curr_pos, radius, graph):
#self.bev_graph.axes.cla()
nodes_list = [keys for keys, values in graph.nodes().items()]
num_nodes = len(nodes_list)
nodes_list = np.array([[x_coor, y_coor] for x_coor, y_coor in nodes_list])
x = curr_pos[0]
y = curr_pos[1]
x_pos_list = np.array([x] * num_nodes)
y_pos_list = np.array([y] * num_nodes)
self.bev_graph.axes.plot([x_pos_list,nodes_list[:,0]], [y_pos_list, nodes_list[:,1]], '--or')
self.bev_graph.axes.plot(x, y, color = 'green', marker = 'o')
self.bev_graph.axes.text(x, y, '({}, {})'.format(x, y))
self.bev_graph.axes.set_xlim([new_min, new_max])
self.bev_graph.axes.set_ylim([new_min, new_max])
# Draw a circle to see if the BEV is done correctly.
draw_circle= plt.Circle(curr_pos, radius = radius, fill = False)
self.bev_graph.axes.add_artist(draw_circle)
self.bev_graph.draw()
self.bev_graph.show()
def distance_to_goal(self, curr_pos, goal):
return np.linalg.norm(np.array(curr_pos) - np.array(goal))
|
klekkala/usc_navigate
|
src/data_helper.py
|
data_helper.py
|
py
| 14,529 |
python
|
en
|
code
| 2 |
github-code
|
6
|
24276436490
|
#!/usr/bin/env python3
import os
import requests
from fmiopendata.wfs import download_stored_query
import datetime
import json
import pandas as pd
def give_prediction(stationId, month, day, hour):
place = "latlon=60.3267,24.95675" # default place is Veromiehenkylä
stationShortCode = ''
weather_area = 0
station_name = ''
with open("utils/stations.json", 'r', encoding="utf-8") as f:
stations = json.load(f)
for station in stations:
if station['stationShortCodeCategory'] == stationId:
stationShortCode = station['stationShortCode']
station_name = station['stationName']
break
if stationShortCode != '':
with open("utils/weather_stations.json", 'r', encoding="utf-8") as f:
weather_stations = json.load(f)
weather_area = weather_stations.get(stationShortCode)
with open("utils/weather-locations.json", 'r', encoding="utf-8") as f:
places = json.load(f)
place = places.get(str(weather_area))['latlon']
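# At this point `place` holds the "latlon=<lat>,<lon>" query string for the weather area
# mapped to the requested train station (or the default location if no mapping was found).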
now = datetime.datetime.utcnow()
end_time = datetime.datetime(now.year, month, day, hour)
start_time = end_time - datetime.timedelta(hours=1)
# Convert times to properly formatted strings
start_time = start_time.isoformat(timespec="seconds") + "Z"
# -> 2020-07-07T12:00:00Z
end_time = end_time.isoformat(timespec="seconds") + "Z"
# -> 2020-07-07T13:00:00Z
obs = download_stored_query("fmi::forecast::hirlam::surface::point::multipointcoverage",
args=["starttime=" + start_time, "endtime=" + end_time, place])
print(obs.location_metadata)
print(obs.data)
time_of_day = max(obs.data.keys())
print('timestamp', time_of_day)
weather_station = list(obs.data[time_of_day].keys())[0]
print(weather_station)
data = obs.data[time_of_day][weather_station]
rain = data['Precipitation amount 1 hour']['value']
celcius = data['Air temperature']['value']
windGustSpeed = data['Wind gust']['value']
windSpeed = data['Wind speed']['value']
weather = [rain, celcius, windGustSpeed, windSpeed, station_name, weather_station, time_of_day, weather_area]
return weather
|
millalin/Train-predictor
|
application/helpers/weather_for_model.py
|
weather_for_model.py
|
py
| 2,255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17419658730
|
import json
import os
from pyui.geom import Size
from .base import View
DATA_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data"
)
class Text(View):
def __init__(self, text, **options):
super().__init__(**options)
self.text = str(text)
self._min_cache = None
self._width_cache = None
self._size_cache = None
self._line_cache = None
def reuse(self, other):
return self.text == other.text and self._font == other._font
@property
def _font(self):
return self.env.theme.font(self.env.font, self.env.font_size)
def minimum_size(self):
if self._min_cache is None:
self._min_cache = self._font.measure(self.text)
return self._min_cache
def content_size(self, available: Size):
if self._size_cache is None or self._width_cache != available.w:
self._width_cache = available.w
self._size_cache = self._font.measure(self.text, width=available.w)
self._line_cache = None
return self._size_cache
def draw(self, renderer, rect):
super().draw(renderer, rect)
self._line_cache = self._font.draw(
renderer, self.text, rect, self.env.blended_color, lines=self._line_cache
)
class Icon(Text):
data = json.load(open(os.path.join(DATA_DIR, "icons.json")))
def __init__(self, name, style=None, size=None):
info = Icon.data["icons"][name]
fontname = "{}/{}.otf".format(Icon.data["font"], style or info["sets"][0])
super().__init__(info["text"])
self.font(fontname, size)
|
dcwatson/pyui
|
pyui/views/text.py
|
text.py
|
py
| 1,649 |
python
|
en
|
code
| 21 |
github-code
|
6
|
30481034470
|
import Tkinter
import tkFont
top = Tkinter.Tk()
top.geometry('250x150')
top.title("IHRD")
top.config(background='black')
window1 = Tkinter.Label(top,bg='black')
window1.pack()
def def1():
execfile("graph.py")
button1=Tkinter.Button(top,text="District frequency",command=def1 ,bg='#981212',fg='white',font=tkFont.Font(weight='bold',family='Meera'))
button1.pack()
Tkinter.mainloop()
|
varshiniramesh/IHDS-analysis
|
widget.py
|
widget.py
|
py
| 385 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27855486756
|
import numpy as np
import torch
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
import util.util as util
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import sys
# TODO (1) remove CycleLoss?
# We have feat_loss_ArecA, which computes the feature loss between A and recreated A.
# It's kind of redundant with CycleLoss, which computes the pixelwise L1 loss between those two.
# But then again, we might want to keep both, so that we keep both similar
# in terms of "style" and "pixelwise resemblence".
# TODO use MSELoss of Pytorch?
def mse_loss(input, target):
return torch.sum((input - target)**2) / input.data.nelement()
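# Note: torch.nn.functional.mse_loss(input, target) with the default reduction='mean' computes the
# same elementwise mean-squared error and could replace this helper, as the TODO above suggests.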
def printnorm(self, input, output):
# input is a tuple of packed inputs
# output is a Variable. output.data is the Tensor we are interested
print('')
print('Inside ' + self.__class__.__name__ + ' forward')
# print('input: ', type(input))
# print('input[0]: ', type(input[0]))
# print('output: ', type(output))
# print('')
# print('input size:', input[0].size())
# print('output size:', output.data.size())
print('output norm:', output.data.norm())
def printgradnorm(self, grad_input, grad_output):
print('Inside ' + self.__class__.__name__ + ' backward')
#print('Inside class:' + self.__class__.__name__)
# print('grad_input: ', type(grad_input))
# print('grad_input[0]: ', type(grad_input[0]))
# print('grad_output: ', type(grad_output))
# print('grad_output[0]: ', type(grad_output[0]))
# print('')
# print('grad_input size:', grad_input[0].size())
# print('grad_output size:', grad_output[0].size())
print('grad_input norm:', grad_input[0].data.norm())
class CycleWGANModel(BaseModel):
def name(self):
return 'CycleWGANModel'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.one = self.Tensor([1])
self.mone = self.one * -1
if opt.which_model_netD != 'dcgan':
self.ones = torch.ones(1, 19, 19) # FIXME compute size from input and architecture of netD
self.ones = self.ones.type(new_type=self.Tensor)
# init G related losses to 0 to print in the first few iterations
self.loss_G_A = Variable(self.Tensor([0]))
self.loss_G_B = Variable(self.Tensor([0]))
self.loss_idt_A = Variable(self.Tensor([0]))
self.loss_idt_B = Variable(self.Tensor([0]))
self.loss_cycle_A = Variable(self.Tensor([0]))
self.loss_cycle_B = Variable(self.Tensor([0]))
self.feat_loss_AfB = Variable(self.Tensor([0]))
self.feat_loss_BfA = Variable(self.Tensor([0]))
self.feat_loss_fArecB = Variable(self.Tensor([0]))
self.feat_loss_fBrecA = Variable(self.Tensor([0]))
self.feat_loss_ArecA = Variable(self.Tensor([0]))
self.feat_loss_BrecB = Variable(self.Tensor([0]))
self.feat_loss = Variable(self.Tensor([0]))
#self.disp_sumGA = self.loss_G_A.clone() + self.loss_cycle_A.clone()
#self.disp_sumGB = self.loss_G_B.clone() + self.loss_cycle_B.clone()
self.loss_sumGA = Variable(self.Tensor([0]))
self.loss_sumGB = Variable(self.Tensor([0]))
self.rec_A = None
self.rec_B = None
# ----------------------------------------------------------------
nb = opt.batchSize
size = opt.fineSize
self.input_A = self.Tensor(nb, opt.input_nc, size, size)
self.input_B = self.Tensor(nb, opt.output_nc, size, size)
# load/define networks
# The naming conversion is different from those used in the paper
# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,
opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,
opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, self.gpu_ids)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
opt.which_model_netD,
opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
opt.which_model_netD,
opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
if (self.opt.lambda_feat > 0):
self.netFeat = networks.define_feature_network(opt.which_model_feat, self.gpu_ids)
#self.netD_A.model[11].register_forward_hook(printnorm)
#self.netD_A.model[11].register_backward_hook(printgradnorm)
#self.netG_A.register_forward_hook(printnorm)
#self.netG_A.register_backward_hook(printgradnorm)
#self.netD_B.model[11].register_forward_hook(printnorm)
#self.netD_B.model[11].register_backward_hook(printgradnorm)
if not self.isTrain or opt.continue_train:
which_epoch = opt.which_epoch
self.load_network(self.netG_A, 'G_A', which_epoch)
self.load_network(self.netG_B, 'G_B', which_epoch)
if self.isTrain:
self.load_network(self.netD_A, 'D_A', which_epoch)
self.load_network(self.netD_B, 'D_B', which_epoch)
if self.isTrain:
self.old_lr = opt.lr
# create pools of fake images, if pool size > 0
if opt.pool_size > 0:
self.fake_A_pool = ImagePool(opt.pool_size)
self.fake_B_pool = ImagePool(opt.pool_size)
self.fake_A = None
self.fake_B = None
else:
self.fake_A_pool = None
self.fake_B_pool = None
# define loss functions
# Note: use WGAN loss for cases where we use D_A or D_B, otherwise use default loss functions
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
self.criterionFeat = mse_loss
self.criterionWGAN = networks.WGANLoss()
# initialize optimizers
if opt.adam:
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
else:
# in https://github.com/martinarjovsky/WassersteinGAN, only LR is provided to RMSProp
self.optimizer_G = torch.optim.RMSprop(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr)
self.optimizer_D_A = torch.optim.RMSprop(self.netD_A.parameters(), lr=opt.lr)
self.optimizer_D_B = torch.optim.RMSprop(self.netD_B.parameters(), lr=opt.lr)
# manage lambdas for perceptual loss
if (self.opt.lambda_feat > 0):
print("sets all lambda_feat* to lambda_feat")
self.opt.lambda_feat_AfB = self.opt.lambda_feat
self.opt.lambda_feat_BfA = self.opt.lambda_feat
self.opt.lambda_feat_fArecB = self.opt.lambda_feat
self.opt.lambda_feat_fBrecA = self.opt.lambda_feat
self.opt.lambda_feat_ArecA = self.opt.lambda_feat
self.opt.lambda_feat_BrecB = self.opt.lambda_feat
print('---------- Networks initialized -------------')
networks.print_network(self.netG_A)
networks.print_network(self.netG_B)
if self.isTrain:
networks.print_network(self.netD_A)
networks.print_network(self.netD_B)
print('-----------------------------------------------')
def set_input(self, input):
AtoB = self.opt.which_direction == 'AtoB'
input_A = input['A' if AtoB else 'B']
input_B = input['B' if AtoB else 'A']
self.input_A.resize_(input_A.size()).copy_(input_A)
self.input_B.resize_(input_B.size()).copy_(input_B)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
self.real_A = Variable(self.input_A)
self.real_B = Variable(self.input_B)
def test(self):
self.real_A = Variable(self.input_A, volatile=True)
self.fake_B = self.netG_A.forward(self.real_A)
self.rec_A = self.netG_B.forward(self.fake_B)
self.real_B = Variable(self.input_B, volatile=True)
self.fake_A = self.netG_B.forward(self.real_B)
self.rec_B = self.netG_A.forward(self.fake_A)
def freeze_discriminators(self, freeze=True):
for p in self.netD_A.parameters():
p.requires_grad = not freeze
for p in self.netD_B.parameters():
p.requires_grad = not freeze
def freeze_generators(self, freeze=True):
for p in self.netG_A.parameters():
p.requires_grad = not freeze
for p in self.netG_B.parameters():
p.requires_grad = not freeze
# get image paths
def get_image_paths(self):
return self.image_paths
def backward_D_basic(self, netD, real, fake):
# compute outputs for real and fake images
outD_real = netD(real)
outD_fake = netD(fake.detach())
#self.disp_outD_real = outD_real.mean()
#self.disp_outD_fake = outD_fake.mean()
wloss = self.criterionWGAN(fake=outD_fake, real=outD_real)
# import pdb; pdb.set_trace()
if self.opt.which_model_netD == 'dcgan':
wloss.backward()
else:
wloss.backward(self.ones)
return outD_real.mean(), outD_fake.mean()
def backward_D_A(self):
#if self.fake_B_pool is None or self.fake_B is None:
self.fake_B = self.netG_A(self.real_A.detach()) # generate a fake image
self.loss_D_A_real, self.loss_D_A_fake = self.backward_D_basic(self.netD_A, self.real_B, self.fake_B)
#self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, self.fake_B)
#else:
# fake_B = self.fake_B_pool.query(self.fake_B)
# self.loss_D_A_real, self.loss_D_A_fake = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
#self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
#if self.fake_A_pool is None or self.fake_A is None:
self.fake_A = self.netG_B(self.real_B.detach())
self.loss_D_B_real, self.loss_D_B_fake = self.backward_D_basic(self.netD_B, self.real_A, self.fake_A)
#self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, self.fake_A)
#else:
# fake_A = self.fake_A_pool.query(self.fake_A)
# self.loss_D_B_real, self.loss_D_B_fake = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
#self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
# Identity loss
if self.opt.identity > 0:
# G_A should be identity if real_B is fed.
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * self.opt.lambda_B * self.opt.identity
#self.loss_idt_A = self.criterionWGAN(fake=self.idt_A, real=self.real_B) * lambda_B * lambda_idt
#self.loss_idt_A = self.criterionWGAN(fake=self.idt_A, real=self.real_B) * lambda_idt
# G_B should be identity if real_A is fed.
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * self.opt.lambda_A * self.opt.identity
#self.loss_idt_B = self.criterionWGAN(fake=self.idt_B, real=self.real_A) * lambda_A * lambda_idt
#self.loss_idt_B = self.criterionWGAN(fake=self.idt_B, real=self.real_A) * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# Freeze discriminators so that they are NOT updated
self.freeze_discriminators(True)
# WGAN loss
# D_A(G_A(A))
self.fake_B = self.netG_A(self.real_A)
outD_A_fake = self.netD_A(self.fake_B)
self.loss_G_A = self.criterionWGAN(real=outD_A_fake) # we give as it was a true sample
#self.loss_G_A.backward(retain_graph=True)
# FIXME: Api docs says not to use retain_graph and this can be done efficiently in other ways
# D_B(G_B(B))
self.fake_A = self.netG_B(self.real_B)
outD_B_fake = self.netD_B(self.fake_A)
self.loss_G_B = self.criterionWGAN(real=outD_B_fake)
#self.loss_G_B.backward(retain_graph=True)
# Forward cycle loss
if self.opt.lambda_A != 0:
self.rec_A = self.netG_B(self.fake_B)
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * self.opt.lambda_A
#self.loss_cycle_A = self.criterionWGAN(fake=self.netD_B(self.rec_A), real=self.netD_B(self.real_A)) * lambda_A
else:
self.loss_cycle_A = 0
# Backward cycle loss
if self.opt.lambda_B != 0:
self.rec_B = self.netG_A(self.fake_A)
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * self.opt.lambda_B
#self.loss_cycle_B = self.criterionWGAN(fake=self.netD_A(self.rec_B), real=self.netD_A(self.real_B)) * lambda_B
else:
self.loss_cycle_B = 0
self.loss_sumGA = self.loss_G_A + self.loss_cycle_A + self.loss_idt_A
self.loss_sumGB = self.loss_G_B + self.loss_cycle_B + self.loss_idt_B
#self.disp_sumGA = self.loss_G_A.clone() + self.loss_cycle_A.clone()
#self.disp_sumGB = self.loss_G_B.clone() + self.loss_cycle_B.clone()
# Perceptual losses:
if (self.opt.lambda_feat_AfB > 0 and self.opt.lambda_feat_BfA > 0):
self.feat_loss_AfB = self.criterionFeat(self.netFeat(self.real_A), self.netFeat(self.fake_B)) * self.opt.lambda_feat_AfB
self.feat_loss_BfA = self.criterionFeat(self.netFeat(self.real_B), self.netFeat(self.fake_A)) * self.opt.lambda_feat_BfA
# self.feat_loss_AfB = self.criterionWGAN(real=self.netFeat(self.real_A), fake=self.netFeat(self.fake_B)) * lambda_feat_AfB
# self.feat_loss_BfA = self.criterionWGAN(real=self.netFeat(self.real_B), fake=self.netFeat(self.fake_A)) * lambda_feat_BfA
else:
self.feat_loss_AfB = 0
self.feat_loss_BfA = 0
if (self.opt.lambda_feat_fArecB > 0 and self.opt.lambda_feat_fBrecA > 0):
self.feat_loss_fArecB = self.criterionFeat(self.netFeat(self.fake_A), self.netFeat(self.rec_B)) * self.opt.lambda_feat_fArecB
self.feat_loss_fBrecA = self.criterionFeat(self.netFeat(self.fake_B), self.netFeat(self.rec_A)) * self.opt.lambda_feat_fBrecA
# self.feat_loss_fArecB = self.criterionWGAN(self.netFeat(self.fake_A), self.netFeat(self.rec_B)) * lambda_feat_fArecB
# self.feat_loss_fBrecA = self.criterionWGAN(self.netFeat(self.fake_B), self.netFeat(self.rec_A)) * lambda_feat_fBrecA
else:
self.feat_loss_fArecB = 0
self.feat_loss_fBrecA = 0
if (self.opt.lambda_feat_ArecA > 0 and self.opt.lambda_feat_BrecB > 0):
self.feat_loss_ArecA = self.criterionFeat(self.netFeat(self.real_A), self.netFeat(self.rec_A)) * self.opt.lambda_feat_ArecA
self.feat_loss_BrecB = self.criterionFeat(self.netFeat(self.real_B), self.netFeat(self.rec_B)) * self.opt.lambda_feat_BrecB
# self.feat_loss_ArecA = self.criterionWGAN(real=self.netFeat(self.real_A), fake=self.netFeat(self.rec_A)) * lambda_feat_ArecA
# self.feat_loss_BrecB = self.criterionWGAN(real=self.netFeat(self.real_B), fake=self.netFeat(self.rec_B)) * lambda_feat_BrecB
else:
self.feat_loss_ArecA = 0
self.feat_loss_BrecB = 0
# first sum the feat losses
self.feat_loss = self.feat_loss_AfB + self.feat_loss_BfA + self.feat_loss_fArecB \
+ self.feat_loss_fBrecA + self.feat_loss_ArecA + self.feat_loss_BrecB
haveFeatLoss = not (type(self.feat_loss) is int)
# then backprop OTHER losses, with or without retaining the graph
if self.opt.which_model_netD == 'dcgan':
self.loss_sumGA.backward(retain_graph=haveFeatLoss)
self.loss_sumGB.backward(retain_graph=haveFeatLoss)
else:
self.loss_sumGA.backward(self.ones, retain_graph=haveFeatLoss)
self.loss_sumGB.backward(self.ones, retain_graph=haveFeatLoss)
if haveFeatLoss:
self.feat_loss.backward()
# Unfreeze them for the next iteration of optimize_parameters_D()
self.freeze_discriminators(False)
def optimize_parameters_D(self):
# call self.forward outside!
# D_A
self.optimizer_D_A.zero_grad()
self.backward_D_A() # generates the first fake_B for the iteration
self.optimizer_D_A.step()
# D_B
self.optimizer_D_B.zero_grad()
self.backward_D_B() # generates fake_A for the iteration
self.optimizer_D_B.step()
# clip weights for both discriminators
for p in self.netD_A.parameters():
p.data.clamp_(self.opt.clip_lower, self.opt.clip_upper)
for p in self.netD_B.parameters():
p.data.clamp_(self.opt.clip_lower, self.opt.clip_upper)
def optimize_parameters_G(self):
# call self.forward outside!
# G_A and G_B
self.optimizer_G.zero_grad()
self.backward_G()
# print("GRADS A : First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_A.model.model[0].weight.grad.mean(), self.netG_A.model.model[3].weight.grad.mean()))
# print("GRADS B : First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_B.model.model[0].weight.grad.mean(), self.netG_B.model.model[3].weight.grad.mean()))
self.optimizer_G.step()
# print("WEIGHTS A: First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_A.model.model[0].weight.mean(), self.netG_A.model.model[3].weight.mean()))
# print("WEIGHTS B: First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_B.model.model[0].weight.mean(), self.netG_B.model.model[3].weight.mean()))
#print("mean(G_A_LastConvLayer): %.9f mean(G_B_LastConvLayer): %.9f" % (self.netG_A.model[26].weight.mean(), self.netG_B.model[26].weight.mean()))
def get_current_errors(self):
#D_A = self.loss_D_A.data[0]
#D_B = self.loss_D_B.data[0]
G_A = self.loss_G_A.data[0]
G_B = self.loss_G_B.data[0]
if self.opt.which_model_netD != 'dcgan' and type(G_A) == self.Tensor:
G_A = G_A.mean()
G_B = G_B.mean()
D_A_real, D_A_fake = self.loss_D_A_real.data[0], self.loss_D_A_fake.data[0]
D_B_real, D_B_fake = self.loss_D_B_real.data[0], self.loss_D_B_fake.data[0]
#sumGA = self.loss_sumGA.data[0]
#sumGB = self.loss_sumGB.data[0]
#currentErrors = OrderedDict([('D_A', D_A), ('D_B', D_B), ('sumGA', sumGA), ('sumGB', sumGB)])
currentErrors = OrderedDict([('D_A_real', D_A_real), ('D_A_fake', D_A_fake), ('D_B_real', D_B_real), ('D_B_fake', D_B_fake),
('G_A', G_A), ('G_B', G_B)])
if not isinstance(self.loss_cycle_A, int): # cycle loss A was actually computed (avoids 'is not 0' on a Variable)
Cyc_A = self.loss_cycle_A.data[0]
# this is relevant only if we use WGAN for CycleLoss
if self.opt.which_model_netD != 'dcgan' and type(Cyc_A) == self.Tensor:
Cyc_A = Cyc_A.mean()
currentErrors['Cyc_A'] = Cyc_A
if self.loss_cycle_B is not 0:
Cyc_B = self.loss_cycle_B.data[0]
# this is relevant only we use WGAN for CycleLoss
if self.opt.which_model_netD != 'dcgan' and type(Cyc_B) == self.Tensor:
Cyc_B = Cyc_B.mean()
currentErrors['Cyc_B'] = Cyc_B
if self.opt.identity > 0.0:
idt_A = self.loss_idt_A.data[0]
idt_B = self.loss_idt_B.data[0]
currentErrors['idt_A'] = idt_A
currentErrors['idt_B'] = idt_B
# feat_AfB = self.feat_loss_AfB.data[0]
# feat_BfA = self.feat_loss_BfA.data[0]
#feat_fArecB = self.feat_loss_fArecB.data[0]
#feat_fBrecA = self.feat_loss_fBrecA.data[0]
#feat_ArecA = self.feat_loss_ArecA.data[0]
#feat_BrecB = self.feat_loss_BrecB.data[0]
        # feat_loss is a plain int (0) when no feature loss is configured, so guard before touching .data
        if not isinstance(self.feat_loss, int):
            featL = self.feat_loss.data[0]
            if featL > 0.0:
                currentErrors['featL'] = featL
return currentErrors
def get_current_visuals(self):
real_A = util.tensor2im(self.real_A.data)
fake_B = util.tensor2im(self.fake_B.data)
real_B = util.tensor2im(self.real_B.data)
fake_A = util.tensor2im(self.fake_A.data)
currentVisuals = OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B), ('fake_A', fake_A)])
if self.rec_A is not None:
rec_A = util.tensor2im(self.rec_A.data)
currentVisuals['rec_A'] = rec_A
if self.rec_B is not None:
rec_B = util.tensor2im(self.rec_B.data)
currentVisuals['rec_B'] = rec_B
if self.opt.identity > 0.0:
idt_A = util.tensor2im(self.idt_A.data)
idt_B = util.tensor2im(self.idt_B.data)
currentVisuals['idt_B'] = idt_B
currentVisuals['idt_A'] = idt_A
return currentVisuals
def save(self, label):
self.save_network(self.netG_A, 'G_A', label, self.gpu_ids)
self.save_network(self.netD_A, 'D_A', label, self.gpu_ids)
self.save_network(self.netG_B, 'G_B', label, self.gpu_ids)
self.save_network(self.netD_B, 'D_B', label, self.gpu_ids)
def update_learning_rate(self):
lrd = self.opt.lr / self.opt.nepoch_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_D_A.param_groups:
param_group['lr'] = lr
for param_group in self.optimizer_D_B.param_groups:
param_group['lr'] = lr
for param_group in self.optimizer_G.param_groups:
param_group['lr'] = lr
print('update learning rate: %f -> %f' % (self.old_lr, lr))
self.old_lr = lr
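# Added note (not part of the original file): update_learning_rate() is a plain linear decay.
# As a sketch, assuming opt.lr = 2e-4 and opt.nepoch_decay = 100 (assumed values, not taken
# from this repo's options), each call lowers the rate by lrd = 2e-4 / 100 = 2e-6, so the
# learning rate reaches 0 after 100 decay epochs.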
|
amandaullvin/CycleGAN_destreak_MRI
|
models/cycle_wgan_model.py
|
cycle_wgan_model.py
|
py
| 23,263 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72334950909
|
def maxUnits(boxTypes, truckSize):
    # greedy: fill the truck with the box types that carry the most units per box first
    sorted_list = sorted(boxTypes, key=lambda x: x[1], reverse=True)
count_units = 0
for box_nums, unit in sorted_list:
if truckSize <= box_nums:
count_units += truckSize * unit
break
count_units += box_nums * unit
truckSize -= box_nums
return count_units
print(maxUnits([[5,10], [2,5], [4,7], [3,9]], 10))
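# Added worked example (sketch): for the call above the greedy order is
# [[5,10], [3,9], [4,7], [2,5]]; the truck takes 5*10 + 3*9 + 2*7 = 91 units,
# so the script is expected to print 91.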
|
ChitraVKumar/My-Algorithms-for-Leetcode
|
Max Units in a truck.py
|
Max Units in a truck.py
|
py
| 432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6246195016
|
def set_font():
import platform
import matplotlib.font_manager as fm
system_name = platform.system()
if system_name == 'Windows':
return 'Malgun Gothic'
elif system_name == 'Darwin':
return 'AppleGothic'
elif system_name == 'Linux':
        # note: the standard directory is /usr/share/fonts (plural); return the family
        # name string so every branch returns the same type
        path = '/usr/share/fonts/truetype/nanum/NanumMyeongjo.ttf'
        font_name = fm.FontProperties(fname=path, size=12).get_name()
        return font_name
if __name__ == "__main__":
set_font()
    # usage: plt.rc('font', family=set_font())
    # to display the minus sign correctly: plt.rcParams['axes.unicode_minus'] = False
|
cheesecat47/ML_DL_Jan2020
|
Jan16/matplot_font.py
|
matplot_font.py
|
py
| 588 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74918974586
|
from dataclasses import dataclass
from collections import defaultdict
import math
@dataclass
class Punto:
x: int
y: int
owner: int
def distancia(p1,p2):
return abs(p1.x-p2.x)+abs(p1.y-p2.y)
def day6(file):
with open(file) as f:
lines = f.readlines()
puntosControl = list()
xlist = list()
ylist = list()
for i,line in enumerate(lines):
l = line.split(",")
xlist.append(int(l[0]))
ylist.append(int(l[1]))
puntosControl.append(Punto(x=int(l[0]),y=int(l[1]),owner=i))
esquinaSuperiorIzquierda = Punto(x=min(xlist),y=min(ylist),owner=-1)
esquinaInferiorDerecha = Punto(x=max(xlist),y=max(ylist),owner=-1)
    # Points outside the esquinaSuperiorIzquierda x esquinaInferiorDerecha rectangle are excluded automatically
excluidos = set()
world = defaultdict(lambda: -1)
#world_total = set()
world_total = 0
for i in range(esquinaSuperiorIzquierda.x-1,esquinaInferiorDerecha.x+2):
for j in range(esquinaSuperiorIzquierda.y-1,esquinaInferiorDerecha.y+2):
punto = Punto(x=i,y=j,owner=-1)
distanciaMin = math.inf
total = 0
for p in puntosControl:
if distancia(punto,p) == distanciaMin:
punto.owner = -1
if distancia(punto,p) < distanciaMin:
distanciaMin = distancia(punto,p)
punto.owner = p.owner
total += distancia(punto,p)
if total < 10000:
world_total += 1
#world_total.add((i,j))
if i == esquinaSuperiorIzquierda.x-1 or i == esquinaInferiorDerecha.x+1 or j == esquinaSuperiorIzquierda.y-1 or j == esquinaInferiorDerecha.y+1:
excluidos.add(punto.owner)
if punto.owner > -1:
world[(i,j)] = punto.owner
conteo = defaultdict(lambda: 0)
for p in world:
if not world[p] in excluidos:
conteo[world[p]] += 1
max_finite_area = max(conteo.values())
region_size = world_total
return max_finite_area,region_size
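# Added usage sketch (the input file name is an assumption, not from the original repo):
# if __name__ == "__main__":
#     # returns (largest finite area, size of the region with total distance < 10000)
#     print(day6("input.txt"))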
|
aarroyoc/advent-of-code-2018
|
python/day6/day6.py
|
day6.py
|
py
| 2,152 |
python
|
es
|
code
| 1 |
github-code
|
6
|
10036308173
|
"""
Input pipeline (tf.dataset and input_fn) for GQN datasets.
Adapted from the implementation provided here:
https://github.com/deepmind/gqn-datasets/blob/acca9db6d9aa7cfa4c41ded45ccb96fecc9b272e/data_reader.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import tensorflow as tf
# ---------- ad-hoc data structures ----------
DatasetInfo = collections.namedtuple(
'DatasetInfo',
['basepath', 'train_size', 'test_size', 'frame_size', 'sequence_size']
)
Context = collections.namedtuple('Context', ['frames', 'cameras'])
Query = collections.namedtuple('Query', ['context', 'query_camera'])
TaskData = collections.namedtuple('TaskData', ['query', 'target'])
# ---------- dataset constants ----------
_DATASETS = dict(
jaco=DatasetInfo(
basepath='jaco',
train_size=3600,
test_size=400,
frame_size=64,
sequence_size=11),
mazes=DatasetInfo(
basepath='mazes',
train_size=1080,
test_size=120,
frame_size=84,
sequence_size=300),
rooms_free_camera_with_object_rotations=DatasetInfo(
basepath='rooms_free_camera_with_object_rotations',
train_size=2034,
test_size=226,
frame_size=128,
sequence_size=10),
rooms_ring_camera=DatasetInfo(
basepath='rooms_ring_camera',
train_size=2160,
test_size=240,
frame_size=64,
sequence_size=10),
# super-small subset of rooms_ring for debugging purposes
rooms_ring_camera_debug=DatasetInfo(
basepath='rooms_ring_camera_debug',
train_size=1,
test_size=1,
frame_size=64,
sequence_size=10),
rooms_free_camera_no_object_rotations=DatasetInfo(
basepath='rooms_free_camera_no_object_rotations',
train_size=2160,
test_size=240,
frame_size=64,
sequence_size=10),
shepard_metzler_5_parts=DatasetInfo(
basepath='shepard_metzler_5_parts',
train_size=900,
test_size=100,
frame_size=64,
sequence_size=15),
shepard_metzler_7_parts=DatasetInfo(
basepath='shepard_metzler_7_parts',
train_size=900,
test_size=100,
frame_size=64,
sequence_size=15)
)
_NUM_CHANNELS = 3
_NUM_RAW_CAMERA_PARAMS = 5
_MODES = ('train', 'test')
# ---------- helper functions ----------
def _convert_frame_data(jpeg_data):
decoded_frames = tf.image.decode_jpeg(jpeg_data)
return tf.image.convert_image_dtype(decoded_frames, dtype=tf.float32)
def _get_dataset_files(dataset_info, mode, root):
"""Generates lists of files for a given dataset version."""
basepath = dataset_info.basepath
base = os.path.join(root, basepath, mode)
if mode == 'train':
num_files = dataset_info.train_size
else:
num_files = dataset_info.test_size
length = len(str(num_files))
template = '{:0%d}-of-{:0%d}.tfrecord' % (length, length)
record_paths = [ # indexing runs from 1 to n
os.path.join(base, template.format(i, num_files))
for i in range(1, num_files + 1)]
return record_paths
def _get_randomized_indices(context_size, dataset_info, seed):
"""Generates randomized indices into a sequence of a specific length."""
example_size = context_size + 1
indices = tf.range(0, dataset_info.sequence_size)
indices = tf.random_shuffle(indices, seed=seed)
indices = tf.slice(indices, begin=[0], size=[example_size])
return indices
def _parse(raw_data, dataset_info):
"""Parses raw data from the tfrecord."""
feature_map = {
'frames': tf.FixedLenFeature(
shape=dataset_info.sequence_size, dtype=tf.string),
'cameras': tf.FixedLenFeature(
shape=[dataset_info.sequence_size * _NUM_RAW_CAMERA_PARAMS],
dtype=tf.float32)
}
# example = tf.parse_example(raw_data, feature_map)
example = tf.parse_single_example(raw_data, feature_map)
return example
def _preprocess(example, indices, context_size, custom_frame_size, dataset_info):
"""Preprocesses the parsed data."""
# frames
example_size = context_size + 1
frames = tf.concat(example['frames'], axis=0)
frames = tf.gather(frames, indices, axis=0)
frames = tf.map_fn(
_convert_frame_data, tf.reshape(frames, [-1]),
dtype=tf.float32, back_prop=False)
dataset_image_dimensions = tuple(
[dataset_info.frame_size] * 2 + [_NUM_CHANNELS])
frames = tf.reshape(
frames, (example_size, ) + dataset_image_dimensions)
if (custom_frame_size and
custom_frame_size != dataset_info.frame_size):
frames = tf.reshape(frames, dataset_image_dimensions)
new_frame_dimensions = (custom_frame_size,) * 2 + (_NUM_CHANNELS,)
frames = tf.image.resize_bilinear(
frames, new_frame_dimensions[:2], align_corners=True)
frames = tf.reshape(
frames, (-1, example_size) + new_frame_dimensions)
# cameras
raw_pose_params = example['cameras']
raw_pose_params = tf.reshape(
raw_pose_params,
[dataset_info.sequence_size, _NUM_RAW_CAMERA_PARAMS])
raw_pose_params = tf.gather(raw_pose_params, indices, axis=0)
pos = raw_pose_params[:, 0:3]
yaw = raw_pose_params[:, 3:4]
pitch = raw_pose_params[:, 4:5]
cameras = tf.concat(
[pos, tf.sin(yaw), tf.cos(yaw), tf.sin(pitch), tf.cos(pitch)], axis=-1)
# return preprocessed tuple
preprocessed_example = {}
preprocessed_example['frames'] = frames
preprocessed_example['cameras'] = cameras
return preprocessed_example
def _prepare(preprocessed_example):
"""Prepares the preprocessed data into (feature, label) tuples."""
# decompose
frames = preprocessed_example['frames']
cameras = preprocessed_example['cameras']
# split data
context_frames = frames[:-1]
context_cameras = cameras[:-1]
target = frames[-1]
query_camera = cameras[-1]
context = Context(cameras=context_cameras, frames=context_frames)
query = Query(context=context, query_camera=query_camera)
data = TaskData(query=query, target=target)
return data, data.target
# ---------- input_fn ----------
def gqn_input_fn(
dataset_name,
root,
mode,
context_size,
batch_size=1,
num_epochs=1,
# optionally reshape frames
custom_frame_size=None,
# queue params
num_threads=4,
buffer_size=256,
seed=None):
"""
Creates a tf.data.Dataset based op that returns data.
Args:
dataset_name: string, one of ['jaco', 'mazes', 'rooms_ring_camera',
'rooms_free_camera_no_object_rotations',
'rooms_free_camera_with_object_rotations', 'shepard_metzler_5_parts',
'shepard_metzler_7_parts'].
root: string, path to the root folder of the data.
mode: one of tf.estimator.ModeKeys.
context_size: integer, number of views to be used to assemble the context.
batch_size: (optional) batch size, defaults to 1.
num_epochs: (optional) number of times to go through the dataset,
defaults to 1.
custom_frame_size: (optional) integer, required size of the returned
frames, defaults to None.
num_threads: (optional) integer, number of threads used to read and parse
the record files, defaults to 4.
buffer_size: (optional) integer, capacity of the underlying prefetch or
shuffle buffer, defaults to 256.
seed: (optional) integer, seed for the random number generators used in
the dataset.
Returns:
tf.data.dataset yielding tuples of the form (features, labels)
shapes:
features.query.context.cameras: [N, K, 7]
features.query.context.frames: [N, K, H, W, 3]
features.query.query_camera: [N, 7]
features.target (same as labels): [N, H, W, 3]
Raises:
ValueError: if the required version does not exist; if the required mode
is not supported; if the requested context_size is bigger than the
maximum supported for the given dataset version.
"""
# map estimator mode key to dataset internal mode strings
if mode == tf.estimator.ModeKeys.TRAIN:
str_mode = 'train'
else:
str_mode = 'test'
# check validity of requested dataset and split
if dataset_name not in _DATASETS:
raise ValueError('Unrecognized dataset {} requested. Available datasets '
'are {}'.format(dataset_name, _DATASETS.keys()))
if str_mode not in _MODES:
raise ValueError('Unsupported mode {} requested. Supported modes '
'are {}'.format(str_mode, _MODES))
# retrieve dataset parameters
dataset_info = _DATASETS[dataset_name]
if context_size >= dataset_info.sequence_size:
raise ValueError(
'Maximum support context size for dataset {} is {}, but '
'was {}.'.format(
dataset_name, dataset_info.sequence_size-1, context_size))
# collect the paths to all tfrecord files
record_paths = _get_dataset_files(dataset_info, str_mode, root)
# create TFRecordDataset
dataset = tf.data.TFRecordDataset(
filenames=record_paths, num_parallel_reads=num_threads)
# parse the data from tfrecords
dataset = dataset.map(
lambda raw_data: _parse(raw_data, dataset_info),
num_parallel_calls=num_threads)
# preprocess into context and target
indices = _get_randomized_indices(context_size, dataset_info, seed)
dataset = dataset.map(
lambda example: _preprocess(example, indices, context_size, custom_frame_size, dataset_info),
num_parallel_calls=num_threads)
# parse into tuple expected by tf.estimator input_fn
dataset = dataset.map(_prepare, num_parallel_calls=num_threads)
# shuffle data
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(buffer_size=(buffer_size * batch_size), seed=seed)
# set up batching
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size)
return dataset
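# Added usage sketch (paths and parameter values are assumptions, not from the original repo):
# dataset = gqn_input_fn(
#     dataset_name='rooms_ring_camera', root='/tmp/gqn-dataset',
#     mode=tf.estimator.ModeKeys.TRAIN, context_size=5, batch_size=36)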
|
ogroth/tf-gqn
|
data_provider/gqn_provider.py
|
gqn_provider.py
|
py
| 9,919 |
python
|
en
|
code
| 189 |
github-code
|
6
|
15419264437
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 9 14:52:17 2022
@author: elie
"""
#################### SCALING ####################
import os
os.chdir('/home/elie/Documents/Tecnico/2ND_PERIOD/DS/PROJECT/CODE/')
from pandas import read_csv, DataFrame, concat, unique
from pandas.plotting import register_matplotlib_converters
from matplotlib.pyplot import subplots, show, figure, savefig
from ds_charts import get_variable_types, multiple_line_chart, plot_evaluation_results
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from numpy import ndarray
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
register_matplotlib_converters()
############################# GET DATA ###############################
path = "Data/"
file = path+"air_quality_tabular_without_na"
#file = path+"nyc_car_crash_without_na"
filename = file+".csv"
data = read_csv(filename, na_values='', parse_dates=True, infer_datetime_format=True)
# SPLIT DATA BASED ON TYPE OF VARIABLE
variable_types = get_variable_types(data)
numeric_vars = variable_types['Numeric']
symbolic_vars = variable_types['Symbolic']
boolean_vars = variable_types['Binary']
df_nr = data[numeric_vars]
df_sb = data[symbolic_vars]
df_bool = data[boolean_vars]
# remove symbolic values before computation : date, time, id
data = data.drop(symbolic_vars, axis=1)
############################# NORMALIZATION ###############################
# Z SCORE
transf = StandardScaler(with_mean=True, with_std=True, copy=True).fit(df_nr)
tmp = DataFrame(transf.transform(df_nr), index=data.index, columns= numeric_vars)
norm_data_zscore = concat([tmp, df_sb, df_bool], axis=1)
#norm_data_zscore.to_csv(f'{file}_scaled_zscore.csv', index=False)
print(norm_data_zscore.describe())
norm_data_zscore = norm_data_zscore.drop(symbolic_vars, axis=1)
# MIN MAX SCALER
transf = MinMaxScaler(feature_range=(0, 1), copy=True).fit(df_nr)
tmp = DataFrame(transf.transform(df_nr), index=data.index, columns= numeric_vars)
norm_data_minmax = concat([tmp, df_sb, df_bool], axis=1)
#norm_data_minmax.to_csv(f'{file}_scaled_minmax.csv', index=False)
print(norm_data_minmax.describe())
norm_data_minmax = norm_data_minmax.drop(symbolic_vars, axis=1)
# fig, axs = subplots(1, 3, figsize=(20,10),squeeze=False)
# axs[0, 0].set_title('Original data')
# data.boxplot(ax=axs[0, 0])
# axs[0, 1].set_title('Z-score normalization')
# norm_data_zscore.boxplot(ax=axs[0, 1])
# axs[0, 2].set_title('MinMax normalization')
# norm_data_minmax.boxplot(ax=axs[0, 2])
# show()
################################## KNN ##################################
nb_rows = norm_data_zscore.shape[0]
sample_pct = 0.33
norm_data_zscore = norm_data_zscore.sample(n=round(nb_rows*sample_pct), random_state=1)
norm_data_minmax = norm_data_minmax.sample(n=round(nb_rows*sample_pct), random_state=1)
potential_cols = ["ALARM"]
nvalues = [10, 15, 20, 25, 30, 35, 40, 45]
dist = ['manhattan', 'euclidean', 'chebyshev']
values = {}
best = (0, '')
last_best = 0
for c in potential_cols:
target = c
y = norm_data_zscore.pop(target).values
X_train, X_test, y_train, y_test = train_test_split(norm_data_zscore, y, test_size=0.33, random_state=42)
labels = unique(y_train)
labels.sort()
for d in dist:
yvalues = []
for n in nvalues:
knn = KNeighborsClassifier(n_neighbors=n, metric=d)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
current_accuracy = accuracy_score(y_test, y_pred)
yvalues.append(current_accuracy)
print("For column : "+str(c)+" Accuracy with n = "+str(n)+ " and distance : "+str(d)+" => "+str(current_accuracy))
# if yvalues[-1] > last_best:
# best = (n, d)
# last_best = yvalues[-1]
values[d] = yvalues
figure()
multiple_line_chart(nvalues, values, title='KNN variants', xlabel='n', ylabel='accuracy', percentage=True)
#savefig('images/{file_tag}_knn_study.png')
show()
# figure()
# multiple_line_chart(nvalues, values, title='KNN variants', xlabel='n', ylabel='accuracy', percentage=True)
# #savefig('images/{file_tag}_knn_study.png')
# show()
# print('Best results with %d neighbors and %s'%(best[0], best[1]))
# ###### CONFUSION MATRIX #######
clf = KNeighborsClassifier(n_neighbors=6, metric="manhattan")
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
plot_evaluation_results(labels, y_train, train_pred, y_test, test_pred)
#savefig('images/{file_tag}_knn_best.png')
show()
############################### TEST ###################################"
# # GET THE PREPROCESSED DATA WITHOUT NA
# path = "Data/"
# #file = path+"air_quality_tabular_without_na"
# file = path+"NYC_collisions_tabular"
# filename = file+".csv"
# data_bis = read_csv(filename, na_values='', parse_dates=True, infer_datetime_format=True)
# for col in data_bis.columns:
# print("COL : "+str(col))
# print("/////////////////")
# print(data_bis[col].value_counts())
# print("\n\n\n")
|
elielevy3/DATA_SCIENCE_TECNICO
|
lab_3.py
|
lab_3.py
|
py
| 5,182 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4930731104
|
# -*- coding: utf-8 -*-
"""
Description: Deep Patch Learning Model
Author: wondervictor
"""
import math
import torch
import torch.nn as nn
import numpy as np
import layers
import basenet
class PatchHeadNetwork(nn.Module):
def __init__(self, use_cuda, num_classes, use_relation=False):
super(PatchHeadNetwork, self).__init__()
self.roi_align = layers.ROIAlign(out_size=7, spatial_scale=0.0625)
self.fc = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.LeakyReLU(negative_slope=0.02, inplace=True),
nn.Dropout(0.5),
nn.Linear(4096, 4096),
nn.LeakyReLU(negative_slope=0.02, inplace=True),
nn.Dropout(0.5)
)
self.patch_encoder = nn.Linear(4096, 256)
self.cls_score1 = nn.Linear(256*8, num_classes)
self.cls_score2 = nn.Linear(4096, num_classes)
self.patch_pooling = layers.MaxPatchPooling(use_cuda)
self.spm_pooling = layers.SPMMaxPooling()
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, 0.01)
m.bias.data.uniform_(-0.5, 0.5)
def forward(self, features, shapes, rois):
# N denotes the num_rois, B denotes the batchsize
# features: B*C*H*W
# rois: N*5
# shapes: B*2
batch_size = features.size()[0]
roi_output = self.roi_align(features, rois)
# N*512*7*7
num_rois = rois.size()[0]
output_batch_id = np.zeros(num_rois, dtype=np.int32)
        for i in range(num_rois):  # range (not xrange) so this also runs under Python 3
batch_id = int(rois[i].data[0])
output_batch_id[i] = batch_id
patch_features = roi_output.view(-1, 512*7*7)
# patch_features: N * (512*7*7)
patch_features = self.fc(patch_features)
# patch_features: N * 4096
encoded_features = self.patch_encoder(patch_features)
spm_features = self.spm_pooling(encoded_features, shapes, rois)
spm_features = spm_features.view(batch_size, 256 * 8)
cls_score1 = self.cls_score1(spm_features)
cls_score2_features = self.cls_score2(patch_features)
cls_score2 = self.patch_pooling(cls_score2_features, batch_size, output_batch_id)
det_scores = cls_score2_features
return cls_score1, cls_score2, det_scores
class DPL(nn.Module):
def __init__(self, use_cuda, num_classes=20, enable_base_grad=False, base='vgg', pretrained=True, use_relation=False):
super(DPL, self).__init__()
if base == 'vgg':
self.cnn = basenet.VGG16()
elif base == 'resnet50':
self.cnn = basenet.ResNet50(pretrained=True)
elif base == 'resnet34':
self.cnn = basenet.ResNet34(pretrained=True)
if not enable_base_grad:
print("Not Enable Base Model Gradient")
for param in self.cnn.parameters():
                param.requires_grad = False
self.use_cuda = use_cuda
self.head_network = PatchHeadNetwork(use_cuda=use_cuda, num_classes=num_classes, use_relation=use_relation)
def freeze_bn(self):
for layer in self.cnn.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.eval()
def forward(self, images, shapes, rois):
features = self.cnn(images)
cls_score1, cls_score2, det_scores = self.head_network(features, shapes, rois)
return cls_score1, cls_score2, det_scores
|
wondervictor/dpl.pytorch
|
models/dpl.py
|
dpl.py
|
py
| 3,460 |
python
|
en
|
code
| 7 |
github-code
|
6
|
19072626002
|
import requests
import json
import logging
import sys
import pandas as pd
from pathlib import Path
from requests_html import HTMLSession
def parse_and_download_files(servicetags_public, msftpublic_ips, officeworldwide_ips):
# URL for Feeds
azurepublic = "https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519"
msftpublic = "https://www.microsoft.com/en-us/download/confirmation.aspx?id=53602"
officeworldwide = "https://endpoints.office.com/endpoints/worldwide?clientrequestid=b10c5ed1-bad1-445f-b386-b919946339a7"
session = HTMLSession()
azure_resp = session.get(azurepublic)
links = azure_resp.html.links
json_link = [link for link in links if ".json" in link]
msft_resp = session.get(msftpublic)
links = msft_resp.html.links
csv_link = [link for link in links if ".csv" in link]
# Download JSON link
azure_json = requests.get(json_link[0])
msft_csv = requests.get(csv_link[0], stream=True)
o365_json = requests.get(officeworldwide, stream=True)
# Write output file
logging.info("Writing ServiceTags_Public.json file to output directory")
with open(servicetags_public, "w") as f:
json.dump(azure_json.json(), f, indent=4)
logging.info("Writing MSFT_PublicIPs.csv file to output directory")
with open(msftpublic_ips, "wb") as f:
for line in msft_csv.iter_lines():
f.write(line + "\n".encode())
logging.info("Writing OfficeWorldWide-IPRanges.json file to output directory")
with open(officeworldwide_ips, "w") as f:
json.dump(o365_json.json(), f, indent=4)
def main():
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format="%(asctime)s:%(levelname)s: %(message)s",
)
curr_path = Path.cwd()
out_path = curr_path / "master" / "PublicFeeds" / "MSFTIPRanges"
try:
out_path.mkdir(parents=True, exist_ok=False)
except FileExistsError:
logging.info("Folder is already present")
else:
logging.info(f"{out_path} Folder was created")
servicetags_public = (
curr_path
/ "master"
/ "PublicFeeds"
/ "MSFTIPRanges"
/ "ServiceTags_Public.json"
)
msftpublic_ips = (
curr_path / "master" / "PublicFeeds" / "MSFTIPRanges" / "MSFT_PublicIPs.csv"
)
officeworldwide_ips = (
curr_path
/ "master"
/ "PublicFeeds"
/ "MSFTIPRanges"
/ "OfficeWorldWide-IPRanges.json"
)
logging.info(f"Writing json file to output directory : {servicetags_public}")
logging.info(f"Writing csv file to output directory : {msftpublic_ips}")
logging.info(f"Writing json file to output directory : {officeworldwide_ips}")
parse_and_download_files(servicetags_public, msftpublic_ips, officeworldwide_ips)
if __name__ == "__main__":
main()
|
microsoft/mstic
|
.script/get-msftpubliip-servicetags.py
|
get-msftpubliip-servicetags.py
|
py
| 2,873 |
python
|
en
|
code
| 87 |
github-code
|
6
|
34004541732
|
from heapq import heappush, heappop
n = int(input())
card = []
for _ in range(n):
    heappush(card, int(input()))
# the bundles are kept in a min-heap as they are read in
answer = 0
# repeatedly merge the two smallest bundles; each merge costs the size of the merged bundle
while len(card) > 1:
    a = heappop(card)
    b = heappop(card)
    heappush(card, a + b)
    answer += a + b
print(answer)
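# Added worked example (sketch): with the merging loop above, bundles 10, 20 and 40 merge as
# 10+20 = 30 (cost 30) and then 30+40 = 70 (cost 70), so the printed answer is 100.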
|
jinman-kim/algo
|
1715.py
|
1715.py
|
py
| 343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4414802622
|
import turtle
#Best practice is to place functions after an import
def square(len):
for i in range(4):
turtle.forward(len)
turtle.left(90)
def rectangle(width, height):
for i in range(2):
turtle.forward(width)
turtle.left(90)
turtle.forward(height)
turtle.left(90)
turtle.shape("turtle") # optional
turtle.speed(0) # optional
turtle.color("green")
turtle.pensize(3)
# square(90)
turtle.color("blue")
turtle.penup()
turtle.forward(100)
turtle.pendown()
#square(90)
# rectangle(90, 140)
# pretty picture
for i in range(19):
square(90)
turtle.right(19)
rectangle(90, 140)
#screen to exit
turtle.Screen().exitonclick()
|
Wizard-Fingers/fun_times
|
main.py
|
main.py
|
py
| 656 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2523111157
|
"""Bubble sort implementation: running time == O(n^2)"""
import copy
class BubbleSort:
def __init__(self, items):
self.items = items
self.n = len(items)
def run(self):
A = copy.deepcopy(self.items)
for i in range(self.n-1):
for j in range(self.n-1, i, -1):
if A[j] < A[j-1]:
temp = A[j]
A[j] = A[j-1]
A[j-1] = temp
return A
if __name__=="__main__":
num_list = [5, 2, 4, 6, 1, 3, 3, 8, 9, 11, 7, 0, 1, 4, 10, 12]
bubble_sort = BubbleSort(items=num_list)
print(bubble_sort.run())
|
rb05751/Algorithms
|
Python/algorithms/sorting/bubble_sort.py
|
bubble_sort.py
|
py
| 630 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10192615887
|
import numpy as np
import matplotlib.pyplot as plt
# data I/O
filename = 'dataset.txt'
file = open(filename, 'r')
data = file.read()
# use set() to count the vacab size
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print ('data has %d characters, %d unique.' % (data_size, vocab_size))
# dictionary to convert char to idx, idx to char
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
# hyperparameters
hidden_size = 50 # size of hidden layer of neurons
seq_length = 50 # number of steps to unroll the RNN for
learning_rate = 1e-1
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
xs, hs, ys, ps = {}, {}, {}, {}
## record each hidden state of
hs[-1] = np.copy(hprev)
loss = 0
# forward pass for each training data point
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size, 1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1
## hidden state, using previous hidden state hs[t-1]
hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)
## unnormalized log probabilities for next chars
ys[t] = np.dot(Why, hs[t]) + by
## probabilities for next chars, softmax
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))
## softmax (cross-entropy loss)
loss += -np.log(ps[t][targets[t], 0])
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
## compute derivative of error w.r.t the output probabilites
## dE/dy[j] = y[j] - t[j]
dy = np.copy(ps[t])
dy[targets[t]] -= 1 # backprop into y
        ## the output layer does not use an activation function, so there is no need to compute the derivative of the error with regard to the net input
## of output layer.
## then, we could directly compute the derivative of error with regard to the weight between hidden layer and output layer.
## dE/dy[j]*dy[j]/dWhy[j,k] = dE/dy[j] * h[k]
dWhy += np.dot(dy, hs[t].T)
dby += dy
## backprop into h
## derivative of error with regard to the output of hidden layer
## derivative of H, come from output layer y and also come from H(t+1), the next time H
dh = np.dot(Why.T, dy) + dhnext
## backprop through tanh nonlinearity
## derivative of error with regard to the input of hidden layer
## dtanh(x)/dx = 1 - tanh(x) * tanh(x)
dhraw = (1 - hs[t] * hs[t]) * dh
dbh += dhraw
## derivative of the error with regard to the weight between input layer and hidden layer
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
## derivative of the error with regard to H(t+1)
## or derivative of the error of H(t-1) with regard to H(t)
dhnext = np.dot(Whh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
## given a hidden RNN state, and a input char id, predict the coming n chars
def sample(h, seed_ix, n):
## a one-hot vector
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in range(n):
## self.h = np.tanh(np.dot(self.W_hh, self.h) + np.dot(self.W_xh, x))
h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
## y = np.dot(self.W_hy, self.h)
y = np.dot(Why, h) + by
## softmax
p = np.exp(y) / np.sum(np.exp(y))
## sample according to probability distribution
ix = np.random.choice(range(vocab_size), p=p.ravel())
## update input x
## use the new sampled result as last input, then predict next char again.
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
## iterator counter
n = 0
## data pointer
p = 0
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
l=[]
sl=[]
## main loop
while True:
# prepare inputs (we're sweeping from left to right in steps seq_length long)
if p + seq_length + 1 >= len(data) or n == 0:
# reset RNN memory
## hprev is the hiddden state of RNN
hprev = np.zeros((hidden_size, 1))
# go from start of data
p = 0
inputs = [char_to_ix[ch] for ch in data[p : p + seq_length]]
targets = [char_to_ix[ch] for ch in data[p + 1 : p + seq_length + 1]]
# sample from the model now and then
if n % 1000 == 0:
sample_ix = sample(hprev, inputs[0], 5000)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print ('---- sample -----')
print ('----\n %s \n----' % (txt, ))
# forward seq_length characters through the net and fetch gradient
loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
## author using Adagrad(a kind of gradient descent)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 1000 == 0:
print ('iter %d, loss: %f' % (n, smooth_loss)) # print progress
l.append(n)
sl.append(smooth_loss)
for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
[dWxh, dWhh, dWhy, dbh, dby],
[mWxh, mWhh, mWhy, mbh, mby]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8)
p += seq_length # move data pointer
n += 1 # iteration counter
# gradient checking
from random import uniform
def gradCheck(inputs, target, hprev):
global Wxh, Whh, Why, bh, by
num_checks, delta = 10, 1e-5
_, dWxh, dWhh, dWhy, dbh, dby, _ = lossFun(inputs, targets, hprev)
for param,dparam,name in zip([Wxh, Whh, Why, bh, by], [dWxh, dWhh, dWhy, dbh, dby], ['Wxh', 'Whh', 'Why', 'bh', 'by']):
s0 = dparam.shape
s1 = param.shape
    if s0 != s1:
      print("Error: dims don't match: %s and %s." % (s0, s1))
print (name)
for i in range(num_checks):
ri = int(uniform(0,param.size))
old_val = param.flat[ri]
param.flat[ri] = old_val + delta
cg0, _, _, _, _, _, _ = lossFun(inputs, targets, hprev)
param.flat[ri] = old_val - delta
cg1, _, _, _, _, _, _ = lossFun(inputs, targets, hprev)
param.flat[ri] = old_val
grad_analytic = dparam.flat[ri]
grad_numerical = (cg0 - cg1) / ( 2 * delta )
rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
print ('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))
gradCheck(inputs,targets,hprev)
# note: the training loop above never exits on its own, so this plot is only reached if the loop is broken/interrupted
plt.plot(l, sl, label='Epoch Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('EPOCH _LOSS PLOT')
plt.legend()
plt.show()
|
shesikiran03/science-fiction-writer
|
Task-1 .py
|
Task-1 .py
|
py
| 7,092 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15598838122
|
import torch
from torch import nn
import torch.nn.functional as F
from ssd.utils_ssd.box_utils import match, log_sum_exp
# evaluate conf_loss and loc_loss
class MultiBoxLoss(nn.Module):
def __init__(self, cfg):
super(MultiBoxLoss, self).__init__()
self.num_classes = cfg.num_classes
self.threshold = cfg.overlap_thresh
self.negpos_ratio = cfg.neg_pos
self.variance = cfg.variance
def forward(self, preds, targets):
loc_data, conf_data, priors = preds
num = loc_data.size(0)
num_priors = priors.size(0)
# match priors (priors->nearest target)
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
if loc_data.is_cuda:
loc_t, conf_t = loc_t.cuda(), conf_t.cuda()
for idx in range(num):
truths = targets[idx][:, :-1]
labels = targets[idx][:, -1]
defaults = priors
match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)
pos = conf_t > 0
# location loss
pos_idx = pos.unsqueeze(2).expand_as(loc_data)
loc_p = loc_data[pos_idx].view(-1, 4)
loc_t = loc_t[pos_idx].view(-1, 4)
loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)
# evaluate each priors's loss (the same as the paper)
batch_conf = conf_data
loss_c = (log_sum_exp(batch_conf) - batch_conf.gather(2, conf_t.unsqueeze(2))).squeeze(2)
# hard negative mining: note: the batch size of each iteration is not the same
# find the "max loss" background
loss_c[pos] = 0 # filter out pos boxes
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1) # size: [num, 1]
neg = idx_rank < num_neg.expand_as(idx_rank)
# confidence loss (pos:neg=1:3)
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
targets_weightd = conf_t[(pos + neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weightd, size_average=False)
return loss_l / num_pos.sum(), loss_c / num_pos.sum()
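# Added note (descriptive comment, not part of the original file): the pair of sorts in the
# hard negative mining block is the usual rank trick — sorting loss_c descending and then
# argsorting the resulting indices gives each prior its rank by loss, so `idx_rank < num_neg`
# keeps the negpos_ratio * num_pos hardest negatives per image (capped at num_priors - 1).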
|
AceCoooool/detection-pytorch
|
ssd/utils_ssd/multiloss.py
|
multiloss.py
|
py
| 2,399 |
python
|
en
|
code
| 24 |
github-code
|
6
|
23391816430
|
import os
import shutil
import subprocess
import time
import pylab
import imageio
import numpy as np
from tqdm import tqdm
from skimage.io import imread, imsave
def create_options(model, epoch):
opts_test = {
"loadSize": 512,
"fineSize": 512,
"how_many": 'all',
"phase": 'test',
"name": model,
"checkpoints_dir": './checkpoints',
"results_dir": '.temp_output',
"which_epoch": str(epoch),
"which_direction": 'AtoB',
"resize_or_crop": '"scale_width"',
}
return opts_test
def create_bash_cmd_test(opts_test):
"""constructs bash command to run CycleGAN with the given settings"""
cmd = []
cmd.append("DATA_ROOT=.temp_input")
for opt in opts_test.keys():
cmd.append(opt + "=" + str(opts_test[opt]))
cmd += ['th', 'test.lua']
return(" ".join(cmd))
def check_correct_directory():
"""check if the script is being run from CycleGAN"""
fpath = os.path.realpath(__file__)
dirname = os.path.dirname(fpath).split('/')[-1]
if not dirname == "CycleGAN":
raise ValueError("Script should be run from CycleGAN base directory.")
def prep_directories():
"""ensures clean temporary directories for CycleGAN"""
for dir_ in ['.temp_input', '.temp_output']:
if os.path.exists(dir_):
shutil.rmtree(dir_)
for dir_ in ['testA', 'testB']:
os.makedirs(os.path.join('.temp_input', dir_))
os.makedirs(os.path.join('.temp_output'))
def grab_epochs(model):
"""
given a model name or a folder path,
returns an array of available epochs
"""
if not os.path.isdir(model):
model = os.path.join('checkpoints', model)
assert os.path.isdir(model), model + " not a valid model"
epochs = []
for file in os.listdir(model):
if file.split('.')[-1] == "t7":
epochs.append(file.split('_')[0])
epochs = [e for e in epochs if not e == 'latest']
return list(set(epochs))
def test(img, opts):
"""
performs a test inference on img, saves to a temp directory
returns the stylized image
"""
prep_directories()
for dir_ in ['testA', 'testB']:
imsave(os.path.join('.temp_input', dir_, 'img.png'), img)
# run the bash command for test phase of CycleGAN
cmd = create_bash_cmd_test(opts)
start = time.time()
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
process.wait()
print("Stylizing complete. Time elapsed:", time.time() - start)
# read image back into python
path = os.path.join('.temp_output', opts['name'], str(opts['which_epoch']) + "_test", "images", "fake_B", "img.png")
stylized_img = imread(path)
return stylized_img
def stylize_image_all_epochs(img_path, output_dir, model):
"""
processes an image with a model at all available epochs
"""
imname = os.path.split(img_path)[1].split('.')[0]
os.makedirs(output_dir, exist_ok=True)
img = imread(img_path)
available_epochs = grab_epochs(model)
for epoch in tqdm(available_epochs):
opts = create_options(model, epoch)
stylized_img = test(img, opts)
imsave(os.path.join(output_dir, imname + "-" + model + "-epoch-" + str(epoch)) + ".png", stylized_img)
def stylize_video(vid_path, out_path, model, epoch):
"""
stylizes all frames of a video
"""
video = imageio.get_reader(vid_path, 'mpeg')
writer = imageio.get_writer(out_path, fps=30)
opts = create_options(model, epoch)
# TODO: don't hardcode 30fps downsampling
for i, frame in enumerate(video):
if i % 2 == 0:
frame = test(np.array(frame), opts)
writer.append_data(frame)
if i % 10 == 0:
print(i, "of", len(video), "frames done.")
if i == len(video) - 10: # TAKE THIS OUT AFTER DONE TESTING
break
writer.close()
def repeat_stylization(img_path, out_dir, n_iter, model, epoch):
"""
Repeatedly applies a style to an image
"""
fname = os.path.splitext(img_path)[0].split("/")[-1]
img = imread(img_path)
os.makedirs(out_dir, exist_ok=True)
opts = create_options(model, epoch)
for i in range(n_iter):
img = test(img, opts)
        imsave(os.path.join(out_dir, fname + "-" + model + "-" + str(epoch) + "-iter" + str(i) + ".png"), img)
def stylize_dir_hacky(input_dir, output_dir):
"""
this is horrible and last minute
temporary function to perform stylization
images: applies 3 available styles at 5 different epochs
the bean - pop art at all epochs
northwestern - cubism at all epochs
video: applies 3 styles to each video
"""
models = ['cubism_v2', 'impressionism', 'pop_art']
epochs_img = [50, 100, 150, 200]
files = os.listdir(input_dir)
files = [f for f in files if not f[0] == "."]
os.makedirs(output_dir)
for file in files:
filename = file.split(".")[0]
output_subdir = os.path.join(output_dir, filename + "-stylized")
os.makedirs(output_subdir, exist_ok=True)
print("Stylizing", file, "\nSaving to", output_subdir)
# Videos
if ".mp4" in file:
for model in models:
print("Applying", model, "to", file, ", saving to")
stylize_video(vid_path=os.path.join(input_dir, file),
out_path=os.path.join(output_subdir, file + '-' + model + '.mp4'),
model=model,
epoch=200)
# Photos
else:
# Images, all epochs, all models
if file in ['northwestern.jpeg', 'the_bean.jpeg']:
output_subdir_all_epochs = os.path.join(output_dir, file.split(".")[0] + "-all-epochs")
os.makedirs(output_subdir_all_epochs, exist_ok=True)
for model in models:
print("Applying", model, "to", file, "all epochs", "\nSaving to", output_subdir_all_epochs)
# try:
stylize_image_all_epochs(img_path=os.path.join(input_dir, file),
output_dir=output_subdir_all_epochs,
model=model)
# except:
# pass
# Images, only certain styles
for model in models:
for epoch in epochs_img:
try:
img = imread(os.path.join(input_dir, file))
opts = create_options(model, epoch)
stylized_img = test(img, opts)
                        imsave(os.path.join(output_subdir, filename + "-" + model + "-epoch-" + str(epoch) + ".png"), stylized_img)
except:
pass
def stylize_image_all_styles(img_path, models):
pass
if __name__ == "__main__":
stylize_dir_hacky("input_5-28-17", "output_5-28-17")
|
chang/DeepPainting
|
train/test_cyclegan.py
|
test_cyclegan.py
|
py
| 6,948 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43223088295
|
from __future__ import division, print_function, unicode_literals
import unittest
from math import pi
import numpy as np
from analysis import extract_z, extract_scattering
class SigmaTest(unittest.TestCase):
def test_quad(self):
T = 0.01
w_n = pi * (2*np.arange(10)+1) * T
ImSigma = w_n * (w_n - 10)
z = extract_z(w_n, ImSigma)
self.assertAlmostEqual(z, 1/11)
gamma = extract_scattering(w_n, ImSigma)
self.assertAlmostEqual(gamma, 0)
def test_linear(self):
T = 0.02
w_n = pi * (2*np.arange(10)+1) * T
ImSigma = -0.5 - w_n
z = extract_z(w_n, ImSigma)
self.assertAlmostEqual(z, 1/2)
gamma = extract_scattering(w_n, ImSigma)
self.assertAlmostEqual(gamma, 0.5)
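# Added note (an assumption about the tested API, not taken from analysis.py): the assertions
# are consistent with extract_z returning z = 1 / (1 - dImSigma/dw at w -> 0) and
# extract_scattering returning -ImSigma extrapolated to w -> 0; e.g. for the linear case
# ImSigma = -0.5 - w this gives z = 1/2 and gamma = 0.5, matching test_linear.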
|
correlatedmaterialslaboratory/dmft-model
|
tests/test_analysis.py
|
test_analysis.py
|
py
| 783 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32010544215
|
def simplify_fraction(fraction):
nominator = fraction[0]
denominator = fraction[1]
if denominator == 0:
raise Exception("Division by zero is undefined")
elif nominator == 0:
return 0
elif nominator > denominator:
for index in range(2, denominator + 1):
while (denominator % index == 0) and (nominator % index == 0):
nominator /= index
denominator /= index
else:
for index in range(2, nominator + 1):
while (nominator % index == 0) and (denominator % index == 0):
nominator /= index
denominator /= index
return (int(nominator), int(denominator))
def collect_fractions(fractions):
for index in range(len(fractions)):
if index == 0:
nominator1 = fractions[index][0]
denominator1 = fractions[index][1]
elif denominator1 == 0:
raise Exception("Division by zero is undefined")
elif nominator1 == 0:
return 0
else:
nominator2 = fractions[index][0]
denominator2 = fractions[index][1]
if denominator2 == 0:
raise Exception("Division by zero is undefined")
elif nominator2 == 0:
return 0
if denominator1 == denominator2:
nominator1 += nominator2
else:
current_denominator1 = denominator1
current_denominator2 = denominator2
denominator1 *= denominator2
nominator1 *= current_denominator2
nominator2 *= current_denominator1
nominator1 += nominator2
return simplify_fraction((nominator1, denominator1))
def sort_fractions(fractions):
result = []
my_dict = {}
for index in range(len(fractions)):
nominator = fractions[index][0]
denominator = fractions[index][1]
my_dict.update({(nominator, denominator): nominator / denominator})
sorted_by_value = sorted(my_dict.items(), key=lambda kv: kv[1])
for sorted_index in sorted_by_value:
result.append(sorted_index[0])
return result
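# Added worked examples (sketch, checked by hand against the functions above):
# simplify_fraction((4, 8)) -> (1, 2)
# collect_fractions([(1, 2), (1, 3)]) -> (5, 6)
# sort_fractions([(2, 3), (1, 2)]) -> [(1, 2), (2, 3)]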
|
1oss1ess/HackBulgaria-Programming101-Python-2018
|
week-2/02.Testing/testing.py
|
testing.py
|
py
| 2,177 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26552246319
|
#!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
def get_private_declare(content):
priv_declared = []
srch = re.compile('private.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
priv_declared += priv_split;
srch = re.compile('params \[.*\]|PARAMS_[0-9].*|EXPLODE_[0-9]_PVT.*|DEFAULT_PARAM.*|KEY_PARAM.*|IGNORE_PRIVATE_WARNING.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
priv_declared += priv_split;
srch = re.compile('(?i)[\s]*local[\s]+(_[\w\d]*)[\s]*=.*')
priv_local = srch.findall(content)
priv_local_declared = sorted(set(priv_local))
priv_declared += priv_local_declared;
return priv_declared
def check_privates(filepath):
bad_count_file = 0
def pushClosing(t):
closingStack.append(closing.expr)
closing << Literal( closingFor[t[0]] )
def popClosing():
closing << closingStack.pop()
with open(filepath, 'r') as file:
content = file.read()
    priv_use = []
# Regex search privates
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ =,\^\-\+\/\*\%\}\]\)";]')
priv_use = srch.findall(content)
priv_use = sorted(set(priv_use))
# Private declaration search
priv_declared = get_private_declare(content)
if '_this' in priv_declared: priv_declared.remove('_this')
if '_this' in priv_use: priv_use.remove('_this')
if '_x' in priv_declared: priv_declared.remove('_x')
if '_x' in priv_use: priv_use.remove('_x')
if '_forEachIndex' in priv_declared: priv_declared.remove('_forEachIndex')
if '_forEachIndex' in priv_use: priv_use.remove('_forEachIndex')
if '_foreachIndex' in priv_declared: priv_declared.remove('_foreachIndex')
if '_foreachIndex' in priv_use: priv_use.remove('_foreachIndex')
if '_foreachindex' in priv_declared: priv_declared.remove('_foreachindex')
if '_foreachindex' in priv_use: priv_use.remove('_foreachindex')
missing = []
for s in priv_use:
if s.lower() not in map(str.lower,priv_declared):
if s.lower() not in map(str.lower,missing):
missing.append(s)
if len(missing) > 0:
print (filepath)
private_output = 'private[';
first = True
for bad_priv in missing:
if first:
first = False
private_output = private_output + '"' + bad_priv
else:
private_output = private_output + '", "' + bad_priv
private_output = private_output + '"];';
print (private_output)
for bad_priv in missing:
print ('\t' + bad_priv)
bad_count_file = bad_count_file + 1
return bad_count_file
def main():
print("#########################")
print("# Search your Privates #")
print("#########################")
sqf_list = []
bad_count = 0
parser = argparse.ArgumentParser()
parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default=".")
args = parser.parse_args()
for root, dirnames, filenames in os.walk('../addons' + '/' + args.module):
for filename in fnmatch.filter(filenames, '*.sqf'):
sqf_list.append(os.path.join(root, filename))
for filename in sqf_list:
bad_count = bad_count + check_privates(filename)
print ("Bad Count {0}".format(bad_count))
if __name__ == "__main__":
main()
|
acemod/ACE3
|
tools/search_privates.py
|
search_privates.py
|
py
| 4,143 |
python
|
en
|
code
| 966 |
github-code
|
6
|
71947924667
|
def get_customer(session, w_id, d_id, c_id):
prepared = session.prepare(
"SELECT c_first, c_middle, c_last FROM customer \
WHERE c_w_id = ? AND c_d_id = ? and c_id = ?"
)
rows = session.execute(prepared.bind((w_id, d_id, c_id)))
return None if not rows else rows[0]
def get_last_order(session, w_id, d_id, c_id):
prepared = session.prepare(
"SELECT o_id, o_entry_d, o_carrier_id FROM order_by_customer \
WHERE o_w_id = ? AND o_d_id = ? AND o_c_id = ? LIMIT 1"
)
rows = session.execute(prepared.bind((w_id, d_id, c_id)))
return None if not rows else rows[0]
def get_order_items(session, w_id, d_id, o_id):
prepared = session.prepare(
"SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d \
FROM order_line \
WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id = ?"
)
rows = session.execute(prepared.bind((w_id, d_id, int(o_id))))
return None if not rows else rows
def order_status_xact(session, w_id, d_id, c_id):
payload = {}
payload['customer'] = get_customer(session, w_id, d_id, c_id)
order = get_last_order(session, w_id, d_id, c_id)
payload['order'] = order
payload['items'] = None
if order:
payload['items'] = get_order_items(session, w_id, d_id, order.o_id)
return payload
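# Added usage sketch (cluster address, keyspace and ids are assumptions, not from the original repo):
# from cassandra.cluster import Cluster
# session = Cluster(['127.0.0.1']).connect('tpcc')
# print(order_status_xact(session, w_id=1, d_id=1, c_id=42))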
|
hiepsieunhan/CS4224-Cassandra
|
script/xacts/order_status_xact.py
|
order_status_xact.py
|
py
| 1,339 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23920525234
|
import threading
import tkinter as tk
from motor import Motor
motor = Motor()
thread_motor = threading.Thread(target=motor.inicia_motor, args=(True,))
thread_motor.daemon = True
thread_motor.start()
def update_value():
    # Function that updates the displayed value
    # Put whatever update logic you need here
    # Example: update the value by adding 1
global value
value = motor.rotacoes
value_label.config(text=str(f'{value:0.2f} RPM'))
    # Schedule this function to run again after 70 ms
    value_label.after(70, update_value)
def slider_callback(value):
    # Slider callback: applies the value selected on the slider to the motor current (amperes)
motor.amperes = int(value)
# Create the main window
window = tk.Tk()
window.title("Atualização de Valor")
# Initial value
value = 0
# Create the label that displays the value
value_label = tk.Label(window, text=str(value), font=("Arial", 24), height=5, width=20)
value_label.pack(pady=20)
# Slider
slider = tk.Scale(window, from_=motor.amperes, to=100, orient=tk.HORIZONTAL, command=slider_callback)
slider.pack(pady=10)
# "Desligar" (shut down) button
desligar_button = tk.Button(window, text="Desligar", command=motor.desliga_motor)
desligar_button.pack(pady=10)
# Call the update function once to start the refresh loop
update_value()
# Start the window's main loop
window.mainloop()
|
PotatoMexicano/sistema-controle-eletrico
|
sistema_controle_eletrico/screen.py
|
screen.py
|
py
| 1,354 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
33810339763
|
import os
from random import shuffle
import tensorflow as tf
import glob
from config import config
# all functions except __init__ and create_iterators are internal helpers
class Preprocessing:
def __init__(self):
print('preprocessing instance creation started')
self.dir_name = config['data_dir']
self.input_len = config['input_len']
#maybe add later noise augmentation
def create_iterators(self):
# get the filenames split into train test validation
test_files = self.get_files_from_txt('testing_list.txt')
val_files = self.get_files_from_txt('validation_list.txt')
filenames = glob.glob(os.path.join(self.dir_name, '*/**.wav'), recursive=True)
filenames = [filename for filename in filenames if 'background_noise' not in filename]
train_files = list(set(filenames) - set(val_files) - set(test_files))
shuffle(train_files)
# get the commands and some prints
self.commands = self.get_commands()
self.num_classes = len(self.commands)
print('len(train_data)', len(train_files))
print('prelen(test_data)', len(test_files))
print('len(val_data)', len(val_files))
print('commands: ', self.commands)
print('number of commands: ', len(self.commands))
# make tf dataset object
train_dataset = self.make_tf_dataset_from_list(train_files)
val_dataset = self.make_tf_dataset_from_list(val_files, is_validation = True)
test_dataset = self.make_tf_dataset_from_list(test_files)
return train_dataset, val_dataset, test_dataset
def get_files_from_txt(self, which_txt):
assert which_txt == 'testing_list.txt' or which_txt == 'validation_list.txt', 'wrong argument'
path = os.path.join(self.dir_name, which_txt)
with open(path) as f:
paths = f.readlines()
paths = [os.path.join(self.dir_name, path[:len(path) - 1]) for path in paths]
shuffle(paths)
return paths
def get_commands(self):
dirs = glob.glob(os.path.join(self.dir_name, "*", ""))
commands = [os.path.split(os.path.split(dir)[0])[1] for dir in dirs if 'background' not in dir]
return commands
@staticmethod
def decode_audio(audio_binary):
audio, _ = tf.audio.decode_wav(audio_binary)
return tf.squeeze(audio, axis=-1)
def get_label(self, file_path):
parts = tf.strings.split(file_path, os.path.sep)
label = parts[-2]
label_id = tf.argmax(label == self.commands)
label = tf.one_hot(label_id, self.num_classes)
return label
def make_tf_dataset_from_list(self, filenames_list, is_validation = False):
files = tf.data.Dataset.from_tensor_slices(filenames_list)
dataset = files.map(self.get_waveform_and_label, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.map(self.pad_map_func, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.shuffle(buffer_size=5000, reshuffle_each_iteration=True)
if is_validation:
dataset = dataset.repeat()
dataset = dataset.batch(config['train_params']['batch_size']).prefetch(tf.data.AUTOTUNE)
return dataset
def get_waveform_and_label(self, file_path):
label = self.get_label(file_path)
audio_binary = tf.io.read_file(file_path)
waveform = self.decode_audio(audio_binary)
return waveform, label
def pad_map_func(self, audio, label):
return [self.add_paddings(audio), label]
def add_paddings(self, wav):
len_wav = len(wav)
if len_wav < self.input_len:
paddings = tf.zeros((self.input_len - len_wav))
wav = tf.concat([wav, paddings], axis=0)
return wav
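# Added usage sketch (assumes config['data_dir'] points at an extracted Speech Commands folder):
# prep = Preprocessing()
# train_ds, val_ds, test_ds = prep.create_iterators()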
|
ashnik777/Audio-Classification
|
preprocessing.py
|
preprocessing.py
|
py
| 3,788 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31014267679
|
from transformers import (
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallTokenizer,
BlenderbotForConditionalGeneration,
BlenderbotTokenizer,
)
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import AutoTokenizer, AutoModelForCausalLM
import sys
download_type = sys.argv[1]
if download_type == 'dialogpt':
#------dialogpt samll------#
model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-small")
tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-small")
#------dialogpt medium------#
model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium")
tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium")
print("dialogpt is done!")
elif download_type == 'gptneo':
#------gptneo small------#
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
#------gptneo large------#
#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
print("gptneo is done!")
elif download_type == 'blender':
#------blender small------#
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#------blender medium------#
model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
print("blender is done!")
|
DariuszO/openchat
|
model_download.py
|
model_download.py
|
py
| 1,690 |
python
|
en
|
code
| null |
github-code
|
6
|
12570656588
|
import discord
import requests
client = discord.Client()
tokenFile = open('secret.secret','r')
token = tokenFile.readline()
@client.event
async def on_message(msg):
if msg.content.startswith('$$$$'):
name = msg.content[4::]
apiCall = 'https://na.whatismymmr.com/api/v1/summoner?name=' + name
response = requests.get(apiCall)
if(response.status_code == 200):
data = response.json()
Ammr = data['ARAM']['avg']
Apct = data['ARAM']['percentile']
Arank = data['ARAM']['closestRank']
builtMsg = f'{name}\'s ARAM MMR: {Ammr}\n{Apct}th percentile, about {Arank}'
await msg.channel.send(builtMsg)
else:
await msg.channel.send('RIP LMAO.')
##test push
client.run(token)
|
gpulia/kitchenSync
|
server.py
|
server.py
|
py
| 790 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12903355857
|
import csv
with open ("election_results.txt", "w+") as output:
with open("election_data.csv" , "r") as csvfile:
readCSV = csv.reader(csvfile)
data = list(readCSV)
row_count = len(data)
print("Election Results:")
print("-------------------------")
print("Total Votes: " + str(row_count))
print("-------------------------")
cand = []
for row in data:
cand.append(row[2])
Khan_count = cand.count("Khan")
Khan_percent = (Khan_count / row_count)
Khan_format = format(Khan_percent, ".3%")
print("Khan: " + str(Khan_format) + " " +
"(" + str(Khan_count) + ")")
Correy_count = cand.count("Correy")
Correy_percent = (Correy_count / row_count)
Correy_format = format(Correy_percent, ".3%")
print("Correy: " + str(Correy_format) + " " +
"(" + str(Correy_count) + ")")
Li_count = cand.count("Li")
Li_percent = (Li_count / row_count)
Li_format = format(Li_percent, ".3%")
print("Li: " + str(Li_format) + " " +
"(" + str(Li_count) + ")")
O_Tooley_count = cand.count("O'Tooley")
O_Tooley_percent = (O_Tooley_count / row_count)
O_Tooley_format = format(O_Tooley_percent, ".3%")
print("O'Tooley: " + str(O_Tooley_format) + " " +
"(" + str(O_Tooley_count) + ")")
print("-------------------------")
print("Winner: Khan")
print("-------------------------")
output.write("""Election Results:
-------------------------
Total Votes: 20000
-------------------------
Khan: 62.810% (12562)
Correy: 19.940% (3988)
Li: 14.285% (2857)
O'Tooley: 2.960% (592)
-------------------------
Winner: Khan
-------------------------
Election Results:""")
| Aspace-dev/Austin-Spacek---Homework-2 | python-challenge/as_pypoll/austin_s_polling.py | austin_s_polling.py | py | 1,846 | python | en | code | 0 | github-code | 6 | 5522014624 |
import openpyxl
def get_exl(file,Sheet):
exl = openpyxl.load_workbook(file)
table = exl[Sheet]
max_rows = table.max_row
max_column = table.max_column
# print(max_rows,max_column)
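    # openpyxl rows/columns are 1-indexed; the offsets below (ranges starting at 1 and 3,
    # cells read at row+1, column+1) appear intended to skip the header row and the first
    # few columns of the sheet (inference from the code, not from the original author).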
data = []
for row in range(1, max_rows):
rowdata = []
for column in range(3, max_column-1):
rowdata.append(table.cell(row+1, column+1).value)
data.append(rowdata)
return data
if __name__ == '__main__':
run = get_exl('../TestData/data/testdata.xlsx','查询终端设备')
print(run)
| commiting/TEST | Tools/getexcel.py | getexcel.py | py | 541 | python | en | code | 0 | github-code | 6 | 34346397693 |
"""
Exercises of the book "Think python"
2.10 Exercises
"""
import math
import datetime
import decimal
# Exercise 2
# Using the Python as a calculator
# 1. The volume of a sphere with radius r is 4/3 π r3. What is the volume of a sphere with radius 5?
radius = 5
print("The volume of a sphere: ", (4 / 3 * math.pi * radius**3))
# 2. Suppose the cover price of a book is $24.95, but bookstores get a 40% discount.
# Shipping costs $3 for the first copy and 75 cents for each additional copy.
# What is the total wholesale cost for 60 copies?
cover_price = decimal.Decimal('24.95')
discount = decimal.Decimal('0.4')
copies_amount = decimal.Decimal(60)
s_cost_first = decimal.Decimal(3) # Shipping_cost for the first copy
s_cost_add = decimal.Decimal('0.75') # Shipping_cost for each additional copy
# Total wholesale cost
sum = (
(cover_price * (1 - discount)) * copies_amount
+ (s_cost_add * (copies_amount - 1))
+ s_cost_first
)
print("Total wholesale cost for 60 copies: ", round(sum, 2))
# 3. If I leave my house at 6:52 am and run 1 mile at an easy pace (8:15 per mile),
# then 3 miles at tempo (7:12 per mile) and 1 mile at easy pace again,
# what time do I get home for breakfast?
# Speed (minutes)
easy_pace = "08:15"
tempo = "07:12"
def time_to_seconds(time: str):
"""Function convert time from str format('07:12', MM:SS) to amount of seconds."""
return (int(time[: time.find(":")]) * 60) + (int(time[time.find(":") + 1 :]))
# Convert time to seconds
easy_pace = time_to_seconds(easy_pace)
tempo = time_to_seconds(tempo)
# Start time of the run
start_time = datetime.datetime.strptime("6:52", "%H:%M")
# Calculate duration of the run in seconds (miles*time per mile)
whole_time = 2 * easy_pace + 3 * tempo
# Calculate time of the end of the run
home_time = (start_time + datetime.timedelta(seconds=whole_time)).time()
print("You will get home at: ", home_time)
| LiliiaMykhaliuk/think-python | chapter2/2.10.2.py | 2.10.2.py | py | 1,916 | python | en | code | 0 | github-code | 6 | 33065831217 |
from __future__ import (
absolute_import,
unicode_literals,
)
from application.utils.utils import (
get_near_positions,
remove_occupied_position,
pick_random
)
from application.entity.point import Point
from application.entity.enemy_ship import EnemyShip
class EnemyBoard(object):
def __init__(self, width, height, ships):
self.width = width
self.height = height
self.valid_positions = []
self.remain_positions = []
self.fired_positions = []
self.hit_positions = []
self.remain_ship = []
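        # Only cells whose coordinates satisfy (x + y) % 2 == 0 are tracked below: on a
        # checkerboard pattern any ship at least two cells long must cover one such cell,
        # so hunting on half the board is enough (inference from the code).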
for x in range(width):
for y in range(height):
if (x + y) % 2 == 0:
self.valid_positions.append(Point(x, y))
self.remain_positions.append(Point(x, y))
self.init_ships(ships)
def init_ships(self, ships):
for ship_data in ships:
for index in range(int(ship_data['quantity'])):
ship = EnemyShip(ship_data['type'])
self.remain_ship.append(ship)
def fire(self):
fire_point = self.get_high_expect_positions()
self.remain_positions.remove(fire_point)
self.fired_positions.append(fire_point)
return fire_point
def get_high_expect_positions(self):
high_expect_positions = []
if self.hit_positions:
for position in self.hit_positions:
near_positions = get_near_positions(position)
high_expect_positions = remove_occupied_position(near_positions, self.fired_positions)
if high_expect_positions:
break
else:
return self.fire_random()
if not high_expect_positions:
return self.fire_random()
return pick_random(high_expect_positions)
def fire_random(self):
fire_point = pick_random(self.remain_positions)
self.remain_positions.remove(fire_point)
self.fired_positions.append(fire_point)
return fire_point
| NTNguyetMinh/hackathon | application/entity/enemy_board.py | enemy_board.py | py | 2,026 | python | en | code | 0 | github-code | 6 | 40960795543 |
import pytest
from collections import Counter
from ottoscript.base import OttoBase, OttoContext
from ottoscript.datatypes import (Number,
String,
Var,
Entity,
List,
Dict,
Target,
Area,
Input)
@pytest.mark.asyncio
async def test_numeric():
"""Verify we correctly parse a number"""
n = Number().parseString("15")[0]
output = await n.eval()
assert output == 15
@pytest.mark.asyncio
async def test_string():
"""Verify we correctly parse a string"""
n = String().parseString("'foo'")[0]
output = await n.eval()
assert output == 'foo'
@pytest.mark.asyncio
async def test_var_no_fetch():
"""Verify we correctly parse a var"""
n = Var().parseString("@foo")[0]
assert n.name == '@foo'
@pytest.mark.asyncio
async def test_var_with_attributes():
"""Verify we correctly parse a var with attributes"""
ctx = OttoContext()
d_string = "(one=1, two=2)"
e_string = "ship.crew"
ctx.update_vars({'@foo_entity': Entity().parseString(e_string)[0]})
ctx.update_vars({'@foo_dict': Dict().parseString(d_string)[0]})
OttoBase.set_context(ctx)
n = Var().parseString("@foo_entity:name")[0]
r = await n.eval()
assert r == "ship.crew"
n = Var().parseString("@foo_entity:brightness")[0]
r = await n.eval()
assert r == 1
r = n.fetch()
assert r.name == "ship.crew.brightness"
n = Var().parseString("@foo_entity:number")[0]
r = await n.eval()
assert r == 1
n = Var().parseString("@foo_dict:one")[0]
r = await n.eval()
assert r == 1
n = Var().parseString("@foo_dict:two")[0]
r = await n.eval()
assert r == 2
@pytest.mark.asyncio
async def test_entity():
"""Verify we correctly parse an entity"""
test_list = [('ship.crew', 'ship.crew', 'ship.crew'),
('ship.crew:uniform', 'ship.crew.uniform', 1)
]
for test in test_list:
n = Entity().parseString(test[0])[0]
assert n.name == test[1]
assert await n.eval() == test[2]
@pytest.mark.asyncio
async def test_list():
"""Verify we correctly parse a list"""
ctx = OttoContext()
ctx.update_vars({'@foo': 'foostring'})
OttoBase.set_context(ctx)
string = "'test1', 27, ship.crew, @foo"
expected = [String().parseString('"test1"')[0],
Number().parseString('27')[0],
Entity().parseString('ship.crew')[0],
Var().parseString('@foo')[0]]
n1 = List().parseString(string)[0]
assert Counter([type(x) for x in n1.contents]) \
== Counter([type(x) for x in expected])
n2 = List().parseString(f"({string})")[0]
assert Counter([type(x) for x in n2.contents]) \
== Counter([type(x) for x in expected])
@pytest.mark.asyncio
async def test_list_single():
"""Verify we correctly parse a number"""
string = "ship.crew"
expected = list
n1 = List().parseString(string)[0]
assert type(n1.contents) == expected
@pytest.mark.asyncio
async def test_dictionary():
"""Verify we correctly parse a dictionary"""
ctx = OttoContext()
ctx.update_vars({'@foo': 'foostring'})
OttoBase.set_context(ctx)
string = "(first = 1, second = 'foo', third = ship.crew, fourth = @foo)"
expected = {'first': 1,
'second': 'foo',
"third": 'ship.crew',
"fourth": 'foostring'}
n1 = Dict().parseString(string)[0]
result = await n1.eval()
assert result == expected
@pytest.mark.asyncio
async def test_target():
ctx = OttoContext()
area = Area().parseString('kitchen')[0]
ctx.update_vars({'@area': area})
arealist = List(Area()).parseString('kitchen, living_room')[0]
ctx.update_vars({'@arealist': arealist})
OttoBase.set_context(ctx)
tests = [('ship.crew, ship.phasers',
{'entity_id': ['ship.crew', 'ship.phasers'], 'area_id': []}
),
('AREA kitchen, living_room',
{'area_id': ['kitchen', 'living_room'], 'entity_id': []}
),
('AREA @area',
{'area_id': ['kitchen'], 'entity_id': []}
),
('AREA @arealist',
{'area_id': ['kitchen', 'living_room'], 'entity_id': []}
)
]
for test in tests:
n = Target().parseString(test[0])[0]
result = await n.eval()
assert result == test[1]
@pytest.mark.asyncio
async def test_input():
ctx = OttoContext()
ctx.update_vars({'@foostring': String().parseString("'foostring'")[0],
'@foonumber': Number().parseString("30.0")[0]})
OttoBase.set_context(ctx)
tests = [{"type": "text",
"string": "'foostring'",
"expected": "foostring"},
{"type": "text",
"string": "@foostring",
"expected": "foostring"},
{"type": "text",
"string": "foo.string",
"expected": "foo.string"},
{"type": "numeric",
"string": "15",
"expected": 15.0},
{"type": "numeric",
"string": "@foonumber",
"expected": 30.0},
{"type": "numeric",
"string": "foo.number:attr",
"expected": 1.0},
{"type": "any",
"string": "'foostring'",
"expected": "foostring"},
{"type": "any",
"string": "@foostring",
"expected": "foostring"},
{"type": "any",
"string": "foo.string",
"expected": "foo.string"},
{"type": "any",
"string": "15",
"expected": 15.0},
{"type": "any",
"string": "@foonumber",
"expected": 30.0}
]
for test in tests:
n = Input(test["type"]).parseString(test["string"])[0]
print(test["string"])
result = await n.eval()
assert result == test["expected"]
| qui3xote/otto | tests/test_datatypes/datatype_test.py | datatype_test.py | py | 6,300 | python | en | code | 1 | github-code | 6 | 74888826427 |
from rest_framework.serializers import CharField, ModelSerializer
from .models.base import CanadianCommonCv
from .models.employment import AcademicWorkExperience, Employment
from .models.personal_information import Identification, Email, Website
from .models.recognitions import AreaOfResearch
from .models.user_profile import UserProfile
class AreaOfResearchSerializer(ModelSerializer):
class Meta:
model = AreaOfResearch
fields = [
'area',
'sector',
'field'
]
class WebsiteSerializer(ModelSerializer):
class Meta:
model = Website
fields = ['url']
class EmailSerializer(ModelSerializer):
class Meta:
model = Email
fields = ['address']
class IdentificationSerializer(ModelSerializer):
email = EmailSerializer(many=True, read_only=True, source='email_set')
website = WebsiteSerializer(many=True, read_only=True, source="website_set")
class Meta:
model = Identification
fields = [
'email',
'title',
'website',
'family_name',
'first_name',
'middle_name',
'previous_family_name',
'previous_first_name'
]
class AcademicWorkExperienceSerializer(ModelSerializer):
class Meta:
model = AcademicWorkExperience
fields = [
'department',
'position_title'
]
class EmploymentSerializer(ModelSerializer):
academic_work_experience = AcademicWorkExperienceSerializer(many=True, read_only=True)
class Meta:
model = Employment
fields = ['academic_work_experience']
class UserProfileSerializer(ModelSerializer):
research_description = CharField(source='research_interest', read_only=True)
research_interests = AreaOfResearchSerializer(many=True, read_only=True, source='user_aor')
class Meta:
model = UserProfile
fields = [
'research_description',
'research_interests'
]
class CanadianCommonCvSerializer(ModelSerializer):
identification = IdentificationSerializer(read_only=True)
employment = EmploymentSerializer(read_only=True)
user_profile = UserProfileSerializer(read_only=True)
def to_representation(self, instance):
ret = super().to_representation(instance)
ret['research_description'] = ret['user_profile']['research_description']
ret['research_interests'] = ret['user_profile']['research_interests']
ret.pop('user_profile')
return ret
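    # Illustrative shape of the representation returned above (keys follow the
    # serializers in this module; the nested values are placeholders, not real data):
    # {
    #   "identification": {"email": [...], "title": ..., "website": [...], ...},
    #   "employment": {"academic_work_experience": [...]},
    #   "research_description": "...",
    #   "research_interests": [{"area": ..., "sector": ..., "field": ...}, ...]
    # }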
class Meta:
model = CanadianCommonCv
fields = [
'identification',
'employment',
'user_profile'
]
| c3g/ccv_api | ccv/serializers.py | serializers.py | py | 2,733 | python | en | code | 0 | github-code | 6 | 38943946635 |
import wx
import sys
from datetime import date
import webbrowser
from pbalance_c import pbalance_c as balance_c
from pincome_c import pincome_c as income_c
from pportfolio_c import pportfolio_c as portfolio_c
from padddata_c import padddata_c as adddata_c
from praw_c import praw_c as raw_c
from puseradd_c import puseradd_c as useradd_c
from plugin_c import plugin_c
from datautils_c import datautils_c
from datacoll_c import datacoll_c
class gui_c(wx.Frame):
def __init__(self,parent,program,version,about,owner):
wx.Frame.__init__(self,parent,-1,program+' '+version,size=(900,650))
self.CreateStatusBar()
self.SetMinSize((640,480))
self.pinfo = program
self.vinfo = version
self.ainfo = about
self.owner = owner
self.projectopen = False
self.frametitle = program+' '+version
# CALLBACK
owner.registerupdcallback(self.updatecallback)
# PLUGINS
#self.plugins = [balance_c,income_c,useradd_c,addquotes_c,raw_c,adddata_c]
self.plugins = [balance_c,income_c,portfolio_c,useradd_c,adddata_c,raw_c]
self.datacoll = self.owner.datacoll
# COLOURS
#self.palette = DEF_COLORS
#self.palette1 = DEF_COLORS_PROFIT
# MENU BAR
self.menubar = wx.MenuBar()
# FILE
self.menufile = wx.Menu()
item = self.menufile.Append(gui_c.ID_NEWPROJ,'New project...','Create a new project')
item = self.menufile.Append(gui_c.ID_OPEPROJ,'Open project...','Open an existing project')
item = self.menufile.Append(gui_c.ID_SAVPROJ,'Save project','Save current project')
self.menufile.AppendSeparator()
item = self.menufile.Append(gui_c.ID_EXIT,'Exit','Exit program')
#self.menufile.Enable(gui_c.ID_SAVPROJ,False)
self.menubar.Append(self.menufile,'File')
# VIEW
self.menuview = wx.Menu()
item = self.menuview.Append(gui_c.ID_DETAILS,'Details','Show a detailed textual view',kind=wx.ITEM_RADIO)
item = self.menuview.Append(gui_c.ID_HISTORY,'Monthly history','Show a monthly historical textual view',kind=wx.ITEM_RADIO)
item = self.menuview.Append(gui_c.ID_HISTORYY,'Yearly history','Show a yearly historical textual view',kind=wx.ITEM_RADIO)
item = self.menuview.Append(gui_c.ID_GDETAILS,'Detailed plot','Show a detailed graphical view',kind=wx.ITEM_RADIO)
item = self.menuview.Append(gui_c.ID_GHISTORY,'Monthly graph','Show a monthly historical graphical view',kind=wx.ITEM_RADIO)
item = self.menuview.Append(gui_c.ID_GHISTORYY,'Yearly graph','Show a yearly historical graphical view',kind=wx.ITEM_RADIO)
self.menubar.Append(self.menuview,'View')
# PROJECT
self.menuproj = wx.Menu()
item = self.menuproj.Append(gui_c.ID_UPDPROJ,'Download online quotes','Update stock quotes and currency rates online')
item = self.menuproj.Append(gui_c.ID_CALPROJ,'Re-calculate result','Re-calculate all matrices and data')
#self.menuproj.Enable(gui_c.ID_UPDPROJ,False)
#self.menuproj.Enable(gui_c.ID_CALPROJ,False)
self.menubar.Append(self.menuproj,'Project')
# HELP
self.menuabout = wx.Menu()
item = self.menuabout.Append(gui_c.ID_UPDATE,'Search for updates...','Online update search')
item = self.menuabout.Append(gui_c.ID_USERGU,'User\'s Guide... (web)','Frugal\'s User\'s Guide')
item = self.menuabout.Append(gui_c.ID_ABOUT,'About','About %s'%(self.pinfo))
self.menubar.Append(self.menuabout,'About')
# UPDATE
#self.menuupdate = wx.Menu()
#item = self.menuupdate.Append(gui_c.ID_UPDATE,'Show available update','Show available updates')
#self.menubar.Append(self.menuupdate,'Update')
# ADD MENU AND EVENTS
self.SetMenuBar(self.menubar)
#self.Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)
#wx.EVT_MENU(self,gui_c.ID_NEWPROJ,self.new)
self.Bind(wx.EVT_MENU, self.new, id=gui_c.ID_NEWPROJ)
#wx.EVT_MENU(self,gui_c.ID_OPEPROJ,self.open)
self.Bind(wx.EVT_MENU, self.open, id=gui_c.ID_OPEPROJ)
#wx.EVT_MENU(self,gui_c.ID_SAVPROJ,self.save)
self.Bind(wx.EVT_MENU, self.save, id=gui_c.ID_SAVPROJ)
#wx.EVT_MENU(self,gui_c.ID_EXIT,self.quit)
self.Bind(wx.EVT_MENU, self.quit, id=gui_c.ID_EXIT)
#wx.EVT_MENU(self,gui_c.ID_UPDPROJ,self.updateproject)
self.Bind(wx.EVT_MENU, self.updateproject, id=gui_c.ID_UPDPROJ)
#wx.EVT_MENU(self,gui_c.ID_CALPROJ,self.calc)
self.Bind(wx.EVT_MENU, self.calc, id=gui_c.ID_CALPROJ)
#wx.EVT_MENU(self,gui_c.ID_USERGU,self.usersguide)
self.Bind(wx.EVT_MENU, self.usersguide, id=gui_c.ID_USERGU)
#wx.EVT_MENU(self,gui_c.ID_ABOUT,self.about)
self.Bind(wx.EVT_MENU, self.about, id=gui_c.ID_ABOUT)
#wx.EVT_MENU(self,gui_c.ID_UPDATE,self.showupdate)
self.Bind(wx.EVT_MENU, self.showupdate, id=gui_c.ID_UPDATE)
self.Bind(wx.EVT_CLOSE,self.closeprogram)
# SPLITTER
self.splitv = wx.SplitterWindow(self)
# TREE
self.tree = wx.TreeCtrl(self.splitv,-1)
self.troot = self.tree.AddRoot('Frugal')
self.tree.Expand(self.troot)
self.tree.SelectItem(self.troot)
self.tree.Bind(wx.EVT_TREE_SEL_CHANGED,self.treeevent)
# PANEL
lc = wx.ListCtrl(self.splitv,style=wx.LC_REPORT)
# CONF SPLITTERS
self.splitv.SplitVertically(self.tree,lc,200)
self.Centre()
if len(sys.argv) > 1:
# assume path to project is first variable
path = sys.argv[1]
pd = wx.ProgressDialog('Opening project','Please wait - this might take a while...',100,self)
pd.Update(10)
self.openpath(path)
pd.Destroy()
def new(self,event):
if self.projectopen == True:
#TODO error message
return
dialog = wx.DirDialog(self,'Select new project directory','.',wx.DD_DEFAULT_STYLE)
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
dialog.Destroy()
dialog = wx.SingleChoiceDialog(self,'Choose project currency:','Currency',self.datacoll.ALLOWEDCURRENCIES)
if dialog.ShowModal() == wx.ID_OK:
currency = dialog.GetStringSelection()
else:
currency = 'EUR'
dialog.Destroy()
pd = wx.ProgressDialog('Create new project','Please wait... this might take a while.',100,self)
if self.owner.new(path,currency) == False:
pd.Destroy()
self.errdlg('Error: Failed to create new project!')
return
else:
dialog.Destroy()
return
if currency == '':
currency = 'EUR'
self.openpath(path,pd)
pd.Destroy()
def open(self,event):
if self.projectopen == True:
#TODO error message
return
dialog = wx.DirDialog(self,'Select project directory','.',wx.DD_DEFAULT_STYLE)
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
pd = wx.ProgressDialog('Opening project','Please wait - this might take a while...',100,self)
pd.Update(10)
self.openpath(path,pd)
pd.Destroy()
dialog.Destroy()
def openpath(self,path,pd=False):
if self.owner.open(path,pd) == True:
#self.menufile.Enable(gui_c.ID_NEWPROJ,False)
#self.menufile.Enable(gui_c.ID_OPEPROJ,False)
#self.menufile.Enable(gui_c.ID_SAVPROJ,True)
#self.menuproj.Enable(gui_c.ID_UPDPROJ,True)
#self.menuproj.Enable(gui_c.ID_CALPROJ,True)
self.createtree()
self.projectopen = True
else:
self.errdlg('Error: Failed to open project!')
def save(self,event):
if self.projectopen == False:
#TODO error message
return
pd = wx.ProgressDialog('Saving project','Please wait - this might take a while...',100,self)
pd.Update(10)
rv = self.owner.save(pd)
pd.Destroy()
if rv == False:
self.errdlg('Error: Failed to save project!')
return
self.unsaved_data = 0
def closeprogram(self,event):
if self.datacoll.unsaved == 1:
msg = wx.MessageDialog(self,'The financial data has been modified.\nDo you want to save your changes before exiting?','Save data',wx.YES_NO|wx.YES_DEFAULT|wx.CANCEL|wx.ICON_QUESTION)
rv = msg.ShowModal()
msg.Destroy()
if rv == wx.ID_YES:
self.owner.datacoll.savefiles()
if rv == wx.ID_CANCEL:
return
self.Destroy()
def quit(self,event):
self.Close()
def updateproject(self,event):
if self.projectopen == False:
#TODO error message
return
pd = wx.ProgressDialog('Updating stock quotes and currency rates','Please wait - this might take a while...',100,self)
pd.Update(10)
if self.owner.downloadquotes(pd) == True:
self.tree.SelectItem(self.troot)
pd.Destroy()
self.calc(0)
def calc(self,event):
if self.projectopen == False:
#TODO error message
return
pd = wx.ProgressDialog('Calculating result','Please wait - this might take a while...',100,self)
pd.Update(10)
if self.owner.calc(pd) == True:
self.tree.SelectItem(self.troot)
pd.Destroy()
def showupdate(self,event):
updateavailable = self.owner.checkforupdate()
if updateavailable == 1:
msg = 'A new version of Frugal is available for download!\n\nPlease go to http://www.samoht.se/frugal/ and click Download!'
self.SetTitle(self.frametitle+' (new version available for download)')
elif updateavailable == -1:
msg = 'Failed to read latest version! Please retry.'
else:
msg = 'No new version of Frugal available!'
notice = wx.MessageDialog(self,msg,self.pinfo,wx.OK|wx.ICON_INFORMATION)
notice.ShowModal()
notice.Destroy()
def updatecallback(self):
# UPDATE
#self.menuupdate = wx.Menu()
#item = self.menuupdate.Append(gui_c.ID_UPDATE,'Show available update','Show available updates')
#self.menubar.Append(self.menuupdate,'Update')
self.SetTitle(self.frametitle+' (new version available for download)')
def about(self,event):
about = wx.MessageDialog(self,self.ainfo,self.pinfo,wx.OK|wx.ICON_INFORMATION)
about.ShowModal()
about.Destroy()
def usersguide(self,event):
webbrowser.open('http://www.samoht.se/frugal/usersguide.php')
def createtree(self):
for p in self.plugins:
itm = self.tree.AppendItem(self.troot,p.NAME)
for f in p.FUNCTIONS:
self.tree.AppendItem(itm,f)
self.tree.Expand(self.troot)
def treeevent(self,event):
itm = event.GetItem()
text = self.tree.GetItemText(itm)
selectedplugin = None
attr = []
if self.menuview.FindItemById(gui_c.ID_DETAILS).IsChecked():
attr.append('details')
elif self.menuview.FindItemById(gui_c.ID_HISTORY).IsChecked():
attr.append('history')
elif self.menuview.FindItemById(gui_c.ID_HISTORYY).IsChecked():
attr.append('historyy')
elif self.menuview.FindItemById(gui_c.ID_GDETAILS).IsChecked():
attr.append('gdetails')
elif self.menuview.FindItemById(gui_c.ID_GHISTORY).IsChecked():
attr.append('ghistory')
elif self.menuview.FindItemById(gui_c.ID_GHISTORYY).IsChecked():
attr.append('ghistoryy')
for p in self.plugins:
if p.NAME == text:
selectedplugin = p(self,self.splitv,func=p.NAME,attr=attr)
for f in p.FUNCTIONS:
if f == text:
selectedplugin = p(self,self.splitv,func=f,attr=attr)
if not selectedplugin == None:
self.showpanel(selectedplugin)
def showpanel(self,panel):
old = self.splitv.GetWindow2()
self.splitv.ReplaceWindow(old,panel)
old.Destroy()
def errdlg(self,text='',title='Error',style=wx.OK|wx.ICON_ERROR):
return self.msgdlg(text,title,style)
def msgdlg(self,text='',title='Information',style=wx.OK|wx.ICON_INFORMATION):
msg = wx.MessageDialog(self,text,title,style)
rv = msg.ShowModal()
msg.Destroy()
return rv
ID_NEWPROJ = 1001
ID_OPEPROJ = 1002
ID_SAVPROJ = 1003
ID_EXIT = 1004
ID_CALPROJ = 1005
ID_ABOUT = 1006
ID_ADDTRAN = 1007
ID_DETAILS = 1008
ID_HISTORY = 1009
ID_GDETAILS = 1010
ID_GHISTORY = 1011
ID_CLOPROJ = 1012
ID_UPDATE = 1013
ID_HISTORYY = 1014
ID_GHISTORYY = 1015
ID_UPDPROJ = 1016
ID_USERGU = 1017
class appl_c(wx.App):
def OnInit(self):
return True
def setup(self,program,version,about,owner):
frame = gui_c(None,program,version,about,owner)
frame.Show()
| tomluvoe/frugal | src/gui_c.py | gui_c.py | py | 11,466 | python | en | code | 0 | github-code | 6 | 39122629885 |
import glob
import PinshCmd
import BomHostField
from commonUtil import *
class ScpHostField(PinshCmd.PinshCmd):
def __init__(self):
PinshCmd.PinshCmd.__init__(self, name = "scpHostName")
self.helpText = "<path>\tA path to a remote location. Follows pattern: <hostname>:/path/to/file"
self.bomHostField = BomHostField.BomHostField()
self.cmdOwner = 0
self.pathCache = {}
self.tokenDelimiter = ''
def checkPossiblePathCache(self, hostName, checkPath):
basePath = '/'.join(checkPath.split('/')[:-1])
if not basePath in self.pathCache:
p = mode.getBomConnection(hostName)
p.freshen()
remotePaths = p.checkPossiblePaths(basePath+'/')
self.pathCache[basePath] = remotePaths
returnValue = []
for path in self.pathCache[basePath]:
if checkPath in path:
returnValue.append(hostName+":"+path)
return returnValue
def getPossibleHostNames(self, dest):
if ':' in dest:
target = dest.split(':')[0]
else:
target = dest
possibleHostNames = self.bomHostField.preferredNames([target], 0)
return possibleHostNames
def preferredNames(self, tokens, index):
dest = tokens[index]
hostNames = self.getPossibleHostNames(dest)
if ':' in dest:
if len(hostNames) != 1:
return []
hostName = hostNames[0]
path = dest.split(':')[1]
if not path:
return [hostName+':']
return self.checkPossiblePathCache(hostName, path)
else:
if hostNames:
return hostNames
else:
possibleLocalNames = glob.glob(dest+"*")
if dest == '.':
possibleLocalNames.append('.')
if os.path.isdir(dest):
possibleLocalNames.append(dest)
return possibleLocalNames
return []
def match(self, tokens, index):
possibleMatches = self.preferredNames(tokens, index)
if not possibleMatches:
return NO_MATCH, 1
if len(possibleMatches) > 1:
return PARTIAL, 1
return COMPLETE, 1
if __name__ == "__main__":
# A server must be enabled for the tests to work properly.
from libTest import startTest, runTest, endTest
hostField = ScpHostField()
status = OK
startTest()
status = runTest(hostField.preferredNames, [["bigap:/usr/l"], 0], ['bigap:/usr/lib/', 'bigap:/usr/libexec/', 'bigap:/usr/local/'], status)
status = runTest(hostField.preferredNames, [["biga"], 0], ['bigap'], status)
status = runTest(hostField.preferredNames, [["/usr/l"], 0], ['/usr/lib', '/usr/lib64', '/usr/local'], status)
status = runTest(hostField.preferredNames, [["/tmp/2.ovpn"], 0], ['/tmp/2.ovpn'], status)
status = runTest(hostField.preferredNames, [["bigap:/tmp/"], 0], ['bigap:/tmp/sudoers', 'bigap:/tmp/'], status)
#status = runTest(hostField.preferredNames, [["/tmp/"], 0], ['/tmp/'], status)
endTest(status)
| psbanka/bombardier | cli/lib/broken/ScpHostField.py | ScpHostField.py | py | 3,148 | python | en | code | 1 | github-code | 6 | 37197472203 |
# n, m, k, p = [int(input()) for _ in range(4)]
# print(n - ((m + k) - p))
##############
#2
# ls = input().split()
# a = set(ls)
# print(len(ls) - len(a))
# res = set()
# for i in range(int(input())):
# res.add(input())
# f = input()
# if f in res:
# print('REPEAT')
# else:
# print('OK')
################
# m = int(input())
# n = int(input())
# dom = {input() for _ in range(m)}
# for i in range(n):
# if input() in dom:
# print('YES')
# else:
# print('NO')
#############
# a = {int(i) for i in input().split()}
# b = {int(i) for i in input().split()}
# res = set(a.intersection(b))
# if len(res) == 0:
# print('BAD DAY')
# else:
# print(*sorted(res, reverse=True))
# myset1 = {int(i) for i in input().split()}
# myset2 = {int(i) for i in input().split()}
#
# f = myset2 - myset1
# flag = myset1.issubset(myset2)
# if len(f) != 0:
#     print('NO')
# else:
#     if flag == True:
#         print('YES')
#     else:
#         print('NO')
# m, n = int(input()), int(input())
# mat = {input() for _ in range(m)}
# info = {input() for _ in range(n)}
#
# res = set()
# res.update(mat - info)
# res.update(info - mat)
#
# if len(res) == 0:
# print('NO')
# else:
# print(len(res))
###########
# a, b = set(input().split()), set(input().split())
# a.update(b)
# print(*sorted(a))
#############
# m = int(input())
# n = int(input())
# all_shool = [input() for _ in range(m+n)]
#
# a = {}
# for i in all_shool:
# a.setdefault(i, all_shool.count(i))
#
# couny = 0
# for i in a.values():
# if i == 1:
# couny += 1
# if couny == 0:
# print('NO')
# else:
# print(couny)
# #########
# res = [input() for _ in range(int(input()))]
# one = set(res[0])
# for i in res:
#     one.intersection_update(i)
# print(*sorted(one))
res = [{input() for _ in range(int(input()))} for _ in range(int(input()))]
one = set(res[0])
for i in res:
one.intersection_update(i)
print(*sorted(one), sep='\n')
| alecksandr-slavin/git_work | New_Python_up/Множества/ex.1.py | ex.1.py | py | 1,973 | python | en | code | 0 | github-code | 6 | 14394640679 |
import os
class InputProject():
# data
# Project path; file list;
# id-file saving
def __init__(self, inputPathList, extNameArr):
self.fileList = [] # projectId-file
self.projectList = inputPathList
if len(extNameArr) <= 0:
            print('extName configuration error. Please check config.json.')
            return  # __init__ must return None; 'return False' here would raise a TypeError at instantiation
else:
self.__extName = set()
for i in extNameArr:
self.__extName.add(i)
for projectId in range(len(inputPathList)):
if self.__checkPath(inputPathList[projectId]):
self.fileList.append(self.__addFiles(inputPathList[projectId]))
else:
print("path:" + inputPathList[projectId] + "not found.")
def __addFiles(self, inputPath):
res = []
fileWalking = os.walk(inputPath)
tmp = ''
for path, dir_list, file_list in fileWalking:
for file_name in file_list:
tmp = os.path.abspath(path + '/' + file_name)
if self.__ifTargetFile(tmp):
res.append(tmp)
return res
def addFile(self, filePath):
if self.__ifTargetFile(filePath):
self.fileList.append(filePath)
    def __ifTargetFile(self, filePath):
        extStr = os.path.splitext(filePath)[1]
        return extStr in self.__extName  # simple membership test (equivalent to the original issubset check)
def __checkPath(self,pathStr):
return os.path.exists(pathStr)
def saveFileList(self, folderPath):
fileListPath = folderPath + '/fileList.txt'
file = open(fileListPath,'w')
for projectId in range(len(self.fileList)):
for filePath in self.fileList[projectId]:
file.write(str(projectId) + ',' + filePath + '\n')
print("File list created in file:" + fileListPath)
file.close()
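# Hypothetical usage sketch (paths, extensions and output folder are illustrative,
# not taken from the original project configuration):
#
# if __name__ == '__main__':
#     proj = InputProject(['./projectA', './projectB'], ['.py', '.java'])
#     proj.saveFileList('./output')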
| zhuwq585/MSCCD | modules/InputManagement.py | InputManagement.py | py | 1,881 | python | en | code | 4 | github-code | 6 | 73183731388 |
import numpy as np
import cv2
img = cv2.imread('/Users/macbookair/PycharmProjects/PR/homework2/unpro.jpg')
bg = cv2.imread('/Users/macbookair/PycharmProjects/PR/homework2/back2.png')#---->3750*2500
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
rect = (1250,25,2000,2325)
# rect = (1,1,l-2,w-2)
#1250 25 2400 2450
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
mask2 = cv2.erode(mask2,None,iterations=2)
mask2 = cv2.dilate(mask2,None,iterations=1)
mask_inv = np.where((mask2==0),1,0).astype('uint8')
img1 = img*mask2[:,:,np.newaxis]
img2 = bg*mask_inv[:,:,np.newaxis]
dst=cv2.addWeighted(img1,1,img2,1,0)
cv2.imshow('output',dst)
cv2.imwrite('output.jpg',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
| xubinchen-very6/Pattern-recognition | prml/homework2/背景变变变.py | 背景变变变.py | py | 855 | python | en | code | 4 | github-code | 6 | 8215468580 |
import cv2
import numpy as np
from PIL import Image
# Load the image
img = cv2.imread('ParkingLot.jpg')
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Apply edge detection
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
# Apply Hough line detection
lines = cv2.HoughLines(edges, rho=1, theta=np.pi/180, threshold=100)
# Cluster the detected lines
line_clusters = []
for line in lines:
rho, theta = line[0]
if len(line_clusters) == 0:
line_clusters.append([(rho, theta)])
else:
found_cluster = False
for cluster in line_clusters:
if abs(cluster[0][0] - rho) < 50 and abs(cluster[0][1] - theta) < np.pi/36:
cluster.append((rho, theta))
found_cluster = True
break
if not found_cluster:
line_clusters.append([(rho, theta)])
# Find intersection points
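# Each Hough line is given in normal form (rho, theta), i.e. x*cos(theta) + y*sin(theta) = rho.
# Two such lines intersect where the 2x2 linear system [cos(t1) sin(t1); cos(t2) sin(t2)] [x y]^T = [rho1 rho2]^T
# is satisfied, which is what np.linalg.solve computes below for every pair within a cluster.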
intersections = []
for cluster in line_clusters:
for i in range(len(cluster)):
rho1, theta1 = cluster[i]
for j in range(i+1, len(cluster)):
rho2, theta2 = cluster[j]
A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])
b = np.array([rho1, rho2])
x, y = np.linalg.solve(A, b)
if x >= 0 and x < img.shape[1] and y >= 0 and y < img.shape[0]:
intersections.append((int(x), int(y)))
# Find parking space polygons
polygons = []
for i in range(len(intersections)):
for j in range(i+1, len(intersections)):
for k in range(j+1, len(intersections)):
for l in range(k+1, len(intersections)):
p1, p2, p3, p4 = intersections[i], intersections[j], intersections[k], intersections[l]
sides = [cv2.norm(np.array(p1) - np.array(p2)),
cv2.norm(np.array(p2) - np.array(p3)),
cv2.norm(np.array(p3) - np.array(p4)),
cv2.norm(np.array(p4) - np.array(p1))]
if all(side > 30 for side in sides):
area = cv2.contourArea(np.array([p1, p2, p3, p4]))
if area > 1000:
polygons.append([p1, p2, p3, p4])
# Draw the polygons on the image
for i, parking_space in enumerate(polygons):
pts = np.array(parking_space, np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(img, [pts], True, (0, 255, 0), thickness=2)
# Display the image with polygons drawn
cv2.imshow('Parking Lot with Polygons', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| TongshenH/AuE8200_perception | hw3/test.py | test.py | py | 2,541 | python | en | code | 0 | github-code | 6 | 19130670937 |
from datetime import datetime
import docker
import mock
import unittest
from infra.services.android_docker import containers
class FakeDevice(object):
"""Mocks a usb_device.Device"""
def __init__(self, serial, physical_port):
self.serial = serial
self.physical_port = physical_port
self.major = 0
self.minor = 0
self.bus = 0
self.dev_file_path = ''
class FakeClient(object):
"""Mocks the client object returned from docker's client API.
containers.DockerClient wraps it. Mocked here to verify wrapper class
  behaves correctly.
"""
def __init__(self):
self.containers = None
class FakeContainer(object):
"""Used to mock containers.Container"""
def __init__(self, name, uptime=None):
self._container = FakeContainerBackend(name)
self.name = name
self.uptime = uptime
self.swarming_bot_killed = False
def get_container_uptime(self, now): # pylint: disable=unused-argument
return self.uptime
def kill_swarming_bot(self):
self.swarming_bot_killed = True
class FakeContainerBackend(object):
"""Mocks the container objects returned from docker's client API.
containers.Container wraps each one. Mocked here to verify the wrapper class
behaves correctly.
"""
def __init__(self, name):
self.name = name
self.was_deleted = False
self.was_started = False
self.is_paused = False
self.exec_outputs = []
self.exec_inputs = []
self.attrs = {}
def remove(self):
self.was_deleted = True
def start(self):
self.was_started = True
def pause(self):
assert not self.is_paused
self.is_paused = True
def unpause(self):
assert self.is_paused
self.is_paused = False
def exec_run(self, cmd):
self.exec_inputs.append(cmd)
return self.exec_outputs.pop(0)
class FakeContainerList(object):
"""Mocks the container list objects returned from docker's client API."""
def __init__(self, containers_list):
self._list = containers_list
def create(self, **kwargs):
return FakeContainerBackend(kwargs['name'])
def list(self, filters=None): # pylint: disable=unused-argument
return self._list
def get(self, name):
for c in self._list:
if c.name == name:
return c
raise docker.errors.NotFound('omg container missing')
class TestGetNames(unittest.TestCase):
def setUp(self):
self.device = FakeDevice('serial123', 1)
def test_container_name(self):
container_name = containers.get_container_name(self.device)
self.assertEqual(container_name, 'android_serial123')
@mock.patch('socket.gethostname')
def test_container_hostname(self, mock_gethostname):
mock_gethostname.return_value = 'build123-a4'
container_hostname = containers.get_container_hostname(self.device)
self.assertEqual(container_hostname, 'build123-a4--device1')
class TestDockerClient(unittest.TestCase):
def setUp(self):
self.fake_client = FakeClient()
self.container_names = ['android_serial1', 'android_serial2']
self.fake_client.containers = FakeContainerList(
[FakeContainerBackend(name) for name in self.container_names])
@mock.patch('docker.from_env')
def test_get_running_containers(self, mock_from_env):
mock_from_env.return_value = self.fake_client
running_containers = containers.DockerClient().get_running_containers()
self.assertEqual(
set(c.name for c in running_containers), set(self.container_names))
@mock.patch('docker.from_env')
def test_get_container(self, mock_from_env):
mock_from_env.return_value = self.fake_client
fake_device = FakeDevice('serial2', 2)
container = containers.DockerClient().get_container(fake_device)
self.assertEqual(container.name, 'android_serial2')
@mock.patch('docker.from_env')
def test_get_missing_container(self, mock_from_env):
mock_from_env.return_value = self.fake_client
fake_device = FakeDevice('missing_device', 1)
container = containers.DockerClient().get_container(fake_device)
self.assertEqual(container, None)
@mock.patch('docker.from_env')
def test_stop_old_containers(self, mock_from_env):
young_container = FakeContainer('young_container', uptime=10)
old_container = FakeContainer('old_container', uptime=999)
mock_from_env.return_value = self.fake_client
containers.DockerClient().stop_old_containers(
[young_container, old_container], 100)
self.assertFalse(young_container.swarming_bot_killed)
self.assertTrue(old_container.swarming_bot_killed)
@mock.patch('docker.from_env')
def test_delete_stopped_containers(self, mock_from_env):
mock_from_env.return_value = self.fake_client
containers.DockerClient().delete_stopped_containers()
self.assertTrue(
all(c.was_deleted for c in self.fake_client.containers.list()))
@mock.patch('docker.from_env')
def test_create_missing_containers(self, mock_from_env):
running_containers = [
FakeContainer('android_serial1'),
FakeContainer('android_serial2'),
]
devices = [
FakeDevice('serial1', 1),
FakeDevice('serial2', 2),
FakeDevice('serial3', 3),
]
self.fake_client.containers = FakeContainerList(running_containers)
mock_from_env.return_value = self.fake_client
needs_reboot = containers.DockerClient().create_missing_containers(
running_containers, devices, 'image')
# Ensure serial3 needs to be rebooted. This indicates that a new container
# was created for it.
self.assertEquals([d.serial for d in needs_reboot], ['serial3'])
class TestContainer(unittest.TestCase):
def setUp(self):
self.container_backend = FakeContainerBackend('container1')
self.container = containers.Container(self.container_backend)
def test_get_container_uptime(self):
now = datetime.strptime(
'2000-01-01T01:30:00.000000', '%Y-%m-%dT%H:%M:%S.%f')
self.container_backend.attrs = {
'State': {'StartedAt': '2000-01-01T00:00:00.0000000000'}
}
uptime = self.container.get_container_uptime(now)
self.assertEquals(uptime, 90)
def test_get_swarming_bot_pid(self):
self.container_backend.exec_outputs = ['123']
pid = self.container.get_swarming_bot_pid()
self.assertEquals(pid, 123)
def test_get_swarming_bot_pid_backend_error(self):
self.container_backend.exec_outputs = ['rpc error: omg failure']
pid = self.container.get_swarming_bot_pid()
self.assertEquals(pid, None)
def test_get_swarming_bot_pid_lsof_error(self):
self.container_backend.exec_outputs = ['omg lsof failure']
pid = self.container.get_swarming_bot_pid()
self.assertEquals(pid, None)
def test_kill_swarming_bot(self):
self.container_backend.exec_outputs = ['123', '']
self.container.kill_swarming_bot()
self.assertEquals(self.container_backend.exec_inputs[-1], 'kill -15 123')
def test_kill_swarming_bot_error(self):
self.container_backend.exec_outputs = ['omg failure']
self.container.kill_swarming_bot()
# Ensure nothing was killed when the bot's pid couldn't be found.
self.assertFalse(
any('kill -15' in cmd for cmd in self.container_backend.exec_inputs))
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.close')
@mock.patch('os.path.exists')
def test_add_device(self, mock_path_exists, mock_close, mock_write, mock_open,
mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = True
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['', '']
device = FakeDevice('serial1', 1)
device.major = 111
device.minor = 9
device.bus = 1
device.dev_file_path = '/dev/bus/usb/001/123'
self.container.add_device(device)
self.assertTrue('abc123' in mock_open.call_args[0][0])
# Ensure the device's major and minor numbers were written to the
# cgroup file.
self.assertEqual(mock_write.call_args[0][1], 'c 111:9 rwm')
self.assertTrue(mock_close.called)
self.assertFalse(self.container_backend.is_paused)
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.path.exists')
def test_add_device_missing_cgroup(self, mock_path_exists, mock_open,
mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = False
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['']
device = FakeDevice('serial1', 1)
self.container.add_device(device)
self.assertFalse(mock_open.called)
self.assertEquals(len(self.container_backend.exec_inputs), 1)
self.assertFalse(self.container_backend.is_paused)
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.close')
@mock.patch('os.path.exists')
def test_add_device_os_open_error(self, mock_path_exists, mock_close,
mock_write, mock_open, mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = True
mock_open.side_effect = OSError('omg open error')
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['']
device = FakeDevice('serial1', 1)
device.major = 111
device.minor = 9
self.container.add_device(device)
self.assertTrue('abc123' in mock_open.call_args[0][0])
self.assertFalse(mock_write.called)
self.assertFalse(mock_close.called)
self.assertEquals(len(self.container_backend.exec_inputs), 1)
self.assertFalse(self.container_backend.is_paused)
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.close')
@mock.patch('os.path.exists')
def test_add_device_os_write_error(self, mock_path_exists, mock_close,
mock_write, mock_open, mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = True
mock_write.side_effect = OSError('omg write error')
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['']
device = FakeDevice('serial1', 1)
device.major = 111
device.minor = 9
self.container.add_device(device)
self.assertTrue('abc123' in mock_open.call_args[0][0])
self.assertEquals(mock_write.call_args[0][1], 'c 111:9 rwm')
self.assertTrue(mock_close.called)
self.assertEquals(len(self.container_backend.exec_inputs), 1)
self.assertFalse(self.container_backend.is_paused)
| mithro/chromium-infra | infra/services/android_docker/test/containers_test.py | containers_test.py | py | 10,531 | python | en | code | 0 | github-code | 6 | 30798123556 |
# -*- coding: UTF-8 -*-
# Baidu face recognition: https://ai.baidu.com/ai-doc/FACE/ek37c1qiz#%E4%BA%BA%E8%84%B8%E6%A3%80%E6%B5%8B
from aip import AipFace
from config import BAIDU_ID, BAIDU_KEY, BAIDU_SECRET_KEY
'''
Baidu face recognition
Advantage: can be used for free; personal accounts are limited to 2 QPS, enterprise accounts to 10 QPS
'''
""" Your APP_ID, API key (AK) and secret key (SK) """
APP_ID = BAIDU_ID
API_KEY = BAIDU_KEY
SECRET_KEY = BAIDU_SECRET_KEY
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
"""
image = "取决于image_type参数,传入BASE64字符串或URL字符串或FACE_TOKEN字符串"
imageType = "BASE64"
# 调用人脸检测
client.detect(image, imageType)
# 如果有可选参数
options = dict()
options["face_field"] = "age"
options["max_face_num"] = 2
options["face_type"] = "LIVE"
options["liveness_control"] = "LOW"
# 带参数调用人脸检测
client.detect(image, imageType, options)
"""
if __name__ == "__main__":
url = "https://smartracing.oss-cn-hangzhou.aliyuncs.com/shared/images/profiles/full/1571196895035.jpg"
options = {
"face_field": "age,beauty,expression,face_shape,emotion"
}
res = client.detect(url, "URL", options)
face = res['result']['face_list'][0]
expression = {"none": "不笑", "smile": "微笑", "laugh": "大笑"}
face_shape = {"square":" 正方形", "triangle": "三角形", "oval": "椭圆", "heart": "心形", "round": "圆形"}
emotion = {"angry": "愤怒", "disgust": "厌恶", "fear": "恐惧", "happy": "高兴", "sad": "伤心", "surprise": "惊讶", "neutral": "无情绪"}
print(f"检测年龄:{face['age']},颜值:{face['beauty']},表情:{expression.get(face['expression']['type'])},脸型:{face_shape.get(face['face_shape']['type'])}, 情绪:{emotion.get(face['emotion']['type'])}")
| bobcjxin/rank_face | base_baidu.py | base_baidu.py | py | 1,773 | python | en | code | 0 | github-code | 6 | 4640722197 |
# -*- coding: utf-8 -*-
'''This example demonstrates how to extract named entities from text using default model.'''
from __future__ import unicode_literals, print_function
from pprint import pprint
from estnltk import Tokenizer, PyVabamorfAnalyzer, NerTagger
text = '''Eesti Vabariik on riik Põhja-Euroopas.
Eesti piirneb põhjas üle Soome lahe Soome Vabariigiga.
Riigikogu on Eesti Vabariigi parlament. Riigikogule kuulub Eestis seadusandlik võim.
2005. aastal sai peaministriks Andrus Ansip, kes püsis sellel kohal 2014. aastani.
2006. aastal valiti presidendiks Toomas Hendrik Ilves.
'''
tokenizer = Tokenizer()
analyzer = PyVabamorfAnalyzer()
tagger = NerTagger()
# tag the documents
ner_tagged = tagger(analyzer(tokenizer(text)))
# print the words and their explicit labels in BIO notation
pprint(list(zip(ner_tagged.word_texts, ner_tagged.labels)))
# print words grouped as named entities
pprint(ner_tagged.named_entities)
| keeleleek/estnltk | estnltk/examples/old/ner_tag.py | ner_tag.py | py | 945 | python | et | code | null | github-code | 6 | 29841696751 |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 8 16:09:50 2022
@author: Owner
"""
from Hamiltonian import Hamiltonian
import numpy as np
import math
import matplotlib.pyplot as plt
import copy
from Hamiltonian import Hamiltonian
from Fock_vector import fock_vector
import Ryser_Algorithm as ryser
import config as configs
from numpy import linalg as la
from decimal import Decimal
from Fock_vector import fock_vector
from Disc_Hamiltonian import disc_Hamiltonian
params = {
'axes.labelsize': 35,
'font.size': 35,
'legend.fontsize': 35,
'lines.linewidth' : 2,
'lines.markersize' : 35,
'xtick.labelsize': 35,
'ytick.labelsize': 35,
'figure.figsize': [40, 20]
}
plt.rcParams.update(params)
def Dirac_Delta(a, b):
'''
Simple delta function
'''
if (a == b):
return 1
else:
return 0
class non_rotatingHamiltonian(Hamiltonian):
'''
Core class for impelementing Full-Configuration Interaction
Hamiltonians in bosonic Fock space basis for non-rotating pancakes
'''
def __init__(self,N, M, S, length_ratio=10, L=0):
super().__init__(N,M)
self.tolerance = 1e-10
        self.L = L  # Restrict total angular momentum for each Fock vector
self.S = S
assert M == 2*S + 1
self.M = M
# Set interaction energy scale to 1
self.V0 = 1
self.lengthratio = length_ratio # = (a_z/a_s: trap length/scattering length)
# Scale kinetic energy scale accordingly
self.T0 = (np.pi**2)*self.lengthratio
self.condensate_fraction = None # No. excitations in lowest SP state
self.GP_amplitude = None # Weight of Gross-Pitaevskii (fully condensed) permanent
self.GP_index = None
self.MF_perm = None # Amplitude of dominant permanent in FCI expansion
self.MF_energy = None # Energy content of dominant permanent in FCI expansion
self.MF_amplitude = None # Amplitude of Gross-Pitaevskii permanent
def generate_basis(self):
'''
Generate many-body basis states from repeated combinations
and index them
'''
print('Basis generation...')
configs.sphere_config_fast(int(self.N), int(self.M), int(self.L), int(self.S))
index = 0
file='Sphere_Configurations_N%dM%dL%dS%d.txt'%(self.N, self.M, self.L, self.S)
print('Reading in configurations...')
with open(file, 'r') as f:
for line in f:
split_line = line.split()
if (split_line[0]=='N,'):
continue
N = split_line[0]
M = split_line[1]
L = split_line[2]
S = split_line[3]
basis = []
config = split_line[4:]
#print(config)
for item in config:
#print(item)
basis.append(int(item))
#print(N, M, L, len(config), len(basis))
#print(config, basis)
#print(self.N, N, self.M, M)
assert int(N) == self.N and int(M) == self.M and int(M) == 2*(self.S) +1
vector = fock_vector(int(N), int(M), np.array(basis), int(S))
assert vector.ang_mom() == self.L
vector = fock_vector(int(N), int(M), np.array(basis), S= int(S), index=index)
self.basis.append(vector)
if (self.S in vector.occup_basis):
if (vector.occups[self.S] == self.N):
self.GP_index = index
index += 1
if (index % 100 == 0):
print('No. basis read-in ', index)
print('Basis generation complete')
        self.basis = np.array(self.basis)
        self.fock_size = index
        print('Fock space size: ', self.fock_size)
self.many_body_H = np.zeros((self.fock_size, self.fock_size))
def matrix_overlap(self, i, j, k, l):
'''
Construct many-body overlap matrix for disc Hamiltonian
'''
self.additionalfactor = 1
if (i+j != k+l):
return 0
else:
return self.V0*self.additionalfactor
def kineticterm(self, i):
return self.T0*((i-self.S)**2)
def diag_entry(self, basis):
'''
Returns diagonal entry for contact repulsion Hamiltonian
'''
assert len(self.basis) == self.fock_size # Check if basis generation has not been invoked
diag_element = 0
occup_basis = np.sort(basis.occup_basis)
#print(basis.print_info())
#print(occup_basis)
for index in range(len(occup_basis)):
i = occup_basis[index]
diag_element += self.kineticterm(i)*basis.occups[i]
if basis.occups[i] > 1:
#print(i)
# Half factor comes from Hamiltonian definition
diag_element += 0.5*self.matrix_overlap(i, i, i, i)\
*basis.occups[i]*(basis.occups[i]-1)
# we only have to consider non-equal i, j pairs as symmetry
# gives equal elements for ijij jiij, ijji, jiji basis indices
for jndex in range(index+1, len(occup_basis)):
j = occup_basis[jndex]
#print(i, j)
diag_element += 2*self.matrix_overlap(i, j, i, j)\
*basis.occups[i]*(basis.occups[j])
return diag_element
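    # The entries assembled in diag_entry and construct_off_diag_entries appear to
    # realise a contact-interaction Hamiltonian in the shifted single-particle basis
    # m = 0..M-1 (angular momentum m - S), roughly
    #     H = sum_m T0*(m - S)**2 * n_m  +  (V0/2) * sum_{i+j=k+l} a_k^dag a_l^dag a_i a_j,
    # with index conservation i + j = k + l enforced by matrix_overlap. This reading is
    # inferred from the code itself, not taken from the original comments.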
def construct_off_diag_entries(self, basis):
off_diag_element = 0
occup_basis = np.sort(basis.occup_basis)
new_occups = np.zeros(self.M)
for index in range(len(occup_basis)):
i = occup_basis[index]
for jndex in range(index, len(occup_basis)):
j = occup_basis[jndex]
for k in range(self.M):
new_occups = np.zeros(self.M)
new_basis_index = None
l = i + j - k
if (l >= self.M or l < 0):
continue
if (k == i or k == j or l == k or l == i or l == j):
continue
if (i != j):
# Copy basis occupation
for q in basis.occup_basis:
new_occups[q] = basis.occups[q]
# Construct basis with non-zero entry
new_occups[i] = basis.occups[i] - 1
new_occups[j] = basis.occups[j] - 1
if (k in basis.occups):
new_occups[k] = basis.occups[k] + 1
else:
new_occups[k] = 1
if (l in basis.occups):
new_occups[l] = basis.occups[l] + 1
else:
new_occups[l] = 1
new_fock = fock_vector(self.N, self.M, new_occups)
new_basis_index = None
# Search newly constructed basis index
for basis2 in self.basis:
if basis2.occups == new_fock.occups:
if (basis2.index != basis.index):
new_basis_index = basis2.index
break
if (new_basis_index is None):
print('New basis not in Hamiltonian space')
print(new_fock.print_info)
self.show_basis()
assert 0
# Assign matrix element
self.many_body_H[basis.index, new_basis_index] = \
2*np.sqrt(basis.occups[i]*basis.occups[j]*new_occups[k]*new_occups[l])*self.matrix_overlap(i, j, k, l)
self.many_body_H[new_basis_index, basis.index] = self.many_body_H[basis.index, new_basis_index]
else:
if (basis.occups[i] < 2):
continue
# Construct basis with non-zero entry for i = j
for q in basis.occup_basis:
new_occups[q] = basis.occups[q]
# See Wilkin paper for angular momentum transfer rules
new_occups[i] = basis.occups[i] - 2
if (k in basis.occups):
new_occups[k] = basis.occups[k] + 1
else:
new_occups[k] = 1
if (l in basis.occups):
new_occups[l] = basis.occups[l] + 1
else:
new_occups[l] = 1
new_fock = fock_vector(self.N, self.M, new_occups)
new_basis_index = None
# Search newly constructed basis index
for basis2 in self.basis:
if basis2.occups == new_fock.occups:
if (basis2.index != basis.index):
new_basis_index = basis2.index
break
if (new_basis_index is None):
print('New basis not in Hamiltonian space')
print(new_fock.print_info)
self.show_basis()
assert 0
# Assign matrix element
self.many_body_H[basis.index, new_basis_index] = \
np.sqrt(basis.occups[i]*(basis.occups[i]-1)*new_occups[k]*new_occups[l])*self.matrix_overlap(i, j, k, l)
self.many_body_H[new_basis_index, basis.index] = self.many_body_H[basis.index, new_basis_index]
def construct_Hamiltonian_fast(self):
# Wilkin exact eigenstates paper prescription
assert len(self.basis) == self.fock_size # Check if basis generation has not been invoked
# Diagonal entries
#print(self.basis)
print('Hamiltonian construction...')
print('Fock size: ', self.fock_size)
counter = 1
for basis in self.basis:
self.many_body_H[basis.index, basis.index] = self.diag_entry(basis)
self.construct_off_diag_entries(basis)
if (counter % 100 == 0):
print('Fast Hamiltonian construction progress [%] ', (counter/self.fock_size)*100)
counter += 1
def ground_state_analysis(self):
# Index of MF permanent
#print(np.max(self.e_vector_ground.T))
#print(self.e_vector_ground[0])
max_index = np.where(max(self.e_vector_ground[0], key=abs) == self.e_vector_ground[0])[0][0]
print('max index', max_index)
self.MF_perm = self.basis[max_index]
self.MF_amplitude = self.e_vector_ground[0][max_index]
print('Mean-field permanent info')
print(self.MF_perm.print_info())
print('Amplitude: ', self.MF_amplitude)
self.GP_amplitude = self.e_vector_ground[0][self.GP_index]
#print('All permanents:', self.e_vector_ground[0])
print('Gross-Pitaevskii permanent amplitude: ', self.GP_amplitude)
#print('Permanent energy: ', self.many_body_H[max_index, max_index])
#print('MF energy / E0: ', self.many_body_H[max_index, max_index]/self.e_ground)
# Calculating condensate fraction
self.condensate_fraction = 0
for index, amplitude in enumerate(self.e_vector_ground.squeeze()):
if (self.S not in self.basis[index].occup_basis):
continue
else:
self.condensate_fraction += \
abs(amplitude)**2 * self.basis[index].occups[self.S]/self.N
print('Expected condensate fraction: ', self.condensate_fraction)
print('Condensate depletion: ', 1-self.condensate_fraction)
return self.MF_amplitude, self.GP_amplitude, self.condensate_fraction
def check_degeneracy(self):
'''
Find degeneracies within spectrum
'''
self.degen_evalues = []
self.degen_evectors = []
for i in range(len(self.evalues)):
self.degen_evalues.append([i])
self.degen_evectors.append([self.evectors.T[i]])
for j in range(i+1, len(self.evalues)):
if abs(self.evalues[i] - self.evalues[j]) <= self.tolerance:
self.degen_evalues[-1].append(j)
self.degen_evectors[-1].append(self.evectors.T[j])
degeneracy = np.zeros(len(self.evalues))
for i in range(len(self.evalues)):
degeneracy[i] = len(self.degen_evalues[i])
plt.title('Degeneracy of spectrum\n'+\
'Disc geometry\nN = %d M = %d L = %d'%(self.N, self.M, self.L))
plt.bar(x=np.arange(1, self.fock_size+1), height=degeneracy)
plt.xlabel('Sorted eigenstate index')
plt.ylabel('Degeneracy')
plt.grid()
plt.legend()
plt.savefig('BEC_Degeneracy_N%d_M%d_S%d_L%d.jpeg'%(self.N, self.M, self.S, self.L))
plt.close()
plt.title('Eigenvalue spectrum\n'+\
'Disc geometry\nN = %d M = %d L = %d'%(self.N, self.M, self.L))
nT, binsT, patchesT = plt.hist(x=self.evalues, bins=15, color='red',
alpha=0.7, rwidth=0.85, label='FCI Spectrum')
plt.xlabel('Eigenvalues [$V_0$]')
plt.ylabel('Degeneracy')
plt.legend()
plt.grid()
plt.savefig('BEC_Spectrum_N%d_M%d_S%d_L%d.jpeg'%(self.N, self.M, self.S, self.L))
plt.close()
assert (self.evalues.min() == self.evalues[0])
assert (self.fock_size == len(self.evalues))
for ground_index in range(len(self.degen_evalues[0])):
print(len(self.degen_evectors[0]), len(self.degen_evalues[0]))
assert len(self.degen_evectors[0]) == len(self.degen_evalues[0])
#print(self.degen_evectors[0][ground_index])
print(len(self.degen_evectors[0][ground_index]))
print(ground_index)
plt.figure(ground_index)
plt.title('Degenerate ground state configuration index %d \n'%(ground_index)+\
'Disc geometry\nN = %d M = %d L = %d'%(self.N, self.M, self.L))
plt.bar(x=np.arange(1, self.fock_size+1), height=self.degen_evectors[0][ground_index][0])
#nT, binsT, patchesT =\
#plt.hist(x=self.degen_evectors[0][ground_index],bins=self.fock_size, color='red',alpha=0.7, rwidth=0.85, label='Full Configuration')
plt.xlabel('Many-body basis index')
plt.ylabel('Amplitude')
plt.grid()
plt.legend()
plt.savefig('BEC_Ground_Config_i%d_N%d_M%d_S%d_L%d.jpeg'%(ground_index, self.N, self.M,self.S, self.L))
plt.close()
#print('Degenerate evector indices')
#print(self.degen_evalues)
#print(self.degen_evectors)
'''
H = non_rotatingHamiltonian(N=3,S=1,M=3)
H.generate_basis()
H.construct_Hamiltonian_fast()
#H.print_matrix(H.many_body_H)
evalues, evecs = H.diagonalise()
print('Hamiltonian eigenvalues [V0]')
print(evalues)
print('Ground state energy [V0] ', H.e_ground)
print('Ground state configuration', H.e_vector_ground)
#H.show_basis()
MF_amp, GP_amp = H.ground_state_analysis()
print(MF_amp, GP_amp)
#H.check_sign_problem()
H.check_degeneracy()
'''
| ahadriaz99/MSci-Project | NonRotatingDisc.py | NonRotatingDisc.py | py | 16,186 | python | en | code | 0 | github-code | 6 | 34001792552 |
words_dict = {'proper' : '적절한',
'possible' : '가능한',
'moral' : '도덕적인',
'patient' : '참을성 있는',
'balance' : '균형',
'perfect' : '완벽한',
'logical' : '논리적인',
'legal' : '합법적인',
'relevant' : '관련 있는',
'responsible' : '책임감 있는',
'regular' : '규칙적인'}
# correct : 'accurate' -> prepend 'in' to make the opposite (incorrect)
# In the cases below, a modified prefix is attached instead of 'in':
# 1. if the word starts with b, m, or p -> im
# 2. if the word starts with l -> il
# 3. if the word starts with r -> ir
def get_opposite(words_dict): # words_dict holds the words we want to turn into their opposites (passed in as a dictionary)
    # The opposites will be collected and returned in dictionary form,
    opposite_dict = {}
    # key : original word / value : opposite word
    for word in words_dict.keys():
        # To apply the rule we need to know which letter the original word starts with.
        start = word[0]
        # The word with the opposite meaning that will be stored
        opposite = ""
        # Use the conditionals to check which letter 'start' is, then apply the rule
        if start == "b" or start == "m" or start == "p":
            opposite = "im" + word
        elif start == "l":
            opposite = "il" + word
        elif start == "r":
            opposite = "ir" + word
        else:
            opposite = "in" + word
        # Once the opposite is built, add it to the dictionary.
        opposite_dict[word] = opposite # pair the key 'word' with the value 'opposite'
    sorted_result = sorted(opposite_dict.items(), key=lambda item: item[1])
    # From the dictionary made of original word : opposite word pairs,
    # items() -> (key, value)
    # item = (key, value)
    # item[0] key
    # item[1] value <- sort by this
    # i.e. sort by the opposite word (the value)
return sorted_result
print(get_opposite(words_dict))
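# For reference, sorted() returns a list of (word, opposite) tuples ordered
# alphabetically by the opposite word, so the printed result starts with
# [('legal', 'illegal'), ('logical', 'illogical'), ('balance', 'imbalance'), ...]
# and ends with ('responsible', 'irresponsible').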
|
jinmoonJ/algorithm
|
0726/05_05_t.py
|
05_05_t.py
|
py
| 2,135 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
14076978576
|
'''
Write a python server program that
0. initializes a socket connection on localhost and port 10000
1. accepts a connection from a client
2. receives a "Hi <name>" message from the client
3. generates a random number and keeps it a secret
4. sends a message "READY" to the client
5. waits for the client to send a guess
6. checks if the number is
6.1 equal to the secret then it should send a message "Correct! <name> took X attempts to guess the secret"
6.2 send a message "HIGH" if the guess is greater than the secret
6.3 send a message "LOW" if the guess is lower than the secret
7. closes the client connection and waits for the next one
'''
import socket
from random import randint
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('localhost',10000))
server_socket.listen(1)
# while True:
# connection, clietn_address = server_socket.accept()
# print("Connetion established!")
# while True:
# client_data = connection.recv(1024)
# if (client_data != "q" and len(client_data) > 0):
# print(client_data.decode("utf-8"))
# connection.send(client_data)
# else:
# print(client_data.decode("utf-8"))
# connection.send(client_data)
# break
# if (client_data != 'q'):
# connection.close()
while True:
connection, client_address = server_socket.accept()
    count = 0
    name = connection.recv(1024).decode("utf-8")[3:]  # keep only the name, dropping the "Hi " prefix
    print(name)
    value = randint(0, 100)
    print("value = ", value)
connection.send("READY".encode("utf-8"))
while True:
client_data = int(connection.recv(1024).decode("utf-8"))
count += 1
if client_data == value:
            connection.send(f"Correct! {name} took {count} attempts to guess the secret".encode("utf-8"))
break
elif value > client_data:
connection.send("LOW".encode("utf-8"))
else:
connection.send("HIGH".encode("utf-8"))
connection.close()
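# A minimal companion client sketch (hypothetical, not part of the original file;
# the name guessing_client and its defaults are assumptions). It follows the
# protocol in the docstring above: send "Hi <name>", wait for "READY", then narrow
# the guess by binary search using the server's HIGH/LOW replies. In practice it
# would live in its own script, since the server loop above never returns.
def guessing_client(name="Alice", host="localhost", port=10000):
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect((host, port))
    client_socket.send(f"Hi {name}".encode("utf-8"))
    client_socket.recv(1024)  # wait for the "READY" message
    low, high = 0, 100        # randint(0, 100) is inclusive on both ends
    while True:
        guess = (low + high) // 2
        client_socket.send(str(guess).encode("utf-8"))
        reply = client_socket.recv(1024).decode("utf-8")
        if reply == "HIGH":    # our guess is above the secret
            high = guess - 1
        elif reply == "LOW":   # our guess is below the secret
            low = guess + 1
        else:                  # the "Correct! ..." message
            print(reply)
            break
    client_socket.close()
# Example (run from a separate process while the server is listening):
# guessing_client("Bob")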
|
koterupanchajanyareddy/KPJ
|
guessing_server/guessing_server.py
|
guessing_server.py
|
py
| 1,956 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3492737799
|
"""Provides functional layers for the model"""
import numpy as np
import torch
import torch.nn.functional as F
from common_types import Tensor, Union, _float, _int
from torch.types import Device, _size
_opt_arg = Union[_int, _size]
_opt_tensor = Union[Tensor, None]
def conv2d(x: Tensor,
weight: Tensor,
bias: _opt_tensor = None,
device: Device = 'cpu',
stride: _opt_arg = 1,
padding: _opt_arg = 0,
dilation: _opt_arg = 1,
groups: _int = 1) -> Tensor:
    # bias may be None (the default), so only move it to the device when present
    bias = bias.to(device) if bias is not None else None
    return F.conv2d(x, weight.to(device), bias, stride, padding,
                    dilation, groups)
def batchnorm(x: Tensor,
              weight: _opt_tensor = None,
bias: _opt_tensor = None,
device: Device = 'cpu',
running_mean: _opt_tensor = None,
running_var: _opt_tensor = None,
training: bool = True,
eps: _float = 1e-5,
momentum: _float = 0.1) -> Tensor:
''' momentum = 1 restricts stats to the current mini-batch '''
# This hack only works when momentum is 1 and avoids needing to track
    # running stats by substituting dummy variables
running_mean = torch.zeros(np.prod(np.array(x.data.size()[1]))).to(device)
running_var = torch.ones(np.prod(np.array(x.data.size()[1]))).to(device)
return F.batch_norm(x, running_mean, running_var, weight, bias, training,
momentum, eps)
def leaky_relu(x: Tensor, negative_slope: _float = 0.01) -> Tensor:
return F.leaky_relu(x, negative_slope, True)
def pixel_shuffle(x: Tensor, scale: _int):
return F.pixel_shuffle(x, scale)
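# A minimal usage sketch (not part of the original module; shapes are illustrative
# assumptions). These wrappers take their parameters explicitly, which is convenient
# when weights are generated or adapted outside an nn.Module, e.g. in meta-learning
# inner loops.
if __name__ == '__main__':
    x = torch.randn(2, 3, 32, 32)                    # NCHW input batch
    w = torch.randn(16, 3, 3, 3)                     # conv kernel: 16 out, 3 in, 3x3
    b = torch.zeros(16)
    y = conv2d(x, w, b, padding=1)                   # -> (2, 16, 32, 32)
    y = batchnorm(y, weight=torch.ones(16), bias=torch.zeros(16))
    y = leaky_relu(y)
    y = conv2d(y, torch.randn(12, 16, 3, 3), torch.zeros(12), padding=1)
    y = pixel_shuffle(y, 2)                          # 12 = 3 * 2**2 -> (2, 3, 64, 64)
    print(y.shape)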
|
RashedDoha/meta-drn-pytorch
|
model/layers.py
|
layers.py
|
py
| 1,647 |
python
|
en
|
code
| 0 |
github-code
|
6
|